
svm_learn.c

Collection: Support Vector Machine (SVM) source code for machine learning
Language: C
Page 1 of 5
    model->b=0;
    b_calculated=1;
  }
  for(ii=0;(i=working2dnum[ii])>=0;ii++) {
    if((a_old[i]>0) && (a[i]==0)) { /* remove from model */
      pos=model->index[i];
      model->index[i]=-1;
      (model->sv_num)--;
      model->supvec[pos]=model->supvec[model->sv_num];
      model->alpha[pos]=model->alpha[model->sv_num];
      model->index[(model->supvec[pos])->docnum]=pos;
    }
    else if((a_old[i]==0) && (a[i]>0)) { /* add to model */
      model->supvec[model->sv_num]=&(docs[i]);
      model->alpha[model->sv_num]=a[i]*(double)label[i];
      model->index[i]=model->sv_num;
      (model->sv_num)++;
    }
    else if(a_old[i]==a[i]) { /* nothing to do */
    }
    else {  /* just update alpha */
      model->alpha[model->index[i]]=a[i]*(double)label[i];
    }

    ex_c=learn_parm->svm_cost[i]-learn_parm->epsilon_a;
    if((a_old[i]>=ex_c) && (a[i]<ex_c)) {
      (model->at_upper_bound)--;
    }
    else if((a_old[i]<ex_c) && (a[i]>=ex_c)) {
      (model->at_upper_bound)++;
    }

    if((!b_calculated)
       && (a[i]>learn_parm->epsilon_a) && (a[i]<ex_c)) {   /* calculate b */
      model->b=((double)label[i]*learn_parm->eps-c[i]+lin[i]);
      /* model->b=(-(double)label[i]+lin[i]); */
      b_calculated=1;
    }
  }

  /* No alpha in the working set not at bounds, so b was not
     calculated in the usual way. The following handles this special
     case. */
  if(learn_parm->biased_hyperplane
     && (model->sv_num-1 == model->at_upper_bound)) {
    first_low=1;
    first_high=1;
    b_low=0;
    b_high=0;
    for(ii=0;(i=active2dnum[ii])>=0;ii++) {
      ex_c=learn_parm->svm_cost[i]-learn_parm->epsilon_a;
      if(a_old[i]<ex_c) {
        if(label[i]>0)  {
          b_temp=-(learn_parm->eps-c[i]+lin[i]);
          if((b_temp>b_low) || (first_low)) {
            b_low=b_temp;
            first_low=0;
          }
        }
        else {
          b_temp=-(-learn_parm->eps-c[i]+lin[i]);
          if((b_temp<b_high) || (first_high)) {
            b_high=b_temp;
            first_high=0;
          }
        }
      }
      else {
        if(label[i]<0)  {
          b_temp=-(-learn_parm->eps-c[i]+lin[i]);
          if((b_temp>b_low) || (first_low)) {
            b_low=b_temp;
            first_low=0;
          }
        }
        else {
          b_temp=-(learn_parm->eps-c[i]+lin[i]);
          if((b_temp<b_high) || (first_high)) {
            b_high=b_temp;
            first_high=0;
          }
        }
      }
    }
    model->b=-(b_high+b_low)/2.0;  /* select b as the middle of range */
    /* printf("\nb_low=%lf, b_high=%lf,b=%lf\n",b_low,b_high,model->b); */
  }

  if(verbosity>=3) {
    printf("done\n"); fflush(stdout);
  }
  return(model->sv_num-1); /* have to subtract one, since element 0 is empty */
}
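/* A note on the bias computed above, derived from the code rather than
   stated in it: for a free support vector (epsilon_a < a[i] < ex_c) the
   KT equality label[i]*(lin[i]-b) = -(eps - label[i]*c[i]) holds, and
   multiplying through by label[i] (using label[i]^2 = 1) gives exactly
   the assignment used in the loop:
       b = label[i]*eps - c[i] + lin[i]
   When every support vector sits at the upper bound, no such equality
   is available, so b is only bracketed by inequality constraints; the
   special-case block collects the tightest bounds b_low/b_high over the
   active examples and places b at the middle of the feasible range. */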
long check_optimality(MODEL *model, long int *label, long int *unlabeled,
                      double *a, double *lin, double *c, long int totdoc,
                      LEARN_PARM *learn_parm, double *maxdiff,
                      double epsilon_crit_org, long int *misclassified,
                      long int *inconsistent, long int *active2dnum,
                      long int *last_suboptimal_at,
                      long int iteration, KERNEL_PARM *kernel_parm)
     /* Check KT-conditions */
{
  long i,ii,retrain;
  double dist,ex_c,target;

  if(kernel_parm->kernel_type == LINEAR) {  /* be optimistic */
    learn_parm->epsilon_shrink=-learn_parm->epsilon_crit+epsilon_crit_org;
  }
  else {  /* be conservative */
    learn_parm->epsilon_shrink=learn_parm->epsilon_shrink*0.7+(*maxdiff)*0.3;
  }
  retrain=0;
  (*maxdiff)=0;
  (*misclassified)=0;
  for(ii=0;(i=active2dnum[ii])>=0;ii++) {
    if((!inconsistent[i]) && label[i]) {
      dist=(lin[i]-model->b)*(double)label[i]; /* 'distance' from hyperplane */
      target=-(learn_parm->eps-(double)label[i]*c[i]);
      ex_c=learn_parm->svm_cost[i]-learn_parm->epsilon_a;
      if(dist <= 0) {
        (*misclassified)++;  /* does not work due to deactivation of var */
      }
      if((a[i]>learn_parm->epsilon_a) && (dist > target)) {
        if((dist-target)>(*maxdiff))  /* largest violation */
          (*maxdiff)=dist-target;
      }
      else if((a[i]<ex_c) && (dist < target)) {
        if((target-dist)>(*maxdiff))  /* largest violation */
          (*maxdiff)=target-dist;
      }
      /* Count how long a variable was at lower/upper bound (and optimal).*/
      /* Variables, which were at the bound and optimal for a long */
      /* time are unlikely to become support vectors. In case our */
      /* cache is filled up, those variables are excluded to save */
      /* kernel evaluations. (See chapter 'Shrinking').*/
      if((a[i]>(learn_parm->epsilon_a))
         && (a[i]<ex_c)) {
        last_suboptimal_at[i]=iteration;         /* not at bound */
      }
      else if((a[i]<=(learn_parm->epsilon_a))
              && (dist < (target+learn_parm->epsilon_shrink))) {
        last_suboptimal_at[i]=iteration;         /* not likely optimal */
      }
      else if((a[i]>=ex_c)
              && (dist > (target-learn_parm->epsilon_shrink)))  {
        last_suboptimal_at[i]=iteration;         /* not likely optimal */
      }
    }
  }
  /* termination criterion */
  if((!retrain) && ((*maxdiff) > learn_parm->epsilon_crit)) {
    retrain=1;
  }
  return(retrain);
}
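/* For reference, a sketch of the KT (Karush-Kuhn-Tucker) conditions the
   routine above checks, written in its own quantities
   dist = label[i]*(lin[i]-b) and target = -(eps - label[i]*c[i]):
       a[i] = 0      =>  dist >= target
       0 < a[i] < C  =>  dist  = target        (C = svm_cost[i])
       a[i] = C      =>  dist <= target
   a[i] is compared against epsilon_a and ex_c = C - epsilon_a rather
   than 0 and C to allow numerical slack. In the plain classification
   setting target plays the role of the margin 1, giving the familiar
   form a[i]=0 => label[i]*f(x_i) >= 1 and a[i]=C => label[i]*f(x_i) <= 1,
   where f denotes the decision function (the notation f is ours, not
   the code's). maxdiff records the largest violation found; retraining
   stops only once it falls below epsilon_crit. */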
long identify_inconsistent(double *a, long int *label,
                           long int *unlabeled, long int totdoc,
                           LEARN_PARM *learn_parm,
                           long int *inconsistentnum, long int *inconsistent)
{
  long i,retrain;

  /* Throw out examples with multipliers at upper bound. This */
  /* corresponds to the -i 1 option. */
  /* ATTENTION: this is just a heuristic for finding a close */
  /*            to minimum number of examples to exclude to */
  /*            make the problem separable with desired margin */
  retrain=0;
  for(i=0;i<totdoc;i++) {
    if((!inconsistent[i]) && (!unlabeled[i])
       && (a[i]>=(learn_parm->svm_cost[i]-learn_parm->epsilon_a))) {
      (*inconsistentnum)++;
      inconsistent[i]=1;  /* never choose again */
      retrain=2;          /* start over */
      if(verbosity>=3) {
        printf("inconsistent(%ld)..",i); fflush(stdout);
      }
    }
  }
  return(retrain);
}

long identify_misclassified(double *lin, long int *label,
                            long int *unlabeled, long int totdoc,
                            MODEL *model, long int *inconsistentnum,
                            long int *inconsistent)
{
  long i,retrain;
  double dist;

  /* Throw out misclassified examples. This */
  /* corresponds to the -i 2 option. */
  /* ATTENTION: this is just a heuristic for finding a close */
  /*            to minimum number of examples to exclude to */
  /*            make the problem separable with desired margin */
  retrain=0;
  for(i=0;i<totdoc;i++) {
    dist=(lin[i]-model->b)*(double)label[i]; /* 'distance' from hyperplane */
    if((!inconsistent[i]) && (!unlabeled[i]) && (dist <= 0)) {
      (*inconsistentnum)++;
      inconsistent[i]=1;  /* never choose again */
      retrain=2;          /* start over */
      if(verbosity>=3) {
        printf("inconsistent(%ld)..",i); fflush(stdout);
      }
    }
  }
  return(retrain);
}

long identify_one_misclassified(double *lin, long int *label,
                                long int *unlabeled,
                                long int totdoc, MODEL *model,
                                long int *inconsistentnum,
                                long int *inconsistent)
{
  long i,retrain,maxex=-1;
  double dist,maxdist=0;

  /* Throw out the 'most misclassified' example. This */
  /* corresponds to the -i 3 option. */
  /* ATTENTION: this is just a heuristic for finding a close */
  /*            to minimum number of examples to exclude to */
  /*            make the problem separable with desired margin */
  retrain=0;
  for(i=0;i<totdoc;i++) {
    if((!inconsistent[i]) && (!unlabeled[i])) {
      dist=(lin[i]-model->b)*(double)label[i]; /* 'distance' from hyperplane */
      if(dist<maxdist) {
        maxdist=dist;
        maxex=i;
      }
    }
  }
  if(maxex>=0) {
    (*inconsistentnum)++;
    inconsistent[maxex]=1;  /* never choose again */
    retrain=2;              /* start over */
    if(verbosity>=3) {
      printf("inconsistent(%ld)..",maxex); fflush(stdout); /* was 'i', which
        is totdoc after the loop; print the excluded example instead */
    }
  }
  return(retrain);
}

void update_linear_component(DOC *docs, long int *label,
                             long int *active2dnum, double *a,
                             double *a_old, long int *working2dnum,
                             long int totdoc, long int totwords,
                             KERNEL_PARM *kernel_parm,
                             KERNEL_CACHE *kernel_cache,
                             double *lin, float *aicache, double *weights)
     /* keep track of the linear component */
     /* lin of the gradient etc. by updating */
     /* based on the change of the variables */
     /* in the current working set */
{
  register long i,ii,j,jj;
  register double tec;

  if(kernel_parm->kernel_type==0) { /* special linear case */
    clear_vector_n(weights,totwords);
    for(ii=0;(i=working2dnum[ii])>=0;ii++) {
      if(a[i] != a_old[i]) {
        add_vector_ns(weights,docs[i].words,
                      ((a[i]-a_old[i])*(double)label[i]));
      }
    }
    for(jj=0;(j=active2dnum[jj])>=0;jj++) {
      lin[j]+=sprod_ns(weights,docs[j].words);
    }
  }
  else {                            /* general case */
    for(jj=0;(i=working2dnum[jj])>=0;jj++) {
      if(a[i] != a_old[i]) {
        get_kernel_row(kernel_cache,docs,i,totdoc,active2dnum,aicache,
                       kernel_parm);
        for(ii=0;(j=active2dnum[ii])>=0;ii++) {
          tec=aicache[j];
          lin[j]+=(((a[i]*tec)-(a_old[i]*tec))*(double)label[i]);
        }
      }
    }
  }
}
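/* What update_linear_component maintains, as a worked equation: for each
   active example j,
       lin[j] = sum_i a[i]*label[i]*K(x_i, x_j),
   so when only the working set W changes, updating the delta suffices:
       lin[j] += sum_{i in W} (a[i]-a_old[i])*label[i]*K(x_i, x_j).
   The linear-kernel branch folds this inner sum into one accumulated
   weight vector (built with clear_vector_n/add_vector_ns), so each
   lin[j] costs a single sparse dot product sprod_ns(...) instead of
   |W| kernel evaluations; the general branch instead fetches one cached
   kernel row per changed variable. */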
long incorporate_unlabeled_examples(MODEL *model, long int *label,
                                    long int *inconsistent,
                                    long int *unlabeled,
                                    double *a, double *lin,
                                    long int totdoc, double *selcrit,
                                    long int *select, long int *key,
                                    long int transductcycle,
                                    KERNEL_PARM *kernel_parm,
                                    LEARN_PARM *learn_parm)
{
  long i,j,k,j1,j2,j3,j4,unsupaddnum1=0,unsupaddnum2=0;
  long pos,neg,upos,uneg,orgpos,orgneg,nolabel,newpos,newneg,allunlab;
  double dist,model_length,posratio,negratio;
  long check_every=2;
  double loss;
  static double switchsens=0.0,switchsensorg=0.0;
  double umin,umax,sumalpha;
  long imin=0,imax=0;
  static long switchnum=0;

  switchsens/=1.2;

  /* assumes that lin[] is up to date -> no inactive vars */

  orgpos=0;
  orgneg=0;
  newpos=0;
  newneg=0;
  nolabel=0;
  allunlab=0;
  for(i=0;i<totdoc;i++) {
    if(!unlabeled[i]) {
      if(label[i] > 0) {
        orgpos++;
      }
      else {
        orgneg++;
      }
    }
    else {
      allunlab++;
      if(unlabeled[i]) {
        if(label[i] > 0) {
          newpos++;
        }
        else if(label[i] < 0) {
          newneg++;
        }
      }
    }
    if(label[i]==0) {
      nolabel++;
    }
  }

  if(learn_parm->transduction_posratio >= 0) {
    posratio=learn_parm->transduction_posratio;
  }
  else {
    posratio=(double)orgpos/(double)(orgpos+orgneg); /* use ratio of pos/neg */
  }                                                  /* in training data */
  negratio=1.0-posratio;

  learn_parm->svm_costratio=1.0;                     /* global */
  if(posratio>0) {
    learn_parm->svm_costratio_unlab=negratio/posratio;
  }
  else {
    learn_parm->svm_costratio_unlab=1.0;
  }

  pos=0;
  neg=0;
  upos=0;
  uneg=0;
  for(i=0;i<totdoc;i++) {
    dist=(lin[i]-model->b);  /* 'distance' from hyperplane */
    if(dist>0) {
      pos++;
    }
    else {
      neg++;
    }
    if(unlabeled[i]) {
      if(dist>0) {
        upos++;
      }
      else {
        uneg++;
      }
    }
    if((!unlabeled[i])
       && (a[i]>(learn_parm->svm_cost[i]-learn_parm->epsilon_a))) {
      /* printf("Ubounded %ld (class %ld, unlabeled %ld)\n",
                i,label[i],unlabeled[i]); */
    }
  }
  if(verbosity>=2) {
    printf("POS=%ld, ORGPOS=%ld, ORGNEG=%ld\n",pos,orgpos,orgneg);
    printf("POS=%ld, NEWPOS=%ld, NEWNEG=%ld\n",pos,newpos,newneg);
    printf("pos ratio = %f (%f).\n",
           (double)(upos)/(double)(allunlab),posratio);
    fflush(stdout);
  }

  if(transductcycle == 0) {
    j1=0;
    j2=0;
    j4=0;
    for(i=0;i<totdoc;i++) {
      dist=(lin[i]-model->b);  /* 'distance' from hyperplane */
      if((label[i]==0) && (unlabeled[i])) {
        selcrit[j4]=dist;
        key[j4]=i;
        j4++;
      }
    }
    unsupaddnum1=0;
    unsupaddnum2=0;
    select_top_n(selcrit,j4,select,(long)(allunlab*posratio+0.5));
    for(k=0;(k<(long)(allunlab*posratio+0.5));k++) {
      i=key[select[k]];
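/* The bookkeeping above fixes what fraction of the unlabeled examples
   the transductive learner will treat as positive: posratio is either
   supplied externally through learn_parm->transduction_posratio or
   estimated from the labeled data as orgpos/(orgpos+orgneg). On the
   first cycle (transductcycle==0), the (long)(allunlab*posratio+0.5)
   unlabeled examples with the largest dist = lin[i]-b are picked by
   select_top_n. A hypothetical illustration (numbers ours): with 60
   labeled positives and 40 labeled negatives, posratio = 0.6, so out
   of 50 unlabeled examples the 30 lying furthest on the positive side
   of the hyperplane are selected. */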
