// SVM.cpp: implementation of the CSVM class.
//
//////////////////////////////////////////////////////////////////////
#include "stdafx.h"
#include "SVM.h"
#include "Message.h"

# include <stdio.h>
# include <ctype.h>
# include <math.h>
# include <string.h>
# include <stdlib.h>
# include <time.h> 
# include <float.h>

#ifdef _DEBUG
#undef THIS_FILE
static char THIS_FILE[]=__FILE__;
#define new DEBUG_NEW
#endif

extern CCompute_Prompt com_pro;
extern CCompute_Param com_param;
extern CCompute_Result com_result;

# define VERSION       "V3.50"
# define VERSION_DATE  "01.11.00"

# define PRIMAL_OPTIMAL      1
# define DUAL_OPTIMAL        2
# define MAXITER_EXCEEDED    3
# define NAN_SOLUTION        4
# define ONLY_ONE_VARIABLE   5

# define LARGEROUND          0
# define SMALLROUND          1

# define DEF_PRECISION          1E-5
# define DEF_MAX_ITERATIONS     200
# define DEF_LINDEP_SENSITIVITY 1E-8
# define EPSILON_HIDEO          1E-20
# define EPSILON_EQ             1E-5


CSVM theSVM;
//////////////////////////////////////////////////////////////////////
// Construction/Destruction
//////////////////////////////////////////////////////////////////////

CSVM::CSVM()
{
	precision_violations=0;
	opt_precision=DEF_PRECISION;
	maxiter=DEF_MAX_ITERATIONS;
	lindep_sensitivity=DEF_LINDEP_SENSITIVITY;
	smallroundcount=0;
}

CSVM::~CSVM()
{

}

/***************************svm_learn_main****************************/
void CSVM::set_learn_parameters(LEARN_PARM* learn_parm,KERNEL_PARM* kernel_parm)
{
  learn_parm->biased_hyperplane=com_param.biased_Hyperplane;
  learn_parm->remove_inconsistent=com_param.remove_inconsitant;
  learn_parm->skip_final_opt_check=com_param.final_test;
  learn_parm->svm_maxqpsize=com_param.maximum_size;
  learn_parm->svm_newvarsinqp=com_param.new_variable;
  learn_parm->svm_iter_to_shrink=com_param.iteration_time;
  learn_parm->svm_c=com_param.C;
  learn_parm->transduction_posratio=com_param.fraction;
  learn_parm->svm_costratio=com_param.cost_factor;
  learn_parm->svm_costratio_unlab=1.0;
  learn_parm->svm_unlabbound=1E-5;
  learn_parm->epsilon_crit=0.001;
  learn_parm->epsilon_a=1E-15;
  learn_parm->compute_loo=com_param.loo;
  learn_parm->rho=com_param.rho;
  learn_parm->xa_depth=com_param.search_depth;

  kernel_parm->kernel_type=com_param.kernel_type;
  kernel_parm->poly_degree=com_param.poly_degree;
  kernel_parm->rbf_gamma=com_param.rbf_gamma;
  kernel_parm->coef_lin=com_param.poly_s;
  kernel_parm->coef_const=com_param.poly_c;
  //strcpy(kernel_parm->custom,com_param.);
}


int CSVM::svm_learn_main (int pos_label)
{  
  DOC *docs;  /* training examples */
  long *label,max_docs,max_words_doc;
  long totwords,totdoc,ll,i;
  KERNEL_CACHE kernel_cache;
  LEARN_PARM learn_parm;
  KERNEL_PARM kernel_parm;
  MODEL model;
  char docfile[200];
  char modelfile[200];
  
  if (com_pro.show_action)
  {
	  printm("Beginning computation");
	  printm("Scanning examples...");
  }

  set_learn_parameters(&learn_parm,&kernel_parm);

  strcpy(docfile,com_param.trainfile);
  strcpy(modelfile,com_param.modelfile);
//  kernel_cache_size=com_param.cache_size;


  nol_ll(docfile,&max_docs,&max_words_doc,&ll); /* scan size of input file */
  max_words_doc+=2;
  ll+=2;
  max_docs+=2;


  docs = (DOC *)my_malloc(sizeof(DOC)*max_docs);
  label = (long *)my_malloc(sizeof(long)*max_docs);

  
  read_documents(docfile,docs,label,max_words_doc,ll,&totwords,&totdoc,pos_label);

  if(kernel_parm.kernel_type == LINEAR) /* don't need the cache */
  { 
    svm_learn(docs,label,totdoc,totwords,&learn_parm,&kernel_parm,NULL,&model);
  }
  else 
  {
    kernel_cache_init(&kernel_cache,totdoc,com_param.cache_size);
    svm_learn(docs,label,totdoc,totwords,&learn_parm,&kernel_parm,&kernel_cache,&model);
    kernel_cache_cleanup(&kernel_cache);
  }

  write_model(modelfile,&model);

  free(model.supvec);
  free(model.alpha);
  free(model.index);
  for(i=0;i<totdoc;i++) 
    free(docs[i].words);
  free(docs);
  free(label);
  if (com_pro.show_action)
	printm("Computation finished");
  return(0);
}
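/* Usage sketch (hypothetical caller, not part of this file): the entry point
   reads its file names from the global com_param, so a caller would do
   something like

       strcpy(com_param.trainfile, "train.dat");
       strcpy(com_param.modelfile, "svm.model");
       theSVM.svm_learn_main(1);

   pos_label is forwarded to read_documents(), presumably to select which
   class label is treated as the positive class (+1). */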
/********************************svm_learn_main****************************/

/********************************svm_classify****************************/
double CSVM::svm_classify(DOC &doc)
{
	long llsv,max_sv,max_words_sv,pred_format,j;
	double sim=0;
	MODEL model; 
	char modelfile[MAX_PATH];
		
	strcpy(modelfile,com_param.modelfile);
	pred_format=0;
	// scan size of model file
	nol_ll(modelfile,&max_sv,&max_words_sv,&llsv);
	max_words_sv+=2;
	llsv+=2;
	model.supvec = (DOC **)my_malloc(sizeof(DOC *)*max_sv);
	model.alpha = (double *)my_malloc(sizeof(double)*max_sv);
	read_model(modelfile,&model,max_words_sv,llsv);

	doc.twonorm_sq=sprod_ss(doc.words,doc.words);
	if(model.kernel_parm.kernel_type == 0) 
	{   // linear kernel, compute weight vector
		add_weight_vector_to_linear_model(&model);
		for(j=0;(doc.words[j]).wnum != 0;j++) 
		{  
			// Drop any feature whose number exceeds the largest
			// feature number seen in the model.
			if((doc.words[j]).wnum>model.totwords) 
				(doc.words[j]).wnum=0;              
		}                                        
		sim=classify_example_linear(&model,&doc);
	}
	// non-linear kernel
	else sim=classify_example(&model,&doc);

	free(model.supvec);
	free(model.alpha);
	//linear kernel
	if(model.kernel_parm.kernel_type == 0) free(model.lin_weights);
	return sim;
}
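/* Usage sketch (hypothetical): classify a single sparse example against the
   model stored in com_param.modelfile. doc.words must be sorted by wnum and
   terminated by an entry with wnum==0 (the merge in sprod_ss relies on both):

       DOC d;
       d.words = words;                      // sorted, 0-terminated SVM_WORD array
       double score = theSVM.svm_classify(d);
       long pred = (score > 0) ? 1 : -1;     // sign of the decision value

   Note that this overload re-reads the model file on every call. */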

int CSVM::svm_classify(int post_label, double* weight)
{
	DOC doc;   /* test example */
	long max_docs,max_words_doc,lld,llsv;
	long max_sv,max_words_sv,totdoc=0,doc_label;
	long wnum,pred_format;
	long j;
	char *line; 
	FILE *docfl;
	MODEL model; 
	char docfile[MAX_PATH];
	char modelfile[MAX_PATH];
	
	strcpy(docfile,com_param.classifyfile);
	strcpy(modelfile,com_param.modelfile);
	pred_format=0;
	
	nol_ll(docfile,&max_docs,&max_words_doc,&lld); /* scan size of input file */
	max_words_doc+=2;
	lld+=2;
	nol_ll(modelfile,&max_sv,&max_words_sv,&llsv); /* scan size of model file */
	max_words_sv+=2;
	llsv+=2;
	model.supvec = (DOC **)my_malloc(sizeof(DOC *)*max_sv);
	model.alpha = (double *)my_malloc(sizeof(double)*max_sv);
	line = (char *)my_malloc(sizeof(char)*lld);
	doc.words = (SVM_WORD *)my_malloc(sizeof(SVM_WORD)*(max_words_doc+10));
	read_model(modelfile,&model,max_words_sv,llsv);
	/* linear kernel */
		/* compute weight vector */
	if(model.kernel_parm.kernel_type == 0)
	{ 
		add_weight_vector_to_linear_model(&model);
	}
	if (com_pro.show_action)
		printm("Classifying test examples..");

	if ((docfl = fopen (docfile, "r")) == NULL)
	{
		printe(docfile);
		return -1;
	}
//chen 10.9.2001

	while((!feof(docfl)) && fgets(line,(int)lld,docfl)) 
	{
		if(line[0] == '#') continue;  /* line contains comments */
		parse_document(line,&doc,&doc_label,&wnum,max_words_doc);
		if(model.kernel_parm.kernel_type == 0) 
		{   /* linear kernel */
			for(j=0;(doc.words[j]).wnum != 0;j++) 
		{  /* Drop any feature whose number exceeds the largest
			  feature number seen in the model. */
				if((doc.words[j]).wnum>model.totwords) 
					(doc.words[j]).wnum=0;              
			}                                        
			weight[totdoc]=classify_example_linear(&model,&doc);
		}
		else /* non-linear kernel */                        
			weight[totdoc]=classify_example(&model,&doc);
		totdoc++;
	}  
	free(line);
	free(doc.words);
	free(model.supvec);
	free(model.alpha);
	if(model.kernel_parm.kernel_type == 0) 
	{ /* linear kernel */
		free(model.lin_weights);
	}	
	return(0);
}
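/* Caller sketch (hypothetical) for the batch overload: `weight` receives one
   decision value per example in com_param.classifyfile, so it must be
   preallocated with at least that many slots, e.g. sized from the same
   nol_ll() scan the function itself performs:

       long max_docs, max_words, ll;
       nol_ll(com_param.classifyfile, &max_docs, &max_words, &ll);
       double *weight = (double *)my_malloc(sizeof(double) * (max_docs + 2));
       theSVM.svm_classify(1, weight);
*/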
/********************************svm_classify****************************/


/********************************svm_common****************************/
double CSVM::classify_example(MODEL *model,DOC *ex)
{
	register long i;
	register double dist;
	
	dist=0;
	for(i=1;i<model->sv_num;i++) {  
		dist+=kernel(&model->kernel_parm,model->supvec[i],ex)*model->alpha[i];
	}
	return(dist-model->b);
}

/* Classifies an example using the linear kernel.
   Important: the model must have the linear weight vector computed.
   Important: the feature numbers in the example to classify must not
   be larger than those covered by the weight vector! */
double CSVM::classify_example_linear(MODEL *model,DOC *ex)
{
	return((double)(sprod_ns(model->lin_weights,ex->words)-model->b));
}

/* calculate the kernel function */
CFLOAT CSVM::kernel(KERNEL_PARM *kernel_parm,DOC *a,DOC*b)
{
	com_result.kernel_cache_statistic++;
	switch(kernel_parm->kernel_type)
	{
    case 0: /* linear */ 
		return((CFLOAT)sprod_ss(a->words,b->words)); 
    case 1: /* polynomial */
		return((CFLOAT)pow(kernel_parm->coef_lin*sprod_ss(a->words,b->words)+kernel_parm->coef_const,(double)kernel_parm->poly_degree)); 
    case 2: /* radial basis function */
		return((CFLOAT)exp(-kernel_parm->rbf_gamma*(a->twonorm_sq-2*sprod_ss(a->words,b->words)+b->twonorm_sq)));
    case 3: /* sigmoid neural net */
		return((CFLOAT)tanh(kernel_parm->coef_lin*sprod_ss(a->words,b->words)+kernel_parm->coef_const)); 
    case 4: /* custom kernel supplied in file kernel.h */
		return((CFLOAT)custom_kernel(kernel_parm,a,b)); 
		//chen: test - sum of the polynomial and RBF kernels
		//return((CFLOAT)pow(kernel_parm->coef_lin*sprod_ss(a->words,b->words)+kernel_parm->coef_const,(double)kernel_parm->poly_degree)+exp(-kernel_parm->rbf_gamma*(a->twonorm_sq-2*sprod_ss(a->words,b->words)+b->twonorm_sq))); 
    default: sprintf(temstr,"Error: Unknown kernel function");
		printm(temstr);
		return (-1);
	}
}
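/* The cases above implement the standard SVM-light kernels:
       0 (linear):      K(a,b) = <a,b>
       1 (polynomial):  K(a,b) = (s*<a,b> + c)^d
       2 (RBF):         K(a,b) = exp(-gamma * ||a-b||^2),
                        with ||a-b||^2 expanded as ||a||^2 - 2<a,b> + ||b||^2
                        using the precomputed twonorm_sq fields
       3 (sigmoid):     K(a,b) = tanh(s*<a,b> + c)
   where s = coef_lin, c = coef_const, d = poly_degree, gamma = rbf_gamma. */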

/* compute the inner product of two sparse vectors */
double CSVM::sprod_ss(SVM_WORD *a,SVM_WORD*b)
{
    register FVAL sum=0;
    register SVM_WORD *ai,*bj;
    ai=a;
    bj=b;
    while (ai->wnum && bj->wnum) {
		if(ai->wnum > bj->wnum) {
			bj++;
		}
		else if (ai->wnum < bj->wnum) {
			ai++;
		}
		else {
			sum+=ai->weight * bj->weight;
			ai++;
			bj++;
		}
    }
    return((double)sum);
}
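/* Worked example of the merge above (both lists sorted by wnum, 0-terminated):
       a = {(1,0.5), (3,2.0), (wnum=0)}
       b = {(2,4.0), (3,1.5), (wnum=0)}
   Only wnum==3 appears in both, so sprod_ss(a,b) = 2.0 * 1.5 = 3.0.
   The two-pointer walk makes the cost O(len(a) + len(b)). */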



/* compute the inner product of two sparse vectors, with b's feature numbers shifted by offset */
double CSVM::sprod_ss1(SVM_WORD *a,SVM_WORD*b,int offset)
{
    register FVAL sum=0;
    register SVM_WORD *ai,*bj;
    ai=a;
    bj=b;
    while (ai->wnum && bj->wnum) {
		if(ai->wnum > bj->wnum+offset) {
			bj++;
		}
		else if (ai->wnum < bj->wnum+offset) {
			ai++;
		}
		else 
		{
			int np=(ai->wnum-1)%16+1+offset;
			if (np>0 && np<17) 
				sum+=ai->weight * bj->weight;
			ai++;
			bj++;
		}
    }
    return((double)sum);
}
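/* Reading of the np test above (an interpretation, not documented in the
   original source): feature numbers appear to index a grid 16 columns wide,
   with (wnum-1)%16+1 giving the 1-based column. A matching pair is counted
   only if the column shifted by offset stays inside 1..16, i.e. the shift
   does not wrap around into the next row. */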

/* like sprod_ss1, but with a 256-wide window: a matching pair is counted
   only if the shifted feature number (wnum + offset) stays within 1..256 */
double CSVM::sprod_ss2(SVM_WORD *a,SVM_WORD*b,int offset)
{
    register FVAL sum=0;
    register SVM_WORD *ai,*bj;
    ai=a;
    bj=b;
    while (ai->wnum && bj->wnum) {
		if(ai->wnum > bj->wnum+offset) {
			bj++;
		}
		else if (ai->wnum < bj->wnum+offset) {
			ai++;
		}
		else 
		{
			int np=ai->wnum+offset;
			if (np>0 && np<257) 
				sum+=ai->weight * bj->weight;
			ai++;
			bj++;
		}
    }
    return((double)sum);
}

/* compute length of weight vector */
double CSVM::model_length_s(MODEL *model,KERNEL_PARM *kernel_parm)
{
	register long i,j;
	register double sum=0,alphai;
	register DOC *supveci;
	
	for(i=1;i<model->sv_num;i++) {  
		alphai=model->alpha[i];
		supveci=model->supvec[i];
		for(j=1;j<model->sv_num;j++) {
			sum+=alphai*model->alpha[j]
				*kernel(kernel_parm,supveci,model->supvec[j]);
		}
	}
	return(sqrt(sum));
}
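/* What the double loop computes:
       ||w||^2 = sum_i sum_j alpha_i * alpha_j * K(x_i, x_j)
   over the support vectors, so the return value is ||w||, the norm of the
   weight vector in feature space (the geometric margin is 1/||w||). */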

void CSVM::clear_vector_n(double *vec,long n)
{
	register long i;
	for(i=0;i<=n;i++) vec[i]=0;
}

void CSVM::add_vector_ns(double *vec_n,SVM_WORD *vec_s,double faktor)
{
	register SVM_WORD *ai;
	ai=vec_s;
	while (ai->wnum) {
		vec_n[ai->wnum]+=(faktor*ai->weight);
		ai++;
	}
}

double CSVM::sprod_ns(double *vec_n,SVM_WORD *vec_s)
{
	register double sum=0;
	register SVM_WORD *ai;
	ai=vec_s;
	while (ai->wnum) {
		sum+=(vec_n[ai->wnum]*ai->weight);
		ai++;
	}
	return(sum);
}

/* compute the weight vector in the linear case and add it to the model */
void CSVM::add_weight_vector_to_linear_model(MODEL *model)
{
	long i;
	
	model->lin_weights=(double *)my_malloc(sizeof(double)*(model->totwords+1));
	clear_vector_n(model->lin_weights,model->totwords);
	for(i=1;i<model->sv_num;i++) {
		add_vector_ns(model->lin_weights,(model->supvec[i])->words,
			model->alpha[i]);
	}
}
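/* The loop above accumulates w = sum_i alpha_i * x_i into lin_weights
   (indexed by feature number, slot 0 unused). Under the usual SVM-light
   convention the stored alpha_i already carry the label sign y_i, so no
   separate multiplication by the label is needed here. */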
