svm.cpp
来自「Ball Vector Machine (BVM)支撑向量机C++程序项目代码」· C++ 代码 · 共 3,208 行 · 第 1/5 页
CPP
3,208 行
#ifndef _SVM_H_
#define _SVM_H_
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <float.h>
#include <string.h>
#include <stdarg.h>
#include <assert.h>
#include <time.h>
#include "utility.h"
#include "svm.h"
#include "cvm.h"
#include "bvm.h"
#if 1void info(char *fmt,...){ va_list ap; va_start(ap,fmt); vprintf(fmt,ap); va_end(ap);}void info_flush(){ fflush(stdout);}#elsevoid info(char *fmt,...) {}void info_flush() {}#endif
inline double powi(double base, int times){ double tmp = base, ret = 1.0; for(int t=times; t>0; t/=2) { if(t%2==1) ret*=tmp; tmp = tmp * tmp; } return ret;}
//
// Kernel Cache
//
// l is the number of total data items
// size is the cache size limit in bytes
//
class Cache
{
public:
	Cache(int l,int size);
	~Cache();

	// request data [0,len)
	// return some position p where [p,len) need to be filled
	// (p >= len if nothing needs to be filled)
	int get_data(const int index, Qfloat **data, int len);
	void swap_index(int i, int j);	// future_option
private:
	int l;		// number of data items (one potential cached column each)
	int size;	// remaining cache budget, counted in Qfloat elements
	struct head_t
	{
		head_t *prev, *next;	// a circular doubly-linked LRU list
		Qfloat *data;		// cached column values (NULL when evicted)
		int len;		// data[0,len) is cached in this entry
	};
	head_t *head;		// one entry per data item, indexed by column id
	head_t lru_head;	// sentinel node of the LRU list
	void lru_delete(head_t *h);
	void lru_insert(head_t *h);
};

// Convert the byte budget into a count of Qfloat slots, subtract the
// head_t bookkeeping overhead, and start with an empty LRU list.
Cache::Cache(int l_,int size_):l(l_),size(size_)
{
	head = (head_t *)calloc(l,sizeof(head_t));	// initialized to 0
	size /= sizeof(Qfloat);
	size -= l * sizeof(head_t) / sizeof(Qfloat);
	size = max(size, 2*l);	// cache must be large enough for two columns
	lru_head.next = lru_head.prev = &lru_head;
}

Cache::~Cache()
{
	for(head_t *h = lru_head.next; h != &lru_head; h=h->next)
		free(h->data);
	free(head);
}

// Unlink h from the LRU list (does not free or resize its data).
void Cache::lru_delete(head_t *h)
{
	// delete from current location
	h->prev->next = h->next;
	h->next->prev = h->prev;
}

// Re-link h just before the sentinel, i.e. as most-recently-used.
void Cache::lru_insert(head_t *h)
{
	// insert to last position
	h->next = &lru_head;
	h->prev = lru_head.prev;
	h->prev->next = h;
	h->next->prev = h;
}

// Fetch (or grow) the cached column for `index`.  On return, *data points
// at the column buffer and the returned value is the number of leading
// entries already valid; the caller fills [returned, requested len).
int Cache::get_data(const int index, Qfloat **data, int len)
{
	head_t *h = &head[index];
	if(h->len) lru_delete(h);	// temporarily off the LRU list while resizing
	int more = len - h->len;

	if(more > 0)
	{
		// free old space: evict least-recently-used columns until the
		// requested growth fits in the remaining budget
		while(size < more)
		{
			head_t *old = lru_head.next;
			lru_delete(old);
			free(old->data);
			size += old->len;
			old->data = 0;
			old->len = 0;
		}

		// allocate new space
		h->data = (Qfloat *)realloc(h->data,sizeof(Qfloat)*len);
		size -= more;
		swap(h->len,len);	// h->len := new length; len := previously valid length
	}

	lru_insert(h);
	*data = h->data;
	return len;
}

// Keep the cache consistent after samples i and j are swapped in the data
// set: swap their column entries, then fix row positions i and j inside
// every other cached column.
void Cache::swap_index(int i, int j)
{
	if(i==j) return;

	if(head[i].len) lru_delete(&head[i]);
	if(head[j].len) lru_delete(&head[j]);
	swap(head[i].data,head[j].data);
	swap(head[i].len,head[j].len);
	if(head[i].len) lru_insert(&head[i]);
	if(head[j].len) lru_insert(&head[j]);

	if(i>j) swap(i,j);	// ensure i < j for the containment tests below
	for(head_t *h = lru_head.next; h!=&lru_head; h=h->next)
	{
		if(h->len > i)
		{
			if(h->len > j)
				swap(h->data[i],h->data[j]);
			else
			{
				// give up: column holds row i but not row j — evict it
				lru_delete(h);
				free(h->data);
				size += h->len;
				h->data = 0;
				h->len = 0;
			}
		}
	}
}

//
// Kernel evaluation
//
// the static method k_function is for doing single kernel evaluation
// the constructor of Kernel prepares to calculate the l*l kernel matrix
// the member function get_Q is for getting one column from the Q Matrix
//
// Linear kernel: K(x_i, x_j) = <x_i, x_j>.
double Kernel::kernel_linear(int i, int j) const
{
	const double inner_product = dot(x[i],x[j]);
	return inner_product;
}
// Polynomial kernel: K(x_i, x_j) = (gamma*<x_i,x_j> + coef0)^degree.
double Kernel::kernel_poly(int i, int j) const
{
	const double affine = gamma*dot(x[i],x[j]) + coef0;
	return powi(affine, degree);
}
// RBF kernel: K(x_i, x_j) = exp(-gamma*||x_i - x_j||^2), with the squared
// distance expanded via the precomputed norms in x_square.
double Kernel::kernel_rbf(int i, int j) const
{
	const double sq_dist = x_square[i] + x_square[j] - 2*dot(x[i],x[j]);
	return exp(-gamma*sq_dist);
}
// Sigmoid kernel: K(x_i, x_j) = tanh(gamma*<x_i,x_j> + coef0).
double Kernel::kernel_sigmoid(int i, int j) const
{
	const double activation = gamma*dot(x[i],x[j]) + coef0;
	return tanh(activation);
}
// Precomputed kernel: sample j's first node stores its position in the
// supplied kernel matrix; the value is read from row i at that position.
double Kernel::kernel_precomputed(int i, int j) const
{
	const int column = (int)(x[j][0].value);
	return x[i][column].value;
}
// Normalized polynomial kernel:
//   K(x_i,x_j) = ( (gamma*<x_i,x_j>+coef0)
//                  / sqrt((gamma*<x_i,x_i>+coef0)*(gamma*<x_j,x_j>+coef0)) )^degree
// Fix: coef0 was previously added OUTSIDE the j-th factor of the product
// under the sqrt — (gamma*x_square[j])+coef0 — which disagrees with the
// NORMAL_POLY branch of Kernel::k_function in this same file.
double Kernel::kernel_normalized_poly(int i, int j) const
{
	const double numerator = gamma*dot(x[i],x[j]) + coef0;
	const double denominator = sqrt((gamma*x_square[i]+coef0)*(gamma*x_square[j]+coef0));
	return pow(numerator/denominator, degree);
}
// Exponential kernel: K(x_i,x_j) = exp(-sqrt(gamma*||x_i - x_j||^2)).
// Fix: the expanded squared distance x_square[i]+x_square[j]-2*dot(...)
// can come out slightly negative under floating-point rounding (notably
// when i == j), and sqrt of a negative yields NaN — clamp at zero.
double Kernel::kernel_exp(int i, int j) const
{
	double temp = gamma*(x_square[i]+x_square[j]-2*dot(x[i],x[j]));
	if(temp < 0.0)
		temp = 0.0;
	return exp(-sqrt(temp));
}
// Inverse squared-distance kernel: K = 1 / (gamma*||x_i - x_j||^2 + 1).
double Kernel::kernel_inv_sqdist(int i, int j) const
{
	const double scaled_sq_dist = gamma*(x_square[i]+x_square[j]-2*dot(x[i],x[j]));
	return 1.0/(scaled_sq_dist+1.0);
}
// Inverse-distance kernel: K = 1 / (sqrt(gamma*||x_i - x_j||^2) + 1).
// Fix: the expanded squared distance can be slightly negative under
// floating-point rounding (e.g. i == j), making sqrt return NaN — clamp
// at zero, matching the guard in kernel_exp.
double Kernel::kernel_inv_dist(int i, int j) const
{
	double temp = gamma*(x_square[i]+x_square[j]-2*dot(x[i],x[j]));
	if(temp < 0.0)
		temp = 0.0;
	return 1.0/(sqrt(temp)+1.0);
}
// Swap the cached representations of samples i and j so the kernel keeps
// matching a reordered data set (used when the solver shrinks/reorders).
// Declared const in the upstream interface even though it mutates the
// arrays — hence the original author's remark below.
void Kernel::swap_index(int i, int j) const // no so const...
{
	swap(x[i],x[j]);
	// x_square is only allocated for distance-based kernels (see ctor);
	// it is NULL otherwise, so guard before swapping
	if(x_square) swap(x_square[i],x_square[j]);
}
Kernel::Kernel(int l, svm_node * const * x_, const svm_parameter& param):kernel_type(param.kernel_type), degree(param.degree), gamma(param.gamma), coef0(param.coef0){ switch(kernel_type) { case LINEAR: kernel_function = &Kernel::kernel_linear; break; case POLY: kernel_function = &Kernel::kernel_poly; break; case RBF: kernel_function = &Kernel::kernel_rbf; break; case SIGMOID: kernel_function = &Kernel::kernel_sigmoid; case PRECOMPUTED: kernel_function = &Kernel::kernel_precomputed; break;
case EXP:
kernel_function = &Kernel::kernel_exp;
break;
case NORMAL_POLY:
kernel_function = &Kernel::kernel_normalized_poly;
break; case INV_DIST: kernel_function = &Kernel::kernel_inv_dist;
break; case INV_SQDIST: kernel_function = &Kernel::kernel_inv_sqdist;
break; } clone(x,x_,l); if(kernel_type == RBF || kernel_type == NORMAL_POLY || kernel_type == EXP || kernel_type == INV_DIST || kernel_type == INV_SQDIST) { x_square = new double[l]; for(int i=0;i<l;i++) x_square[i] = dot(x[i],x[i]); } else x_square = 0;}Kernel::~Kernel(){ delete[] x; delete[] x_square;}double Kernel::dot(const svm_node *px, const svm_node *py){ double sum = 0; while(px->index != -1 && py->index != -1) { if(px->index == py->index) { sum += (double)px->value * (double)py->value; ++px; ++py; } else { if(px->index > py->index) ++py; else ++px; } } return sum;}
// Squared Euclidean distance between two sparse vectors.  Both node
// arrays are sorted by index and terminated by index == -1; an index
// present in only one vector contributes that coordinate squared.
double Kernel::distanceSq(const svm_node *x, const svm_node *y)
{
	double acc = 0.0;

	// merge the two sorted index sequences
	while(x->index != -1 && y->index != -1)
	{
		if(x->index < y->index)
		{
			// coordinate only in x: (x - 0)^2
			acc += (double)x->value * (double)x->value;
			++x;
		}
		else if(x->index > y->index)
		{
			// coordinate only in y: (0 - y)^2
			acc += (double)y->value * (double)y->value;
			++y;
		}
		else
		{
			const double diff = (double)x->value - (double)y->value;
			acc += diff * diff;
			++x;
			++y;
		}
	}

	// one sequence exhausted — fold in the remaining tail of the other
	for(; x->index != -1; ++x)
		acc += (double)x->value * (double)x->value;
	for(; y->index != -1; ++y)
		acc += (double)y->value * (double)y->value;

	return acc;
}
double Kernel::k_function(const svm_node *x, const svm_node *y, const svm_parameter& param){ switch(param.kernel_type) { case LINEAR: return dot(x,y); case POLY: return powi(param.gamma*dot(x,y)+param.coef0,param.degree); case RBF: { double sum = 0; while(x->index != -1 && y->index !=-1) { if(x->index == y->index) { double d = (double)x->value - (double)y->value; sum += d*d; ++x; ++y; } else { if(x->index > y->index) { sum += (double)y->value * (double)y->value; ++y; } else { sum += (double)x->value * (double)x->value; ++x; } } } while(x->index != -1) { sum += (double)x->value * (double)x->value; ++x; } while(y->index != -1) { sum += (double)y->value * (double)y->value; ++y; } return exp(-param.gamma*sum); }
case EXP:
{
double dist = param.gamma*distanceSq(x,y);
return exp(-sqrt(dist));
}
case INV_DIST:
{
double dist = param.gamma*distanceSq(x,y);
return 1.0/(sqrt(dist)+1.0);
}
case INV_SQDIST:
{
return 1.0/(param.gamma*distanceSq(x,y)+1.0);
}
case NORMAL_POLY:
return pow((param.gamma*dot(x,y)+param.coef0)/sqrt((param.gamma*dot(x,x)+param.coef0)*(param.gamma*dot(y,y)+param.coef0)),param.degree); case SIGMOID: return tanh(param.gamma*dot(x,y)+param.coef0); case PRECOMPUTED: //x: test (validation), y: SV return x[(int)(y->value)].value; default: return 0; /* Unreachable */ }}void Solver::swap_index(int i, int j){ Q->swap_index(i,j); swap(y[i],y[j]); swap(G[i],G[j]); swap(alpha_status[i],alpha_status[j]); swap(alpha[i],alpha[j]); swap(b[i],b[j]); swap(active_set[i],active_set[j]); swap(G_bar[i],G_bar[j]);}void Solver::reconstruct_gradient(){ // reconstruct inactive elements of G from G_bar and free variables if(active_size == l) return; int i; for(i=active_size;i<l;i++) G[i] = G_bar[i] + b[i]; for(i=0;i<active_size;i++) if(is_free(i)) { const Qfloat *Q_i = Q->get_Q(i,l); double alpha_i = alpha[i]; for(int j=active_size;j<l;j++) G[j] += alpha_i * Q_i[j]; }}void Solver::Solve(int l, const QMatrix& Q, const double *b_, const schar *y_, double *alpha_, double Cp, double Cn, double eps, SolutionInfo* si, int shrinking){ this->l = l; this->Q = &Q; QD=Q.get_QD(); clone(b, b_,l); clone(y, y_,l); clone(alpha,alpha_,l); this->Cp = Cp; this->Cn = Cn; this->eps = eps; unshrinked = false; // initialize alpha_status { alpha_status = new char[l]; for(int i=0;i<l;i++) update_alpha_status(i); } // initialize active set (for shrinking) { active_set = new int[l]; for(int i=0;i<l;i++) active_set[i] = i; active_size = l; } // initialize gradient { G = new double[l]; G_bar = new double[l]; int i; for(i=0;i<l;i++) { G[i] = b[i]; G_bar[i] = 0; } for(i=0;i<l;i++) if(!is_lower_bound(i)) { const Qfloat *Q_i = Q.get_Q(i,l); double alpha_i = alpha[i]; int j; for(j=0;j<l;j++) G[j] += alpha_i*Q_i[j]; if(is_upper_bound(i)) for(j=0;j<l;j++) G_bar[j] += get_C(i) * Q_i[j]; } } // optimization step int iter = 0; int counter = min(l,1000)+1; while(1) { // show progress and do shrinking if(--counter == 0) { counter = min(l,1000); if(shrinking) do_shrinking(); 
info("."); info_flush(); } int i,j; if(select_working_set(i,j)!=0) { // reconstruct the whole gradient reconstruct_gradient(); // reset active set size and check active_size = l; info("*"); info_flush(); if(select_working_set(i,j)!=0) break; else counter = 1; // do shrinking next iteration } ++iter; // update alpha[i] and alpha[j], handle bounds carefully const Qfloat *Q_i = Q.get_Q(i,active_size); const Qfloat *Q_j = Q.get_Q(j,active_size); double C_i = get_C(i); double C_j = get_C(j); double old_alpha_i = alpha[i]; double old_alpha_j = alpha[j]; if(y[i]!=y[j]) { double quad_coef = Q_i[i]+Q_j[j]+2*Q_i[j]; if (quad_coef <= 0) quad_coef = TAU; double delta = (-G[i]-G[j])/quad_coef; double diff = alpha[i] - alpha[j]; alpha[i] += delta; alpha[j] += delta; if(diff > 0) { if(alpha[j] < 0) { alpha[j] = 0; alpha[i] = diff; } } else { if(alpha[i] < 0) { alpha[i] = 0; alpha[j] = -diff; } } if(diff > C_i - C_j) { if(alpha[i] > C_i) { alpha[i] = C_i; alpha[j] = C_i - diff; } } else { if(alpha[j] > C_j) { alpha[j] = C_j; alpha[i] = C_j + diff; } } } else { double quad_coef = Q_i[i]+Q_j[j]-2*Q_i[j]; if (quad_coef <= 0) quad_coef = TAU; double delta = (G[i]-G[j])/quad_coef; double sum = alpha[i] + alpha[j]; alpha[i] -= delta; alpha[j] += delta; if(sum > C_i) { if(alpha[i] > C_i) { alpha[i] = C_i; alpha[j] = sum - C_i; } } else { if(alpha[j] < 0) { alpha[j] = 0; alpha[i] = sum; } } if(sum > C_j) { if(alpha[j] > C_j) { alpha[j] = C_j; alpha[i] = sum - C_j; } } else { if(alpha[i] < 0) { alpha[i] = 0; alpha[j] = sum; } } }
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?