
kernelfuncs.cpp

This is SvmFu, a package for training and testing support vector machines (SVMs). It is written in C++.
// Copyright (C) 2000 Ryan M. Rifkin <rif@mit.edu>
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
// 02111-1307, USA.
//
// Kernel function definitions.  These will be included in both the
// training and testing clients.  Note that they should be included AFTER
// the typedefs, as they're dependent on the definition of DataElt and
// KernVal.
// code by rif. template wrapping by vking.

/*! This sets pfunc to be the appropriate kernel function
 * for the given parameters; pass pfunc as a kernel function
 * pointer to an Svm constructor with the same template types.
 */
template<class KernVal, class DataElt>
KernelFuncs<KernVal, DataElt>::KernelFuncs(KernType k_, MachType m_)
{
  afunc = NULL;
  mfunc = NULL;
  // brute-force selection of the appropriate kernel function;
  // afunc/mfunc (explicit weight-vector maintenance) are only
  // available for linear kernels.
  if (k_ == linear && m_ == dense) {
    pfunc = &linearProduct;
    afunc = &addToW;
    mfunc = &multW;
  } else if (k_ == linear && m_ == sparse01) {
    pfunc = &sparse01BinaryProduct;
    afunc = &addToWsparse01;
    mfunc = &multWsparse01;
  } else if (k_ == linear && m_ == sparseN) {
    pfunc = &sparseNLinearProduct;
    afunc = &addToWsparseN;
    mfunc = &multWsparseN;
  } else if (k_ == polynomial && m_ == dense) {
    pfunc = &polynomialProduct;
  } else if (k_ == polynomial && m_ == sparse01) {
    pfunc = &sparse01PolynomialProduct;
  } else if (k_ == polynomial && m_ == sparseN) {
    pfunc = &sparseNPolynomialProduct;
  } else if (k_ == gaussian && m_ == dense) {
    pfunc = &gaussianProduct;
  } else if (k_ == gaussian && m_ == sparse01) {
    pfunc = &sparse01GaussianProduct;
  } else if (k_ == gaussian && m_ == sparseN) {
    pfunc = &sparseNGaussianProduct;
  }
}

// Dense linear kernel: <p1,p2> / normalizer.
template <class KernVal, class DataElt> const KernVal
KernelFuncs<KernVal, DataElt>::linearProduct(const DataPoint<DataElt> &p1,
                                             const DataPoint<DataElt> &p2) {
  KernVal kernProd = 0;
  // dense
  const DataElt *p1value = p1.value;
  const DataElt *p2value = p2.value;
  for (int i = 0; i < p1.dim; i++) {
    kernProd += p1value[i]*p2value[i];
  }
  return (KernVal)(kernProd/normalizer);
}

// w += amt * p (dense): maintains an explicit weight vector for linear machines.
template <class KernVal, class DataElt> const void
KernelFuncs<KernVal, DataElt>::addToW(double *&w, const DataPoint<DataElt> &p,
                                      double amt) {
  for (int i = 0; i < p.dim; i++) {
    w[i] += p.value[i]*amt;
  }
}

// <w,p> / normalizer (dense).
template <class KernVal, class DataElt> const double
KernelFuncs<KernVal, DataElt>::multW(double *w, const DataPoint<DataElt> &p) {
  double output = 0;
  for (int i = 0; i < p.dim; i++) {
    output += w[i]*p.value[i];
  }
  return (output/normalizer);
}

// Dense polynomial kernel: (<p1,p2>/normalizer + offset)^degree.
template <class KernVal, class DataElt> const KernVal
KernelFuncs<KernVal, DataElt>::polynomialProduct(const DataPoint<DataElt> &p1,
                                                 const DataPoint<DataElt> &p2) {
  int i;
  KernVal kernProd = 0;
  // dense
  const DataElt *p1value = p1.value;
  const DataElt *p2value = p2.value;
  for (i = 0; i < p1.dim; i++) {
    kernProd += p1value[i]*p2value[i];
  }
#ifdef USE_POW
  // #warning ------ using pow for dense polynomial
  kernProd = (KernVal)pow(kernProd/normalizer+offset, degree);
#else
  // #warning ------ using loop for dense polynomial
  kernProd = (KernVal)(kernProd/normalizer+offset);
  KernVal kernBase = kernProd;
  for (i = 1; i < degree; i++) {
    kernProd *= kernBase;
  }
#endif
  return kernProd;
}

// Dense Gaussian kernel: exp(-||p1-p2||^2 / (2*sigma^2)) / normalizer.
template <class KernVal, class DataElt> const KernVal
KernelFuncs<KernVal, DataElt>::gaussianProduct(const DataPoint<DataElt> &p1,
                                               const DataPoint<DataElt> &p2) {
  double tmp = 0.0;
  // dense
  const DataElt *p1value = p1.value;
  const DataElt *p2value = p2.value;
  for (int i = 0; i < p1.dim; i++) {
    tmp += (p1value[i]-p2value[i])*(p1value[i]-p2value[i]);
  }
  tmp /= (2*sigma*sigma);
  return (KernVal)(exp(-tmp)/normalizer);
}

// <p1,p2> for sparse binary data: counts indices present in both vectors.
template <class KernVal, class DataElt> const KernVal
KernelFuncs<KernVal, DataElt>::sparse01InternalProduct(const DataPoint<DataElt> &p1,
                                                       const DataPoint<DataElt> &p2) {
  int prod = 0;
  int i1 = 0, i2 = 0;
  int ip1, ip2;
  // sparse01
  const int *p1index = p1.index;
  const int *p2index = p2.index;
  while (i1 < p1.dim && i2 < p2.dim) {
    ip1 = p1index[i1];
    ip2 = p2index[i2];
    if      (ip1 > ip2) { i2++; }
    else if (ip1 < ip2) { i1++; }
    else                { prod++; i1++; i2++; }
  }
  return (KernVal)prod;
}

// Linear kernel, sparse binary data.
template <class KernVal, class DataElt> const KernVal
KernelFuncs<KernVal, DataElt>::sparse01BinaryProduct(const DataPoint<DataElt> &p1,
                                                     const DataPoint<DataElt> &p2) {
  // sparse01
  return (KernVal)(sparse01InternalProduct(p1, p2)/normalizer);
}

// w += amt * p, sparse binary data.
template <class KernVal, class DataElt> const void
KernelFuncs<KernVal, DataElt>::addToWsparse01(double *&w,
                                              const DataPoint<DataElt> &p,
                                              double amt) {
  for (int i = 0; i < p.dim; i++) {
    w[p.index[i]] += amt;
  }
}

// <w,p> / normalizer, sparse binary data.
template <class KernVal, class DataElt> const double
KernelFuncs<KernVal, DataElt>::multWsparse01(double *w,
                                             const DataPoint<DataElt> &p) {
  double output = 0;
  for (int i = 0; i < p.dim; i++) {
    output += w[p.index[i]];
  }
  return (output/normalizer);
}

// Polynomial kernel, sparse binary data.
template <class KernVal, class DataElt> const KernVal
KernelFuncs<KernVal, DataElt>::sparse01PolynomialProduct(const DataPoint<DataElt> &p1,
                                                         const DataPoint<DataElt> &p2) {
  // sparse01
  KernVal kernProd = (KernVal)(sparse01InternalProduct(p1, p2)/normalizer+offset);
  KernVal kernBase = kernProd;
  for (int i = 1; i < degree; i++) {
    kernProd *= kernBase;
  }
  return kernProd;
}

// Gaussian kernel, sparse binary data: ||p1-p2||^2 is the number of
// indices present in exactly one of the two vectors.
template <class KernVal, class DataElt> const KernVal
KernelFuncs<KernVal, DataElt>::sparse01GaussianProduct(const DataPoint<DataElt> &p1,
                                                       const DataPoint<DataElt> &p2) {
  double diff = 0;
  int i1 = 0, i2 = 0;
  int ip1, ip2;
  // sparse01
  const int *p1index = p1.index;
  const int *p2index = p2.index;
  while (i1 < p1.dim && i2 < p2.dim) {
    ip1 = p1index[i1];
    ip2 = p2index[i2];
    if      (ip1 > ip2) { i2++; diff++; }
    else if (ip1 < ip2) { i1++; diff++; }
    else                { i1++; i2++; }
  }
  // ??? should p1index[i1] ever equal -1?  I think not, but..  -jim
  while (i1 < p1.dim && p1index[i1] != -1) {
    i1++; diff++;
  }
  while (i2 < p2.dim && p2index[i2] != -1) {
    i2++; diff++;
  }
  diff /= (2*sigma*sigma);
  // apply the normalizer outside the exponent, as in the dense Gaussian
  return (KernVal)(exp(-diff)/normalizer);
}

// <p1,p2> for general sparse data.
template <class KernVal, class DataElt> const KernVal
KernelFuncs<KernVal, DataElt>::sparseNInternalProduct(const DataPoint<DataElt> &p1,
                                                      const DataPoint<DataElt> &p2) {
  double prod = 0;
  int i1 = 0, i2 = 0;
  int ip1 = 0, ip2 = 0;
  // sparseN
  const int *p1index = p1.index;
  const int *p2index = p2.index;
  const DataElt *p1value = p1.value;
  const DataElt *p2value = p2.value;
  while (i1 < p1.dim && i2 < p2.dim) {
    ip1 = p1index[i1];
    ip2 = p2index[i2];
    if      (ip1 > ip2) { i2++; }
    else if (ip1 < ip2) { i1++; }
    else                { prod += p1value[i1]*p2value[i2]; i1++; i2++; }
  }
  return (KernVal)prod;
}

// Linear kernel, general sparse data.
template <class KernVal, class DataElt> const KernVal
KernelFuncs<KernVal, DataElt>::sparseNLinearProduct(const DataPoint<DataElt> &p1,
                                                    const DataPoint<DataElt> &p2) {
  // sparseN
  return (KernVal)(sparseNInternalProduct(p1, p2)/normalizer);
}

// w += amt * p, general sparse data.
template <class KernVal, class DataElt> const void
KernelFuncs<KernVal, DataElt>::addToWsparseN(double *&w,
                                             const DataPoint<DataElt> &p,
                                             double amt) {
  for (int i = 0; i < p.dim; i++) {
    w[p.index[i]] += p.value[i]*amt;
  }
}

// <w,p> / normalizer, general sparse data.
template <class KernVal, class DataElt> const double
KernelFuncs<KernVal, DataElt>::multWsparseN(double *w,
                                            const DataPoint<DataElt> &p) {
  double output = 0;
  for (int i = 0; i < p.dim; i++) {
    output += w[p.index[i]]*p.value[i];
  }
  return (output/normalizer);
}

// Polynomial kernel, general sparse data.
template <class KernVal, class DataElt> const KernVal
KernelFuncs<KernVal, DataElt>::sparseNPolynomialProduct(const DataPoint<DataElt> &p1,
                                                        const DataPoint<DataElt> &p2) {
  // sparseN
  KernVal kernProd = (KernVal)(sparseNInternalProduct(p1, p2)/normalizer+offset);
  KernVal kernBase = kernProd;
  for (int i = 1; i < degree; i++) {
    kernProd *= kernBase;
  }
  return kernProd;
}

// Gaussian kernel, general sparse data: indices present in only one
// vector contribute that value squared to ||p1-p2||^2.
template <class KernVal, class DataElt> const KernVal
KernelFuncs<KernVal, DataElt>::sparseNGaussianProduct(const DataPoint<DataElt> &p1,
                                                      const DataPoint<DataElt> &p2) {
  double diff = 0;
  int i1 = 0, i2 = 0;
  int ip1, ip2;
  // sparseN
  const int *p1index = p1.index;
  const int *p2index = p2.index;
  const DataElt *p1value = p1.value;
  const DataElt *p2value = p2.value;
  while (i1 < p1.dim && i2 < p2.dim) {
    ip1 = p1index[i1];
    ip2 = p2index[i2];
    if      (ip1 > ip2) { diff += (p2value[i2]*p2value[i2]); i2++; }
    else if (ip1 < ip2) { diff += (p1value[i1]*p1value[i1]); i1++; }
    else {
      diff += (p1value[i1]-p2value[i2])*(p1value[i1]-p2value[i2]); i1++; i2++;
    }
  }
  // ??? should p1index[i1] ever equal -1?  I think not, but..  -jim
  while (i1 < p1.dim && p1index[i1] != -1) {
    diff += (p1value[i1]*p1value[i1]); i1++;
  }
  while (i2 < p2.dim && p2index[i2] != -1) {
    diff += (p2value[i2]*p2value[i2]); i2++;
  }
  diff /= (2*sigma*sigma);
  // apply the normalizer outside the exponent, as in the dense Gaussian
  return (KernVal)(exp(-diff)/normalizer);
}

// SvmFuSvmTypes.h invokes IterateTypes once per supported
// (DataElt, KernVal) pair, explicitly instantiating KernelFuncs for each.
#define IterateTypes(datatype, kerntype) \
    template class KernelFuncs<kerntype, datatype>;
#include "SvmFuSvmTypes.h"
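For reference, the routines above implement K(x,y) = (x·y)/normalizer (linear), ((x·y)/normalizer + offset)^degree (polynomial), and exp(-||x-y||^2/(2*sigma^2))/normalizer (Gaussian); the sparse variants only change how the dot product and squared distance are accumulated. Below is a minimal, self-contained sketch of these formulas on dense double vectors. It is not part of SvmFu: the function names are hypothetical, and the parameter names (normalizer, offset, degree, sigma) simply mirror the members used above.

// Standalone illustration of the kernel formulas; not SvmFu code.
#include <cmath>
#include <cstdio>

double dot(const double *x, const double *y, int dim) {
  double s = 0;
  for (int i = 0; i < dim; i++) s += x[i] * y[i];
  return s;
}

// Linear kernel: <x,y> / normalizer.
double linearK(const double *x, const double *y, int dim, double normalizer) {
  return dot(x, y, dim) / normalizer;
}

// Polynomial kernel: (<x,y>/normalizer + offset)^degree.
double polyK(const double *x, const double *y, int dim,
             double normalizer, double offset, int degree) {
  return std::pow(dot(x, y, dim) / normalizer + offset, degree);
}

// Gaussian kernel: exp(-||x-y||^2 / (2*sigma^2)) / normalizer.
double gaussK(const double *x, const double *y, int dim,
              double normalizer, double sigma) {
  double d2 = 0;
  for (int i = 0; i < dim; i++) d2 += (x[i] - y[i]) * (x[i] - y[i]);
  return std::exp(-d2 / (2 * sigma * sigma)) / normalizer;
}

int main() {
  double x[] = {1.0, 0.0, 2.0};
  double y[] = {0.5, 1.0, 1.0};
  // With normalizer = 1, offset = 1, degree = 2, sigma = 1:
  std::printf("linear:     %f\n", linearK(x, y, 3, 1.0));        // 2.5
  std::printf("polynomial: %f\n", polyK(x, y, 3, 1.0, 1.0, 2));  // 12.25
  std::printf("gaussian:   %f\n", gaussK(x, y, 3, 1.0, 1.0));    // ~0.3247
  return 0;
}

The sparse01 routines specialize the same formulas for binary features, where the dot product reduces to counting indices present in both vectors, and the squared distance to counting indices present in exactly one.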
