
📄 svm.cpp

📁 Machine-learning algorithms written in C++. Lemga is a C++ package which consists of classes for several learning models and gener…
💻 CPP
/** @file
 *  $Id: svm.cpp 2538 2006-01-08 10:01:17Z ling $
 */

#include <assert.h>
#include <cmath>
#include <svm.h>
#include "svm.h"

REGISTER_CREATOR(lemga::SVM);

// In order to access nSV, we have to copy svm_model here.
// Comments removed. Please see svm.cpp for details.
// Any direct use of svm_model is marked with /* direct access svm_model */
struct svm_model {
    svm_parameter param;
    int nr_class;
    int l;
    svm_node **SV;
    double **sv_coef;
    double *rho;
    double *probA;
    double *probB;
    int *label;
    int *nSV;
    int free_sv;
};

namespace lemga {

typedef struct svm_node* p_svm_node;

struct SVM_detail {
    struct svm_parameter param;
    struct svm_problem prob;
    struct svm_model *model;
    struct svm_node *x_space;
    UINT n_class, n_sv;
    int *labels;

    SVM_detail ();
    SVM_detail (const SVM_detail&);
    ~SVM_detail () {
        clean_model(); clean_data(); svm_destroy_param(&param); }
    bool fill_svm_problem (const pDataSet&, const pDataWgt&);
    bool train (const pDataSet&, const pDataWgt&);
    void clean_model ();
    void clean_data ();
};

SVM_detail::SVM_detail () : model(0), x_space(0) {
    // default LIBSVM parameters, copied from svm-train.c
    param.svm_type = C_SVC;
    param.kernel_type = RBF;
    param.degree = 3;
    param.gamma = 0;
    param.coef0 = 0;
    param.nu = 0.5;
    param.cache_size = 40;
    param.C = 1;
    param.eps = 1e-3;
    param.p = 0.1;
    param.shrinking = 1;
    param.probability = 0;
    param.nr_weight = 0;
    param.weight_label = NULL;
    param.weight = NULL;
}

SVM_detail::SVM_detail (const SVM_detail& s)
    : param(s.param), model(0), x_space(0) {
    // param is copied because the pointers in param are NULL
    assert(!s.param.weight && !s.param.weight_label);
    assert(!s.model);   // we don't know how to copy a model
    assert(!s.x_space); // we assume s is not trained.
}

void SVM_detail::clean_model () {
    if (!model) return;
    svm_destroy_model(model);
    delete[] labels;
    model = 0;
}

void SVM_detail::clean_data () {
    if (!x_space) return;
    delete[] prob.x; delete[] prob.y;
    delete[] prob.W;
    delete[] x_space; x_space = 0;
}

p_svm_node fill_svm_node (const Input& x, struct svm_node *pool) {
    for (UINT j = 0; j < x.size(); ++j, ++pool) {
        pool->index = j+1;
        pool->value = x[j];
    }
    pool->index = -1;
    return ++pool;
}

bool SVM_detail::fill_svm_problem (const pDataSet& ptd, const pDataWgt& ptw) {
    assert(ptd->size() == ptw->size());
    const UINT n_samples = ptd->size();
    if (n_samples == 0 || ptd->y(0).size() != 1) return false;
    const UINT n_in = ptd->x(0).size();

    clean_data();
    prob.l = n_samples;
    prob.x = new p_svm_node[n_samples];
    prob.y = new double[n_samples];
    prob.W = new double[n_samples];
    x_space = new struct svm_node[n_samples*(n_in+1)];
    if (!x_space) return false;

    struct svm_node *psn = x_space;
    for (UINT i = 0; i < n_samples; ++i) {
        prob.x[i] = psn;
        prob.y[i] = ptd->y(i)[0];
        psn = fill_svm_node(ptd->x(i), psn);
        prob.W[i] = (*ptw)[i] * n_samples;
    }
    return true;
}

bool SVM_detail::train (const pDataSet& ptd, const pDataWgt& ptw) {
    if (!fill_svm_problem(ptd, ptw)) {
        std::cerr << "Error in filling SVM problem (training data)\n";
        return false;
    }
    const char* error_msg = svm_check_parameter(&prob,&param);
    if (error_msg) {
        std::cerr << "Error: " << error_msg << '\n';
        return false;
    }

    clean_model();
    model = svm_train(&prob, &param);
    n_class = svm_get_nr_class(model);
    labels = new int[n_class];
    svm_get_labels(model, labels);
    n_sv = model->l;    /* direct access svm_model */
    return true;
}

namespace kernel {

void Linear::set_params (SVM_detail* sd) const {
    sd->param.kernel_type = ::LINEAR;
}

void Polynomial::set_params (SVM_detail* sd) const {
    sd->param.kernel_type = ::POLY;
    sd->param.degree = degree;
    sd->param.gamma = gamma;
    sd->param.coef0 = coef0;
}

void RBF::set_params (SVM_detail* sd) const {
    sd->param.kernel_type = ::RBF;
    sd->param.gamma = gamma;
}

void Sigmoid::set_params (SVM_detail* sd) const {
    sd->param.kernel_type = ::SIGMOID;
    sd->param.gamma = gamma;
    sd->param.coef0 = coef0;
}

void Stump::set_params (SVM_detail* sd) const {
    sd->param.kernel_type = ::STUMP;
}

void Perceptron::set_params (SVM_detail* sd) const {
    sd->param.kernel_type = ::PERCEPTRON;
}

} // namespace kernel

bool SVM::serialize (std::ostream& os, ver_list& vl) const {
    OBJ_FUNC_UNDEFINED("serialize");
}

bool SVM::unserialize (std::istream& is, ver_list& vl, const id_t& d) {
    OBJ_FUNC_UNDEFINED("unserialize");
}

SVM::SVM (const kernel::Kernel& k, UINT n_in) : LearnModel(n_in, 1), ker(k) {
    detail = new struct SVM_detail;
    ker.set_params(detail);
}

SVM::SVM (const SVM& s) : LearnModel(s), ker(s.ker) {
    detail = new struct SVM_detail(*s.detail);
}

SVM::~SVM () {
    delete detail;
}

const SVM& SVM::operator= (const SVM& s) {
    if (&s == this) return *this;

    /* unable to also copy the kernel, so break out for now */
    OBJ_FUNC_UNDEFINED("operator=");

    LearnModel::operator=(s);
    delete detail;
    detail = new struct SVM_detail(*s.detail);
    return *this;
}

REAL SVM::C () const {
    return detail->param.C;
}

void SVM::set_C (REAL c) {
    detail->param.C = c;
}

UINT SVM::n_support_vectors () const {
    assert(detail->model);
    return detail->n_sv;
}

REAL SVM::kernel (const Input& x1, const Input& x2) const {
#ifndef NDEBUG
    struct svm_node sx1[n_input()+1], sx2[n_input()+1];
    fill_svm_node(x1, sx1);
    fill_svm_node(x2, sx2);
    REAL svmk = svm_kernel(sx1, sx2, detail->param);
#endif
    REAL k = ker(x1, x2);
    assert(std::fabs(svmk - k) < EPSILON);
    return k;
}

void SVM::initialize () {
    detail->clean_model();
}

REAL SVM::train () {
    assert(n_input() == ptd->x(0).size() && n_output() == ptd->y(0).size());
    assert(n_samples == ptd->size());
    if (!detail->train(ptd, ptw)) exit(-1);
    return -1; // not the training error
}

REAL SVM::margin_of (const Input& x, const Input& y) const {
    assert(std::fabs(std::fabs(y[0]) - 1) < INFINITESIMAL);
    return signed_margin(x) * y[0];
}

REAL SVM::signed_margin (const Input& x) const {
    assert(x.size() == n_input());
    assert(detail && detail->model);
    assert(detail->n_class > 0 && detail->n_class <= 2);

    if (detail->n_class == 1) return detail->labels[0];

    struct svm_node sx[n_input()+1];
    fill_svm_node(x, sx);
    REAL m;
    svm_predict_values(detail->model, sx, &m);
    if (detail->labels[0] < detail->labels[1]) m = -m;

#ifndef NDEBUG
    const UINT nsv = n_support_vectors();
    REAL sum = bias();
    for (UINT i = 0; i < nsv; ++i)
        sum += support_vector_coef(i) * ker(support_vector(i), x);
#endif
    assert(std::fabs(sum - m) < EPSILON);
    return m;
}

REAL SVM::w_norm () const {
    assert(detail && detail->model);
    assert(detail->n_class == 2);
    const UINT nsv = n_support_vectors();
    REAL sum = 0;
    for (UINT i = 0; i < nsv; ++i) {
        for (UINT j = i; j < nsv; ++j) {
            REAL ip = ker(support_vector(i), support_vector(j))
                * support_vector_coef(i) * support_vector_coef(j);
#ifndef NDEBUG
            /* direct access svm_model */
            REAL ve = svm_kernel(detail->model->SV[i],
                                 detail->model->SV[j], detail->param)
                * detail->model->sv_coef[0][i] * detail->model->sv_coef[0][j];
#endif
            assert(std::fabs(ip - ve) < EPSILON);
            sum += ip + (i==j? 0 : ip);
        }
    }
    assert(sum > 0);
    return std::sqrt(sum);
}

Output SVM::operator() (const Input& x) const {
    assert(x.size() == n_input());
    assert(detail && detail->model);
    REAL y = (signed_margin(x) > 0? 1 : -1);

#ifndef NDEBUG
    struct svm_node sx[n_input()+1];
    fill_svm_node(x, sx);
    REAL l = svm_predict(detail->model, sx);
#endif
    assert(std::fabs(y - l) < INFINITESIMAL);
    return Output(1, y);
}

Input SVM::support_vector (UINT i) const {
    assert(i < n_support_vectors());
    assert(detail->n_class == 2);
    /* direct access svm_model */
    svm_node *SVi = detail->model->SV[i];
    Input sv(_n_in, 0);
    for (; SVi->index != -1; ++SVi) {
        assert(SVi->index > 0 && (UINT) SVi->index <= sv.size());
        sv[SVi->index-1] = SVi->value;
    }
    return sv;
}

REAL SVM::support_vector_coef (UINT i) const {
    assert(i < n_support_vectors());
    assert(detail->n_class == 2);
    /* direct access svm_model */
    REAL coef = detail->model->sv_coef[0][i];
    return (detail->labels[0] < detail->labels[1])?
        -coef : coef;
}

REAL SVM::bias () const {
    assert(detail && detail->model);
    assert(detail->n_class == 2);
    /* direct access svm_model */
    REAL rho = detail->model->rho[0];
    return (detail->labels[0] < detail->labels[1])?
        rho : -rho;
}

} // namespace lemga
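The file above only wraps LIBSVM; user code drives it through the lemga::SVM class declared in svm.h. The minimal sketch below shows how that interface might be used. It is illustrative only: the include paths, the kernel::RBF(gamma) constructor, load_data(), and set_train_data() are assumed to come from the rest of the Lemga package and are not defined in this file.

// Illustrative usage sketch (not part of svm.cpp). Assumptions are marked:
// the <lemga/...> header paths, the kernel::RBF(gamma) constructor,
// load_data(), and set_train_data() come from the rest of the Lemga package.
#include <fstream>
#include <iostream>
#include <lemga/svm.h>       // assumed install path of the SVM wrapper
#include <lemga/kernel.h>    // assumed header declaring kernel::RBF etc.

int main () {
    using namespace lemga;

    const UINT n_in = 4;              // input dimension of the training data
    kernel::RBF rbf(0.5);             // assumed constructor: RBF(gamma)
    SVM svm(rbf, n_in);               // constructor defined in this file
    svm.set_C(10);                    // soft-margin cost, stored in param.C

    std::ifstream fd("train.dat");
    pDataSet ptd = load_data(fd, 100, n_in, 1);  // assumed Lemga data loader
    svm.set_train_data(ptd);          // assumed LearnModel interface
    svm.train();                      // runs SVM_detail::train() -> svm_train()

    Input x(n_in, 0.0);
    std::cout << "prediction  " << svm(x)[0] << '\n'
              << "margin      " << svm.signed_margin(x) << '\n'
              << "#SV         " << svm.n_support_vectors() << '\n';
    return 0;
}

In debug builds the wrapper cross-checks itself against LIBSVM: signed_margin() recomputes the decision value as bias() plus the sum of support_vector_coef(i) * ker(support_vector(i), x), and w_norm() returns the square root of the double sum of coef_i * coef_j * K(sv_i, sv_j), asserting in both cases that the result agrees with the value obtained through direct access to svm_model.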
