ploylinear.cpp

Locally weighted polynomial regression (LWPR) is a popular instance-based algorithm for learning c…
/*
   File:        lwpr.c
   Author:      Anthony DiLello
                Carnegie Mellon University
   Created:     Mon Jul  7 14:38:38 EDT 1997
   Description: Educational locally weighted regression code.
*/


#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <ctype.h>


#define SQR(x) ((x)*(x))
#define EPSILON 1e-10 /* A constant used for ridge regression */


/* A vector is just an array of doubles */
double *alloc_vector(int size)
{
  return (double *) malloc(size * sizeof(double));
}


/* Uses an array-of-arrays matrix representation,
so that matrix[i] points to an array of "cols" doubles
for all i with 0 <= i < rows */
double **alloc_matrix(int rows,int cols)
{
  int i;
  double **mx = (double **)malloc(rows * sizeof(double *));

  for ( i = 0 ; i < rows ; i++ )
    mx[i] = (double *) malloc(cols * sizeof(double));

  return mx;
}


/* Deallocates memory for a vector */
void free_vector(double *vector)
{
  free(vector);
}


/* Deallocates memory for a matrix */
void free_matrix(double **matrix,int rows)
{
  int i;

  for(i=0; i<rows; i++)
    free(matrix[i]);
  
  free(matrix);
}


/*
Uses cholesky decomposition (based on source code found in 
Numerical Recipes in C, Second Edition)
to fill in the values x_vec[0] ... x_vec[size-1]
so that (as a matrix equation)
A x = b
where A (a_mx) is a size * size symmetric positive definite matrix and
b (b_vec) is a vector of "size" elements

Assumes a_mx, size, and b_vec are initialized and memory for
x_vec has already been allocated.

Note: This function will overwrite a_mx */
void solve_spd_equation(double **a_mx,
			int size,
			double *b_vec,
			double *x_vec)
{
  double *diag_vec;
  int i,j,k;
  double sum;

  diag_vec = alloc_vector(size);
  
  for (i=1;i<=size;i++)  /* Performs the decomposition */
  {
    for (j=i;j<=size;j++) 
    {
      for (sum=a_mx[i-1][j-1],k=i-1;k>=1;k--) 
	sum -= a_mx[i-1][k-1]*a_mx[j-1][k-1];

      if (i == j) 
      {
	if (sum <= 0.0)  /* Indicates that the decomposition failed      */
	{                /* (a_mx is not positive definite).  Sets x_vec */
	  for(i=0; i<size; i++)          /* to 0 by default and returns. */
	    x_vec[i] = 0;
	  free_vector(diag_vec);  /* free the scratch vector before returning */
	  return;
	}
	diag_vec[i-1]=sqrt(sum);
      } 
      else a_mx[j-1][i-1]=sum/diag_vec[i-1];
    }
  }
  
  for (i=1;i<=size;i++)  /* Performs the backsubstitution */
  {
    for (sum=b_vec[i-1],k=i-1;k>=1;k--) 
      sum -= a_mx[i-1][k-1]*x_vec[k-1];
    x_vec[i-1]=sum/diag_vec[i-1];
  }
  for (i=size;i>=1;i--) 
  {
    for (sum=x_vec[i-1],k=i+1;k<=size;k++) 
      sum -= a_mx[k-1][i-1]*x_vec[k-1];
    x_vec[i-1]=sum/diag_vec[i-1];
  }
 
  free_vector(diag_vec);
}
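
/* A minimal usage sketch for solve_spd_equation (the helper name
   demo_solve_spd is illustrative, not from the original file): solves
   the 2x2 SPD system [[4,1],[1,3]] x = [1,2], whose exact solution is
   x = (1/11, 7/11).  Note that a_mx is overwritten by the call. */
void demo_solve_spd(void)
{
  double **a_mx = alloc_matrix(2, 2);
  double b_vec[2] = {1.0, 2.0};
  double x_vec[2];

  a_mx[0][0] = 4.0; a_mx[0][1] = 1.0;
  a_mx[1][0] = 1.0; a_mx[1][1] = 3.0;

  solve_spd_equation(a_mx, 2, b_vec, x_vec);

  printf("x = (%g, %g)\n", x_vec[0], x_vec[1]);  /* approx 0.0909, 0.6364 */

  free_matrix(a_mx, 2);
}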


/*
This function computes the weighted X^transpose * X 
matrix and weighted X^transpose * Y vector (where X 
and Y are the unweighted input terms matrix and output
vector for the locally weighted regression).

Sets wxtx[i][j] to be the sum over k of 
   weights[k] * weights[k] * terms[k][i] * terms[k][j]

Adds EPSILON to the main diagonal of wxtx to ensure the matrix 
is non-singular. This technique is called "Ridge regression."

Sets wxty[i] to be the sum over k of 
   weights[k] * weights[k] * terms[k][i] * output[k]

terms_mx: a matrix with num_points rows and num_terms columns
output_vec: has "num_points" elements
weights_vec: has "num_points" elements
wxtx_mx: has num_terms rows and num_terms columns
wxty_vec: has num_terms elements 

Assumes terms_mx, output_vec, and weights_vec are initialized 
and memory of wxtx_mx and wxty_vec has already been allocated */
void compute_regression_matrices(double **terms_mx,
				 double *output_vec,
				 double *weights_vec,
				 int num_points,
				 int num_terms,
				 double **wxtx_mx,
				 double *wxty_vec)
{
  int i, j, k;

  for(i=0; i<num_terms; i++)   /* computes wxtx_mx */
    for(j=0; j<num_terms; j++)
    {
      wxtx_mx[i][j] = 0;
      for(k=0; k<num_points; k++)
	wxtx_mx[i][j] += SQR(weights_vec[k])*terms_mx[k][i]*terms_mx[k][j];
    }

  for(i=0; i<num_terms; i++)  /* computes wxty_vec and adds EPSILON */
  {                           /* to the main diagonal of wxtx_mx.   */
    wxtx_mx[i][i] += EPSILON;
    wxty_vec[i] = 0;
    for(k=0; k<num_points; k++)
      wxty_vec[i] += SQR(weights_vec[k])*terms_mx[k][i]*output_vec[k];
  }
}
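
/* In matrix form (an explanatory note): with W = diag(weights_vec),
   X = terms_mx and y = output_vec, the quantities computed above are
       wxtx = X^T W^2 X + EPSILON * I     and     wxty = X^T W^2 y,
   so the beta solving  wxtx * beta = wxty  minimizes the ridge-penalized
   weighted squared error  ||W (X beta - y)||^2 + EPSILON * ||beta||^2. */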


/*
Updates weights_vec so that weights_vec[k] =
  weight_function(DistSquared(inputs_mx[k], query_input_vec), kernel_width)
Assumes memory for weights_vec has been allocated and
all other inputs have been initialized. */
void compute_weights(double **inputs_mx,
		     double *query_input_vec,
		     double kernel_width,
		     double (*weight_function)(double distance_sqd,
					       double kernel_width),
		     int num_inputs,
		     int num_points,
		     double *weights_vec)
{
  double dsqd;
  int i, k;

    /* for each point, computes the squared distance 
       from the query_input and calls weight_function */
  for(k=0; k<num_points; k++) 
  {
    dsqd = 0;
    for(i=0; i<num_inputs; i++)
      dsqd += SQR(inputs_mx[k][i]-query_input_vec[i]);

    weights_vec[k] = weight_function(dsqd, kernel_width);
  }
}


/* An example weight function */
double gaussian_weight_function(double dsqd,double kw)
{
  return(exp(-dsqd/(2.0 * SQR(kw))));
}
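
/* Another example weight function (an illustrative sketch; the name
   quadratic_weight_function is not from the original file): an
   Epanechnikov-style quadratic kernel that gives zero weight to points
   farther than one kernel width from the query.  Points with zero weight
   simply drop out of the regression sums; the EPSILON ridge term keeps
   wxtx non-singular even when few points receive weight. */
double quadratic_weight_function(double dsqd,double kw)
{
  double ratio = dsqd / SQR(kw);
  return (ratio < 1.0) ? (1.0 - ratio) : 0.0;
}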


/* Solves wxtx beta = wxty for beta. Note that wxtx is symmetric
   positive definite. Assumes memory for beta has already been 
   allocated and all other inputs have been initialized.  */
void compute_beta(double **wxtx_mx,
		  double *wxty_vec,
		  int num_terms,
		  double *beta_vec)
{
  solve_spd_equation(wxtx_mx, num_terms, wxty_vec, beta_vec);
}


/* computes and returns the prediction at the query 
   point, which is the dot product of beta and query_terms */
double compute_predict(double *beta_vec,int num_terms,
                       double *query_term_vec)
{
  int i;
  double val = 0;

  for(i=0; i<num_terms; i++)  /* taking the dot product */
    val += beta_vec[i] * query_term_vec[i];

  return val;
}


/* mallocs and returns the terms matrix so that the ith
row of result corresponds to the "terms" vector for the
i'th row of inputs. ("terms" vectors are the rows of the 
unweighted matrix used in locally weighted regression.) 

terms_function is assumed to be a function that does
two things. 
  (1) It returns (in *num_terms) the number of terms
      associated with n input dimensions (for example, linear
      regression with two inputs has three terms, and quadratic
      regression with two inputs has 6 terms).  It ALWAYS does
      this.
  (2) IF terms_vec is NULL it only does (1). But if
      terms_vec is non-NULL it also computes the term values
      from input_vec and stores them in terms_vec. */
double **compute_terms_mx(double **input_mx,
			  int num_inputs,
			  int num_points,
			  int *num_terms,
			  void (*terms_function)(double *input_vec,
						 int num_inputs,
						 int *num_terms,
						 double *terms_vec))
{
  int i;
  double **terms_mx;

     /* computing num_terms and allocating terms_mx */ 
  terms_function(NULL, num_inputs, num_terms, NULL);
  terms_mx = alloc_matrix(num_points, *num_terms);

     /* setting terms_mx */
  for(i=0; i<num_points; i++)
    terms_function(input_mx[i], num_inputs, num_terms, terms_mx[i]);

  return terms_mx;
}


/* An example terms function for linear regression.

Any terms function must do the following: it must ALWAYS set *num_terms
to the number of terms for num_inputs input dimensions, and, whenever
terms_vec is non-NULL, it must also fill terms_vec with the term values
computed from input_vec (see compute_terms_mx above). */
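
/* A minimal end-to-end sketch (illustrative, with made-up data) showing
   how the pieces fit together: build the terms matrix, weight the
   training points around a query, form the regression matrices, solve
   for beta, and predict.  It uses the linear_terms_function sketch
   above; the toy data and kernel_width = 1.0 are arbitrary choices. */
int main(void)
{
  int num_points = 4, num_inputs = 1, num_terms, i;
  double **inputs_mx = alloc_matrix(num_points, num_inputs);
  double *output_vec = alloc_vector(num_points);
  double query_input[1] = {1.5};
  double *query_terms, *weights_vec, *wxty_vec, *beta_vec;
  double **terms_mx, **wxtx_mx;
  double prediction;

  /* Toy data: output = 2 * input, so the prediction at 1.5
     should come out very close to 3.0 */
  for(i=0; i<num_points; i++)
  {
    inputs_mx[i][0] = (double) i;
    output_vec[i] = 2.0 * inputs_mx[i][0];
  }

  /* Terms matrix for the training inputs and for the query point */
  terms_mx = compute_terms_mx(inputs_mx, num_inputs, num_points,
			      &num_terms, linear_terms_function);
  query_terms = alloc_vector(num_terms);
  linear_terms_function(query_input, num_inputs, &num_terms, query_terms);

  /* Weights around the query, then the weighted regression matrices */
  weights_vec = alloc_vector(num_points);
  compute_weights(inputs_mx, query_input, 1.0, gaussian_weight_function,
		  num_inputs, num_points, weights_vec);

  wxtx_mx = alloc_matrix(num_terms, num_terms);
  wxty_vec = alloc_vector(num_terms);
  compute_regression_matrices(terms_mx, output_vec, weights_vec,
			      num_points, num_terms, wxtx_mx, wxty_vec);

  /* Solve for beta and predict at the query point */
  beta_vec = alloc_vector(num_terms);
  compute_beta(wxtx_mx, wxty_vec, num_terms, beta_vec);
  prediction = compute_predict(beta_vec, num_terms, query_terms);

  printf("prediction at %g is %g\n", query_input[0], prediction);

  /* Clean up */
  free_matrix(inputs_mx, num_points);
  free_matrix(terms_mx, num_points);
  free_matrix(wxtx_mx, num_terms);
  free_vector(output_vec);
  free_vector(query_terms);
  free_vector(weights_vec);
  free_vector(wxty_vec);
  free_vector(beta_vec);

  return 0;
}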