// nn-utility.h
/*
nn-utility (Provides neural networking utilities for C++ programmers)
Copyright (C) 2003 Panayiotis Thomakos
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
//To contact the author send an email to panthomakos@users.sourceforge.net
#ifndef _NN_H_
#define _NN_H_
#include <fstream>
#include <stdarg.h>
#include <math.h>
#include <stdlib.h>
#include <string.h> //strcpy() is used inline by the BITMAP constructor below
#include <string>
#include <iomanip>
#include <iostream>
using std::cout;
using std::cin;
using std::fstream;
using std::ios;
using std::setprecision;
using std::setw;
using std::ostream;
using std::istream;
#define NN_UTIL_SIZE 300
#define FILE_NAME_SIZE 100
#define RECURRENT 10
//typedef float MATRIX[NN_UTIL_SIZE][NN_UTIL_SIZE]; //Matrix of weights for layers
//typedef float VECTOR[NN_UTIL_SIZE]; //Input and output vectors
namespace nn_utility{
template<class T> class layer;
template<class T> class multi_layer;
template<class T> class BITMAP;
template<class T> class WIDROW_HOFF;
class SIGMOID;
class KOHEN;
class KOHEN_SOFM;
class RADIAL_BASIS;
class BINOMIAL;
class SGN;
class PNN;
class HOPEFIELD;
// template<class T>
// class shared_functions{
// public:
// };
template<class T>
class nn_utility_functions{
public:
typedef T VECTOR[NN_UTIL_SIZE];
typedef T MATRIX[NN_UTIL_SIZE][NN_UTIL_SIZE];
/*User defined function to receive input for training patterns*/
virtual void GetInput( int iteration, VECTOR &send, VECTOR &target );
virtual void GetInput( int iteration, MATRIX &send, MATRIX &target, int &inputs );
/*User defined function to determine whether or not to train after a result and target are presented*/
virtual bool CheckTrain( VECTOR output, VECTOR target, int length );
virtual bool CheckTrain( MATRIX output, MATRIX target, int length_of_output, int length );
/*Function to print a vector to the screen.*/
void PrintVector( char * str_, VECTOR &vector, int length );
void PrintVector( VECTOR &vector, int length );
void PrintMatrix( char *str_, MATRIX &matrix, int row, int col );
void PrintMatrix( MATRIX &matrix, int row, int col );
/*Function to print a vector to the screen exactly*/
void PrintVectorExact( VECTOR &vector, int length );
/*Function to load a vector with values*/
void LoadVectori( VECTOR &vector, int len, T value, ... );
void LoadVectorf( VECTOR &vector, int len, T value, ... );
/*Function for copying the contents of one vector to another vector*/
void CopyVector( VECTOR &destination, VECTOR source, int length );
/*Function to clear a vector*/
void ClearVector( VECTOR &vector, int length );
void ClearVector( VECTOR &vector, int length, T clear_value );
/*Add two vectors*/
void AddVectors( VECTOR &destination, VECTOR source1, VECTOR source2, int length );
/*Function to insert a layer at the end of a multi-layer network*/
bool Insert( layer<T> **START, layer<T> **ADD );
/*Function to insert a layer after the "First" layer*/
void InsertAfter( layer<T> **First, layer<T> **Second );
/*Function to display a multi-layer network through a screen dump*/
void DisplayLAYER( layer<T> **start );
/*Function to train a neural network for a given number of iterations*/
void train( layer<T> ** input, int iterations, T learning_rate, bool print_info );
void train( layer<T> **, layer<T> **, int, T, bool );
void train( layer<T> **TRACK, int iterations, T learning_rate );
};
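/* Illustrative usage sketch (not part of the library): the vector helpers above
   could be exercised roughly as follows, assuming LoadVectorf reads `len` values
   from the variable argument list and ClearVector fills a vector with the given
   constant. Any concrete layer class (SIGMOID is used here) inherits these helpers.

       nn_utility::SIGMOID net;
       nn_utility::SIGMOID::VECTOR a, b, sum;
       net.LoadVectorf( a, 3, 1.0f, 2.0f, 3.0f ); //a = { 1, 2, 3 }
       net.ClearVector( b, 3, 0.5f );             //b = { 0.5, 0.5, 0.5 }
       net.AddVectors( sum, a, b, 3 );            //element-wise sum
       net.PrintVector( "a + b", sum, 3 );        //labelled screen dump
*/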
/*Class of an individual layer in a neural network*/
//TODO: see if must derive from nn_utility_functions
template<class T>
class layer : public nn_utility_functions<T>{
public:
typedef T VECTOR[NN_UTIL_SIZE];
typedef T MATRIX[NN_UTIL_SIZE][NN_UTIL_SIZE];
layer(){ set_defaults(); };
void define( int, int );
void definei( int row_, int col_, T value, ... );
void definef( int row_, int col_, T value, ... );
void define( char *filename, int index );
void define( char *filename, int index, layer<T> **PREV );
void set_defaults( );
void FeedForward( VECTOR in, VECTOR &final );
void BackPropagate( VECTOR input, VECTOR target, T learning_rate, int length );
void CreateVector( VECTOR &vector, int column );
void Seti( T value, ... );
void Setf( T value, ... );
void SetBinary( int value, ... );
void SetColumn( int index, VECTOR to_load );
void Print();
void SetConstant( T n );
void annealing( VECTOR input, int len, VECTOR output, int length );
void set_annealing( VECTOR initial, int len, T temperature, T alpha, int M );
void write( ostream &output );
void read( istream &input );
void define_recurrent();
void add( layer<T> **ADD );
void FeedForward( MATRIX input, MATRIX &final, int inputs );
int THETA( int );
int F( int, int, int );
void FeedForward_recurrent( MATRIX input, MATRIX &final, int inputs );
void BackPropagate_recurrent( MATRIX input, MATRIX target, T learning_rate, int inputs );
void Merge( VECTOR &destination, VECTOR first, int len1, VECTOR second, int len2 );
void SetConstant_recurrent( T n );
void SetConstant_recurrent( int index, T n );
// layer<T> ** &operator<<( VECTOR );
// void &operator>>( VECTOR );
// void &operator<<( MATRIX );
// void &operator>>( MATRIX );
// void &operator>( layer<T> ** );
// void &operator=( MATRIX );
// void &operator=( VECTOR );
// VECTOR &operator[]( int );
// layer<T> ** &operator+( layer** <T> );
// layer<T> ** &operator*( layer** <T> );
//default layer data
int row, col;
MATRIX matrix, binary_number;
VECTOR result, weight;
bool binary_fire, network_recurrent;
layer<T> *Previous, *Next;
//Annealing data
VECTOR annealing_result, annealing_input;
T annealing_probability, annealing_temperature, annealing_energy, annealing_alpha;
int annealing_M, annealing_update, annealing_length;
bool annealing_on;
//recurrent data
int length;
layer<T> *layers[RECURRENT];
VECTOR history[RECURRENT][RECURRENT]; //NN_UTIL_SIZE
//have to check if this does anything:
VECTOR NULL_VECTOR;
/*User defined feed forward sweep function*/
virtual T function( VECTOR in, VECTOR weight, T bias, int length, bool output);
/*User defined function to update a layer in a back propagation sweep*/
virtual void UpdateLayer( VECTOR target, VECTOR &new_target, int length,
T learning_rate, VECTOR input, VECTOR result, bool );
};
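/* Illustrative sketch (assumed semantics, placeholder sizes and rates): a small
   two-layer network might be assembled along these lines, assuming define(row,col)
   sizes the weight matrix, Insert appends to the linked list headed by `network`,
   train drives the FeedForward/BackPropagate sweeps, and FeedForward follows the
   Next pointers through the whole chain. In practice GetInput and CheckTrain would
   be overridden in a class derived from SIGMOID to supply the training patterns.

       nn_utility::SIGMOID hidden, out_layer;
       nn_utility::layer<float> *network = 0, *h = &hidden, *o = &out_layer;
       hidden.define( 2, 3 );                        //2 inputs feeding 3 hidden units
       out_layer.define( 3, 1 );                     //3 hidden values feeding 1 output
       hidden.Insert( &network, &h );
       hidden.Insert( &network, &o );
       hidden.train( &network, 1000, 0.25f, true );  //1000 iterations, learning rate 0.25

       nn_utility::SIGMOID::VECTOR in, result;
       hidden.LoadVectorf( in, 2, 1.0f, 0.0f );
       network->FeedForward( in, result );           //forward sweep through the trained net
       hidden.PrintVector( result, 1 );
*/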
/*Accessory for reading text-defined bitmaps*/
template<class T>
class BITMAP{
public:
typedef T VECTOR[NN_UTIL_SIZE];
typedef T MATRIX[NN_UTIL_SIZE][NN_UTIL_SIZE];
BITMAP( char * file ){ strcpy( filename, file ); };
char filename[FILE_NAME_SIZE];
int readbitmap( VECTOR &vector, int index );
void noise( VECTOR &vector, int length, int type, int amplitude );
void noise( VECTOR &vector, int length, int amplitude );
void Print( VECTOR vector, int line, int length );
};
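/* Illustrative sketch (assumed semantics): BITMAP loads a text-defined pattern
   into a vector, optionally corrupts it with noise, and dumps it to the screen.
   The file name "digits.txt" and the pattern index are placeholders; readbitmap
   is assumed to return the length of the pattern it read.

       nn_utility::BITMAP<float> bmp( "digits.txt" );
       nn_utility::BITMAP<float>::VECTOR pattern;
       int length = bmp.readbitmap( pattern, 0 );  //load pattern #0 from the file
       bmp.noise( pattern, length, 2 );            //perturb the pattern (amplitude 2)
       bmp.Print( pattern, 10, length );           //print, wrapping every 10 values (assumed)
*/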
template< class T >
class WIDROW_HOFF : public layer<T>{
public:
WIDROW_HOFF() : layer<T>(){};
typedef T VECTOR[NN_UTIL_SIZE];
typedef T MATRIX[NN_UTIL_SIZE][NN_UTIL_SIZE];
T WidrowHoff( VECTOR in, VECTOR weight, T bias, int length );
T function( VECTOR in, VECTOR weight, T bias, int length, bool output );
/*The purpose of this function is to update a layer using the Widrow-Hoff learning rule.*/
void UpdateLayer( VECTOR target, VECTOR &new_target, int length, T learning_rate,
VECTOR input, VECTOR result, bool output );
};
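/* The Widrow-Hoff (LMS, or "delta") rule that gives this class its name adjusts
   each weight in proportion to the output error; UpdateLayer presumably applies
   something of the form
       weight[i] += learning_rate * ( target - result ) * input[i]
   for every weight in the layer. This is the textbook rule, stated here as an
   assumption about the implementation in the .cpp file. */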
class SIGMOID : public WIDROW_HOFF<float>{
public:
SIGMOID();
typedef float VECTOR[NN_UTIL_SIZE];
typedef float MATRIX[NN_UTIL_SIZE][NN_UTIL_SIZE];
float sigmoid( VECTOR, VECTOR, float, int );
float function( VECTOR, VECTOR, float, int, bool );
void sigmoid( VECTOR, VECTOR &, int, float, VECTOR, VECTOR, bool );
void UpdateLayer( VECTOR, VECTOR &, int, float, VECTOR, VECTOR, bool );
void sigmoid( VECTOR &, int );
};
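/* SIGMOID specializes WIDROW_HOFF<float>, presumably squashing the weighted sum
   with the logistic function
       f(x) = 1 / ( 1 + exp( -x ) )
   whose derivative f(x) * ( 1 - f(x) ) is what makes back-propagation through
   sigmoid layers convenient. These are standard facts about the logistic function;
   the exact member definitions live in the .cpp file. */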
class KOHEN : public layer<float>{
public:
KOHEN();
typedef float VECTOR[NN_UTIL_SIZE];
typedef float MATRIX[NN_UTIL_SIZE][NN_UTIL_SIZE];
bool SOFM_on;
int radius;
float function( VECTOR, VECTOR, float, int, bool );
void UpdateLayer( VECTOR, VECTOR &, int, float, VECTOR, VECTOR, bool );
};
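/* KOHEN appears to implement Kohonen-style competitive learning: the unit whose
   weight vector is closest to the input wins, and (with SOFM_on set) every unit
   within `radius` of the winner is nudged toward the input, roughly
       weight += learning_rate * ( input - weight ).
   This is the standard Kohonen/SOFM update, offered as an assumption; the header
   itself does not document the exact rule. */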
class KOHEN_SOFM : public KOHEN{
public:
KOHEN_SOFM() : KOHEN(){ SOFM_on = true; }
};
class RADIAL_BASIS : public SIGMOID {
public:
RADIAL_BASIS() : SIGMOID(){};
typedef float VECTOR[NN_UTIL_SIZE];
typedef float MATRIX[NN_UTIL_SIZE][NN_UTIL_SIZE];
float function( VECTOR, VECTOR, float, int, bool );
void UpdateLayer( VECTOR, VECTOR &, int, float, VECTOR, VECTOR, bool );
};
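/* A radial-basis unit typically responds to the distance between the input and
   its weight (centre) vector, for example
       f(x) = exp( -||x - w||^2 / ( 2 * sigma^2 ) ),
   rather than to a plain weighted sum. That is the usual reading of RADIAL_BASIS;
   the exact basis function used here is not spelled out in the header, so treat
   this as an assumption. */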
class BINOMIAL : public WIDROW_HOFF<float>{
public:
BINOMIAL();
typedef float VECTOR[NN_UTIL_SIZE];
typedef float MATRIX[NN_UTIL_SIZE][NN_UTIL_SIZE];
float function( VECTOR, VECTOR, float, int, bool );
};
class HOPEFIELD : public WIDROW_HOFF<int>{
public:
HOPEFIELD();
typedef int VECTOR[NN_UTIL_SIZE];
typedef int MATRIX[NN_UTIL_SIZE][NN_UTIL_SIZE];
void HopefieldMultiply( VECTOR Vector1, VECTOR Vector2 );
void HopefieldAdd( HOPEFIELD **Layer1, HOPEFIELD **Layer2 );
void HopefieldAddTo( VECTOR vect1, VECTOR vect2 );
void Hopefield( VECTOR &vector, int length );
};
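/* HOPEFIELD looks like a Hopfield associative memory: patterns are stored by
   summing their outer products into the weight matrix (with a zero diagonal), and
   recall iterates
       state[i] = sgn( sum over j of matrix[i][j] * state[j] )
   until the state stops changing. HopefieldMultiply/HopefieldAdd presumably build
   the weight matrix and Hopefield() runs the recall sweep; this is the textbook
   construction, given here as an assumption rather than documentation. */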
class SGN : public HOPEFIELD{
public:
SGN();
typedef int VECTOR[NN_UTIL_SIZE];
typedef int MATRIX[NN_UTIL_SIZE][NN_UTIL_SIZE];
int function( VECTOR input, VECTOR weight, int bias, int length, bool output );
};
class PNN : public layer<float>{
public:
PNN();
typedef float VECTOR[NN_UTIL_SIZE];
typedef float MATRIX[NN_UTIL_SIZE][NN_UTIL_SIZE];
float function( VECTOR in, VECTOR weight, float bias, int length, bool output );
};
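/* PNN is presumably a probabilistic neural network in the Parzen-window sense:
   each stored pattern contributes a kernel (typically Gaussian) centred on itself,
   and the class whose summed kernel response is largest wins. The header gives no
   further detail, so this is an assumption about what function() computes. */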
} //namespace nn_utility
#endif