📄 supported.cpp
#include "nn-utility.h"
#include <cmath>   // exp, sqrt
#include <cstring> // memset
using namespace nn_utility;
template<class T> T WIDROW_HOFF<T>::WidrowHoff( VECTOR in, VECTOR weight, T bias, int length ){
T result = bias;
for ( int i = 0; i < length; i++ )
result += in[i]*weight[i];
return result;
}
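/* Worked example of the weighted sum above, assuming a two-input node:
   for in = {1, 2}, weight = {0.5, -0.25} and bias = 0.1 the node computes
   0.1 + 1*0.5 + 2*(-0.25) = 0.1. */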
template<class T> T WIDROW_HOFF<T>::function( VECTOR in, VECTOR weight, T bias, int length, bool output ){ return WidrowHoff( in, weight, bias, length ); }
/*The purpose of this function is to update a layer using the Widrow-Hoff learning rule.*/
template<class T> void WIDROW_HOFF<T>::UpdateLayer( VECTOR target, VECTOR &new_target, int length, T learning_rate,
VECTOR input, VECTOR result, bool output ){
for ( int i = 0; i < length; i++ ){
T error = target[i] - result[i];
weight[i] += learning_rate*error;
for ( int e = 0; e < row; e++ ){
new_target[e] += error*matrix[e][i];
matrix[e][i] += learning_rate*input[e]*error;
}
}
}
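/* UpdateLayer applies the delta (LMS) rule: for each output node i with
   error e_i = target[i] - result[i], every incoming connection moves by
   delta_w = learning_rate * e_i * input[e]. A hypothetical one-step sketch:
   with learning_rate = 0.1, target = 1, result = 0.4 and input[e] = 2,
   e_i = 0.6 and matrix[e][i] grows by 0.1*0.6*2 = 0.12. */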
template class WIDROW_HOFF<int>;
template class WIDROW_HOFF<float>;
template class WIDROW_HOFF<double>;
SIGMOID::SIGMOID() : WIDROW_HOFF<float>(){};
float SIGMOID::sigmoid( VECTOR in, VECTOR weight, float bias, int length ){
float result = WidrowHoff( in, weight, bias, length );
return 1/( 1 + exp( -result ) );
}
float SIGMOID::function( VECTOR in, VECTOR weight, float bias, int length, bool output ){ return sigmoid( in, weight, bias, length ); }
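/* The activation above is the logistic function sigma(x) = 1/(1+exp(-x)),
   which squashes the Widrow-Hoff sum into (0,1); e.g. sigma(0) = 0.5 and
   sigma(x) -> 1 as x -> +inf. Its derivative, used in the update below,
   is sigma(x)*(1-sigma(x)). */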
void SIGMOID::sigmoid( VECTOR target, VECTOR &new_target, int length, float learning_rate, VECTOR input, VECTOR result, bool output ){
for ( int i = 0; i < col; i++ ){
float Error =
(output ?
( target[i] - result[i] )*( result[i] )*( 1 - result[i] ):
result[i]*( 1 - result[i] )*( target[i] ) );
weight[i] += learning_rate*Error;
for ( int e = 0; e < row; e++ ){
new_target[e] += Error*matrix[e][i];
matrix[e][i] += learning_rate*Error*input[e];
}
}
}
void SIGMOID::UpdateLayer( VECTOR target, VECTOR &new_target, int length, float learning_rate, VECTOR input, VECTOR result, bool output ){
sigmoid( target, new_target, length, learning_rate, input, result, output ); }
void SIGMOID::sigmoid( VECTOR &bitmap, int length ){
for ( int it = 0; it < length; it++ )
bitmap[it] = ( bitmap[it] == 1 ? 0.9 : 0.1 ); }
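/* Mapping the bitmap to 0.9/0.1 rather than 1/0 is a common trick with
   logistic outputs: the sigmoid only reaches 0 and 1 asymptotically, so
   softened targets keep the error finite and the weights from growing
   without bound during training. */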
KOHEN::KOHEN() : layer<float>(){};
/*The purpose of this function is to compute the Kohonen SOFM distance for a node during the feed-forward sweep.*/
float KOHEN::function( VECTOR in, VECTOR weight, float bias, int length, bool output ){
float result = 0;
for ( int kohen_i = 0; kohen_i < length; kohen_i++ ){
result += ( weight[ kohen_i ] - in[ kohen_i ] )*( weight[ kohen_i ] - in[ kohen_i ] ); }
return result;
}
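/* The value returned is the squared Euclidean distance ||weight - in||^2,
   so the winning node is the one with the SMALLEST output. For example,
   weight = {1, 0} and in = {0, 0} gives (1-0)^2 + (0-0)^2 = 1. */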
/*The purpose of this function is to update a layer in a Back Propagation sweep of a Kohonen SOFM network.*/
void KOHEN::UpdateLayer( VECTOR target, VECTOR &new_target, int length, float learning_rate, VECTOR input, VECTOR result, bool output ){
if ( SOFM_on ){
VECTOR descend;
int mins[NN_UTIL_SIZE];
memset(mins, 0, sizeof(mins));
for ( int r = 0; r < col; r++ ){
descend[r] = result[r];
mins[r] = r;}
int TMPint;
float TMP;
bool DONE;
do{
DONE = true;
for ( int CUR = 0; CUR < col-1; CUR++ ){
if ( descend[CUR] > descend[CUR+1] ){
TMP = descend[CUR];
descend[CUR] = descend[CUR+1];
descend[CUR+1] = TMP;
TMPint = mins[CUR];
mins[CUR] = mins[CUR+1];
mins[CUR+1] = TMPint;
DONE = false;
}
}
}while ( !DONE );
for ( int i = 0; i < radius; i++ ){
for ( int kohen_i = 0; kohen_i < row; kohen_i++ ){
matrix[kohen_i][mins[i]] += learning_rate*( input[kohen_i]-matrix[kohen_i][mins[i]] ); } }
}
else{
for ( int kohen_i = 0; kohen_i < row; kohen_i++ ){
matrix[kohen_i][radius] += learning_rate*( input[kohen_i]-matrix[kohen_i][radius] );}
}
}
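/* The SOFM branch sorts the nodes by distance and then nudges the weight
   vectors of the `radius` closest nodes toward the input with
   w += learning_rate*(x - w). A hypothetical one-step sketch: with
   learning_rate = 0.5, w = 0.2 and x = 1.0, the weight moves to
   0.2 + 0.5*(1.0 - 0.2) = 0.6, i.e. halfway toward the input. */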
float RADIAL_BASIS::function( VECTOR in, VECTOR weight, float bias, int length, bool output ){
float result = bias;
for ( int ii = 0; ii < length; ii++ ){ result += ( in[ii]-weight[ii] )*( in[ii]-weight[ii] ); }
return exp(-1*result);
}
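/* This is a Gaussian radial basis unit: with bias = 0 it returns
   exp(-||in - weight||^2), which is 1 when the input sits exactly on the
   centre (weight) and decays toward 0 as the input moves away. */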
/*The purpose of this function is to update a Radial Basis function layer.*/
void RADIAL_BASIS::UpdateLayer( VECTOR target, VECTOR &new_target, int length, float learning_rate, VECTOR input, VECTOR result, bool output ){
if ( output )
sigmoid( target, new_target, length, learning_rate, input, result, output );
}
BINOMIAL::BINOMIAL() : WIDROW_HOFF<float>(){};
float BINOMIAL::function( VECTOR in, VECTOR weight, float bias, int length, bool output ){
float result = WidrowHoff( in, weight, bias, length );
return ( result > 0 ? 1.0 : 0.0 );
}
SGN::SGN() : HOPEFIELD(){};
int SGN::function( VECTOR input, VECTOR weight, int bias, int length, bool output ){
int result = WidrowHoff( input, weight, bias, length );
return ( result > 0 ? 1 : -1 );
}
PNN::PNN() : layer<float>(){};
float PNN::function( VECTOR in, VECTOR weight, float bias, int length, bool output ){
float result = bias;
if ( output ){
for ( int tr = 0; tr < length; tr++ )
result += in[tr];
result /= length;
}
else{
for ( int tr = 0; tr < length; tr++ )
result += (weight[tr]-in[tr])*(weight[tr]-in[tr]);
result = sqrt( result );
}
return result;
}
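/* In this probabilistic-network variant the hidden (pattern) nodes return
   the Euclidean distance ||weight - in|| to a stored exemplar, and the
   output (summation) node averages the distances for its class; since
   these are distances rather than kernel values, the class with the
   SMALLEST average response is the best match. */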
HOPEFIELD::HOPEFIELD() : WIDROW_HOFF<int>(){};
void HOPEFIELD::HopefieldMultiply( VECTOR Vector1, VECTOR Vector2 ){
for ( int i = 0; i < row; i++ ){
for ( int e = 0; e < col; e++ ){
matrix[i][e] = Vector1[i]*Vector2[e];}}
}
void HOPEFIELD::HopefieldAdd( HOPEFIELD **Layer1, HOPEFIELD **Layer2 ){
for ( int i = 0; i < row; i++ ){
for ( int e = 0; e < col; e++ ){
matrix[i][e] = (*Layer1)->matrix[i][e]+(*Layer2)->matrix[i][e];}}
}
void HOPEFIELD::HopefieldAddTo( VECTOR vect1, VECTOR vect2 ){
for ( int i = 0; i < row; i++ ){
for ( int e = 0; e < col; e++ ){
matrix[i][e] += vect1[i]*vect2[e];}}
}
void HOPEFIELD::Hopefield( VECTOR &vector, int length ){
for ( int i = 0; i < length; i++ )
vector[i] = ( vector[i] > 0 ? 1 : -1 );
}
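/* The three helpers above implement Hebbian outer-product storage for a
   Hopfield memory: each stored pattern p contributes W += p * p^T, and a
   probe vector is recalled by thresholding W*v with the +1/-1 sign rule
   in Hopefield(). A worked 2x2 example, assuming the bipolar pattern
   p = {+1, -1}: HopefieldMultiply(p, p) stores
   W = [ +1 -1 ]
       [ -1 +1 ]. */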