esn.h
/***************************************************************************/
/*!
 *  \file esn.h
 *
 *  \brief implements the base class of an echo state network
 *
 *  \author Georg Holzmann, grh _at_ mur _dot_ at
 *  \date Sept 2007
 *
 *   ::::_aureservoir_::::
 *   C++ library for analog reservoir computing neural networks
 *
 *   This library is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU Lesser General Public
 *   License as published by the Free Software Foundation; either
 *   version 2.1 of the License, or (at your option) any later version.
 *
 ***************************************************************************/

#ifndef AURESERVOIR_ESN_H__
#define AURESERVOIR_ESN_H__

#include <iostream>
#include <map>
#include <algorithm>

#include "utilities.h"
#include "activations.h"
#include "init.h"
#include "simulate.h"
#include "train.h"

namespace aureservoir
{

/*!
 * \class ESN
 *
 * \brief class for a basic Echo State Network
 *
 * This class implements a basic Echo State Network as described
 * in articles by Herbert Jaeger on the following page:
 * \sa http://www.scholarpedia.org/article/Echo_State_Network
 *
 * The template argument T can be float or double. Single precision
 * (float) saves quite some computation time.
 *
 * The "echo state" approach looks at RNNs from a new angle. Large RNNs
 * are interpreted as "reservoirs" of complex, excitable dynamics.
 * Output units "tap" from this reservoir by linearly combining the
 * desired output signal from the rich variety of excited reservoir signals.
 * This idea leads to training algorithms where only the network-to-output
 * connection weights have to be trained. This can be done with known,
 * highly efficient linear regression algorithms.
 * from \sa http://www.faculty.iu-bremen.de/hjaeger/esn_research.html
 *
 * For more information and a complete documentation of this library
 * see \sa http://aureservoir.sourceforge.net
 *
 * \example "esn_example.cpp"
 * \example "slow_sine.py"
 */
template <typename T = float>
class ESN
{
 public:

  /// typedef of a Parameter Map
  typedef std::map<InitParameter,T> ParameterMap;

  typedef typename SPMatrix<T>::Type SPMatrix;
  typedef typename DEMatrix<T>::Type DEMatrix;
  typedef typename DEVector<T>::Type DEVector;

  /// Constructor
  ESN();

  /// Copy Constructor
  ESN(const ESN<T> &src);

  /// assignment operator
  const ESN& operator= (const ESN<T>& src);

  /// Destructor
  ~ESN();

  //! @name Algorithm interface
  //@{

  /*!
   * Initialization Algorithm for an Echo State Network
   * \sa class InitBase
   */
  void init() throw(AUExcept)
  { init_->init(); }

  /*!
   * Reservoir Adaptation Algorithm Interface
   * At the moment this is only the Gaussian-IP reservoir adaptation method
   * for tanh neurons.
   * \sa "Adapting reservoirs to get Gaussian distributions" by David Verstraeten,
   *     Benjamin Schrauwen and Dirk Stroobandt
   *
   * @param in matrix of input values (inputs x timesteps),
   *           the reservoir will be adapted by this number of timesteps.
   * @return mean value of differences between all parameters before and after
   *         adaptation, can be used to see if learning still makes progress.
   */
  double adapt(const DEMatrix &in) throw(AUExcept);
  /*!
   * Training Algorithm Interface
   * \sa class TrainBase
   *
   * @param in matrix of input values (inputs x timesteps)
   * @param out matrix of desired output values (outputs x timesteps)
   *            for teacher forcing
   * @param washout washout time in samples, used to get rid of the
   *                transient dynamics of the network starting state
   */
  inline void train(const DEMatrix &in, const DEMatrix &out, int washout)
    throw(AUExcept)
  { train_->train(in, out, washout); }

  /*!
   * Simulation Algorithm Interface
   * \sa class SimBase
   *
   * @param in matrix of input values (inputs x timesteps)
   * @param out matrix for output values (outputs x timesteps)
   */
  inline void simulate(const DEMatrix &in, DEMatrix &out)
  { sim_->simulate(in, out); }

  /*!
   * resets the internal state vector x of the reservoir to zero
   */
  void resetState()
  {
    std::fill_n( x_.data(), x_.length(), 0 );
    std::fill_n( sim_->last_out_.data(), outputs_, 0 );
  }

  //@}
  //! @name C-style Algorithm interface
  //@{

  /*!
   * C-style Reservoir Adaptation Algorithm Interface
   * (data will be copied into a FLENS matrix)
   * At the moment this is only the Gaussian-IP reservoir adaptation method
   * for tanh neurons.
   * \sa "Adapting reservoirs to get Gaussian distributions" by David Verstraeten,
   *     Benjamin Schrauwen and Dirk Stroobandt
   *
   * @param inmtx matrix of input values (inputs x timesteps),
   *              the reservoir will be adapted by this number of timesteps.
   * @return mean value of differences between all parameters before and after
   *         adaptation, can be used to see if learning still makes progress.
   */
  double adapt(T *inmtx, int inrows, int incols) throw(AUExcept);

  /*!
   * C-style Training Algorithm Interface
   * (data will be copied into a FLENS matrix)
   * \sa class TrainBase
   *
   * @param inmtx input matrix in row major storage (usual C array)
   *              (inputs x timesteps)
   * @param outmtx output matrix in row major storage (outputs x timesteps)
   *               for teacher forcing
   * @param washout washout time in samples, used to get rid of the
   *                transient dynamics of the network starting state
   */
  inline void train(T *inmtx, int inrows, int incols, T *outmtx,
                    int outrows, int outcols, int washout) throw(AUExcept);

  /*!
   * C-style Simulation Algorithm Interface with some additional
   * error checking.
   * (data will be copied into a FLENS matrix)
   * \sa class SimBase
   *
   * @param inmtx input matrix in row major storage (usual C array)
   *              (inputs x timesteps)
   * @param outmtx output matrix in row major storage (outputs x timesteps)
   * \attention Data must be already allocated!
   */
  inline void simulate(T *inmtx, int inrows, int incols, T *outmtx,
                       int outrows, int outcols) throw(AUExcept);

  /*!
   * C-style Simulation Algorithm Interface, for single step simulation
   * \sa class SimBase
   * \todo see if we can do this in python without this additional method
   *
   * @param invec input vector, size = inputs
   * @param outvec output vector, size = outputs
   * \attention Data must be already allocated!
   */
  inline void simulateStep(T *invec, int insize, T *outvec, int outsize)
    throw(AUExcept);

  //@}
  //! @name Additional Interface for Bandpass and IIR-Filter Neurons
  /// \todo rethink if this is consistent -> move into a new class?
  //@{

  /*!
   * Set lowpass/highpass cutoff frequencies for bandpass style neurons.
   * \sa class SimBP
   *
   * @param f1 vector with lowpass cutoff for all neurons (size = neurons)
   * @param f2 vector with highpass cutoffs (size = neurons)
   */
  void setBPCutoff(const DEVector &f1, const DEVector &f2) throw(AUExcept);

  /*!
   * Set lowpass/highpass cutoff frequencies for bandpass style neurons
   * (C-style Interface).
   *
   * @param f1vec vector with lowpass cutoff for all neurons (size = neurons)
   * @param f2vec vector with highpass cutoffs (size = neurons)
   */
  void setBPCutoff(T *f1vec, int f1size, T *f2vec, int f2size)
    throw(AUExcept);

  /*!
   * sets the IIR-Filter coefficients, like Matlab's filter object.
   *
   * @param B matrix with numerator coefficient vectors (m x nb)
   *          m  ... nr of parallel filters (neurons)
   *          nb ... nr of filter coefficients
   * @param A matrix with denominator coefficient vectors (m x na)
   *          m  ... nr of parallel filters (neurons)
   *          na ... nr of filter coefficients
   * @param series nr of serial IIR filters, e.g. if series=2 the coefficients
   *               B and A will be divided in half and calculated with
   *               2 serial IIR filters
   */
  void setIIRCoeff(const DEMatrix &B, const DEMatrix &A, int series=1)
    throw(AUExcept);

  /*!
   * sets the IIR-Filter coefficients, like Matlab's filter object
   * (C-style Interface).
   *
   * @param bmtx matrix with numerator coefficient vectors (m x nb)
   *             m  ... nr of parallel filters (neurons)
   *             nb ... nr of filter coefficients
   * @param amtx matrix with denominator coefficient vectors (m x na)
   *             m  ... nr of parallel filters (neurons)
   *             na ... nr of filter coefficients
   * @param series nr of serial IIR filters, e.g. if series=2 the coefficients
   *               B and A will be divided in half and calculated with
   *               2 serial IIR filters
   */
  void setIIRCoeff(T *bmtx, int brows, int bcols, T *amtx, int arows,
                   int acols, int series=1) throw(AUExcept);

  //@}
  //! @name GET parameters
  //@{

  /*!
   * posts current parameters to stdout
   * \todo maybe return an output stream (if stdout is not useful)
   *       or just use the << operator?
   */
  void post();

  /// @return reservoir size (nr of neurons)
  int getSize() const { return neurons_; };

  /// @return nr of inputs to the reservoir
  int getInputs() const { return inputs_; };

  /// @return nr of outputs from the reservoir
  int getOutputs() const { return outputs_; };

  /// @return current noise level
  double getNoise() const { return noise_; }

  /*!
   * returns an initialization parameter from the parameter map
   *
   * @param key the requested parameter
   * @return the value of the parameter
   */
  T getInitParam(InitParameter key) { return init_params_[key]; }

  /// @return initialization algorithm
  InitAlgorithm getInitAlgorithm() const
  { return static_cast<InitAlgorithm>(net_info_.at(INIT_ALG)); }

  /// @return training algorithm
  TrainAlgorithm getTrainAlgorithm() const
  { return static_cast<TrainAlgorithm>(net_info_.at(TRAIN_ALG)); }
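/* --------------------------------------------------------------------------
 * Usage sketch (illustration, not part of the header above): a minimal
 * train/simulate cycle using the C-style interface, which takes plain
 * row-major arrays (channels x timesteps). The signal length, washout and
 * the sine signals are made up; only methods declared above are used, and
 * the default-constructed network is assumed to come with usable default
 * parameters.
 * -------------------------------------------------------------------------- */
#include "esn.h"
#include <vector>
#include <cmath>

int main()
{
  aureservoir::ESN<double> net;              // default-constructed network
  const int inputs  = net.getInputs();       // use whatever the defaults are
  const int outputs = net.getOutputs();
  const int steps = 500, washout = 100;      // illustrative values

  net.init();                                // run the initialization algorithm

  // row-major storage: element (channel i, timestep t) sits at [i*steps + t]
  std::vector<double> in(inputs*steps), target(outputs*steps), out(outputs*steps);
  for (int i = 0; i < inputs; ++i)
    for (int t = 0; t < steps; ++t)
      in[i*steps + t] = std::sin(0.2*t);
  for (int o = 0; o < outputs; ++o)
    for (int t = 0; t < steps; ++t)
      target[o*steps + t] = std::sin(0.2*(t+1));   // teach one-step prediction

  // train the output weights, discarding the first `washout` transient steps
  net.train(&in[0], inputs, steps, &target[0], outputs, steps, washout);

  // run the trained network on the same input; output must be preallocated
  net.resetState();
  net.simulate(&in[0], inputs, steps, &out[0], outputs, steps);

  return 0;
}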
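/* --------------------------------------------------------------------------
 * Sketch: repeated Gaussian-IP reservoir adaptation via the C-style adapt()
 * declared above. adapt() returns the mean parameter change of one pass, so
 * it can be called until that value stops shrinking. The threshold and the
 * pass limit are made-up illustrative values.
 * -------------------------------------------------------------------------- */
#include "esn.h"

void adaptReservoir(aureservoir::ESN<double> &net,
                    double *inmtx, int inrows, int incols)
{
  const double threshold = 1e-5;   // made-up convergence criterion
  const int maxPasses = 50;        // made-up safety limit

  for (int pass = 0; pass < maxPasses; ++pass)
  {
    // mean difference of all parameters before and after this pass
    double diff = net.adapt(inmtx, inrows, incols);
    if (diff < threshold)
      break;                       // adaptation no longer makes progress
  }
}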
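/* --------------------------------------------------------------------------
 * Sketch: sample-by-sample processing with simulateStep(), e.g. for
 * streaming use of an already trained network. Input and output blocks are
 * row-major (channels x timesteps), and the per-step buffers are
 * preallocated as the \attention note above requires. The function name and
 * buffer layout are illustrative.
 * -------------------------------------------------------------------------- */
#include "esn.h"
#include <vector>

void processStream(aureservoir::ESN<double> &net,
                   const double *input, double *output, int steps)
{
  const int ins = net.getInputs(), outs = net.getOutputs();
  std::vector<double> invec(ins), outvec(outs);

  for (int t = 0; t < steps; ++t)
  {
    for (int i = 0; i < ins; ++i)
      invec[i] = input[i*steps + t];                 // gather one input frame
    net.simulateStep(&invec[0], ins, &outvec[0], outs);
    for (int o = 0; o < outs; ++o)
      output[o*steps + t] = outvec[o];               // scatter one output frame
  }
}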
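/* --------------------------------------------------------------------------
 * Sketch: giving every reservoir neuron its own band via setBPCutoff().
 * This assumes the DEVector typedef resolves to a FLENS dense vector with
 * 1-based element access, and that a bandpass simulation algorithm
 * (\sa class SimBP) has been selected beforehand, which is not shown in
 * this excerpt. The cutoff values are made up for illustration.
 * -------------------------------------------------------------------------- */
#include "esn.h"

void makeBandpassReservoir(aureservoir::ESN<double> &net)
{
  typedef aureservoir::ESN<double>::DEVector Vec;
  const int n = net.getSize();

  Vec f1(n), f2(n);
  for (int i = 1; i <= n; ++i)     // FLENS containers index from 1
  {
    f1(i) = 0.5  / i;              // lowpass cutoff, illustrative value
    f2(i) = 0.05 / i;              // highpass cutoff below the lowpass
  }
  net.setBPCutoff(f1, f2);
}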