network.h
   * reset_ssab() or, if you want to free the memory used for SuperSab, use
   * free_ssab().
   * \return -1 on failure, number of weights of the net otherwise.
   */
  int begin_ssab ();

  /*!\brief Train a network in SuperSab mode.
   *
   * Before calling this routine, begin_ssab() should have been called to
   * begin SuperSab training.
   *
   * Furthermore, for the current input/output pair, compute() and
   * compute_output_error() should have been called to compute outputs
   * for the given inputs and to prepare the neural network for training by
   * computing the output error. This routine performs the actual training
   * by backpropagating the output error through the layers and changing
   * the weights.
   *
   * The best way to use SuperSab is in combination with batch training,
   * using train_batch() for the training and end_batch_ssab() at the end
   * of every epoch.
   */
  void train_ssab ();

  /*!\brief Reset the learning rates of the network to learning_rate in
   * SuperSab mode.
   *
   * Precondition: is_ssab_active()
   * \return -1 on failure (SuperSab mode is not active), the number of
   * weights of the network otherwise.
   */
  int reset_ssab ();

  /*!\brief Free the memory used for SuperSab and end SuperSab mode.
   *
   * After the call of free_ssab(), the values of the learning rates are
   * lost and SuperSab mode is off.
   */
  void free_ssab ();

  /*!\brief Write SuperSab learning rates to a binary file.
   * \param filename Pointer to name of file to write to.
   * \return true on success, false on failure.
   */
  bool save_ssab (const char *filename) const;

  /*!\brief Load SuperSab learning rates from a binary file.
   * \param filename Pointer to name of file to read from.
   * \return true on success, false on failure.
   */
  bool load_ssab (const char *filename);

  /*!\brief Print the learning rates for SuperSab mode.
   * \return number of weights in the network,
   * -1 if SuperSab mode is not active.
   */
  int ssab_print_nus () const;

  /*!\brief Compute some statistics about the learning rates in SuperSab mode.
   * \param average the average of the learning rates
   * \param max the maximum value of the learning rates
   * \param min the minimum value of the learning rates
   * \param n_max number of learning rates equal to max
   * \param n_min number of learning rates equal to min
   * \return -1 if SuperSab mode is not active, number of weights of the
   * network otherwise.
   */
  int ssab_stats (double &average, double &max, double &min,
                  int &n_max, int &n_min);

  /****************************************
   * Batch Training
   ****************************************/

  /*!\brief Begin training in batch mode. */
  void begin_batch ();

  /*!\brief Train a network in batch mode.
   *
   * Before calling this routine, begin_batch() should have been
   * called (at the start of the batch) to begin batch training.
   * Furthermore, for the current input/target pair, compute() and
   * compute_output_error() should have been called to compute outputs
   * for the given inputs and to prepare the neural network for training
   * by computing the output error using the given targets. This routine
   * performs the actual training by backpropagating the output error
   * through the layers, but does not change the weights. The weights
   * will be changed when (at the end of the batch) end_batch()
   * (or end_batch_ssab()) is called.
   */
  void train_batch ();

  /*!\brief End training in batch mode, adjusting the weights.
   *
   * Adjust the weights in the neural network according to the average
   * delta of all patterns in the batch.
   */
  void end_batch ();

  /*!\brief End training in batch mode, adjusting the weights with SuperSab.
   *
   * Adjust the weights in the neural network according to the average
   * delta of all patterns in the batch and with SuperSab.
   *
   * To use SuperSab mode in batch training you should call begin_ssab()
   * once, then begin_batch() at the beginning of every epoch, train the
   * network with train_batch(), and then call end_batch_ssab() at the end
   * of every epoch.
   */
  void end_batch_ssab ();
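  /* Illustrative sketch (not part of the original header): one SuperSab
   * batch-training run following the recipe documented above. The exact
   * signatures of compute() and compute_output_error() are assumed here to
   * be compute(const double *input, double *output) and
   * compute_output_error(const double *target); `net`, `input`, `target`,
   * `n_epochs` and `n_patterns` are hypothetical:
   *
   *   net.begin_ssab ();                        // allocate per-weight rates
   *   for (int e = 0; e < n_epochs; e++) {
   *     net.begin_batch ();
   *     for (int p = 0; p < n_patterns; p++) {
   *       net.compute (input[p], NULL);         // forward pass
   *       net.compute_output_error (target[p]); // prepare for training
   *       net.train_batch ();                   // accumulate deltas only
   *     }
   *     net.end_batch_ssab ();                  // adjust weights, SuperSab
   *   }
   *   net.free_ssab ();                         // leave SuperSab mode
   *
   * For plain batch training, drop begin_ssab()/free_ssab() and call
   * end_batch() instead of end_batch_ssab().
   */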
  /****************************************
   * Modification
   ****************************************/

  /*!\brief Make small random changes to the weights of a network.
   * \param factor floating point number.
   * \param range floating point number.
   *
   * All weights in the neural network that are smaller in absolute value
   * than range become a random value from the interval [-range,range].
   * All other weights get multiplied by a random value from the interval
   * [1-factor,1+factor].
   */
  void jolt (double factor, double range);

  /****************************************
   * Overloaded operators
   ****************************************/

  /*! \brief Overloaded operator= */
  const network & operator= (const network & b);
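  /* Illustrative sketch (not part of the original header): jolting a
   * trained network, e.g. to escape a local minimum; the values 0.1 and
   * 0.05 are hypothetical. Per the documentation above, weights with
   * |w| < 0.05 are re-randomized in [-0.05,0.05], and all other weights
   * are scaled by a random factor in [0.9,1.1]:
   *
   *   net.jolt (0.1, 0.05);
   */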
  /* PRIVATE */
private:
  /* [Private]
   * Structs for neurons and layers
   */
  typedef struct
  {
    double output;
    double error;
    double *weight;
    double *delta;
    double *sumdeltas;
  } neuron_t;

  typedef struct
  {
    int no_of_neurons;
    neuron_t *neuron;
  } layer_t;

  void reset_deltas ();
  void reset_sumdeltas ();
  void reset_deltas_and_sumdeltas ();
  void allocate_layer (layer_t * layer, int no_of_neurons);
  void allocate_weights (layer_t * lower, layer_t * upper);
  void allocate_l (int act, int layers, const int *arglist);
  void fbprint (FILE * file) const;
  void fbscan (FILE * file);
  void do_load (const char *filename);
  void do_textload (const char *filename);
  void fprint (FILE * file) const;
  void fscan (FILE * file);
  void set_input (const double *input);
  void get_output (double *output);
  static double sigmoidal (double x, int num_func);
  void propagate_layer (layer_t * lower, layer_t * upper);
  void forward_pass ();
  void backpropagate_layer (layer_t * lower, layer_t * upper);
  void backward_pass ();
  void adjust_weights ();
  void adjust_weights_ssab ();
  int fprint_ssab (FILE * file) const;
  int fscan_ssab (FILE * file);
  void adjust_sumdeltas_batch ();
  void adjust_weights_batch ();
  void adjust_weights_batch_ssab ();
  void copy (const network & b);
  void destroy ();

public:
  int no_of_layers;
  double momentum;
  double learning_rate;
  double global_error;
  int no_of_patterns;
  layer_t *layer;
  layer_t *input_layer;
  layer_t *output_layer;
  int activation;
  double *nus;
  double maxnu;
  double minnu;
  double nuup;
  double nudown;

  /* operator<< is declared friend because it needs to access private fields */
  friend ostream & operator<< (ostream &, const network &);
};

/*! \brief Write a network on a stream
 *
 * Same format as friendly_print() (i.e. friendly_print(false): weights are
 * not displayed).
 *
 * Usage:
 *   os << net;
 */
ostream & operator<< (ostream & os, const network & net);

/****************************************
 * IMPLEMENTATION OF INLINE FUNCTIONS
 * ACCESSORS AND MUTATORS
 ****************************************/

/****************************************
 * Accessors
 ****************************************/

/*!\brief Retrieve the momentum of a network.
 * \return Momentum of the neural network.
 */
inline double
network::get_momentum () const
{
  return momentum;
}

/*!\brief Retrieve the learning rate of a network.
 * \return Learning rate of the neural network.
 */
inline double
network::get_learning_rate () const
{
  return learning_rate;
}

/*!\brief Retrieve the number of inputs of a network.
 * \return Number of neurons in the input layer of the neural network.
 */
inline int
network::get_no_of_inputs () const
{
  return input_layer->no_of_neurons;
}

/*!\brief Retrieve the number of outputs of a network.
 * \return Number of neurons in the output layer of the neural network.
 */
inline int
network::get_no_of_outputs () const
{
  return output_layer->no_of_neurons;
}

/*!\brief Retrieve the number of layers of a network.
 * \return Number of layers, including the input and output layers, of the
 * neural network.
 */
inline int
network::get_no_of_layers () const
{
  return no_of_layers;
}

/*!\brief Retrieve the number of patterns in batch training.
 * \return number of patterns
 */
inline int
network::get_no_of_patterns () const
{
  return no_of_patterns;
}

/*!\brief Retrieve the activation function of the network
 * (network::LOGISTIC or network::TANH).
 * \return activation function
 */
inline int
network::get_activation () const
{
  return activation;
}

/*!\brief Retrieve the output error of a network.
 * \return Output error of the neural network.
 *
 * Before calling this routine, compute() and
 * compute_output_error() should have been called to compute outputs
 * for given inputs and to actually compute the output error. This
 * routine merely returns the output error (which is stored internally
 * in the neural network).
 */
inline double
network::get_output_error () const
{
  return global_error;
}

/*!\brief True if SuperSab is active.
 * \return true if SuperSab mode is active, false otherwise.
 */
inline bool
network::is_ssab_active () const
{
  return (nus != NULL);
}

/****************************************
 * Mutators
 ****************************************/

/*!\brief Change the learning rate of a network.
 * \param learning_rate floating point number.
 */
inline void
network::set_learning_rate (double the_learning_rate)
{
  learning_rate = the_learning_rate;
}

/*!\brief Set the activation function of the network.
 * \param num_func Number of function (network::LOGISTIC | network::TANH)
 */
inline void
network::set_activation (int num_func)
{
  if (num_func != NET_LOGISTIC && num_func != NET_TANH)
    {
      activation = NET_LOGISTIC;
      fprintf (stderr,
               "warning: impossible to set activation function number %d\n",
               num_func);
      fprintf (stderr, "warning: activation function is network::LOGISTIC\n");
    }
  else
    {
      activation = num_func;
    }
}
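/* Illustrative sketch (not part of the original header): typical use of the
 * accessors and mutators above, assuming a constructed network `net`; the
 * learning rate 0.25 is a hypothetical value:
 *
 *   net.set_activation (network::TANH);  // invalid numbers fall back to
 *                                        // LOGISTIC with a warning
 *   net.set_learning_rate (0.25);
 *   cout << net;                         // same format as friendly_print(false)
 *   int n_in  = net.get_no_of_inputs ();
 *   int n_out = net.get_no_of_outputs ();
 */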
/*!\brief Change the momentum of a network.
 * \param momentum floating point number.
 */
inline void
network::set_momentum (double the_momentum)
{
  momentum = the_momentum;
}

/*!\brief Retrieve the maximum learning rate allowed in SuperSab mode.
 * \return double maximum learning rate
 *
 * Values of the learning rates cannot be greater than this value.
 */
inline double
network::get_max_learning_rate ()
{
  return maxnu;
}

/*!\brief Retrieve the minimum learning rate allowed in SuperSab mode.
 * \return double minimum learning rate
 *
 * Values of the learning rates cannot be less than this value.
 */
inline double
network::get_min_learning_rate ()
{
  return minnu;
}

/*!\brief Retrieve the factor for increasing the learning rate in SuperSab mode.
 * \return double factor for increasing the learning rate
 *
 * In SuperSab mode: if the delta at this step has the same sign as the
 * delta at the previous step, the learning rate of that weight is
 * multiplied by this value.
 */
inline double
network::get_ssab_up_factor ()
{
  return nuup;
}

/*!\brief Retrieve the factor for decreasing the learning rate in SuperSab mode.
 * \return double factor for decreasing the learning rate
 *
 * In SuperSab mode: if the delta at this step has the opposite sign to the
 * delta at the previous step, the learning rate of that weight is
 * multiplied by this value.
 */
inline double
network::get_ssab_down_factor ()
{
  return nudown;
}

/*!\brief Set the factor for increasing the learning rate in SuperSab mode.
 * \param factor (for increasing the learning rate)
 *
 * In SuperSab mode: if the delta at this step has the same sign as the
 * delta at the previous step, the learning rate of that weight is
 * multiplied by this value (should be factor > 1).
 */
inline void
network::set_ssab_up_factor (double factor)
{
  nuup = factor;
}

/*!\brief Set the factor for decreasing the learning rate in SuperSab mode.
 * \param factor (for decreasing the learning rate)
 *
 * In SuperSab mode: if the delta at this step has the opposite sign to the
 * delta at the previous step, the learning rate of that weight is
 * multiplied by this value (should be 0 < factor < 1).
 */
inline void
network::set_ssab_down_factor (double factor)
{
  nudown = factor;
}

#endif /* NETWORK_H */
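/* Illustrative sketch (not part of the original header): configuring the
 * SuperSab factors before training; 1.05 and 0.5 are hypothetical values,
 * chosen to respect the documented constraints (up factor > 1,
 * 0 < down factor < 1):
 *
 *   net.set_ssab_up_factor (1.05);
 *   net.set_ssab_down_factor (0.5);
 *   net.begin_ssab ();
 */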