network.cpp
  /* write network constants */
  fprintf (file, "%f\n", momentum);
  fprintf (file, "%f\n", learning_rate);
  fprintf (file, "%f\n", global_error);

  /* write network weights */
  for (l = 1; l < no_of_layers; l++) {
    for (nu = 0; nu < layer[l].no_of_neurons; nu++) {
      for (nl = 0; nl < layer[l - 1].no_of_neurons; nl++) {
        fprintf (file, "%f\n", layer[l].neuron[nu].weight[nl]);
      }
    }
  }
}

/*!\brief [Private] Read a network from a file.
 * \param file Pointer to an open file stream.
 */
void
network::fscan (FILE * file)
{
  int no_of_layers, l, nu, nl, funct, *arglist;

  /* read function */
  fscanf (file, "%i", &funct);
  /* tricky solution for importing files in the old format, which lacks
   * the activation function number at the beginning: the function
   * number can be 0 or 1, while the number of layers must be >= 2.
   * So if funct > 1 the file should be in the old format; we set
   * funct = 0 (logistic) and take the first number as no_of_layers. */
  if (funct > NET_TANH) {
    no_of_layers = funct;
    funct = NET_LOGISTIC;
  } else {
    /* read number of layers */
    fscanf (file, "%i", &no_of_layers);
  }
  if (no_of_layers < 2) {
    throw runtime_error ("Error in text file format");
  }

  /* read number of neurons in each layer */
  arglist = (int *) calloc (no_of_layers, sizeof (int));
  if (arglist == NULL) {
    throw runtime_error ("Memory allocation failed");
  }
  for (l = 0; l < no_of_layers; l++) {
    fscanf (file, "%i", &arglist[l]);
  }

  /* allocate memory for the network */
  allocate_l (funct, no_of_layers, arglist);
  set_activation (funct);

  /* read network constants; the members are doubles, so %lf is required */
  fscanf (file, "%lf", &momentum);
  fscanf (file, "%lf", &learning_rate);
  fscanf (file, "%lf", &global_error);

  /* read network weights */
  for (l = 1; l < no_of_layers; l++) {
    for (nu = 0; nu < layer[l].no_of_neurons; nu++) {
      for (nl = 0; nl < layer[l - 1].no_of_neurons; nl++) {
        fscanf (file, "%lf", &layer[l].neuron[nu].weight[nl]);
      }
    }
  }
  free (arglist);
}

/*!\brief Write a network to stdout. */
void
network::print () const
{
  fprint (stdout);
}

/*!\brief Write a network to stdout in a friendly format.
 * \param show If show == true, weights are displayed.
 */
void
network::friendly_print (const bool show) const
{
  int l, nu, nl;

  /* write network dimensions */
  printf ("No of layers: %i\n", no_of_layers);
  for (l = 0; l < no_of_layers; l++) {
    printf ("No of neurons on layer %i: %i\n", l, layer[l].no_of_neurons);
  }

  /* write network constants */
  printf ("Momentum: %f\n", momentum);
  printf ("Learning rate: %f\n", learning_rate);
  printf ("Global Error: %f\n", global_error);
  printf ("Activation Func: %s\n",
          activation == NET_LOGISTIC ? "Logistic" : "Tanh");
  if (is_ssab_active ()) {
    printf ("SuperSab mode is active.\n");
    printf ("Max Learning rate: %f\n", maxnu);
    printf ("Min Learning rate: %f\n", minnu);
    printf ("nu_up (factor for increasing): %f\n", nuup);
    printf ("nu_down (factor for decreasing): %f\n", nudown);
  }
  if (show) {
    printf ("Weights:\n\n");
    /* write network weights */
    for (l = 1; l < no_of_layers; l++) {
      printf ("Weights from layer %d to %d\n", l - 1, l);
      for (nu = 0; nu < layer[l].no_of_neurons; nu++) {
        for (nl = 0; nl < layer[l - 1].no_of_neurons; nl++) {
          printf ("W(%d,%d) = %f\n", nl, nu, layer[l].neuron[nu].weight[nl]);
        }
      }
    }
  }
}

/*!\brief Write a network to a text file.
 * \param filename Pointer to name of file to write to.
 * \return true on success, false on failure.
 */
bool
network::textsave (const char *filename) const
{
  FILE *file;

  file = fopen (filename, "w");
  if (file == NULL) {
    return false;
  }
  fprint (file);
  return (fclose (file) == 0);
}
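/* Usage sketch, not part of the original library source: a minimal
 * save/load round trip built only from members shown in this file.
 * The helper name and the file name are illustrative assumptions; the
 * caller is assumed to hold an already-constructed network. */
#if 0
void
checkpoint_example (network &net)
{
  if (!net.textsave ("net.txt")) {   /* false if the file cannot be written */
    fprintf (stderr, "could not save network\n");
    return;
  }
  net.textload ("net.txt");          /* throws runtime_error if the file is missing */
}
#endif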
/*!\brief Read a network from a text file.
 * \param filename Pointer to name of file to read from.
 * Throws a runtime_error exception if filename does not exist.
 */
void
network::textload (const char *filename)
{
  destroy ();
  do_textload (filename);
}

/*!\brief [Private] Read a network from a text file.
 * \param filename Pointer to name of file to read from.
 * Throws a runtime_error exception if filename does not exist.
 */
void
network::do_textload (const char *filename)
{
  FILE *file;

  file = fopen (filename, "r");
  if (file == NULL) {
    throw runtime_error ("File " + string (filename) + " not found");
  }
  fscan (file);
  fclose (file);
}

/****************************************
 * Input and Output
 ****************************************/

/*!\brief [Private] Copy inputs to input layer of a network. */
inline void
network::set_input (const double *input)
{
  int n;

  for (n = 0; n < input_layer->no_of_neurons; n++) {
    input_layer->neuron[n].output = input[n];
  }
}

/*!\brief [Internal] Copy outputs from output layer of a network. */
inline void
network::get_output (double *output)
{
  int n;

  for (n = 0; n < output_layer->no_of_neurons; n++) {
    output[n] = output_layer->neuron[n].output;
  }
}

/****************************************
 * Errors
 *
 * Before calling these routines, compute() should have been called to
 * compute the outputs for a given input. These routines compare the
 * actual output of the neural network (which is stored internally in
 * the neural network) and the intended output (in target).
 ****************************************/

/*!\brief Compute the output error of a network.
 * \param target Pointer to a sequence of floating point numbers.
 * \return Output error of the neural network.
 *
 * The return value is the square of the Euclidean distance between the
 * actual output and the target. This routine also prepares the network
 * for backpropagation training by storing (internally in the neural
 * network) the errors associated with each of the outputs.
 */
double
network::compute_output_error (const double *target)
{
  int n;
  double output, error;

  global_error = 0.0;
  for (n = 0; n < output_layer->no_of_neurons; n++) {
    output = output_layer->neuron[n].output;
    error = target[n] - output;
    /* scale by the activation derivative: s'(x) = s(x)(1-s(x)) for the
     * logistic function, 1 - tanh(x)^2 for tanh */
    if (activation == NET_LOGISTIC) {
      output_layer->neuron[n].error = output * (1 - output) * error;
    } else {
      output_layer->neuron[n].error = (1 - output * output) * error;
    }
    global_error += error * error;
  }
  return global_error;
}
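/* Usage sketch, not part of the original library source: one online
 * training iteration wiring compute(), compute_output_error() and
 * train() together in the order the comments above prescribe.  The
 * helper name and the 2-input/1-output shapes are illustrative
 * assumptions and must match the actual network dimensions. */
#if 0
void
training_step_example (network &net)
{
  const double input[2]  = { 0.0, 1.0 };
  const double target[1] = { 1.0 };

  net.compute (input, NULL);                        /* forward pass; outputs stay internal */
  double err = net.compute_output_error (target);   /* primes backpropagation */
  net.train ();                                     /* backward pass + weight update */
  printf ("squared error: %f\n", err);
}
#endif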
/*!\brief Compute the average error of a network.
 * \param target Pointer to a sequence of floating point numbers.
 * \return Average error of the neural network.
 *
 * The average error is defined as the average value of the absolute
 * differences between output and target.
 */
double
network::compute_average_error (const double *target) const
{
  int n;
  double output;
  double error = 0.0;

  for (n = 0; n < output_layer->no_of_neurons; n++) {
    output = output_layer->neuron[n].output;
    error += fabs (target[n] - output);
  }
  return error / output_layer->no_of_neurons;
}

/*!\brief Compute the quadratic error of a network.
 * \param target Pointer to a sequence of floating point numbers.
 * \return Quadratic error of the neural network.
 *
 * The quadratic error is defined as sqrt(sum_j (T_j - O_j)^2) / N,
 * where the T_j are targets and the O_j are outputs.
 */
double
network::compute_quadratic_error (const double *target) const
{
  int n;
  double output;
  double error = 0.0;

  for (n = 0; n < output_layer->no_of_neurons; n++) {
    output = output_layer->neuron[n].output;
    error += (target[n] - output) * (target[n] - output);
  }
  return sqrt (error) / output_layer->no_of_neurons;
}

/*!\brief Compute the maximum error of a network.
 * \param target Pointer to a sequence of floating point numbers.
 * \return Maximum error of the neural network.
 *
 * The maximum error is defined as the maximum of the absolute
 * differences between outputs and targets.
 */
double
network::compute_max_error (const double *target) const
{
  int n;
  double output;
  double error = 0.0;

  for (n = 0; n < output_layer->no_of_neurons; n++) {
    output = output_layer->neuron[n].output;
    error = max (error, fabs (target[n] - output));
  }
  return error;
}

/****************************************
 * Sigmoidal functions
 ****************************************/

#if 0
/* THIS IS ONLY FOR REFERENCE !! */
/* reference implementation for sigmoidal */
/*!\brief [Private] Activation function of a neuron.
 * \param x point where the function should be evaluated
 * \param num_func type of sigmoidal function (network::LOGISTIC, network::TANH)
 */
inline double
network::sigmoidal (double x, int num_func)
{
  if (num_func == network::LOGISTIC) {
    return 1.0 / (1.0 + exp (-x));
  } else if (num_func == network::TANH) {
    return tanh (x);
  }
  return 0;
}
#else
/* implementation of sigmoidal with table lookup */
#include "sigmoidal.cpp"

/*!\brief [Private] Activation function of a neuron.
 * \param x point where the function should be evaluated
 * \param num_func type of sigmoidal function (network::LOGISTIC, network::TANH)
 */
inline double
network::sigmoidal (double x, int num_func)
{
  int index = (int) ((x - min_entry[num_func]) * invinterval[num_func]);

  if (index <= 0) {
    return lowbound[num_func];
  } else if (index >= num_entries) {
    return highbound[num_func];
  } else {
    return interpolation[index][num_func];
  }
}
#endif
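/* Illustrative sketch of the table-lookup idea used above.  The real
 * tables (min_entry, invinterval, interpolation, lowbound, highbound)
 * come from sigmoidal.cpp, which is not shown here; everything below,
 * names and constants included, is an assumed stand-in that precomputes
 * the logistic curve so evaluation costs one multiply and one array
 * read, with the same clamping at both ends. */
#if 0
#include <cmath>

enum { DEMO_ENTRIES = 1024 };
static double demo_table[DEMO_ENTRIES];
static double demo_min = -10.0, demo_inv_step;

void
build_demo_table ()
{
  double demo_max = 10.0;
  double step = (demo_max - demo_min) / (DEMO_ENTRIES - 1);

  demo_inv_step = 1.0 / step;
  for (int i = 0; i < DEMO_ENTRIES; i++) {
    demo_table[i] = 1.0 / (1.0 + exp (-(demo_min + i * step)));
  }
}

double
demo_logistic (double x)
{
  int index = (int) ((x - demo_min) * demo_inv_step);

  if (index <= 0) {
    return demo_table[0];                 /* saturate below the table range */
  } else if (index >= DEMO_ENTRIES) {
    return demo_table[DEMO_ENTRIES - 1];  /* saturate above the table range */
  }
  return demo_table[index];
}
#endif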
/****************************************
 * Forward and Backward Propagation
 ****************************************/

/*!\brief [Private] Forward propagate inputs from one layer to the next layer. */
inline void
network::propagate_layer (layer_t * lower, layer_t * upper)
{
  int nu, nl;
  double value;

  for (nu = 0; nu < upper->no_of_neurons; nu++) {
    value = 0.0;
    for (nl = 0; nl < lower->no_of_neurons; nl++) {
      value += upper->neuron[nu].weight[nl] * lower->neuron[nl].output;
    }
    upper->neuron[nu].output = sigmoidal (value, activation);
  }
}

/*!\brief [Private] Forward propagate inputs through a network. */
inline void
network::forward_pass ()
{
  int l;

  for (l = 1; l < no_of_layers; l++) {
    propagate_layer (&layer[l - 1], &layer[l]);
  }
}

/*!\brief [Private] Backpropagate error from one layer to the previous layer. */
inline void
network::backpropagate_layer (layer_t * lower, layer_t * upper)
{
  int nl, nu;
  double output, error;

  for (nl = 0; nl < lower->no_of_neurons; nl++) {
    error = 0.0;
    for (nu = 0; nu < upper->no_of_neurons; nu++) {
      error += upper->neuron[nu].weight[nl] * upper->neuron[nu].error;
    }
    output = lower->neuron[nl].output;
    if (activation == NET_LOGISTIC) {
      lower->neuron[nl].error = output * (1 - output) * error;
    } else {
      lower->neuron[nl].error = (1 - output * output) * error;
    }
  }
}

/*!\brief [Private] Backpropagate output error through a network. */
inline void
network::backward_pass ()
{
  int l;

  /* stop at l == 2: no error is needed for the input layer */
  for (l = no_of_layers - 1; l > 1; l--) {
    backpropagate_layer (&layer[l - 1], &layer[l]);
  }
}

/*!\brief [Private] Adjust weights based on (backpropagated) output error. */
inline void
network::adjust_weights ()
{
  int l, nu, nl;
  double error, delta;

  for (l = 1; l < no_of_layers; l++) {
    for (nu = 0; nu < layer[l].no_of_neurons; nu++) {
      error = layer[l].neuron[nu].error;
      for (nl = 0; nl < layer[l - 1].no_of_neurons; nl++) {
        delta = learning_rate * error * layer[l - 1].neuron[nl].output
                + momentum * layer[l].neuron[nu].delta[nl];
        layer[l].neuron[nu].weight[nl] += delta;
        layer[l].neuron[nu].delta[nl] = delta;
      }
    }
  }
}

/****************************************
 * Evaluation and Training
 ****************************************/

/*!\brief Compute outputs of a network for given inputs.
 * \param input Pointer to a sequence of floating point numbers.
 * \param output Pointer to a sequence of floating point numbers or NULL.
 *
 * Compute outputs of a neural network for given inputs by forward
 * propagating the inputs through the layers. If output is non-NULL, the
 * outputs are copied to output (otherwise they are only stored
 * internally in the network).
 */
void
network::compute (const double *input, double *output)
{
  set_input (input);
  forward_pass ();
  if (output != NULL) {
    get_output (output);
  }
}
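/* Usage sketch, not part of the original library source: reading back
 * the three error measures defined earlier for the same target vector
 * after compute() has run.  The helper name is an assumption; target
 * must be as long as the output layer. */
#if 0
void
error_report_example (network &net, const double *input, const double *target)
{
  net.compute (input, NULL);   /* forward pass first */
  printf ("average error:   %f\n", net.compute_average_error (target));
  printf ("quadratic error: %f\n", net.compute_quadratic_error (target));
  printf ("max error:       %f\n", net.compute_max_error (target));
}
#endif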
/*!\brief Train a network.
 *
 * Before calling this routine, compute() and compute_output_error()
 * should have been called to compute outputs for given inputs and to
 * prepare the neural network for training by computing the output
 * error. This routine performs the actual training by backpropagating
 * the output error through the layers.
 */
void
network::train ()
{
  backward_pass ();
  adjust_weights ();
}

/****************************************
 * SuperSab
 ****************************************/

/*!\brief [Private] Adjust weights with SuperSAB. */
void
network::adjust_weights_ssab ()
{
  int l, nu, nl;
  double error, delta;
  int nuind = 0;

  for (l = 1; l < no_of_layers; l++) {
    for (nu = 0; nu < layer[l].no_of_neurons; nu++) {
      error = layer[l].neuron[nu].error;
      for (nl = 0; nl < layer[l - 1].no_of_neurons; nl++) {
        delta = nus[nuind] * error * layer[l - 1].neuron[nl].output
                + momentum * layer[l].neuron[nu].delta[nl];
        layer[l].neuron[nu].weight[nl] += delta;
        /* grow this weight's learning rate while successive updates
         * agree in sign; shrink it when they oscillate */
        if (layer[l].neuron[nu].delta[nl] * delta > 0) {
          nus[nuind] = min (nus[nuind] * nuup, maxnu);
        } else {
          nus[nuind] = max (nus[nuind] * nudown, minnu);
        }
        layer[l].neuron[nu].delta[nl] = delta;
        nuind++;
      }
    }
  }
}

/*!\brief Count the number of weights of the network.
 * \return int number of weights
 */
int
network::count_weights () const
{
  int l;
  int nweights = 0;

  for (l = 1; l < no_of_layers; l++) {
    nweights += layer[l - 1].no_of_neurons * layer[l].no_of_neurons;
  }
  return nweights;
}

/*!\brief Begin SuperSab mode, setting the nus to the learning rate of
 * the network.
 * Precondition: (! is_ssab_active()), i.e. begin_ssab was not called
 * before. If is_ssab_active() and you want to reset the values of nus,
 * use reset_ssab or free_ssab.
 *
 * \return int -1 on failure, number of weights of the net otherwise.
 */
int
network::begin_ssab ()
{
  int i;
  int nw;

  if (nus != NULL) {
    return -1;
  }
  nw = count_weights ();
  nus = (double *) malloc (nw * sizeof (double));
  if (nus == NULL) {
    return -1;
  }
  for (i = 0; i < nw; i++) {
    nus[i] = learning_rate;
  }
  return nw;
}

/*!\brief Train a network in ssab mode.
 *
 * Before calling this routine, compute() and compute_output_error()
 * should have been called to compute outputs for given inputs and to
 * prepare the neural network for training by computing the output
 * error.
 */
void
network::train_ssab ()
{
  backward_pass ();
  adjust_weights_ssab ();
}
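/* Usage sketch, not part of the original library source: enabling
 * SuperSAB and training with it over a small pattern set.  The helper
 * name and dataset shape are assumptions; begin_ssab() must succeed
 * before train_ssab() is used. */
#if 0
void
ssab_training_example (network &net,
                       const double *inputs[], const double *targets[],
                       int npatterns)
{
  if (net.begin_ssab () < 0) {   /* -1 if SuperSAB is already active or allocation failed */
    return;
  }
  for (int p = 0; p < npatterns; p++) {
    net.compute (inputs[p], NULL);
    net.compute_output_error (targets[p]);
    net.train_ssab ();
  }
}
#endif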