
📄 network.cpp

📁 Source code of the SimuroSot 11vs11 robot soccer simulation league.
💻 CPP
📖 Page 1 of 3
  adjust_weights_ssab ();
}

/*!\brief Reset the values of the learning rates of the network in SuperSab mode.
 * \return int -1 on failure (SuperSab mode is not active), the number of
 *         weights of the network otherwise.
 */
int
network::reset_ssab ()
{
  int i, nw;

  if (nus == NULL)
    {
      return -1;
    }
  nw = count_weights ();
  for (i = 0; i < nw; i++)
    {
      nus[i] = learning_rate;
    }
  return nw;
}

/*!\brief Free the memory used for SuperSab and end SuperSab mode. */
void
network::free_ssab ()
{
  free (nus);
  nus = NULL;
}

/*!\brief Save the SuperSab learning rates to a file.
 * \param file Pointer to file descriptor.
 * \return -1 on failure, the number of weights of the net otherwise.
 */
int
network::fprint_ssab (FILE * file) const
{
  if (nus == NULL)
    {
      return -1;
    }
  int nw = count_weights ();
  fwrite (&nw, sizeof (int), 1, file);
  fwrite (nus, sizeof (double), nw, file);
  fwrite (&maxnu, sizeof (double), 1, file);
  fwrite (&minnu, sizeof (double), 1, file);
  fwrite (&nuup, sizeof (double), 1, file);
  fwrite (&nudown, sizeof (double), 1, file);
  return nw;
}

/*!\brief Read the SuperSab learning rates from a file.
 * \param file Pointer to file descriptor.
 * \return -1 on failure, the number of weights of the net otherwise.
 */
int
network::fscan_ssab (FILE * file)
{
  int nw;

  if (nus != NULL)
    {
      return -1;
    }
  fread (&nw, sizeof (int), 1, file);
  int net_nw = count_weights ();
  if (net_nw != nw)
    {
      cerr << "lwneuralnet++: wrong number of weights in file" << endl;
      return -1;
    }
  nus = (double *) malloc (nw * sizeof (double));
  if (fread (nus, sizeof (double), nw, file) != (size_t) nw)
    {
      cerr << "lwneuralnet++: error in format of file" << endl;
      free (nus);
      nus = NULL;
      return -1;
    }
  int flag = 1;
  flag = flag
    && fread (&maxnu, sizeof (double), 1, file)
    && fread (&minnu, sizeof (double), 1, file)
    && fread (&nuup, sizeof (double), 1, file)
    && fread (&nudown, sizeof (double), 1, file);
  /* If the file does not contain the values of the parameters (old format),
   * set them to the default ones.
   */
  if (!flag)
    {
      maxnu = DEFAULT_MAX_NU;
      minnu = DEFAULT_MIN_NU;
      nuup = DEFAULT_NUUP;
      nudown = DEFAULT_NUDOWN;
    }
  return nw;
}

/*!\brief Write the SuperSab learning rates to a binary file.
 * \param filename Pointer to the name of the file to write to.
 * \return true on success, false on failure.
 */
bool
network::save_ssab (const char *filename) const
{
  FILE *file;
  int nw;

  /* open in binary mode: the data is raw ints and doubles */
  file = fopen (filename, "wb");
  if (file == NULL)
    {
      return false;
    }
  nw = fprint_ssab (file);
  if (nw == -1)
    {
      fclose (file);
      return false;
    }
  return (fclose (file) == 0);
}

/*!\brief Load the SuperSab learning rates from a binary file.
 * \param filename Pointer to the name of the file to read from.
 * \return true on success, false on failure.
 */
bool
network::load_ssab (const char *filename)
{
  FILE *file;
  int nw;

  /* open in binary mode, matching save_ssab() */
  file = fopen (filename, "rb");
  if (file == NULL)
    {
      return false;
    }
  nw = fscan_ssab (file);
  if (nw == -1)
    {
      cerr << filename << endl;
      nus = NULL;
      fclose (file);
      return false;
    }
  return (fclose (file) == 0);
}
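/* Usage sketch (not part of the original listing): round-tripping the
 * SuperSab state with save_ssab()/load_ssab(). The constructor call is a
 * hypothetical topology for illustration only. Note that load_ssab() only
 * succeeds while SuperSab is inactive, because fscan_ssab() refuses to
 * overwrite an existing nus array.
 *
 *   network net (3, 10, 5, 2);          // hypothetical 3-layer topology
 *   net.begin_ssab ();                  // enter SuperSab mode
 *   // ... training ...
 *   if (!net.save_ssab ("ssab.dat"))    // binary dump of nus + parameters
 *     cerr << "save_ssab failed" << endl;
 *
 *   network net2 (3, 10, 5, 2);         // same topology, SuperSab inactive
 *   if (!net2.load_ssab ("ssab.dat"))   // restores the rates and parameters
 *     cerr << "load_ssab failed" << endl;
 */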
/*!\brief Set the maximum learning rate allowed in SuperSab mode.
 * \param max maximum learning rate
 *
 * Values of the learning rates cannot be greater than this value.
 *
 * If the previous max learning rate was greater than the new one
 * and SuperSab mode is active, all the learning rates are changed to make
 * them smaller than the new maximum.
 *
 * So, if you just want to change the default max learning rate,
 * call this method before begin_ssab().
 */
void
network::set_max_learning_rate (double max)
{
  if (is_ssab_active ())
    {
      if (max < maxnu)
        {
          int nw = count_weights ();
          for (int i = 0; i < nw; i++)
            {
              if (nus[i] > max)
                nus[i] = max;
            }
        }
    }
  maxnu = max;
}

/*!\brief Set the minimum learning rate allowed in SuperSab mode.
 * \param min minimum learning rate
 *
 * Values of the learning rates cannot be smaller than this value.
 *
 * If the previous min learning rate was smaller than the new one
 * and SuperSab mode is active, all the learning rates are changed to make
 * them greater than the new minimum.
 *
 * So, if you just want to change the default min learning rate,
 * call this method before begin_ssab().
 */
void
network::set_min_learning_rate (double min)
{
  if (is_ssab_active ())
    {
      if (min > minnu)
        {
          int nw = count_weights ();
          for (int i = 0; i < nw; i++)
            {
              if (nus[i] < min)
                nus[i] = min;
            }
        }
    }
  minnu = min;
}

/*!\brief Print the learning rates used in SuperSab mode.
 * \return -1 if SuperSab is not active, the number of weights otherwise.
 */
int
network::ssab_print_nus () const
{
  int i, nweights;

  if (nus == NULL)
    {
      cerr << "lwneuralnet++: warning: SuperSab is not active" << endl;
      return -1;
    }
  else
    {
      nweights = count_weights ();
      for (i = 0; i < nweights; i++)
        {
          printf ("%f   ", nus[i]);
        }
      printf ("\n");
      return nweights;
    }
}

/*!\brief Compute some statistics about the learning rates in SuperSab mode.
 * \return -1 if ( ! is_ssab_active() ), the number of weights of the network otherwise
 * \param average the average of the learning rates
 * \param max the maximum value of the learning rates
 * \param min the minimum value of the learning rates
 * \param n_max the number of learning rates equal to max
 * \param n_min the number of learning rates equal to min
 */
int
network::ssab_stats (double &average, double &max, double &min,
                     int &n_max, int &n_min)
{
  if (!is_ssab_active ())
    {
      return -1;
    }
  int nw = count_weights ();
  double sum = 0.0;
  max = minnu;
  min = maxnu;
  for (int i = 0; i < nw; i++)
    {
      sum += nus[i];
      if (nus[i] > max)
        max = nus[i];
      if (nus[i] < min)
        min = nus[i];
    }
  average = sum / nw;
  n_min = 0;
  n_max = 0;
  for (int i = 0; i < nw; i++)
    {
      if (nus[i] == max)
        n_max++;
      if (nus[i] == min)
        n_min++;
    }
  return nw;
}
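/* Usage sketch (not part of the original listing): ssab_stats() reports
 * through reference parameters, so the caller supplies the variables.
 * A typical monitoring snippet during SuperSab training:
 *
 *   double avg, hi, lo;
 *   int n_hi, n_lo;
 *   if (net.ssab_stats (avg, hi, lo, n_hi, n_lo) != -1)
 *     cerr << "nu avg=" << avg << " max=" << hi << " (x" << n_hi << ")"
 *          << " min=" << lo << " (x" << n_lo << ")" << endl;
 */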
/****************************************
 * Batch Training
 ****************************************/

/*!\brief [Private] Accumulate the deltas based on the (backpropagated)
 * output error.
 */
inline void
network::adjust_sumdeltas_batch ()
{
  int l, nu, nl;
  double error;

  for (l = 1; l < no_of_layers; l++)
    {
      for (nu = 0; nu < layer[l].no_of_neurons; nu++)
        {
          error = layer[l].neuron[nu].error;
          for (nl = 0; nl < layer[l - 1].no_of_neurons; nl++)
            {
              layer[l].neuron[nu].sumdeltas[nl] +=
                error * layer[l - 1].neuron[nl].output;
            }
        }
    }
}

/*!\brief [Private] Adjust the weights based on the deltas determined by
 * batch training.
 */
inline void
network::adjust_weights_batch ()
{
  int l, nu, nl;
  double delta;
  double learning_factor = learning_rate / ((double) no_of_patterns);

  for (l = 1; l < no_of_layers; l++)
    {
      for (nu = 0; nu < layer[l].no_of_neurons; nu++)
        {
          for (nl = 0; nl < layer[l - 1].no_of_neurons; nl++)
            {
              delta = learning_factor * layer[l].neuron[nu].sumdeltas[nl]
                + momentum * layer[l].neuron[nu].delta[nl];
              layer[l].neuron[nu].weight[nl] += delta;
              layer[l].neuron[nu].delta[nl] = delta;
            }
        }
    }
}

/*!\brief Begin training in batch mode. */
void
network::begin_batch ()
{
  no_of_patterns = 0;
  reset_sumdeltas ();
}

/*!\brief Train a network in batch mode.
 *
 * Before calling this routine, begin_batch() should have been
 * called (at the start of the batch) to begin batch training.
 * Furthermore, for the current input/target pair, compute() and
 * compute_output_error() should have been called to compute the outputs
 * for the given inputs and to prepare the neural network for training
 * by computing the output error using the given targets. This routine
 * performs the actual training by backpropagating the output error
 * through the layers, but does not change the weights. The weights
 * will be changed when (at the end of the batch) end_batch() is
 * called.
 */
void
network::train_batch ()
{
  no_of_patterns++;
  backward_pass ();
  adjust_sumdeltas_batch ();
}

/*!\brief End training in batch mode, adjusting the weights.
 *
 * Adjust the weights in the neural network according to the average
 * delta of all patterns in the batch.
 */
void
network::end_batch ()
{
  adjust_weights_batch ();
}

/***************************************
 * Batch and SuperSab
 ***************************************/

/*!\brief [Private] Adjust the weights based on the deltas determined by
 * batch training, using the SuperSab learning rates.
 */
inline void
network::adjust_weights_batch_ssab ()
{
  int l, nu, nl;
  double delta;
  double inv_no_of_patterns = 1.0 / ((double) no_of_patterns);
  int nuind = 0;

  for (l = 1; l < no_of_layers; l++)
    {
      for (nu = 0; nu < layer[l].no_of_neurons; nu++)
        {
          for (nl = 0; nl < layer[l - 1].no_of_neurons; nl++)
            {
              delta = nus[nuind] * layer[l].neuron[nu].sumdeltas[nl]
                * inv_no_of_patterns
                + momentum * layer[l].neuron[nu].delta[nl];
              layer[l].neuron[nu].weight[nl] += delta;
              /* grow the rate while successive deltas keep the same sign,
               * shrink it when they oscillate */
              if (layer[l].neuron[nu].delta[nl] * delta > 0)
                {
                  nus[nuind] = min (nus[nuind] * nuup, maxnu);
                }
              else
                {
                  nus[nuind] = max (nus[nuind] * nudown, minnu);
                }
              layer[l].neuron[nu].delta[nl] = delta;
              nuind++;
            }
        }
    }
}

/*!\brief End training in batch mode, adjusting the weights with SuperSab.
 *
 * To use SuperSab in batch training you should call begin_ssab()
 * and begin_batch(), train the network with train_batch(), and then
 * call end_batch_ssab() at the end of every epoch.
 *
 * Adjust the weights in the neural network according to the average
 * delta of all patterns in the batch, using the SuperSab learning rates.
 */
void
network::end_batch_ssab ()
{
  if (!is_ssab_active ())
    {
      cerr << "lwneuralnet++: warning: end_batch_ssab() called while not in "
              "SuperSab mode; ignoring SuperSab" << endl;
      adjust_weights_batch ();
    }
  else
    {
      adjust_weights_batch_ssab ();
    }
}
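/* Usage sketch (not part of the original listing): one epoch of batch
 * training, following the call sequence documented at train_batch().
 * 'inputs', 'targets' and 'n_patterns' are hypothetical training data,
 * and the argument lists of compute() and compute_output_error() are
 * assumptions; only the call order is taken from the comments above.
 *
 *   net.begin_ssab ();                        // only if SuperSab is wanted
 *   for (int epoch = 0; epoch < 1000; epoch++)
 *     {
 *       net.begin_batch ();                   // reset count and sumdeltas
 *       for (int p = 0; p < n_patterns; p++)
 *         {
 *           net.compute (inputs[p], NULL);    // forward pass
 *           net.compute_output_error (targets[p]);
 *           net.train_batch ();               // backpropagate + accumulate
 *         }
 *       net.end_batch_ssab ();                // or end_batch() w/o SuperSab
 *     }
 */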
/****************************************
 * Modification
 ****************************************/

/*!\brief Make small random changes to the weights of a network.
 * \param factor floating point number.
 * \param range floating point number.
 *
 * All weights in the neural network whose absolute value is smaller
 * than range are set to a random value from the interval [-range,range].
 * All other weights are multiplied by a random value from the interval
 * [1-factor,1+factor].
 */
void
network::jolt (double factor, double range)
{
  int l, nu, nl;

  /* sanity check */
  if ((factor < 0) || (range < 0))
    {
      return;
    }
  /* modify weights */
  for (l = 1; l < no_of_layers; l++)
    {
      for (nu = 0; nu < layer[l].no_of_neurons; nu++)
        {
          for (nl = 0; nl < layer[l - 1].no_of_neurons; nl++)
            {
              if (fabs (layer[l].neuron[nu].weight[nl]) < range)
                {
                  layer[l].neuron[nu].weight[nl] =
                    2.0 * range * ((double) rand () / RAND_MAX - 0.5);
                }
              else
                {
                  layer[l].neuron[nu].weight[nl] *=
                    1.0 + 2.0 * factor * ((double) rand () / RAND_MAX - 0.5);
                }
            }
        }
    }
}

/* \brief Write a network to a stream.
 * Same format as friendly_print(false).
 * Usage:
 *   os << net;
 */
ostream &
operator<< (ostream & os, const network & net)
{
  stringstream buf;

  /* Note that this function is declared as a friend by the
   * network class and hence can access its private fields.
   */
  /* write the network dimensions */
  buf << "No of layers: " << net.no_of_layers << endl;
  for (int l = 0; l < net.no_of_layers; l++)
    {
      buf << "No of neurons on layer " << l << ": "
          << net.layer[l].no_of_neurons << endl;
    }
  /* write the network constants */
  buf << "Momentum: " << net.momentum << endl;
  buf << "Learning rate: " << net.learning_rate << endl;
  buf << "Global Error: " << net.global_error << endl;
  buf << "Activation Func: "
      << (net.activation == network::LOGISTIC ? "Logistic" : "Tanh") << endl;
  if (net.is_ssab_active ())
    {
      buf << "SuperSab mode is active." << endl;
      buf << "Max Learning rate: " << net.maxnu << endl;
      buf << "Min Learning rate: " << net.minnu << endl;
      buf << "nu_up (factor for increasing): " << net.nuup << endl;
      buf << "nu_down (factor for decreasing): " << net.nudown << endl;
    }
  return os << buf.str ();
}

/* \brief Overloaded operator=. */
const network &
network::operator= (const network & b)
{
  /* guard against self-assignment */
  if (this == &b)
    return *this;
  /* destroy the left-hand side */
  destroy ();
  /* copy the right-hand side */
  copy (b);
  return *this;
}

/* \brief Copy constructor. */
network::network (const network & b)
{
  copy (b);
}

/* \brief [Private]
 * Used to copy a network (common code of operator= and the copy constructor).
 */
void
network::copy (const network & b)
{
  int l, nu, nl;

  no_of_layers = b.no_of_layers;
  layer = (layer_t *) calloc (no_of_layers, sizeof (layer_t));
  for (l = 0; l < no_of_layers; l++)
    {
      allocate_layer (&layer[l], b.layer[l].no_of_neurons);
    }
  for (l = 1; l < no_of_layers; l++)
    {
      allocate_weights (&layer[l - 1], &layer[l]);
    }
  /* abbreviations for the input and output layers */
  input_layer = &layer[0];
  output_layer = &layer[no_of_layers - 1];
  /* copy the values of the network constants */
  momentum = b.momentum;
  learning_rate = b.learning_rate;
  activation = b.activation;
  /* copy the SuperSab learning rates */
  if (b.nus == NULL)
    {
      nus = NULL;
    }
  else
    {
      int nw = b.count_weights ();
      nus = (double *) malloc (nw * sizeof (double));
      memcpy (nus, b.nus, nw * sizeof (double));
    }
  /* copy the SuperSab parameters */
  minnu = b.minnu;
  maxnu = b.maxnu;
  nuup = b.nuup;
  nudown = b.nudown;
  /* copy the other fields */
  global_error = b.global_error;
  no_of_patterns = b.no_of_patterns;
  /* copy the network's weights, deltas and sumdeltas */
  for (l = 1; l < no_of_layers; l++)
    {
      for (nu = 0; nu < layer[l].no_of_neurons; nu++)
        {
          for (nl = 0; nl < layer[l - 1].no_of_neurons; nl++)
            {
              layer[l].neuron[nu].weight[nl] =
                b.layer[l].neuron[nu].weight[nl];
              layer[l].neuron[nu].delta[nl] =
                b.layer[l].neuron[nu].delta[nl];
              layer[l].neuron[nu].sumdeltas[nl] =
                b.layer[l].neuron[nu].sumdeltas[nl];
            }
        }
    }
}
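/* Usage sketch (not part of the original listing): copy() gives both the
 * copy constructor and operator= deep-copy semantics, so a copy owns its
 * layers, weights and (if active) SuperSab rates independently of the
 * original. 'make_net' is a hypothetical factory used for illustration.
 *
 *   network a = make_net ();
 *   network b (a);            // deep copy via the copy constructor
 *   b.jolt (0.1, 0.01);       // perturb b; 'a' is unaffected
 *   cout << a << b;           // print both summaries via operator<<
 */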
