
📄 nn_util.c

📁 Some insights into genetic algorithms, in particular an implementation of simple genetic programming.
💻 C
📖 Page 1 of 4
/* Headers needed by the routines shown on this page; the file's
   original preamble (further includes and the definitions of
   network_t, SQU(), random_int(), train_data etc.) is not shown
   here. */
#include <math.h>
#include <stdio.h>

/**********************************************************************
  NN_propagate()
  synopsis:     Propagate signals forward through the network.
  parameters:   network_t *network
  return:       none
  last updated:
 **********************************************************************/

void NN_propagate(network_t *network)
  {
  int   l;
  int   i, j;
  float sum;

  for (l=0; l<network->num_layers-1; l++)
    {
    for (i=1; i<=network->layer[l+1].neurons; i++)
      {
      sum = 0;
      for (j=0; j<=network->layer[l].neurons; j++)
        {
        sum += network->layer[l+1].weight[i][j] * network->layer[l].output[j];
        }
      /* Logistic activation. */
      network->layer[l+1].output[i] = 1 / (1 + exp(-network->gain * sum));
      }
    }

  return;
  }


/**********************************************************************
  NN_output_error()
  synopsis:     Assess the error of a network against a given output
                vector.  (For sequential mode training.)
  parameters:   network_t *network
                float *target
  return:       none
  last updated:
 **********************************************************************/

void NN_output_error(network_t *network, float *target)
  {
  int   i;
  float out, err;

  network->error = 0;
  for (i=1; i<=network->layer[network->num_layers-1].neurons; i++)
    {
    out = network->layer[network->num_layers-1].output[i];
    err = target[i-1]-out;
    network->layer[network->num_layers-1].error[i] = network->gain * out * (1-out) * err;
    network->error += 0.5 * SQU(err);
    }

#if NN_DEBUG>2
  printf("network->error = %f\n", network->error);
#endif

  return;
  }


/**********************************************************************
  NN_output_error_sum()
  synopsis:     Sum the error of a network against a given output
                vector.  (For batch mode training.)
  parameters:   network_t *network
                float *target
  return:       none
  last updated: 25 Feb 2002
 **********************************************************************/

void NN_output_error_sum(network_t *network, float *target)
  {
  int   i;
  float out, err;

  network->error = 0;
  for (i=1; i<=network->layer[network->num_layers-1].neurons; i++)
    {
    out = network->layer[network->num_layers-1].output[i];
    err = target[i-1]-out;
    network->layer[network->num_layers-1].error[i] += network->gain * out * (1-out) * err;
    network->error += 0.5 * SQU(err);
    }

  return;
  }


/**********************************************************************
  NN_backpropagate()
  synopsis:     Perform one step of error back-propagation.
  parameters:   network_t *network
  return:       none
  last updated:
 **********************************************************************/

void NN_backpropagate(network_t *network)
  {
  int   l;
  int   i, j;
  float out, err;

  for (l=network->num_layers-1; l>1; l--)
    {
    for (i=1; i<=network->layer[l-1].neurons; i++)
      {
      out = network->layer[l-1].output[i];
      err = 0;
      for (j=1; j<=network->layer[l].neurons; j++)
        {
        err += network->layer[l].weight[j][i] * network->layer[l].error[j];
        }
      network->layer[l-1].error[i] = network->gain * out * (1-out) * err;
      }
    }

  return;
  }
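
/**********************************************************************
  sigmoid_derivative_check()
  synopsis:     Illustrative sketch added for exposition; not part of
                the original listing.  Checks numerically that the
                factor gain*out*(1-out) used in NN_output_error() and
                NN_backpropagate() is the derivative of the logistic
                activation applied in NN_propagate():
                d/dx [1/(1+exp(-g*x))] = g*y*(1-y), where y is the
                activation value itself.
  parameters:   none
  return:       none
 **********************************************************************/

static void sigmoid_derivative_check(void)
  {
  float gain = 1.5f;		/* Arbitrary example value. */
  float x = 0.3f, h = 1e-4f;
  float y  = 1.0f / (1.0f + expf(-gain * x));
  float yh = 1.0f / (1.0f + expf(-gain * (x + h)));

  /* Forward finite difference vs. analytic form; these should agree
     to roughly four decimal places. */
  printf("numeric:  %f\n", (yh - y) / h);
  printf("analytic: %f\n", gain * y * (1.0f - y));
  }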
/**********************************************************************
  NN_decay_weights()
  synopsis:     Apply weight decay.
  parameters:   network_t *network
  return:       none
  last updated: 01 Mar 2002
 **********************************************************************/

void NN_decay_weights(network_t *network)
  {
  int  l, i, j;

  for (l=1; l<network->num_layers; l++)
    {
    for (i=1; i<=network->layer[l].neurons; i++)
      {
      for (j=0; j<=network->layer[l-1].neurons; j++)
        {
        network->layer[l].weight[i][j] -= network->layer[l].weight[i][j]*network->decay;
        }
      }
    }

  return;
  }


/**********************************************************************
  NN_adjust_weights()
  synopsis:     Modify network weights according to classic
                back-propagated error.
  parameters:   network_t *network
  return:       none
  last updated: 01 Mar 2002
 **********************************************************************/

void NN_adjust_weights(network_t *network)
  {
  int   l, i, j;
  float out, err;

  for (l=1; l<network->num_layers; l++)
    {
    for (i=1; i<=network->layer[l].neurons; i++)
      {
      for (j=0; j<=network->layer[l-1].neurons; j++)
        {
        out = network->layer[l-1].output[j];
        err = network->layer[l].error[i];
        network->layer[l].weight[i][j] += network->rate * err * out;
        }
      }
    }

  return;
  }


/**********************************************************************
  NN_adjust_weights_decay()
  synopsis:     Modify network weights according to back-propagated
                error with weight decay.
  parameters:   network_t *network
  return:       none
  last updated: 01 Mar 2002
 **********************************************************************/

void NN_adjust_weights_decay(network_t *network)
  {
  int   l, i, j;
  float out, err;

  for (l=1; l<network->num_layers; l++)
    {
    for (i=1; i<=network->layer[l].neurons; i++)
      {
      for (j=0; j<=network->layer[l-1].neurons; j++)
        {
        out = network->layer[l-1].output[j];
        err = network->layer[l].error[i];
        network->layer[l].weight[i][j] += network->rate * err * out
                                        - network->decay * network->layer[l].weight[i][j];
        }
      }
    }

  return;
  }


/**********************************************************************
  NN_adjust_weights_momentum()
  synopsis:     Modify network weights according to back-propagated
                error with momentum.
  parameters:   network_t *network
  return:       none
  last updated:
 **********************************************************************/

void NN_adjust_weights_momentum(network_t *network)
  {
  int   l, i, j;
  float out, err;

#if NN_DEBUG>2
  printf("Adjusting weights with mmtm.  network->error = %f\n", network->error);
#endif

  for (l=1; l<network->num_layers; l++)
    {
    for (i=1; i<=network->layer[l].neurons; i++)
      {
      for (j=0; j<=network->layer[l-1].neurons; j++)
        {
        out = network->layer[l-1].output[j];
        err = network->layer[l].error[i];
        network->layer[l].weight[i][j] += network->rate * err * out
                                        + network->momentum * network->layer[l].weight_change[i][j];
        network->layer[l].weight_change[i][j] = network->rate * err * out;
        }
      }
    }

  return;
  }
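
/**********************************************************************
  weight_decay_demo()
  synopsis:     Illustrative sketch added for exposition; not part of
                the original listing.  Repeated application of the
                rule in NN_decay_weights(), w -= w*decay, is the same
                as w *= (1-decay), so after n calls a weight shrinks
                to w0*(1-decay)^n: an exponential pull towards zero,
                as in L2 regularisation.  The values below are
                arbitrary examples.
  parameters:   none
  return:       none
 **********************************************************************/

static void weight_decay_demo(void)
  {
  float w = 0.8f;		/* Example starting weight. */
  float decay = 0.01f;		/* Example decay rate. */
  int   step;

  for (step=1; step<=3; step++)
    {
    w -= w * decay;		/* Same rule as NN_decay_weights(). */
    printf("step %d: w = %f\n", step, w);
    }
  }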
/**********************************************************************
  NN_simulate_batch()
  synopsis:     Training simulation for batch-mode training.
  parameters:   network_t *network
                float *input
                float *target
  return:       none
  last updated: 25 Feb 2002
 **********************************************************************/

void NN_simulate_batch(network_t *network, float *input, float *target)
  {
  NN_input(network, input);
  NN_propagate(network);

  NN_output_error_sum(network, target);

  return;
  }


/**********************************************************************
  NN_simulate()
  synopsis:     Training simulation.
  parameters:   network_t *network
                float *input
                float *target
  return:       none
  last updated:
 **********************************************************************/

void NN_simulate(network_t *network, float *input, float *target)
  {
#if NN_DEBUG>2
  int i;	/* Debug. */
#endif

  NN_input(network, input);
  NN_propagate(network);

  NN_output_error(network, target);

#if NN_DEBUG>2
  for (i=1; i<=network->layer[network->num_layers-1].neurons; i++)
    printf("%f ", network->layer[network->num_layers-1].output[i]);
  printf("\n");
#endif

  return;
  }


/**********************************************************************
  NN_simulate_with_output()
  synopsis:     Training simulation which also returns the output
                vector.
  parameters:   network_t *network
                float *input
                float *target
                float *output
  return:       none
  last updated:
 **********************************************************************/

void NN_simulate_with_output(network_t *network, float *input, float *target, float *output)
  {
  NN_input(network, input);
  NN_propagate(network);
  NN_output(network, output);

  NN_output_error(network, target);

  return;
  }


/**********************************************************************
  NN_run()
  synopsis:     Prediction simulation.
  parameters:   network_t *network
                float *input
                float *output
  return:       none
  last updated: 28 Jan 2002
 **********************************************************************/

void NN_run(network_t *network, float *input, float *output)
  {
  NN_input(network, input);
  NN_propagate(network);
  NN_output(network, output);

  return;
  }


/**********************************************************************
  NN_train_random()
  synopsis:     Train network using back-propagation.
  parameters:   network_t *network
                int num_epochs
  return:       none
  last updated: 28 Jan 2002
 **********************************************************************/

void NN_train_random(network_t *network, const int num_epochs)
  {
  int  item, n;

  for (n=0; n<num_epochs*num_train_data; n++)
    {
    item = random_int(num_train_data);
    NN_simulate(network, train_data[item], train_property[item]);
    NN_backpropagate(network);
    NN_adjust_weights_momentum(network);
    }

  return;
  }


/**********************************************************************
  NN_train_systematic()
  synopsis:     Train network using back-propagation.
  parameters:   network_t *network
                int num_epochs
  return:       none
  last updated: 06 Feb 2002
 **********************************************************************/

void NN_train_systematic(network_t *network, const int num_epochs)
  {
  int  i, n;

  for (i=0; i<num_epochs; i++)
    {
    for (n=0; n<num_train_data; n++)
      {
      NN_simulate(network, train_data[n], train_property[n]);
      NN_backpropagate(network);
      NN_adjust_weights_momentum(network);
      }
    }

  return;
  }
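
/**********************************************************************
  train_and_run_sketch()
  synopsis:     Illustrative usage sketch added for exposition; not
                part of the original listing.  Shows a plausible
                sequential-mode train/predict cycle using only the
                routines shown on this page.  It assumes the network
                has already been allocated and initialised by the
                constructor routines elsewhere in nn_util.c (not
                shown on this page), and that the train_data and
                train_property arrays have been loaded.
  parameters:   network_t *network
                float *query        [hypothetical input vector]
                float *prediction   [hypothetical output buffer]
  return:       none
 **********************************************************************/

static void train_and_run_sketch(network_t *network, float *query, float *prediction)
  {
  /* One pass per pattern, in order, for 100 epochs (example count). */
  NN_train_systematic(network, 100);

  /* Forward pass only; no error terms are computed. */
  NN_run(network, query, prediction);
  }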
/**********************************************************************
  NN_train_batch_random()
  synopsis:     Train network using back-propagation.
  parameters:   network_t *network
                int num_epochs
  return:       none
  last updated: 25 Feb 2002
 **********************************************************************/

void NN_train_batch_random(network_t *network, const int num_epochs)
  {
  int  i, n;
  int  item;

  for (i=0; i<num_epochs; i++)
    {
    /* Accumulate output-layer error terms over one epoch's worth of
       randomly drawn patterns, then perform a single weight update. */
    for (n=0; n<num_train_data; n++)
      {
      item = random_int(num_train_data);
      NN_simulate_batch(network, train_data[item], train_property[item]);
      }
    NN_backpropagate(network);
    NN_adjust_weights_momentum(network);
    }

  return;
  }
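
/**********************************************************************
  mean_training_error_sketch()
  synopsis:     Illustrative sketch added for exposition; not part of
                the original listing.  Reports the mean
                sequential-mode error over the training set, relying
                only on routines shown on this page: NN_simulate()
                leaves one pattern's summed squared error in
                network->error via NN_output_error().
  parameters:   network_t *network
  return:       float  mean error per training pattern
 **********************************************************************/

static float mean_training_error_sketch(network_t *network)
  {
  int   n;
  float sum = 0.0f;

  for (n=0; n<num_train_data; n++)
    {
    NN_simulate(network, train_data[n], train_property[n]);
    sum += network->error;	/* 0.5 * sum of squared output errors. */
    }

  return sum / num_train_data;
  }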
