
network.c

Lightweight backpropagation neural network. This is a lightweight library implementing a neural network.
float
net_compute_output_error (network_t *net, const float *target)
{
  int n;
  float output, error;

  assert (net != NULL);
  assert (target != NULL);

  net->global_error = 0.0;
  for (n = 0; n < net->output_layer->no_of_neurons; n++) {
    output = net->output_layer->neuron[n].output;
    error = target[n] - output;
    net->output_layer->neuron[n].error = output * (1.0 - output) * error;
    net->global_error += error * error;
  }
  net->global_error *= 0.5;

  return net->global_error;
}

/*!\brief Retrieve the output error of a network.
 * \param net Pointer to a neural network.
 * \return Output error of the neural network.
 *
 * Before calling this routine, net_compute() and
 * net_compute_output_error() should have been called to compute outputs
 * for given inputs and to actually compute the output error. This
 * routine merely returns the output error (which is stored internally
 * in the neural network).
 */
float
net_get_output_error (const network_t *net)
{
  assert (net != NULL);

  return net->global_error;
}

/*!\brief [Internal] Backpropagate error from one layer to previous layer. */
static inline void
backpropagate_layer (layer_t *lower, layer_t *upper)
{
  int nl, nu;
  float output, error;

  assert (lower != NULL);
  assert (upper != NULL);

  for (nl = 0; nl <= lower->no_of_neurons; nl++) {
    error = 0.0;
    for (nu = 0; nu < upper->no_of_neurons; nu++) {
      error += upper->neuron[nu].weight[nl] * upper->neuron[nu].error;
    }
    output = lower->neuron[nl].output;
    lower->neuron[nl].error = output * (1.0 - output) * error;
  }
}

/*!\brief [Internal] Backpropagate output error through a network. */
static inline void
backward_pass (network_t *net)
{
  int l;

  assert (net != NULL);

  for (l = net->no_of_layers - 1; l > 1; l--) {
    backpropagate_layer (&net->layer[l - 1], &net->layer[l]);
  }
}

/*!\brief [Internal] Adjust weights based on (backpropagated) output error. */
static inline void
adjust_weights (network_t *net)
{
  int l, nu, nl;
  float error, delta;

  assert (net != NULL);

  for (l = 1; l < net->no_of_layers; l++) {
    for (nu = 0; nu < net->layer[l].no_of_neurons; nu++) {
      error = net->layer[l].neuron[nu].error;
      for (nl = 0; nl <= net->layer[l - 1].no_of_neurons; nl++) {
#if 1
        delta =
          net->learning_rate * error * net->layer[l - 1].neuron[nl].output +
          net->momentum * net->layer[l].neuron[nu].delta[nl];
        net->layer[l].neuron[nu].weight[nl] += delta;
        net->layer[l].neuron[nu].delta[nl] = delta;
#else /* without momentum */
        net->layer[l].neuron[nu].weight[nl] +=
          net->learning_rate * error * net->layer[l - 1].neuron[nl].output;
#endif
      }
    }
  }
}

/****************************************
 * Evaluation and Training
 ****************************************/

/*!\brief Compute outputs of a network for given inputs.
 * \param net Pointer to a neural network.
 * \param input Pointer to sequence of floating point numbers.
 * \param output Pointer to sequence of floating point numbers or NULL.
 *
 * Compute outputs of a neural network for given inputs by forward
 * propagating the inputs through the layers. If output is non-NULL, the
 * outputs are copied to output (otherwise they are only stored
 * internally in the network). Note that the outputs of the neural
 * network will always lie in the interval (0,1); the caller will have to
 * rescale them if necessary.
 */
void
net_compute (network_t *net, const float *input, float *output)
{
  assert (net != NULL);
  assert (input != NULL);

  set_input (net, input);
  forward_pass (net);
  if (output != NULL) {
    get_output (net, output);
  }
}
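/* Illustrative sketch (compiled out, not part of the library proper):
 * evaluate a network on one input/target pair and read back the output
 * error. The 2-3-1 topology and the data values are assumptions made up
 * for this example; net_allocate_l(), net_randomize() and net_free()
 * are the routines this file uses elsewhere.
 */
#if 0
static void
example_evaluate (void)
{
  int layers[3] = { 2, 3, 1 };    /* 2 inputs, 3 hidden, 1 output */
  float input[2] = { 0.0, 1.0 };
  float target[1] = { 1.0 };
  float output[1];
  float err;
  network_t *net;

  net = net_allocate_l (3, layers);
  net_randomize (net, 0.1);       /* small random initial weights */

  net_compute (net, input, output);        /* forward pass */
  net_compute_output_error (net, target);  /* compute and store error */
  err = net_get_output_error (net);        /* 0.5 * sum of squared errors */
  (void) err;                              /* silence unused warning */

  net_free (net);
}
#endif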
/*!\brief Train a network.
 * \param net Pointer to a neural network.
 *
 * Before calling this routine, net_compute() and
 * net_compute_output_error() should have been called to compute outputs
 * for given inputs and to prepare the neural network for training by
 * computing the output error. This routine performs the actual training
 * by backpropagating the output error through the layers.
 */
void
net_train (network_t *net)
{
  assert (net != NULL);

  backward_pass (net);
  adjust_weights (net);
}

/****************************************
 * Batch Training
 ****************************************/

/*!\brief [Internal] Adjust deltas based on (backpropagated) output error.
 * \param net Pointer to a neural network.
 */
static inline void
adjust_deltas_batch (network_t *net)
{
  int l, nu, nl;
  float error;

  assert (net != NULL);

  for (l = 1; l < net->no_of_layers; l++) {
    for (nu = 0; nu < net->layer[l].no_of_neurons; nu++) {
      error = net->layer[l].neuron[nu].error;
      for (nl = 0; nl <= net->layer[l - 1].no_of_neurons; nl++) {
        net->layer[l].neuron[nu].delta[nl] +=
          net->learning_rate * error * net->layer[l - 1].neuron[nl].output;
      }
    }
  }
}

/*!\brief [Internal] Adjust weights based on deltas determined by batch
 * training.
 * \param net Pointer to a neural network.
 */
static inline void
adjust_weights_batch (network_t *net)
{
  int l, nu, nl;

  assert (net != NULL);

  for (l = 1; l < net->no_of_layers; l++) {
    for (nu = 0; nu < net->layer[l].no_of_neurons; nu++) {
      for (nl = 0; nl <= net->layer[l - 1].no_of_neurons; nl++) {
        net->layer[l].neuron[nu].weight[nl] +=
          net->layer[l].neuron[nu].delta[nl] / net->no_of_patterns;
      }
    }
  }
}

/*!\brief Begin training in batch mode.
 * \param net Pointer to a neural network.
 *
 * Note that batch training does not care about momentum.
 */
void
net_begin_batch (network_t *net)
{
  assert (net != NULL);

  net->no_of_patterns = 0;
  net_reset_deltas (net);
}

/*!\brief Train a network in batch mode.
 * \param net Pointer to a neural network.
 *
 * Before calling this routine, net_begin_batch() should have been
 * called (at the start of the batch) to begin batch training.
 * Furthermore, for the current input/target pair, net_compute() and
 * net_compute_output_error() should have been called to compute outputs
 * for the given inputs and to prepare the neural network for training
 * by computing the output error using the given targets. This routine
 * performs the actual training by backpropagating the output error
 * through the layers, but does not change the weights. The weights
 * will be changed when (at the end of the batch) net_end_batch() is
 * called.
 */
void
net_train_batch (network_t *net)
{
  assert (net != NULL);

  net->no_of_patterns++;
  backward_pass (net);
  adjust_deltas_batch (net);
}

/*!\brief End training in batch mode by adjusting the weights.
 * \param net Pointer to a neural network.
 *
 * Adjust the weights in the neural network according to the average
 * delta of all patterns in the batch.
 */
void
net_end_batch (network_t *net)
{
  assert (net != NULL);

  adjust_weights_batch (net);
}
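/* Illustrative sketch (compiled out): one epoch of batch training over a
 * small training set, using exactly the call sequence the comments above
 * prescribe: net_begin_batch(), then per pattern net_compute() and
 * net_compute_output_error() followed by net_train_batch(), and finally
 * net_end_batch(). The 2-input/1-output shapes are assumptions for the
 * example. For plain online training, the loop body would instead call
 * net_compute(), net_compute_output_error() and net_train().
 */
#if 0
static void
example_batch_epoch (network_t *net, const float inputs[][2],
                     const float targets[][1], int n_patterns)
{
  int p;

  net_begin_batch (net);
  for (p = 0; p < n_patterns; p++) {
    net_compute (net, inputs[p], NULL);          /* forward pass only */
    net_compute_output_error (net, targets[p]);  /* prepare for training */
    net_train_batch (net);                       /* accumulate deltas */
  }
  net_end_batch (net);                           /* apply averaged deltas */
}
#endif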
/****************************************
 * Modification
 ****************************************/

/*!\brief Make small random changes to the weights of a network.
 * \param net Pointer to a neural network.
 * \param factor Floating point number.
 * \param range Floating point number.
 *
 * All weights in the neural network that are in absolute value smaller
 * than range become a random value from the interval [-range,range].
 * All other weights get multiplied by a random value from the interval
 * [1-factor,1+factor].
 */
void
net_jolt (network_t *net, float factor, float range)
{
  int l, nu, nl;

  assert (net != NULL);
  assert (factor >= 0.0);
  assert (range >= 0.0);

  /* modify weights */
  for (l = 1; l < net->no_of_layers; l++) {
    for (nu = 0; nu < net->layer[l].no_of_neurons; nu++) {
      for (nl = 0; nl <= net->layer[l - 1].no_of_neurons; nl++) {
        if (fabs (net->layer[l].neuron[nu].weight[nl]) < range) {
          net->layer[l].neuron[nu].weight[nl] =
            2.0 * range * ((float) random () / RAND_MAX - 0.5);
        } else {
          net->layer[l].neuron[nu].weight[nl] *=
            1.0 + 2.0 * factor * ((float) random () / RAND_MAX - 0.5);
        }
      }
    }
  }
}

/*!\brief Add neurons to a network.
 * \param net Pointer to a neural network.
 * \param layer Index of the layer to grow.
 * \param neuron Index at which the new neurons are inserted, or -1 to
 * append them at the end of the layer.
 * \param number Number of neurons to add.
 * \param range Range of the random weights for the new connections.
 */
void
net_add_neurons (network_t *net, int layer, int neuron, int number,
                 float range)
{
  int l, nu, nl, new_nu, new_nl, *arglist;
  network_t *new_net, *tmp_net;

  assert (net != NULL);
  assert (0 <= layer && layer < net->no_of_layers);
  assert (-1 <= neuron);
  assert (number >= 0);
  assert (range >= 0.0);

  /* special case to conveniently add neurons at the end of the layer */
  if (neuron == -1) {
    neuron = net->layer[layer].no_of_neurons;
  }

  /* allocate memory for the new network */
  arglist = calloc (net->no_of_layers, sizeof (int));
  for (l = 0; l < net->no_of_layers; l++) {
    arglist[l] = net->layer[l].no_of_neurons;
  }
  arglist[layer] += number;
  new_net = net_allocate_l (net->no_of_layers, arglist);
  free (arglist);

  /* the new neurons will be connected with small, random weights */
  net_randomize (new_net, range);

  /* copy the original network's weights and deltas into the new one */
  for (l = 1; l < net->no_of_layers; l++) {
    for (nu = 0; nu < net->layer[l].no_of_neurons; nu++) {
      new_nu = (l == layer) && (nu >= neuron) ? nu + number : nu;
      for (nl = 0; nl <= net->layer[l - 1].no_of_neurons; nl++) {
        new_nl = (l == layer + 1) && (nl >= neuron) ? nl + number : nl;
        new_net->layer[l].neuron[new_nu].weight[new_nl] =
          net->layer[l].neuron[nu].weight[nl];
        new_net->layer[l].neuron[new_nu].delta[new_nl] =
          net->layer[l].neuron[nu].delta[nl];
      }
    }
  }

  /* copy the original network's constants into the new one */
  new_net->momentum = net->momentum;
  new_net->learning_rate = net->learning_rate;

  /* switch new_net and net, so it is possible to keep the same pointer */
  tmp_net = malloc (sizeof (network_t));
  memcpy (tmp_net, new_net, sizeof (network_t));
  memcpy (new_net, net, sizeof (network_t));
  memcpy (net, tmp_net, sizeof (network_t));
  free (tmp_net);

  /* free allocated memory */
  net_free (new_net);
}
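/* Illustrative sketch (compiled out): escape a training plateau by
 * jolting the weights, then append two neurons at the end of layer 1
 * (neuron == -1 triggers the append special case above). The factor,
 * range and layer index are assumptions for the example; layer 1 is
 * taken to be the first hidden layer.
 */
#if 0
static void
example_modify (network_t *net)
{
  net_jolt (net, 0.05, 0.01);            /* factor 0.05, range 0.01 */
  net_add_neurons (net, 1, -1, 2, 0.1);  /* grow layer 1 by 2 neurons */
}
#endif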
/*!\brief Remove neurons from a network.
 * \param net Pointer to a neural network.
 * \param layer Index of the layer to shrink.
 * \param neuron Index of the first neuron to remove.
 * \param number Number of neurons to remove.
 */
void
net_remove_neurons (network_t *net, int layer, int neuron, int number)
{
  int l, nu, nl, orig_nu, orig_nl, *arglist;
  network_t *new_net, *tmp_net;

  assert (net != NULL);
  assert (0 <= layer && layer < net->no_of_layers);
  assert (0 <= neuron);
  assert (number >= 0);

  /* allocate memory for the new network */
  arglist = calloc (net->no_of_layers, sizeof (int));
  for (l = 0; l < net->no_of_layers; l++) {
    arglist[l] = net->layer[l].no_of_neurons;
  }
  arglist[layer] -= number;
  new_net = net_allocate_l (net->no_of_layers, arglist);
  free (arglist);

  /* copy the original network's weights and deltas into the new one */
  for (l = 1; l < new_net->no_of_layers; l++) {
    for (nu = 0; nu < new_net->layer[l].no_of_neurons; nu++) {
      orig_nu = (l == layer) && (nu >= neuron) ? nu + number : nu;
      for (nl = 0; nl <= new_net->layer[l - 1].no_of_neurons; nl++) {
        orig_nl = (l == layer + 1) && (nl >= neuron) ? nl + number : nl;
        new_net->layer[l].neuron[nu].weight[nl] =
          net->layer[l].neuron[orig_nu].weight[orig_nl];
        new_net->layer[l].neuron[nu].delta[nl] =
          net->layer[l].neuron[orig_nu].delta[orig_nl];
      }
    }
  }

  /* copy the original network's constants into the new one */
  new_net->momentum = net->momentum;
  new_net->learning_rate = net->learning_rate;

  /* switch new_net and net, so it is possible to keep the same pointer */
  tmp_net = malloc (sizeof (network_t));
  memcpy (tmp_net, new_net, sizeof (network_t));
  memcpy (new_net, net, sizeof (network_t));
  memcpy (net, tmp_net, sizeof (network_t));
  free (tmp_net);

  /* free allocated memory */
  net_free (new_net);
}

/*!\brief Copy a network.
 * \param net Pointer to a neural network.
 * \return Pointer to a copy of the neural network.
 */
network_t *
net_copy (const network_t *net)
{
  int l, nu, nl, *arglist;
  network_t *new_net;

  assert (net != NULL);

  /* allocate memory for the new network */
  arglist = calloc (net->no_of_layers, sizeof (int));
  for (l = 0; l < net->no_of_layers; l++) {
    arglist[l] = net->layer[l].no_of_neurons;
  }
  new_net = net_allocate_l (net->no_of_layers, arglist);
  free (arglist);

  /* copy the original network's weights and deltas into the new one */
  for (l = 1; l < net->no_of_layers; l++) {
    for (nu = 0; nu < net->layer[l].no_of_neurons; nu++) {
      for (nl = 0; nl <= net->layer[l - 1].no_of_neurons; nl++) {
        new_net->layer[l].neuron[nu].weight[nl] =
          net->layer[l].neuron[nu].weight[nl];
        new_net->layer[l].neuron[nu].delta[nl] =
          net->layer[l].neuron[nu].delta[nl];
      }
    }
  }

  /* copy the original network's constants into the new one */
  new_net->momentum = net->momentum;
  new_net->learning_rate = net->learning_rate;
  new_net->no_of_patterns = net->no_of_patterns;

  return new_net;
}
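/* Illustrative sketch (compiled out): checkpoint the current weights
 * with net_copy() before a risky modification such as pruning a neuron
 * with net_remove_neurons(). The layer and neuron indices are
 * assumptions for the example; the caller can later restore the backup
 * (see net_overwrite() below) or release it with net_free().
 */
#if 0
static network_t *
example_prune (network_t *net)
{
  network_t *backup;

  backup = net_copy (net);            /* snapshot current weights */
  net_remove_neurons (net, 1, 0, 1);  /* drop neuron 0 of layer 1 */
  return backup;                      /* caller restores or frees it */
}
#endif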
/*!\brief Overwrite one network with another.
 * \param dest Pointer to a neural network.
 * \param src Pointer to a neural network.
 *
 * The neural network dest becomes a copy of the neural network src.
 * Note that dest must be an allocated neural network and its original
 * contents are discarded (with net_free()).
 */
void
net_overwrite (network_t *dest, const network_t *src)
{
  network_t *new_net, *tmp_net;

  assert (dest != NULL);
  assert (src != NULL);

  new_net = net_copy (src);

  /* switch new_net and dest, so it is possible to keep the same pointer */
  tmp_net = malloc (sizeof (network_t));
  memcpy (tmp_net, new_net, sizeof (network_t));
  memcpy (new_net, dest, sizeof (network_t));
  memcpy (dest, tmp_net, sizeof (network_t));
  free (tmp_net);

  net_free (new_net);
}
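/* Illustrative sketch (compiled out): roll back to a checkpoint taken
 * with net_copy(), e.g. the backup returned by the pruning example
 * above. Because net_overwrite() keeps the dest pointer valid, callers
 * that hold net do not need to be updated.
 */
#if 0
static void
example_restore (network_t *net, network_t *backup)
{
  net_overwrite (net, backup);  /* net becomes a copy of backup */
  net_free (backup);            /* done with the checkpoint */
}
#endif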
