
📄 kohnet.c

📁 Statistical pattern recognition algorithm package
💻 C++
📖 Page 1 of 2
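The excerpt below is the KohNet (Kohonen self-organizing network) portion of this package: the tail end of the learn() training routine, followed by initialize(), the two epoch passes epoch1() and epoch2(), force_win(), and the weight save/restore routines. The listing starts in mid-function because the opening of learn() is not part of this excerpt.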
   else
      work = NULL ;

   if ((won == NULL)  ||  (correc == NULL)  ||
       ((! kp->learn_method)  &&  (work == NULL))) {
      if (won != NULL)
         FREE ( won ) ;
      if (correc != NULL)
         FREE ( correc ) ;
      if (work != NULL)
         FREE ( work ) ;
      delete bestnet ;
      memory_message ( "to learn" ) ;
      return ;
      }

   rate = kp->rate ;

/*
   If the user specified NOINIT, they are continuing to learn from
   existing weights.  Call epoch1 to find the error associated with
   those weights, and save the weights as best so far.
   Then don't waste that call to epoch1.  Call epoch2 to update
   the weights.
*/

   if (lptr->init == 0) {      // NOINIT (continue learning)
      epoch1 ( tptr , rate , 1 , won , &bigerr , correc , work ) ;
      best_err = neterr = bigerr ;
      copy_weights ( bestnet , this ) ;
      epoch2 ( rate , kp->learn_method , won , &bigcorr , correc ) ;
      }
   else if (lptr->init == 1) { // RANDOM Initialize weights
      initialize () ;
      best_err = 1.e30 ;
      }

/*
   Main loop is here.  Each iter is a complete epoch.
*/

   n_retry = 0 ;
   for (iter=0 ; ; iter++) {

      epoch1 ( tptr , rate , kp->learn_method , won ,
               &bigerr , correc , work ) ;
      neterr = bigerr ;

      if (neterr < best_err) {  // Keep track of best
         best_err = neterr ;
         copy_weights ( bestnet , this ) ;
         }

      winners = 0 ;     // Count how many neurons won this iter
      i = nout ;
      while (i--) {
         if (won[i])
            ++winners ;
         }

      sprintf( msg ,
         "Iter %d err=%.2lf (best=%.2lf)  %d won",
          iter, 100.0 * neterr, 100.0 * best_err, winners ) ;
      normal_message ( msg ) ;

/****************************************************************************
      if (kbhit()) {            // Was a key pressed?
         key = getch () ;       // Read it if so
         while (kbhit())        // Flush key buffer in case function key
            getch () ;          // or key was held down
         if (key == 27)         // ESCape
            break ;
         }
***************************************************************************/

      if (bigerr < lptr->quit_err) // Are we done?
         break ;

/*
   If one or more neurons failed to ever win, make it a winner.
   Note that this has a theoretical flaw.
   If the training set has duplication such that there are fewer
   unique values than neurons, we can get in a loop of flipping
   case values around neurons.  Thus, rather than verifying
   winners<tptr->ntrain below, we should ideally count how many
   unique values are in the training set, and use that number.
   However, that would be time consuming and protect against an
   extremely unlikely event anyway.
*/

      if ((winners < nout)  &&  (winners < tptr->ntrain)) {
         force_win ( tptr , won ) ;
         continue ;
         }

      epoch2 ( rate , kp->learn_method , won , &bigcorr , correc ) ;

      sprintf( msg , "  correction=%.2lf", 100.0 * bigcorr ) ;
      progress_message ( msg ) ;

      if (bigcorr < 1.e-5) { // Trivial improvement?
         if (++n_retry > lptr->retries) // If so, start over
            break ;          // unless at user's limit
         initialize () ;     // Use totally random weights
         iter = -1 ;         // End of loop incs this to 0
         rate = kp->rate ;   // Rate starts high again
         continue ;
         }

      if (rate > 0.01)  // Reduce learning rate each time
         rate *= kp->reduction ;

      } // Endless learning loop

/*
   We are done.  Retrieve the best weights.  Learning should have left
   them very close to normalized, but it doesn't hurt to touch them up.
   Unfortunately, this can slightly change the network error.
*/

   copy_weights ( this , bestnet ) ;
   for (i=0 ; i<nout ; i++)
      wt_norm ( out_coefs + i * (nin+1) ) ;

   MEMTEXT ( "KOHNET: Learn scratch" ) ;
   delete bestnet ;
   FREE ( won ) ;
   FREE ( correc ) ;
   if (! kp->learn_method)  // Needed only for additive method
      FREE ( work ) ;

   return ;
}

/*
--------------------------------------------------------------------------------

   initialize - Initialize weights

--------------------------------------------------------------------------------
*/

void KohNet::initialize ()
{
   int i ;
   double *optr ;

   zero_weights () ;
   shake ( nout * (nin+1) , out_coefs , out_coefs , 1.0 ) ;

   for (i=0 ; i<nout ; i++) {
      optr = out_coefs + i * (nin+1) ;  // This weight vector
      wt_norm ( optr ) ;
      }
}

/*
--------------------------------------------------------------------------------

   epoch1 - Compute the error and correction vector

--------------------------------------------------------------------------------
*/

void KohNet::epoch1 (
   TrainingSet *tptr , // Training set
   double rate ,       // Learning rate
   int learn_method ,  // 0=additive, 1=subtractive
   int *won ,          // Work vector holds times each neuron won
   double *bigerr ,    // Returns max error length across training set
   double *correc ,    // Work vector nout*(nin+1) long for corrections
   double *work        // Work vector nin+1 long for additive learning
   )
{
   int i, best, size, nwts, tset ;
   double *dptr, normfac, synth, *cptr, *wptr, length, diff ;

   nwts = nout * (nin+1) ;
   size = nin + 1 ;   // Size of each case in training set

/*
   Zero cumulative corrections and winner counts
*/

   i = nwts ;
   while (i--)
      correc[i] = 0.0 ;
   memset ( won , 0 , nout * sizeof(int) ) ;

   *bigerr = 0.0 ;  // Length of biggest error vector

/*
   Cumulate the correction vector 'correc' across the epoch
*/

   for (tset=0 ; tset<tptr->ntrain ; tset++) {

      dptr = tptr->data + size * tset ; // Point to this case
      best = winner ( dptr , &normfac , &synth ) ; // Winning neuron
      ++won[best] ;                   // Record this win
      wptr = out_coefs+best*(nin+1) ; // Winner's weights here
      cptr = correc+best*(nin+1) ;    // Corrections summed here
      length = 0.0 ;                  // Length of error vector

      for (i=0 ; i<nin ; i++) {  // Do all inputs
         diff = dptr[i] * normfac - wptr[i] ; // Input minus weight
         length += diff * diff ; // Cumulate length of error
         if (learn_method)       // Subtractive method
            cptr[i] += diff ;    // just uses differences
         else                    // Additive more complex
            work[i] = rate * dptr[i] * normfac + wptr[i] ;
         }                       // Loop does actual inputs

      diff = synth - wptr[nin] ; // Don't forget synthetic input
      length += diff * diff ;    // It is part of input too!
      if (learn_method)          // Subtractive method
         cptr[nin] += diff ;     // Cumulate across epoch
      else                       // Additive more complex
         work[nin] = rate * synth + wptr[nin] ;

      if (length > *bigerr)      // Keep track of largest error
         *bigerr = length ;

      if (! learn_method) {      // Additive method
         wt_norm ( work ) ;
         for (i=0 ; i<=nin ; i++)
            cptr[i] += work[i] - wptr[i] ;
         }

      } // Pass through all training sets, cumulating correction vector

   *bigerr = sqrt ( *bigerr ) ;
}

/*
--------------------------------------------------------------------------------

   epoch2 - Adjust weights per corrections from epoch1

--------------------------------------------------------------------------------
*/

void KohNet::epoch2 (
   double rate ,       // Learning rate
   int learn_method ,  // 0=additive, 1=subtractive
   int *won ,          // Work vector holds times each neuron won
   double *bigcorr ,   // Returns length of largest correction vector
   double *correc      // Work vector nout*(nin+1) long for corrections
   )
{
   int i, j ;
   double corr, *cptr, *wptr, length, f, diff ;

   *bigcorr = 0.0 ;                // Length of largest correction

   for (i=0 ; i<nout ; i++) {      // Apply mean correction to each

      if (! won[i])                // If this neuron never won
         continue ;                // might as well skip update

      wptr = out_coefs+i*(nin+1) ; // i's weights here
      cptr = correc+i*(nin+1) ;    // Corrections were summed here

      f = 1.0 / (double) won[i] ;  // Finds mean across epoch
      if (learn_method)            // Subtractive method
         f *= rate ;               // needs learning rate included

      length = 0.0 ;               // Will sum length of correction

      for (j=0 ; j<=nin ; j++) {   // Weight vector for this neuron
         corr = f * cptr[j] ;      // Mean correction
         wptr[j] += corr ;         // Update weight vector
         length += corr * corr ;   // Sum length of this correction
         }

      if (length > *bigcorr)       // Keep track of biggest correction
         *bigcorr = length ;

      }

/*
   Scale the correction length per learning rate so that we
   are not fooled into thinking we converged when really all
   that happened is that the learning rate got small.
   Note that it can exceed 1.0 if the weights and data
   pointed in opposing directions.
*/

   *bigcorr = sqrt ( *bigcorr ) / rate ;
}

/*
--------------------------------------------------------------------------------

   force_win - Force a neuron to win.

--------------------------------------------------------------------------------
*/

void KohNet::force_win (
   TrainingSet *tptr , // Training set
   int *won            // Work vector holds times each neuron won
   )
{
   int i, tset, best, size, which ;
   double *dptr, normfac, synth, dist, *optr ;

   size = nin + 1 ;  // Size of each training case

/*
   Find the training case which is farthest from its winning neuron.
   It is reasonable to believe that this case is not adequately
   represented by that neuron, and deserves a neuron of its very own.
*/

   dist = 1.e30 ;
   for (tset=0 ; tset<tptr->ntrain ; tset++) {
      dptr = tptr->data + size * tset ; // Point to this case
      best = winner ( dptr , &normfac , &synth ) ; // Winning neuron
      if (out[best] < dist) {  // Far indicated by low activation
         dist = out[best] ;    // Maintain record
         which = tset ;        // and which case did it
         }
      }

/*
   Now find the non-winning neuron which is most similar to
   the under-represented case found above.
*/

   dptr = tptr->data + size * which ;
   best = winner ( dptr , &normfac , &synth ) ;

   dist = -1.e30 ;
   i = nout ;
   while (i--) {           // Try all neurons
      if (won[i])          // If this one won then skip it
         continue ;        // We want a non-winner
      if (out[i] > dist) { // High activation means similar
         dist = out[i] ;   // Keep track of best
         which = i ;       // and its subscript
         }
      }

/*
   Use that training case to define the new weights.
   Strictly speaking, we should multiply the inputs by normfac,
   then append synth.  But since we normalize, it is equivalent
   (and faster) to copy the inputs, then append synth / normfac.
*/

   optr = out_coefs + which * (nin+1) ;        // Non-winner's weights
   memcpy( optr , dptr , nin*sizeof(double)) ; // become case
   optr[nin] = synth / normfac ;               // Append synth
   wt_norm ( optr ) ;                          // Keep normal
}

/*
--------------------------------------------------------------------------------

   wt_save - Save weights to disk (called from WT_SAVE.CPP)
   wt_restore - Restore weights from disk (called from WT_SAVE.CPP)

--------------------------------------------------------------------------------
*/

int KohNet::wt_save ( FILE *fp )
{
   int n ;

   n = nout * (nin+1) ;
   fwrite ( out_coefs , n * sizeof(double) , 1 , fp ) ;

   if (ferror ( fp ))
      return 1 ;
   return 0 ;
}

void KohNet::wt_restore ( FILE *fp )
{
   int n ;

   n = nout * (nin+1) ;
   fread ( out_coefs , n * sizeof(double) , 1 , fp ) ;

   if (ferror ( fp ))
      ok = 0 ;
}
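The routines above call several helpers that are not shown in this excerpt: winner(), wt_norm(), copy_weights(), zero_weights(), and shake(). Judging from the comments, each training case is scaled by a factor normfac and extended with a synthetic component synth so that it has unit length, and every output neuron keeps a unit-length weight vector of nin+1 coefficients. The sketch below is only a guess at how weight normalization and winner selection might work; the calling conventions are inferred from the code above, but the bodies, and the assumption that normfac and synth are already known (the real winner() appears to compute and return them through pointers), are mine, not the package's code.

#include <math.h>

/* Hypothetical sketch only: normalize a vector of nin+1 weights
   (nin real weights plus the synthetic one) to unit Euclidean length.
   The package's actual wt_norm() is not shown on this page. */
static void wt_norm_sketch ( double *w , int nin )
{
   double len = 0.0 ;
   for (int i=0 ; i<=nin ; i++)
      len += w[i] * w[i] ;
   len = sqrt ( len ) ;
   if (len < 1.e-30)              // Guard against a zero vector
      len = 1.e-30 ;
   for (int i=0 ; i<=nin ; i++)
      w[i] /= len ;
}

/* Hypothetical sketch only: pick the output neuron whose unit-length
   weight vector has the largest dot product with the normalized input.
   'out' holds the activations, as force_win() above expects. */
static int winner_sketch ( const double *input , double normfac , double synth ,
                           const double *out_coefs , double *out ,
                           int nin , int nout )
{
   int best = 0 ;
   for (int i=0 ; i<nout ; i++) {
      const double *w = out_coefs + i * (nin+1) ;
      double act = 0.0 ;
      for (int j=0 ; j<nin ; j++)
         act += w[j] * input[j] * normfac ;   // Scaled real inputs
      act += w[nin] * synth ;                 // Synthetic input
      out[i] = act ;
      if (act > out[best])
         best = i ;
      }
   return best ;
}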
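Read together, epoch1() and epoch2() implement a batch version of the Kohonen update rule: across one full pass the winning neuron's differences (normalized input minus weights) are accumulated, and afterwards each winner is moved by its mean accumulated difference scaled by the learning rate. A compressed, hypothetical restatement of the subtractive path (learn_method = 1), reduced to a single neuron that wins every case, is:

/* Illustration only: one batch subtractive Kohonen epoch.
   'cases' holds ncase normalized input vectors of length n, and
   'w' is one neuron's weight vector of length n, assumed to be the
   winner for every case so the sketch stays short. */
static void subtractive_epoch_sketch ( const double *cases , int ncase ,
                                       double *w , int n , double rate )
{
   double *sum = new double[n] ;       // Accumulated (input - weight)
   for (int j=0 ; j<n ; j++)
      sum[j] = 0.0 ;

   for (int t=0 ; t<ncase ; t++)       // epoch1: cumulate differences
      for (int j=0 ; j<n ; j++)
         sum[j] += cases[t*n+j] - w[j] ;

   for (int j=0 ; j<n ; j++)           // epoch2: mean correction times rate
      w[j] += rate * sum[j] / (double) ncase ;

   delete[] sum ;
}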
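Finally, wt_save() and wt_restore() stream the block of nout*(nin+1) coefficients to and from an already open FILE*; wt_save() returns nonzero on a write error, while wt_restore() reports a read error only by clearing the member flag ok. A hypothetical caller (the file name is invented, and the KohNet declaration from this package is assumed to be in scope) might look like this:

#include <stdio.h>

/* Hypothetical usage sketch only; 'net' is a trained KohNet from this
   package, and "kohnet.wts" is an invented file name. */
void save_and_reload_sketch ( KohNet *net )
{
   FILE *fp = fopen ( "kohnet.wts" , "wb" ) ;  // Binary mode for raw doubles
   if (fp != NULL) {
      if (net->wt_save ( fp ))                 // Nonzero means write error
         printf ( "Write error saving weights\n" ) ;
      fclose ( fp ) ;
      }

   fp = fopen ( "kohnet.wts" , "rb" ) ;
   if (fp != NULL) {
      net->wt_restore ( fp ) ;                 // Clears the net's ok flag on error
      fclose ( fp ) ;
      }
}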
