ngen_wsta.c
   Line_Search(DONE, ITERATIONS, NEW, OBJ_CUR, best, OBJ_PAST, SEARCH, descent);
   first_iter = 0;                              /* no more first_iter */

/* Checking if this procedure was of any good. Is this solution better ? */
   if (verbose)                                 /* reporting first */
   {
      for (number_of_iterations = 0, i_to_calc = 0; i_to_calc < Popsize; i_to_calc++)
         number_of_iterations += ITERATIONS[i_to_calc];
      fprintf(stderr, "Subpopulation %d performed %d iterations in the CG\n",
              instance, number_of_iterations);
   }

   for (i_to_calc = 0; i_to_calc < Popsize; i_to_calc++)
   {
      if (!DONE[i_to_calc])
      {
         /* computing relative change */
         change = ABS((OBJ_CUR[i_to_calc] - OBJ_PAST[i_to_calc]) / OBJ_CUR[i_to_calc]);

         if ((OBJ_CUR[i_to_calc] > OBJ_PAST[i_to_calc]) || (change < EPSLON))
         {
            /* stop the procedure */
            DONE[i_to_calc] = 1;
         }
         else if (ITERATIONS[i_to_calc] > max_iter)   /* MAX # of iterations reached */
         {                                            /* stop, but swap the models   */
            DONE[i_to_calc] = 1;

            /* new CURRENT OF */
            OBJ_PAST[i_to_calc] = OBJ_CUR[i_to_calc];

            /* swapping solutions and gradients */
            /* M*U*S*T B*E like that */
            for (iparam = 0; iparam < NSOURCES + NRECEIVERS; iparam++)
               best[i_to_calc][iparam] = NEW[i_to_calc][iparam];
         }
         else
         {
            /* new CURRENT OF */
            OBJ_PAST[i_to_calc] = OBJ_CUR[i_to_calc];

            /* swapping solutions and gradients */
            /* M*U*S*T B*E like that */
            for (iparam = 0; iparam < NSOURCES + NRECEIVERS; iparam++)
            {
               aux = NEW[i_to_calc][iparam];
               NEW[i_to_calc][iparam] = best[i_to_calc][iparam];
               best[i_to_calc][iparam] = aux;
            }
            temp = GRADIENT[i_to_calc];
            GRADIENT[i_to_calc] = GRADIENT_BEF[i_to_calc];
            GRADIENT_BEF[i_to_calc] = temp;
         }
      }   /* closing if for DONE */
   }      /* closing LOOP i_to_calc */
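/*
 * A note on the bookkeeping above (inferred from the code, not from the
 * original documentation): a member is flagged DONE when its objective
 * increases, when the relative change
 *
 *    change = ABS((OBJ_CUR - OBJ_PAST) / OBJ_CUR) < EPSLON,
 *
 * or when more than max_iter conjugate-gradient iterations have been spent.
 * Otherwise NEW and best (and GRADIENT and GRADIENT_BEF) are swapped, so
 * that best always holds the latest accepted model and GRADIENT_BEF keeps
 * the previous gradient for the next conjugate-gradient update.
 */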
/* Computing the derivatives for the models that are not done */
   Derivatives(DONE, GRADIENT, best);

/* Now checking the DONE_TOTAL flag. Is the whole computation finished ? */
   for (DONE_TOTAL = 1, i_to_calc = 0; i_to_calc < Popsize; i_to_calc++)
   {
      DONE_TOTAL *= DONE[i_to_calc];
   }
}   /* closing while for DONE_TOTAL */

/* Returning # of iterations, but first freeing memory */
   for (number_of_iterations = 0, i_to_calc = 0; i_to_calc < Popsize; i_to_calc++)
      number_of_iterations += ITERATIONS[i_to_calc];

   free2double(NEW);
   free2double(GRADIENT);
   free2double(GRADIENT_BEF);
   free2double(SEARCH);
   free1double(OBJ_CUR);
   free1double(BETA);
   free1int(DONE);
   free1int(ITERATIONS);

   return (number_of_iterations);
}

/*
 * This routine calculates the GRADIENT of the stacking power at the
 * model provided
 */
Derivatives(DONE, GRADIENT, model)
double **GRADIENT;      /* computed gradient */
double **model;         /* models where the gradient is computed */
int *DONE;              /* defines whether the computation is necessary */
{
   double static_itrace, static_i;      /* statics calculation */
   double xc0, xc1, xc2;                /* Xcor derivatives */
   double xcor_deriv_0;                 /* Xcor derivatives */
   double aux;                          /* auxiliary quantity */
   register int i, ict;                 /* counters */
   int i_to_calc;                       /* defines the model for which the */
                                        /* gradient is computed */
   int ipoint_cross, iparam;
   int itrace, icmp;                    /* pointers */
   int LAG;                             /* LAG between traces */

/* Sweeping over all models and calculating the gradient for the ones that
   are not done yet. Reading the file */
   rewind(Xfp);

/* And differentiating the Xcorrelation part of the objective function.
   First resetting */
   for (i_to_calc = 0; i_to_calc < Popsize; i_to_calc++)
      for (iparam = 0; iparam < NSOURCES + NRECEIVERS; iparam++)
         GRADIENT[i_to_calc][iparam] = 0.;
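/*
 * Sketch of the loop below (a reading of the code, not original
 * documentation): DERIVATIVE[icmp][iparam] indexes the single trace in CMP
 * gather icmp that parameter iparam (a source or a receiver static) moves;
 * -1 means the parameter does not touch this gather.  For every other trace
 * in the gather the lag between the pair is TOTAL_LAG/2 plus the rounded
 * difference of their current statics, and the derivative of the stored
 * crosscorrelation with respect to that lag is approximated by a central
 * difference inside the lag window and a one-sided difference at its ends.
 * The sign of each contribution accounts for the trace ordering and for the
 * fact that the NEGATIVE of the stacking power is being minimized.
 */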
   for (icmp = 0; icmp < NCMP; icmp++)
   {
      fread(CROSS[0], sizeof(float), (TOTAL_LAG + 1) * (MAXFOLD * (MAXFOLD - 1) / 2), Xfp);

      for (i_to_calc = 0; i_to_calc < Popsize; i_to_calc++)
      {
         if (!DONE[i_to_calc])
         {
            for (iparam = 0; iparam < NSOURCES + NRECEIVERS; iparam++)
            {
               if (DERIVATIVE[icmp][iparam] != -1)   /* i.e. the derivative */
               {                                     /* is not zero         */
                  itrace = DERIVATIVE[icmp][iparam];
                  static_itrace = model[i_to_calc][POINTER[icmp][itrace][0]] +
                                  model[i_to_calc][POINTER[icmp][itrace][1] + NSOURCES];

                  for (i = 0; i < FOLDCMP[icmp]; i++)
                  {
                     /* statics for trace i */
                     static_i = model[i_to_calc][POINTER[icmp][i][0]] +
                                model[i_to_calc][POINTER[icmp][i][1] + NSOURCES];

                     /* computing the LAG */
                     if (i < itrace)
                        LAG = TOTAL_LAG / 2 + NINT(static_itrace - static_i);
                     if (i > itrace)
                        LAG = TOTAL_LAG / 2 + NINT(static_i - static_itrace);

                     if (i != itrace)
                     {
                        /* ipoint_cross points to the proper Xcorrelation */
                        ipoint_cross = POINT_CROSS[icmp][iparam][i];

                        if (LAG == 0)
                        {
                           xc0 = (double) CROSS[ipoint_cross][0];
                           xc1 = (double) CROSS[ipoint_cross][1];
                           xc2 = (double) CROSS[ipoint_cross][2];
                           /* for the gradient */
                           xcor_deriv_0 = xc1 - xc0;
                        }
                        else if (LAG == TOTAL_LAG + 1)
                        {
                           xc0 = (double) CROSS[ipoint_cross][TOTAL_LAG - 1];
                           xc1 = (double) CROSS[ipoint_cross][TOTAL_LAG];
                           xc2 = (double) CROSS[ipoint_cross][TOTAL_LAG + 1];
                           xcor_deriv_0 = xc2 - xc1;
                        }
                        else
                        {
                           xc0 = (double) CROSS[ipoint_cross][LAG - 1];
                           xc1 = (double) CROSS[ipoint_cross][LAG];
                           xc2 = (double) CROSS[ipoint_cross][LAG + 1];
                           xcor_deriv_0 = .5 * (xc2 - xc0);
                        }

                        /* computing the gradient */
                        if (i < itrace)
                           GRADIENT[i_to_calc][iparam] -= xcor_deriv_0;
                        else
                           GRADIENT[i_to_calc][iparam] += xcor_deriv_0;

                        /* The signs above are justified by whether i is less
                           or greater than itrace AND by the fact that I am
                           minimizing the NEGATIVE of the stacking power */
                     }   /* i != itrace */
                  }      /* within CMP */
               }         /* derivative exists */
            }            /* for all parameters */
         }               /* close IF for DONE */
      }                  /* close for i_to_calc */
   }                     /* for all CMPs */
}                        /* that's it */

/*
 * Inexact Cubic Line Search
 *
 * Function directly returns number of function evaluations performed
 * doing the line search.
 * Function indirectly returns x(k+1) with f(x(k)) > f(x(k+1)).
 *
 * Written by Douglas I. Hart -- July 1993
 * Adapted by Wences Gouveia -- September 1993
 */
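/*
 * Outline of the search below (a summary of the code, not a specification):
 * every not-DONE member starts from a small trial step LAMBDA = 0.001 along
 * its descent direction p.  Goldstein's test grows LAMBDA by a factor of 3
 * as long as
 *
 *    f(best + LAMBDA*p) < f(best) + (1 - alpha)*LAMBDA*slope,
 *
 * i.e. while the step is still too small; Armijo's backtracking then shrinks
 * LAMBDA as long as
 *
 *    f(best + LAMBDA*p) > f(best) + alpha*LAMBDA*slope   and   LAMBDA > LAMBDA_MIN,
 *
 * using a quadratic fit on the first backtrack and a safeguarded cubic fit
 * afterwards (alpha = 0.25, and slope = gradient.p < 0 for a descent
 * direction).  Trial models are clipped to [Gene[j].min, Gene[j].max] and
 * evaluated in batches through eval() / eval_returned.
 */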
/*------------------------------------------------------------------------*/
Line_Search(
int *DONE,              /* DONE flags */
int *ITERATIONS,        /* # of iterations per member */
double **NEW,           /* minimizer */
double *OBJ_CUR,        /* evaluations for minimizer */
double **best,          /* input point for line search */
double *OBJ_PAST,       /* evaluations for input data */
double **p,             /* descent direction */
double *slope)          /* slope in descent direction */
{
   int *TST;
   double *OBJ_PREV;
   float *LAMBDA;                       /* start with a full Newton step */
   float lambda2, *LAMBDAPREV, lambdaprev2, lambdatemp;
   float f1, f2;
   float c, cm11, cm12, cm21, cm22;
   float a, b, disc;
   float alpha = .25;
   int *GOLD_TEST, *ARM_TEST;
   int i, j, i_to_calc, how_many;
   int GOLD_TEST_TOTAL, ARM_TEST_TOTAL;

   OBJ_PREV   = alloc1double(Popsize);
   LAMBDAPREV = alloc1float(Popsize);
   LAMBDA     = alloc1float(Popsize);
   GOLD_TEST  = alloc1int(Popsize);
   ARM_TEST   = alloc1int(Popsize);
   TST        = alloc1int(Popsize);
   if (OBJ_PREV == NULL || GOLD_TEST == NULL || ARM_TEST == NULL ||
       TST == NULL || LAMBDA == NULL || LAMBDAPREV == NULL)
   {
      Error("Memory allocation error in Line_Search");
   }

   /* initial LAMBDA */
   for (i = 0; i < Popsize; i++)
   {
      TST[i] = 0;
      LAMBDA[i] = .001;
   }

   /* Let's try the 1st update */
   for (how_many = 0, i_to_calc = 0; i_to_calc < Popsize; i_to_calc++)
   {
      if (!DONE[i_to_calc])
      {
         for (j = 0; j < NSOURCES + NRECEIVERS; j++)
         {
            NEW[i_to_calc][j] = best[i_to_calc][j] + LAMBDA[i_to_calc] * p[i_to_calc][j];

            /* limiting */
            if (NEW[i_to_calc][j] > Gene[j].max) NEW[i_to_calc][j] = Gene[j].max;
            if (NEW[i_to_calc][j] < Gene[j].min) NEW[i_to_calc][j] = Gene[j].min;

            /* Gathering the models to be evaluated */
            to_be_calculated[how_many][j] = NEW[i_to_calc][j];
         }
         how_many++;
      }
   }
   eval(how_many, NSOURCES + NRECEIVERS);

   /* Copying evaluations for OBJ_CUR */
   for (how_many = 0, i_to_calc = 0; i_to_calc < Popsize; i_to_calc++)
   {
      if (!DONE[i_to_calc])
      {
         OBJ_CUR[i_to_calc] = eval_returned[how_many];
         ITERATIONS[i_to_calc]++;
         how_many++;
      }
   }

   /* Goldstein's Test for lambda too small of a step size */
   /* See Fletcher, Practical Methods of Optimization, page 26ff. */
   GOLD_TEST_TOTAL = 1;
   do
   {
      for (how_many = 0, i_to_calc = 0; i_to_calc < Popsize; i_to_calc++)
      {
         if (!DONE[i_to_calc])
         {
            if (OBJ_CUR[i_to_calc] <
                (OBJ_PAST[i_to_calc] + (1 - alpha) * LAMBDA[i_to_calc] * slope[i_to_calc]))
               GOLD_TEST[i_to_calc] = 1;
            else
               GOLD_TEST[i_to_calc] = 0;

            if (GOLD_TEST[i_to_calc])
            {
               LAMBDA[i_to_calc] *= 3.0;   /* increase lambda by 3 if too small of a step */

               for (i = 0; i < NSOURCES + NRECEIVERS; i++)
               {
                  /* new trial point */
                  NEW[i_to_calc][i] = best[i_to_calc][i] + LAMBDA[i_to_calc] * p[i_to_calc][i];

                  /* limiting */
                  if (NEW[i_to_calc][i] > Gene[i].max) NEW[i_to_calc][i] = Gene[i].max;
                  if (NEW[i_to_calc][i] < Gene[i].min) NEW[i_to_calc][i] = Gene[i].min;

                  to_be_calculated[how_many][i] = NEW[i_to_calc][i];
               }
               how_many++;
            }
         }
      }

      /* evaluating model */
      if (how_many != 0)
         eval(how_many, NSOURCES + NRECEIVERS);

      GOLD_TEST_TOTAL = 0;
      for (how_many = 0, i_to_calc = 0; i_to_calc < Popsize; i_to_calc++)
      {
         if (!DONE[i_to_calc])
         {
            if (GOLD_TEST[i_to_calc])
            {
               OBJ_CUR[i_to_calc] = eval_returned[how_many];
               ITERATIONS[i_to_calc]++;
               how_many++;
               GOLD_TEST_TOTAL += GOLD_TEST[i_to_calc];
            }
         }
      }
   } while (GOLD_TEST_TOTAL);

   /* Armijo's Test for lambda too large (the backtracking algorithm) */
   /* See Dennis and Schnabel, Numerical Methods for Unconstrained */
   /* Optimization and Nonlinear Equations, page 126ff. */
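/*
 * Step-length model used in the loop below (the standard Dennis & Schnabel
 * formulas; the exact safeguards are in the code): with
 * f1 = f(LAMBDA) - f(0) - slope*LAMBDA, the quadratic fit gives
 *
 *    lambdatemp = -slope*LAMBDA^2 / (2*f1),
 *
 * and later backtracks fit a cubic through the two most recent trial steps,
 * whose minimizer is
 *
 *    lambdatemp = (-b + sqrt(b*b - 3*a*slope)) / (3*a),
 *
 * with a and b assembled from f1 and f2 below.  The new step is capped at
 * 0.5*LAMBDA and never allowed below 0.1*LAMBDA.
 */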
   do
   {
      for (how_many = 0, i_to_calc = 0; i_to_calc < Popsize; i_to_calc++)
      {
         if (!DONE[i_to_calc])
         {
            if (OBJ_CUR[i_to_calc] >
                (OBJ_PAST[i_to_calc] + alpha * LAMBDA[i_to_calc] * slope[i_to_calc]) &&
                LAMBDA[i_to_calc] > LAMBDA_MIN)
               ARM_TEST[i_to_calc] = 1;
            else
               ARM_TEST[i_to_calc] = 0;

            if (ARM_TEST[i_to_calc])
            {
               lambda2 = LAMBDA[i_to_calc] * LAMBDA[i_to_calc];   /* intermediate computation */
               f1 = OBJ_CUR[i_to_calc] - OBJ_PAST[i_to_calc] -
                    slope[i_to_calc] * LAMBDA[i_to_calc];         /* intermediate computation */

               if (TST[i_to_calc] == 0)
               {
                  /* try a quadratic fit first */
                  lambdatemp = -slope[i_to_calc] * lambda2 / (2.0 * f1);   /* tentative lambda */
                  TST[i_to_calc] = 1;                                      /* don't go here again */
               }
               else
               {
                  /* otherwise minimize using a cubic approximation */
                  lambdaprev2 = LAMBDAPREV[i_to_calc] * LAMBDAPREV[i_to_calc];
                  f2 = OBJ_PREV[i_to_calc] - OBJ_PAST[i_to_calc] -
                       LAMBDAPREV[i_to_calc] * slope[i_to_calc];

                  /* cubic fit computations */
                  c = 1.0 / (LAMBDA[i_to_calc] - LAMBDAPREV[i_to_calc]);
                  cm11 = 1.0 / lambda2;
                  cm12 = -1.0 / lambdaprev2;
                  cm21 = -LAMBDAPREV[i_to_calc] / lambda2;
                  cm22 = LAMBDA[i_to_calc] / lambdaprev2;
                  a = c * (cm11 * f1 + cm12 * f2);
                  b = c * (cm21 * f1 + cm22 * f2);
                  disc = b * b - 3.0 * a * slope[i_to_calc];

                  if ((fabs(a) > MINFLOAT) && (disc > MINFLOAT))
                  {
                     /* legitimate cubic */
                     lambdatemp = (-b + sqrt(disc)) / (3.0 * a);
                  }
                  else
                  {
                     /* degenerate quadratic fit */
                     lambdatemp = slope[i_to_calc] * lambda2 / (2.0 * f1);
                  }

                  if (lambdatemp >= 0.5 * LAMBDA[i_to_calc])   /* if lambdatemp is too large */
                     lambdatemp = 0.5 * LAMBDA[i_to_calc];
               }

               LAMBDAPREV[i_to_calc] = LAMBDA[i_to_calc];   /* save for next cubic fit */
               OBJ_PREV[i_to_calc] = OBJ_CUR[i_to_calc];    /* save for next cubic fit */

               if (lambdatemp < (0.1 * LAMBDA[i_to_calc]))  /* prevents large decreases */
                  LAMBDA[i_to_calc] *= 0.1;
               else
                  LAMBDA[i_to_calc] = lambdatemp;

               for (i = 0; i < NSOURCES + NRECEIVERS; i++)  /* new trial point */
               {
                  NEW[i_to_calc][i] = best[i_to_calc][i] + LAMBDA[i_to_calc] * p[i_to_calc][i];

                  /* limiting */
                  if (NEW[i_to_calc][i] > Gene[i].max) NEW[i_to_calc][i] = Gene[i].max;
                  if (NEW[i_to_calc][i] < Gene[i].min) NEW[i_to_calc][i] = Gene[i].min;

                  to_be_calculated[how_many][i] = NEW[i_to_calc][i];
               }
               how_many++;
            }
         }
      }

      /* evaluating */
      eval(how_many, NSOURCES + NRECEIVERS);

      ARM_TEST_TOTAL = 0;
      for (how_many = 0, i_to_calc = 0; i_to_calc < Popsize; i_to_calc++)
      {
         if (!DONE[i_to_calc])
         {
            if (ARM_TEST[i_to_calc])
            {
               OBJ_CUR[i_to_calc] = eval_returned[how_many];
               ITERATIONS[i_to_calc]++;
               how_many++;
               ARM_TEST_TOTAL += ARM_TEST[i_to_calc];
            }
         }
      }
   } while (ARM_TEST_TOTAL);

   /* freeing memory */
   free1int(TST);
   free1double(OBJ_PREV);
   free1int(GOLD_TEST);
   free1int(ARM_TEST);
   free1float(LAMBDAPREV);
   free1float(LAMBDA);
}
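/*
 * Illustrative sketch only -- not part of the original routine.  It isolates
 * the safeguarded quadratic/cubic step update used in the Armijo loop above,
 * assuming <math.h> is included at the top of this file (Line_Search itself
 * already uses fabs() and sqrt()).  The function name and arguments are
 * hypothetical.
 */
static double backtrack_step(
double f0,              /* f at the current point (lambda = 0) */
double fcur,            /* f at the current trial step lambda */
double fprev,           /* f at the previous trial step lambdaprev */
double slope,           /* directional derivative, < 0 for descent */
double lambda,          /* current trial step */
double lambdaprev,      /* previous trial step */
int first_backtrack)    /* nonzero on the first backtrack */
{
   double f1 = fcur - f0 - slope * lambda;   /* error of the linear model at lambda */
   double lambdatemp;

   if (first_backtrack)
   {
      /* minimizer of the quadratic interpolating f0, slope and fcur */
      lambdatemp = -slope * lambda * lambda / (2.0 * f1);
   }
   else
   {
      /* minimizer of the cubic interpolating f0, slope, fcur and fprev */
      double f2 = fprev - f0 - slope * lambdaprev;
      double c  = 1.0 / (lambda - lambdaprev);
      double a  = c * ( f1 / (lambda * lambda) - f2 / (lambdaprev * lambdaprev));
      double b  = c * (-f1 * lambdaprev / (lambda * lambda) +
                        f2 * lambda / (lambdaprev * lambdaprev));
      double disc = b * b - 3.0 * a * slope;

      if (fabs(a) > 0.0 && disc > 0.0)
         lambdatemp = (-b + sqrt(disc)) / (3.0 * a);
      else
         lambdatemp = -slope * lambda * lambda / (2.0 * f1);   /* fall back to the quadratic */

      if (lambdatemp > 0.5 * lambda)
         lambdatemp = 0.5 * lambda;   /* never grow past half the last step */
   }

   if (lambdatemp < 0.1 * lambda)
      lambdatemp = 0.1 * lambda;      /* never shrink by more than a factor of 10 */

   return (lambdatemp);
}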