/* main.c — TRIBES / adaptive PSO (fragment captured from a web code viewer) */
}
// Check whether positions have to be memorized or not
// (MEMO=1 enables the optional "black board" memory; strategies 18 and 19
// are the ones that read/write it — see add_memo() further down)
MEMO=0;
for (j=0;j<Max_status;j++)
{
if (strategies[0][j]==18 || strategies[0][j] ==19) MEMO=1;
}
// Read the remaining global run parameters from the problem file.
// NOTE(review): fscanf return values are unchecked here (and elsewhere);
// a malformed problem file silently leaves these variables unset.
fscanf(f_problem, "%i",&no_best); // If >0, pick the informer at random instead of the true best (see best_informer)
fscanf(f_problem,"%i",&adapt); // Usually 0. If >0, it is a constant swarm size
fscanf(f_problem,"%i",&linkreorg); // presumably controls information-link reorganisation — TODO confirm
fscanf(f_problem,"%i",&nb_pb_max); // Number of different problems
nb_pb=0;
problem: // <-- loop head: one pass per problem, re-entered via "goto problem" below
printf("\n Problem number %i",nb_pb);
// Set level to the main one (normal case)
level=0;
// Description of the problem(s)
problem[level]=read_problem(f_problem,f_data,level); // READ THE PROBLEM DESCRIPTION
if (TSP==1 || QAP==1) // For TSP or QAP problem, the graph has been read in read_problem
// Check it if is consistent with the data
{
if(problem[level].DD!=problem[level].P.size)
{
// NOTE(review): fatal inconsistency, yet the program returns EXIT_SUCCESS;
// EXIT_FAILURE would signal the error to the caller.
printf("\n ERROR. Graph size %i not consistent with problem data %i",problem[level].P.size, problem[level].DD);
return EXIT_SUCCESS;
}
}
display_problem(level);
times=0; // you can run several times the same problem (problemeter problem[level].times)
// Print titles on the f_run file
fprintf(f_run,"Function Run Target" );
for (i=0;i<problem[level].nb_f;i++) fprintf (f_run," f%i",i+1);
fprintf(f_run," Eval_nb Duration Added Removed" );
for (i=0;i<problem[level].DD;i++) fprintf( f_run," x%i",i+1);
if (problem[level].funct==11) // Apples trees (more generally, use of homogeneous coordinates
{
fprintf(f_run," Real position");
for (i=1;i<problem[level].DD-1;i++) fprintf( f_run," _");
}
// Print titles on the f_synth file
fprintf(f_synth,"Function Dim. Target Wanted_error Nb_of_runs Mean_eval._nb Std_dev") ;
fprintf(f_synth," Success_rate");
fprintf(f_synth," Min_eval Max_eval Mean_swarm Mean_error Std_dev Mean_error_cont Std_dev Min_error Max_error Duration") ;
// A 1-D copy of the problem at level 1 — presumably used for sub-level
// (local) optimisation inside PSO; TODO confirm against the level-1 code.
problem[1]=problem[0];
problem[1].DD=1;
// Compute coefficients for some mouve options
coeff_S_C=coeff_SC(problem[level].DD);
// Clerc-Kennedy constriction coefficients: phi ~ 2.0466,
// khi = 1/(phi-1+sqrt(phi^2-2*phi)), cmax = khi*phi
phi=2/0.97725;
khi=1/(phi-1+sqrt(phi*phi-2*phi));
cmax=khi*phi;
// Display function and dimension
printf("\n%s %iD",functions[problem[level].funct],problem[level].DD);
seed_rand_kiss(1); // Initialize pseudo random number generator
nprogr=1; // For progressive problems like Neural Network Training
//core:
// NOTE(review): %i with Max_Eval, and abs() (integer) on Max_Eval_2,
// while the loop below treats both through fabs() as floating values —
// confirm the declared types of Max_Eval/Max_Eval_2; if double, this
// printf is a format mismatch and abs() silently truncates.
printf("\n %i %i",problem[level].Max_Eval, abs(problem[level].Max_Eval_2));
// Run PSO for a whole range of evaluation budgets,
// from Max_Eval to Max_Eval_2 by steps of Max_Eval_delta
for (Max_Eval=problem[level].Max_Eval;fabs(Max_Eval)<=fabs(problem[level].Max_Eval_2);Max_Eval=Max_Eval+problem[level].Max_Eval_delta)
{
// Arbitrary very high values for best of the best, to begin
for (i=0;i<problem[level].nb_f;i++) BEST.f.f[i]=infinite;
printf("\n**********\n Run PSO with Max_Eval= %.0f",Max_Eval);
times=0;
result=PSO(level, Max_Eval); // ******* CORE OF THE PROGRAM *******
}
//nprogr=nprogr+1; if (nprogr<=PROGR) goto core;
nb_pb=nb_pb+1; if(nb_pb<nb_pb_max) goto problem; // next problem, if any
return EXIT_SUCCESS;
}
/*============================ S U B R O U T I N E S =====================================*/
/* Except the first one, they are in alphabetical order */
/*
 * PSO — core optimisation routine (TRIBES-style adaptive particle swarm).
 *
 * level    index into the global problem[]/TR[]/eval_f[] arrays
 *          (0 = main level; level 1 appears reserved for sub-problems).
 * Max_Eval budget of objective-function evaluations for this run.
 *
 * Returns a struct position — presumably the best position found
 * (the return statement lies beyond this chunk — TODO confirm).
 *
 * NOTE(review): the body continues past the end of this source chunk;
 * the documentation below covers only the visible part (initialisation,
 * i-group construction, and the start of the move loop).
 */
struct position PSO(int level, float Max_Eval) // Main routine.
{
int add_option=1; // 0 => generate just random particles
// 1 => generate also a hopefully good one
int already;
int bad;
//struct position best_result;
int better;
int chance;
int cycle,cycle_size;
int d;
//int dim;
double duration;
double duration_tot=0;
double Ek; // Kinetic energy
double Ep; // Potential energy
double eps_run[Max_run][2]; // per-run errors (Max_run presumably a compile-time constant)
double error,error_cont;
double eval_run[Max_run]; // per-run evaluation counts
int failure_tot;
struct f f0,f1;
int good;
int i,i0;
struct i_group ig,igt;
int Iter;
double improv;
int k;
int j;
int l;
int label_best,label_worst;
int labelt;
int m;
int max_add;
double max_eps;
double max_f;
double mean_eps,mean_eps_cont;
double mean_eval;
double mean_swarm_size;
double min_eps;
int n_add;
int n_connect;
int n_local_remove;
int n_remove;
double nb_eval_max;
double nb_eval_min;
double nb_no_progress;
int new_label;
struct particle part_best;
struct particle partg;
struct particle partt;
struct position post;
double pr;
struct f previous_best;
int rank;
int rank_best,rank_worst;
double sigma_eps,sigma_eps_cont;
double sigma_eval;
int size;
double strateg[nb_strateg]; // Just for information (see move_particle)
int swarm_size;
clock_t ticks0,ticks1, ticks2;
int TRsize0;
//double volume; // Just for info
//double x_max,x_min;
//double x1,x2;
//int worst[Max_swarmsize]; // Label of worst particle in each tribe
double z,zzz;
// ---- Per-problem accumulators (kept across the several runs of times_loop)
zzz=0;
nb_eval_max=0;
nb_eval_min=infinite;
failure_tot=0; // Number of failures
min_eps=infinite; // This is a _minimization_ problem
mean_eps=0;
max_eps=0;
for (i=0;i<nb_strateg;i++) strateg[i]=0; // Just for information about the strategies used
for (i=0;i<Max_status;i++) status_count[i]=0; // Just for information about particle status
//======================================== You may solve several times the same problem
times_loop:
memo[level].size=0;
/* Useful only when using pivot method
Note: for implementation of a future ReHope method (Restart)
it would be interesting to put this instruction BEFORE the restart loop
in order to keep memory of best results. In such a case, you also have to put the
other "... =0" before the loop
*/
// ---- Per-run counters
eval_f_tot[level] =0; // Number of objective function evaluations
eval_f[level]=0; // Number of evaluations counter
offline_error=0; // Useful only for dynamic optimisation
offline_error_cont=0;
ticks1=clock(); // Just for information. To evaluate processor time needed
chance=0; // Equal to 1 if a solution is found
nb_no_progress=0;// If this number becomes > parapet, then => Failure
Iter=1; // Number of iterations. Just for information
n_add=0;
n_change=1;
n_remove=0;
mean_swarm_size=0;
label[level]=0; // Label of the first particle. Just for later visualisation
ticks0=clock();
// Copy the search-space bounds from the problem description into the
// global Xmin/Xmax used by the move/initialisation routines
Xmin.size=problem[level].DD;
Xmax.size=Xmin.size;
for(d=0;d<Xmin.size;d++) // Initialize Xmin and Xmax
{
Xmin.x[d]=problem[level].H.min[d];
Xmax.x[d]=problem[level].H.max[d];
}
//srand((unsigned)time(NULL)); // Re initialize random number generation, according to the time
//srand(1); // Re initialize random number generation with the same seed
// Check improvement every cycle_size time steps
cycle_size=problem[level].init_size; // Initial value
// ******** SWARM INITIALIZATION *********
if (problem[level].printlevel>=0)
printf("\n------------------------------------------------------------------------------------------------------------");
if (problem[level].printlevel>0)
printf(" \n Run %i/%i",times+1,problem[level].times);
// Initialize the first tribe
best_result.p.size= problem[level].DD;
best_result.f.size= problem[level].nb_f;
previous_best.size=problem[level].nb_f;
TR[level].size=1; // Number of tribes
TR[level].tr[0].size=problem[level].N; // Tribe size
max_f=0;
// Generate particles and keep the best result found so far
//printf("\n Run %i",times+1);
if (TSP==1) // Special initialization for TSP
{
TR[level].tr[0]=init_swarm_tsp(problem[level].N,problem[level].target,problem[level]. printlevel,level);
goto end_init;
}
if (QAP==1) // Special initialization for QAP
{
TR[level].tr[0]=init_swarm_qap(problem[level].N,problem[level].target,problem[level].printlevel,level);
goto end_init;
}
// Normal initialization (random)
for (i=0;i<TR[level].tr[0].size;i++)
{
TR[level].tr[0].part[i]=init_particle(0,level,dummy_part);
}
end_init: // End of initialisation
// display_swarm(1,level);
// Scan the initial tribe for the best position
best_result=TR[level].tr[0].part[0].x;
previous_best=best_result.f;
for (i=1;i<TR[level].tr[0].size;i++)
{
//display_position( TR[level].tr[0].part[i].x);
// Look for the best result
better=better_than(TR[level].tr[0].part[i].x.f, best_result.f,level);
if (better==1)
{
previous_best= best_result.f;
best_result=TR[level].tr[0].part[i].x;
}
offline_error_cont+=total_error(best_result.f);
} // next i
// For binary problems, round the best position to the nearest 0/1 values
if (BIN==1)
for (d=0;d<best_result.p.size;d++)
best_result.p.x[d]=floor(best_result.p.x[d]+0.5);
// Memorize the best value found so far
error=total_error(best_result.f);
offline_error=error;
// NOTE(review): this overwrites the offline_error_cont accumulated in the
// loop just above — confirm that discarding that sum is intentional.
offline_error_cont=error;
if (error<min_eps) min_eps=error;
// In case of progressive approach, keep the best of the best
if (nprogr>1) TR[level].tr[0].part[0].x=BEST;
// Display best result
//if (problem[level].printlevel>0)
{
printf("\n Best result after initialization");
display_position(best_result);
printf("\n");
}
// Check if solution found by chance
// (end_by_chance label is beyond this chunk)
if (DYN==0) if (error<=problem[level].eps) goto end_by_chance;
// Memorize positions ("Black board" optional method)
if (MEMO==1)
{
for (i=0;i<TR[level].tr[0].size;i++)
{
add_memo(TR[level].tr[0].part[i].x,level);
}
}
// Initialize i-groups (information groups: who informs whom)
if (circular_hood>0) goto circular; // Option to simulate classical PSO
if (rand_hood>0) goto rand_i_group;
// Default (TRIBES): fully-connected i-groups inside the tribe.
// First particle
TR[level].tr[0].part[0].ig.size=TR[level].tr[0].size; // i-group = all particles
for (j=0;j<TR[level].tr[0].part[0].ig.size;j++)
{
TR[level].tr[0].part[0].ig.label[j]=TR[level].tr[0].part[j].label;
}
// Same i-group for all the others
for (i=1;i<TR[level].tr[0].size;i++)
TR[level].tr[0].part[i].ig=TR[level].tr[0].part[0].ig;
goto before_cycles;
//------------------------------
circular: // W A R N I N G
// Valid only for constant swarm size option (and, then, just one tribe, rank 0)
// Classical ring topology: each particle is informed by the
// circular_hood particles centred on it (indices wrap around).
size= TR[level].tr[0].size;
for (i=0;i<size;i++)
{
TR[level].tr[0].part[i].ig.size=circular_hood;
for (j=0;j<TR[level].tr[0].part[i].ig.size;j++)
{
rank=i+j-(int)(circular_hood/2.0);
if (rank<0) rank=rank+size;
else
if (rank>=TR[level].tr[0].size) rank=rank-size;
TR[level].tr[0].part[i].ig.label[j]=TR[level].tr[0].part[rank].label;
}
}
goto before_cycles;
//-------------------------- Option random i-groups
rand_i_group:
// Each particle gets rand_hood informers drawn at random from the swarm.
swarm_size=problem[level].init_size;
for(k=0;k<TR[level].size;k++)
{
size= TR[level].tr[k].size;
for (i=0;i<size;i++)
{
TR[level].tr[k].part[i].ig.size=rand_hood;
for (j=0;j<TR[level].tr[k].part[i].ig.size;j++)
{
// NOTE(review): if alea(a,b)'s upper bound is inclusive, rank can equal
// swarm_size and index one past the last particle — confirm alea's contract.
rank=alea(0,swarm_size);
TR[level].tr[k].part[i].ig.label[j]=TR[level].tr[k].part[rank].label;
}
}
}
//------------------------
before_cycles:
if (problem[level].save==-1) // Save swarm size and energies
// Just for information
{
Ek=energy_k(level); Ep=energy_p(level);
swarm_size=problem[level].init_size+n_add-n_remove;
fprintf(f_energy,"\n%i %i %i %i %f %f",(int)eval_f_tot[level],swarm_size,n_add,n_remove,Ek,Ep);
}
if (problem[level].save==-2) // Save info for convergence curve plotting
{
fprintf(f_trace_run,"Eval error error_cont n_add");
fprintf(f_trace_run,"\n%.0f %f %f 0",eval_f_tot[level],error,error);
}
recent_change=0; // For Moving Peaks
// cycles:
cycle=0; // Number of iterations since last adaptive update
// ******** SWARM MOVES *********
do_Iter: //termination criterion is (currenteps<=eps or eval_f_tot>stop)
// -- Optional progress report, gated by printlevel
if (problem[level].printlevel>0)
{
printf("\n Best result after %.0f evaluations: ",eval_f_tot[level]);
for (i=0;i<best_result.f.size;i++) printf (" %f",best_result.f.f[i]);
if (DYN==1) printf("\n offline error %f",offline_error/n_change);
}
if (problem[level].printlevel>1)
{
display_position (best_result);
printf("\n------ MOVING the %i tribe(s)",TR[level].size);
}
if (problem[level].printlevel>1)
{
printf("\n %i tribe(s)",TR[level].size);
}
if (problem[level].save>=2) fprintf(f_trace,"\n %i tribe(s)",TR[level].size);
for (i=0;i<TR[level].size;i++) // For each tribe ...
{
// -- Optional tracing of the tribe and its i-groups
if (problem[level].printlevel>1)
{
display_tribe(f_trace,TR[level].tr[i],0,level);
}
if (problem[level].printlevel>2)
{
for (j=0;j<TR[level].tr[i].size;j++)
display_i_group(f_trace,TR[level].tr[i].part[j],0);
}
if (problem[level].save>0)
{
fprintf(f_trace,"\n tribe %i",i);
display_tribe(f_trace,TR[level].tr[i],1,level);
}
for (j=0;j<TR[level].tr[i].size;j++) // ... for each particle ...
{
// Find the "best informer" (the true one, or at random)
partg=best_informer(TR[level].tr[i].part[j],no_best,level);
/* NOTE(review): the source is truncated here, mid-way through PSO()'s move
   loop. The lines that originally followed were the web code-viewer's
   keyboard-shortcut help (UI chrome, not code) and have been removed.
   Recover the remainder of PSO() and the subroutines from the original
   TRIBES distribution. */