/**
* Description: The description of particle swarm agent.
*
#Supported parameters:
NAME VALUE_type Range DefaultV Description
c1 real [0, 2] 1.494 PSAgent: learning factor for pbest
c2 real [0, 2] 1.494 PSAgent: learning factor for gbest
w real [0, 1] 0.729 PSAgent: inertia weight
CL real [0, 0.1] 0 PSAgent: chaos factor
//Other choices for c1, c2, w, and CL: (2, 2, 0.4, 0.001)
* @ Author Create/Modi Note
* Xiaofeng Xie May 11, 2004
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* Please acknowledge the author(s) if you use this code in any way.
*
* @version 1.0
* @Since MAOS1.0
*
* @References:
* [1] Kennedy J, Eberhart R C. Particle swarm optimization. IEEE Int. Conf. on
* Neural Networks, Perth, Australia, 1995: 1942-1948
* @For initial idea
* [2] Shi Y H, Eberhart R C. A Modified Particle Swarm Optimizer. IEEE Inter. Conf.
* on Evolutionary Computation, Anchorage, Alaska, 1998: 69-73
* @For the inertia weight: adjust the trade-off between exploitation & exploration
* [3] Clerc M, Kennedy J. The particle swarm - explosion, stability, and
* convergence in a multidimensional complex space. IEEE Trans. on Evolutionary
* Computation. 2002, 6 (1): 58-73
* @Constriction factor: ensures the convergence
* [4] Xie X F, Zhang W J, Yang Z L. A dissipative particle swarm optimization.
* Congress on Evolutionary Computation, Hawaii, USA, 2002: 1456-1461
* @The CL parameter
* [5] Xie X F, Zhang W J, Bi D C. Optimizing semiconductor devices by self-
* organizing particle swarm. Congress on Evolutionary Computation, Oregon, USA,
* 2004: 2017-2022
* @Further experimental analysis on the convergence
*/
package agent;
import knowledge.*;
import Global.*;
import goodness.*;
import problem.*;
import space.*;
public class PSAgent extends AbstractAgent {
    // NOTE(review): these initial values follow the alternative set
    // "(2, 2, 0.4, 0.001)" mentioned in the header comment rather than the
    // listed defaults (1.494, 1.494, 0.729, 0), except CL which is 0 — confirm
    // which set is intended.
    public double c1 = 2;        // learning factor toward the personal best (pbest)
    public double c2 = 2;        // learning factor toward the global best (gbest)
    public double weight = 0.4;  // inertia weight, see ref [2]
    public double CL = 0;        // chaos (random-mutation) probability, see ref [4]

    // Own memory: the point generated two learning cycles ago.
    protected BasicPoint pold_t;
    // Own memory: the point generated in the last learning cycle.
    protected BasicPoint pcurrent_t;
    // Own memory: the personal best point found so far.
    protected SearchPoint pbest_t;
    // The best point taken from the socially shared information.
    protected BasicPoint gbest_t;

    public void setProblemEncoder(ProblemEncoder encoder) {
        super.setProblemEncoder(encoder);
        // Allocate this agent's two short-term memory slots.
        pold_t = problemEncoder.getFreshSearchPoint();
        pcurrent_t = problemEncoder.getFreshSearchPoint();
    }

    public void setGbest(SearchPoint gbest) {
        gbest_t = gbest;
    }

    public void setPbest(SearchPoint pbest) {
        pbest_t = pbest;
    }

    /**
     * Fills {@code tempPoint} with the next candidate location. Per dimension,
     * with probability {@code CL} a chaotic mutation is applied (ref [4]);
     * otherwise the classic PSO velocity update is used, with the step clamped
     * to half of the dimension's magnitude.
     *
     * @param tempPoint receives the generated location (written in place)
     * @param t         learning-cycle index (unused here; kept for the caller's contract)
     */
    public void generatePoint(ILocationEngine tempPoint, int t) {
        DesignSpace space = problemEncoder.getDesignSpace();
        double[] newLoc = tempPoint.getLocation();
        double[] curLoc = pcurrent_t.getLocation();
        double[] oldLoc = pold_t.getLocation();
        double[] pbLoc = pbest_t.getLocation();
        double[] gbLoc = gbest_t.getLocation();
        int dims = space.getDimension();
        for (int d = 0; d < dims; d++) {
            if (Math.random() < CL) {
                // Dissipative step: random mutation on this dimension (ref [4]).
                space.mutationAt(newLoc, d);
                continue;
            }
            // Velocity = inertia + cognitive pull (pbest) + social pull (gbest).
            double step = weight * (curLoc[d] - oldLoc[d])
                + c1 * Math.random() * (pbLoc[d] - curLoc[d])
                + c2 * Math.random() * (gbLoc[d] - curLoc[d]);
            // Clamp the step so one move never exceeds half the dimension's range.
            double maxStep = 0.5 * space.getMagnitudeIn(d);
            if (step < -maxStep) {
                step = -maxStep;
            } else if (step > maxStep) {
                step = maxStep;
            }
            newLoc[d] = curLoc[d] + step;
        }
    }

    /**
     * Called after {@code trailPoint} has been evaluated: updates the personal
     * best if the trail point improved on it, then shifts the short-term
     * memory (current becomes old, trail becomes current).
     */
    public void updateInfo() {
        Library.replace(specComparator, trailPoint, pbest_t);
        pold_t.importLocation(pcurrent_t);
        pcurrent_t.importLocation(trailPoint);
    }
}