(一)线性递减
function [xm,fv] = PSO_lin(fitness,N,c1,c2,wmax,wmin,M,D)
% Particle swarm optimization with a LINEARLY DECREASING inertia weight:
% w goes from wmax at the first iteration down to wmin at the last.
% fitness - handle of the objective function to MINIMIZE
% N       - swarm size (number of particles)
% c1      - cognitive (personal-best) acceleration coefficient
% c2      - social (global-best) acceleration coefficient
% wmax    - maximum inertia weight
% wmin    - minimum inertia weight
% M       - maximum number of iterations
% D       - dimension of the search space
% xm      - best position found (D-by-1 column vector)
% fv      - objective value at xm
format long;
%% Initialize positions and velocities with standard normal samples
x = randn(N,D);
v = randn(N,D);
%% Evaluate initial fitness; initialize personal bests Pi and global best Pg
p = zeros(1,N);                 % personal-best fitness values
y = x;                          % personal-best positions
for i = 1:N
    p(i) = fitness(x(i,:));
end
pg = x(N,:);                    % global-best position
for i = 1:(N-1)
    % NOTE(review): the source dropped the comparison here; restored as
    % "current particle better than pg" per the standard PSO algorithm.
    if fitness(x(i,:)) < fitness(pg)
        pg = x(i,:);
    end
end
%% Main loop: standard PSO update, w decreases linearly wmax -> wmin
Pbest = zeros(1,M);             % best fitness history per iteration
for t = 1:M
    % w depends only on t, so compute it once per iteration
    w = wmax - (t-1)*(wmax-wmin)/(M-1);
    for i = 1:N
        v(i,:) = w*v(i,:) + c1*rand*(y(i,:)-x(i,:)) + c2*rand*(pg-x(i,:));
        x(i,:) = x(i,:) + v(i,:);
        if fitness(x(i,:)) < p(i)       % improved personal best
            p(i) = fitness(x(i,:));
            y(i,:) = x(i,:);
        end
        if p(i) < fitness(pg)           % improved global best
            pg = y(i,:);
        end
    end
    Pbest(t) = fitness(pg);
end
xm = pg';                       % transpose: return as column vector
fv = fitness(pg);
(二)自适应
function [xm,fv] = PSO_adaptation(fitness,N,c1,c2,wmax,wmin,M,D)
% PSO with a fitness-ADAPTIVE inertia weight: a particle better than the
% swarm average gets a smaller w (exploitation near good regions); a
% worse-than-average particle keeps w = wmax (exploration).
% fitness - handle of the objective function to MINIMIZE
% N       - swarm size (number of particles)
% c1      - cognitive (personal-best) acceleration coefficient
% c2      - social (global-best) acceleration coefficient
% wmax    - maximum inertia weight
% wmin    - minimum inertia weight
% M       - maximum number of iterations
% D       - dimension of the search space
% xm      - best position found (D-by-1 column vector)
% fv      - objective value at xm
format long;
%% Initialize positions and velocities with standard normal samples
x = randn(N,D);
v = randn(N,D);
%% Evaluate initial fitness; initialize personal bests Pi and global best Pg
p = zeros(1,N);                 % personal-best fitness values
y = x;                          % personal-best positions
for i = 1:N
    p(i) = fitness(x(i,:));
end
pg = x(N,:);                    % global-best position
for i = 1:(N-1)
    % NOTE(review): comparison restored (dropped in the source).
    if fitness(x(i,:)) < fitness(pg)
        pg = x(i,:);
    end
end
%% Main loop
% Local vector renamed from "fv" to "fit" so it no longer shadows the
% scalar output argument fv assigned at the end.
fit = zeros(1,N);
for t = 1:M
    for j = 1:N
        fit(j) = fitness(x(j,:));
    end
    fvag = sum(fit)/N;          % swarm average fitness this iteration
    fmin = min(fit);            % swarm best fitness this iteration
    for i = 1:N
        if fit(i) <= fvag
            if fvag > fmin
                % Better-than-average particle: scale w within [wmin,wmax]
                % according to how close it is to the swarm best.
                w = wmin + (fit(i)-fmin)*(wmax-wmin)/(fvag-fmin);
            else
                w = wmin;       % all particles equal: avoid 0/0 -> NaN
            end
        else
            w = wmax;
        end
        v(i,:) = w*v(i,:) + c1*rand*(y(i,:)-x(i,:)) + c2*rand*(pg-x(i,:));
        x(i,:) = x(i,:) + v(i,:);
        if fitness(x(i,:)) < p(i)       % improved personal best
            p(i) = fitness(x(i,:));
            y(i,:) = x(i,:);
        end
        if p(i) < fitness(pg)           % improved global best
            pg = y(i,:);
        end
    end
end
xm = pg';                       % argmin of the objective, column vector
fv = fitness(pg);               % minimum objective value found
(三)增加学习因子
function [xm,fv] = PSO_SA(fitness,N,c1,c2,lamda,M,D)
% Simulated-annealing PSO with a constriction factor: each iteration a
% replacement global attractor pg_plus is drawn by Boltzmann roulette
% over the personal bests, and the temperature T is cooled by lamda.
% NOTE(review): the function declaration line was missing from the source
% fragment; it is reconstructed here from the parameter names the body
% uses (fitness, N, c1, c2, lamda, M, D) -- confirm name against callers.
% fitness - handle of the objective function to MINIMIZE
% N       - swarm size (number of particles)
% c1      - cognitive acceleration coefficient
% c2      - social acceleration coefficient (c1+c2 must exceed 4 for the
%           constriction factor to be real)
% lamda   - annealing (cooling) constant in (0,1)
% M       - maximum number of iterations
% D       - dimension of the search space
% xm      - best position found (D-by-1 column vector)
% fv      - objective value at xm
%% Initialize positions and velocities with standard normal samples
x = randn(N,D);
v = randn(N,D);
%% Evaluate initial fitness; initialize personal bests Pi and global best Pg
p = zeros(1,N);                 % personal-best fitness values
y = x;                          % personal-best positions
for i = 1:N
    p(i) = fitness(x(i,:));
end
pg = x(N,:);                    % global-best position
for i = 1:(N-1)
    % NOTE(review): comparison restored (dropped in the source).
    if fitness(x(i,:)) < fitness(pg)
        pg = x(i,:);
    end
end
%% Main loop
% Initial temperature chosen so the global best has acceptance weight 0.2.
T = -fitness(pg)/log(0.2);
Pbest = zeros(1,M);             % best fitness history per iteration
Tfit = zeros(1,N);              % Boltzmann weights of the personal bests
for t = 1:M
    groupFit = fitness(pg);
    for i = 1:N
        Tfit(i) = exp(-(p(i) - groupFit)/T);
    end
    Tfit = Tfit/sum(Tfit);      % normalize to a probability distribution
    % Roulette-wheel selection of the substitute attractor pg_plus.
    % Always terminates: the cumulative sum reaches 1 at i = N.
    pBet = rand();
    for i = 1:N
        if pBet <= sum(Tfit(1:i))
            pg_plus = x(i,:);
            break;
        end
    end
    % Clerc-Kennedy constriction factor (real only when C = c1+c2 > 4).
    C = c1 + c2;
    ksi = 2/abs(2 - C - sqrt(C^2 - 4*C));
    for i = 1:N
        v(i,:) = ksi*(v(i,:) + c1*rand*(y(i,:)-x(i,:)) + c2*rand*(pg_plus-x(i,:)));
        x(i,:) = x(i,:) + v(i,:);
        if fitness(x(i,:)) < p(i)       % improved personal best
            p(i) = fitness(x(i,:));
            y(i,:) = x(i,:);
        end
        if p(i) < fitness(pg)           % improved global best
            pg = y(i,:);
        end
    end
    T = T*lamda;                % cool the temperature
    Pbest(t) = fitness(pg);
end
xm = pg';                       % transpose: return as column vector
fv = fitness(pg);
(四)随机权重
function [xm,fv] = PSO_rand(fitness,N,c1,c2,wmax,wmin,rande,M,D)
% PSO with a RANDOM inertia weight: for every particle update, w is drawn
% as uniform-in-[wmin,wmax] plus Gaussian noise of std rande.
% fitness - handle of the objective function to MINIMIZE
% N       - swarm size (number of particles)
% c1      - cognitive (personal-best) acceleration coefficient
% c2      - social (global-best) acceleration coefficient
% wmax    - maximum of the uniform component of the weight
% wmin    - minimum of the uniform component of the weight
% rande   - standard deviation of the Gaussian perturbation of w
% M       - maximum number of iterations
% D       - dimension of the search space
% xm      - best position found (D-by-1 column vector)
% fv      - objective value at xm
format long;
%% Initialize positions and velocities with standard normal samples
x = randn(N,D);
v = randn(N,D);
%% Evaluate initial fitness; initialize personal bests Pi and global best Pg
p = zeros(1,N);                 % personal-best fitness values
y = x;                          % personal-best positions
for i = 1:N
    p(i) = fitness(x(i,:));
end
pg = x(N,:);                    % global-best position
for i = 1:(N-1)
    % NOTE(review): comparison restored (dropped in the source).
    if fitness(x(i,:)) < fitness(pg)
        pg = x(i,:);
    end
end
%% Main loop: PSO update with a freshly randomized w per particle
Pbest = zeros(1,M);             % best fitness history per iteration
for t = 1:M
    for i = 1:N
        miu = wmin + (wmax - wmin)*rand();  % uniform part of the weight
        w = miu + rande*randn();            % add Gaussian perturbation
        v(i,:) = w*v(i,:) + c1*rand*(y(i,:)-x(i,:)) + c2*rand*(pg-x(i,:));
        x(i,:) = x(i,:) + v(i,:);
        if fitness(x(i,:)) < p(i)       % improved personal best
            p(i) = fitness(x(i,:));
            y(i,:) = x(i,:);
        end
        if p(i) < fitness(pg)           % improved global best
            pg = y(i,:);
        end
    end
    Pbest(t) = fitness(pg);
end
xm = pg';                       % transpose: return as column vector
fv = fitness(pg);
以上四种w的改进方法各有千秋,请读者根据自身需求选择相应的方法。除此之外,还有很多其它针对w的改进。不过,现在比较主流的做法是给c1、c2、w建立相应的关系,通过c1或者c2的值来控制w的变化。