参考: https://blog.csdn.net/m0_38097087/article/details/79818348 http://yarpiz.com/59/ypea121-mopso Coello C A C , Pulido G T , Lechuga M S . Handling multiple objectives with particle swarm optimization[J]. IEEE Transactions on Evolutionary Computation, 2004, 8(3):256-279.。
是按照上面第三个引用的那篇论文实现的。论文里面的速度更新公式没有用到惯性系数,好像是因为他有个变异算子(还没实现)。
算法流程
1. 初始化粒子群的位置和速度,计算适应值;2. 评价粒子的适应度和 Pareto 支配关系,将非劣解保存到 Archive 中;3. 计算 Archive 集中的拥挤度;4. 为粒子选择 gbest;5. 更新粒子的速度、位置、适应度、pbest;6. 更新 Archive;7. 满足结束条件则结束,否则转到第 4 步继续循环。
import copy
import numpy as np
class Partical:
    """One MOPSO particle: position, velocity, personal best (pbest) and a
    flag marking whether the particle is Pareto-dominated by another one.

    (The class name's spelling is kept as-is for external callers.)
    """

    def __init__(self, x_min, x_max, max_v, min_v, fitness):
        """
        x_min, x_max : array-like, per-dimension position bounds
        max_v, min_v : array-like, per-dimension velocity bounds
        fitness      : callable(pos) -> sequence of objective values
        """
        self.dim = len(x_min)
        self.max_v = max_v
        self.min_v = min_v
        self.x_min = x_min
        self.x_max = x_max
        self.dominated = False
        self.initPos(x_min, x_max)
        self._v = np.zeros(self.dim)
        self.fitness = fitness
        # BUGFIX: evaluate the objectives once instead of twice (the original
        # called fitness(self.pos) separately for cur_fitness and bestFitness).
        self.cur_fitness = fitness(self.pos)
        # Aliasing is safe here: cur_fitness is only ever rebound, never
        # mutated in place (same pattern as _updateFit below).
        self.bestFitness = self.cur_fitness

    def _updateFit(self):
        """Refresh the personal best: keep whichever fitness dominates; on
        mutual non-domination pick either one with probability 1/2."""
        if isDominates(np.array(self.cur_fitness), np.array(self.bestFitness)):
            self.bestFitness = self.cur_fitness
            self.pbest = self.pos
        elif isDominates(np.array(self.bestFitness), np.array(self.cur_fitness)):
            pass  # old personal best still dominates -> keep it
        else:
            if np.random.random() < 0.5:
                self.bestFitness = self.cur_fitness
                self.pbest = self.pos

    def _updatePos(self):
        """Move by the current velocity and clamp into [x_min, x_max]."""
        self.pos = np.clip(self.pos + self._v, self.x_min, self.x_max)

    def _updateV(self, w, c1, c2, gbest):
        """Standard PSO velocity update (all array operations), clamped to
        [min_v, max_v]."""
        v = (w * self._v
             + c1 * np.random.random() * (self.pbest - self.pos)
             + c2 * np.random.random() * (gbest - self.pos))
        self._v = np.clip(v, self.min_v, self.max_v)

    def initPos(self, x_min, x_max):
        """Uniformly sample a start position in [x_min, x_max] and seed the
        personal best with a copy of it."""
        span = np.asarray(x_max) - np.asarray(x_min)
        self.pos = np.asarray(x_min) + np.random.random(self.dim) * span
        self.pbest = self.pos.copy()

    def getPbest(self):
        return self.pbest

    def getBestFit(self):
        return self.bestFitness

    def setIsDominated(self, r):
        self.dominated = r

    def getIsDominated(self):
        return self.dominated

    def update(self, w, c1, c2, gbest):
        """One full step: velocity, position, re-evaluate, refresh pbest."""
        self._updateV(w, c1, c2, gbest)
        self._updatePos()
        self.cur_fitness = self.fitness(self.pos)
        self._updateFit()
判断是否被支配 链接一的实现感觉不大对
def isDominates(x, y):
    """Return True when fitness vector x Pareto-dominates y: x is no worse
    than y in every objective and strictly better in at least one."""
    not_worse_anywhere = (x <= y).all()
    strictly_better_somewhere = (x < y).any()
    return not_worse_anywhere and strictly_better_somewhere
def determinDomination(pop):
    """Reset and recompute the 'dominated' flag of every particle in pop.

    A particle is flagged when any other particle Pareto-dominates it.
    PERF: fitness vectors are converted to numpy arrays once up front; the
    original rebuilt np.array(...) for both vectors on every one of the
    O(n^2) pair comparisons.
    """
    for par in pop:
        par.setIsDominated(False)
    fits = [np.asarray(par.cur_fitness) for par in pop]
    for i in range(len(pop) - 1):
        for j in range(i + 1, len(pop)):
            if isDominates(fits[i], fits[j]):
                pop[j].setIsDominated(True)
            # Mutual domination is impossible, so elif is safe and skips a
            # redundant comparison whenever i already dominates j.
            elif isDominates(fits[j], fits[i]):
                pop[i].setIsDominated(True)
处理存档的方法
如果外部存档为空,那么非支配解直接进入;如果新的解被存档中的解支配,就丢弃这个解;如果新的解不被存档中的任意一个解支配,就进入存档;如果存档中的解被这个新的解支配,就删去存档中被支配的解;如果存档满了,调用自适应网格程序。
全局最优的粒子的选择 论文中写得方法是用10除以每个网格中的粒子数作为每个网格的适应度,然后使用轮盘赌的方法选出一个网格,如果网格中的粒子数目不止一个就随机选择一个。
但在参考链接二的实现中是用P=exp(-beta*N); P=P/sum(P);作为每个网格的适应度(概率)然后再用轮盘赌。其中beta表示选择压力,可能beta越小选择压力越大??因为他们的概率差别越小。原因应该就是为了轮盘赌的通用性,存档中删去粒子也是要用轮盘赌的。
参考链接一用的是10,但除了立方没有很明白,[为了增加选择全局最佳的压力,我试了一下效果会好特别多]。需要再去看下论文。然后他的轮盘赌是所有粒子进行的。我实现的是网格进行轮盘赌。
外部存档和网格的实现
class Archive:
    """External archive of non-dominated particles with an adaptive grid
    (Coello, Pulido & Lechuga, 2004).  The grid drives both leader selection
    (favour sparse cells) and overflow deletion (favour crowded cells)."""

    def __init__(self, partical, size, mesh_div=10):
        """
        partical : list of Partical - the current swarm
        size     : int - archive capacity
        mesh_div : int - grid divisions per objective dimension
        """
        self.mesh_div = mesh_div
        self.particals = partical
        self.size = size
        determinDomination(self.particals)
        tempArchive = [x for x in self.particals if x.dominated != True]
        self.archive = copy.deepcopy(tempArchive)
        self.mesh_id = np.zeros(len(self.archive))
        self.inflation = 0.1  # grid boundary inflation ratio
        self.fitness = [par.cur_fitness for par in self.archive]
        self.f_max = np.max(np.array(self.fitness), axis=0)
        self.f_min = np.min(np.array(self.fitness), axis=0)

    def createGrid(self):
        """Recompute grid bounds from the current archive and assign every
        member its mesh id.  With a single member there is nothing to grid."""
        if len(self.archive) == 1:
            return
        fit_arr = np.array(self.fitness)
        f_max = np.max(fit_arr, axis=0)
        f_min = np.min(fit_arr, axis=0)
        dc = f_max - f_min
        # BUGFIX: the original stored the inflated bounds in *locals* while
        # _cal_mesh_id kept reading the raw self.f_max/self.f_min, so the
        # inflation had no effect and boundary points could overflow the grid.
        self.f_max = f_max + self.inflation * dc
        self.f_min = f_min - self.inflation * dc
        self.mesh_id = np.zeros(len(self.archive))
        for i in range(len(self.archive)):
            self.mesh_id[i] = self._cal_mesh_id(self.fitness[i])

    def _cal_mesh_id(self, fit):
        """Map a fitness vector to one integer cell id: discretise each
        objective into mesh_div bins and combine them base-mesh_div."""
        id_ = 0
        for i in range(len(fit)):
            span = self.f_max[i] - self.f_min[i]
            # Guard: all archive members share this objective value -> bin 0
            # (the original raised ZeroDivisionError).
            id_dim = 0 if span == 0 else int((fit[i] - self.f_min[i]) * self.mesh_div / span)
            id_ = id_ + id_dim * (self.mesh_div ** i)
        return id_

    def RouletteWheelSelection(self, prob):
        """Return an index drawn proportionally to prob (assumed normalised).
        BUGFIX: fall back to the last index so float rounding in the running
        sum can never make the method return None."""
        p = np.random.random()
        cumsum = 0
        for i in range(len(prob)):
            cumsum += prob[i]
            if p <= cumsum:
                return i
        return len(prob) - 1

    def selectLeader(self):
        """Pick a gbest: roulette-wheel over grid cells weighted 10/count^3
        (sparser cells get a much higher probability), then a uniform pick
        inside the winning cell.  (Debug print removed.)"""
        unique_elements, counts_elements = np.unique(self.mesh_id,
                                                     return_counts=True)
        counts_elements = 10 / (counts_elements ** 3)
        prob = counts_elements / np.sum(counts_elements)
        idx = self.RouletteWheelSelection(prob)
        smi = np.where(self.mesh_id == unique_elements[idx])
        smi = list(smi)[0]
        select = np.random.randint(0, len(smi))
        return self.archive[smi[select]]

    def deletePartical(self):
        """Drop one particle, preferring crowded cells: cell weight
        exp(count) grows with occupancy, then a uniform pick inside the
        chosen cell; archive, mesh_id and fitness stay in sync."""
        unique_elements, counts_elements = np.unique(self.mesh_id,
                                                     return_counts=True)
        p = np.exp(counts_elements)
        p = p / np.sum(p)
        idx = self.RouletteWheelSelection(p)
        smi = np.where(self.mesh_id == unique_elements[idx])
        smi = list(smi)[0]
        select = np.random.randint(0, len(smi))
        del self.archive[smi[select]]
        self.mesh_id = np.delete(self.mesh_id, smi[select])
        del self.fitness[smi[select]]

    def update(self, particals):
        """Merge the swarm's new non-dominated solutions into the archive,
        re-filter dominated members, rebuild the grid, trim to capacity."""
        self.particals = particals
        determinDomination(self.particals)
        curr_temp = [x for x in self.particals if x.dominated != True]
        self.archive += copy.deepcopy(curr_temp)
        determinDomination(self.archive)
        curr_temp = [x for x in self.archive if x.dominated != True]
        self.archive = copy.deepcopy(curr_temp)
        self.fitness = [par.cur_fitness for par in self.archive]
        self.createGrid()
        while len(self.archive) > self.size:
            self.deletePartical()
主流程
class Mopso:
    """Main MOPSO driver: owns the swarm of Partical objects and the
    external Archive, and runs the generation loop."""

    def __init__(self, pop, fitnessFunction, w, c1, c2, max_, min_, thresh,
                 mesh_div=10):
        """
        pop             : int - swarm size
        fitnessFunction : callable(pos) -> objective vector
        w, c1, c2       : inertia and acceleration coefficients
        max_, min_      : np arrays - per-dimension position bounds
        thresh          : int - archive capacity
        mesh_div        : int - grid divisions per objective dimension
        """
        self.w, self.c1, self.c2 = w, c1, c2
        self.mesh_div = mesh_div
        self.pop = pop
        self.thresh = thresh
        self.max_ = max_
        self.min_ = min_
        # Velocity is bounded to 5% of each dimension's range.
        self.max_v = (max_ - min_) * 0.05
        self.min_v = (max_ - min_) * 0.05 * (-1)
        self.fitnessFunction = fitnessFunction
        self.particals = [Partical(self.min_, self.max_, self.max_v,
                                   self.min_v, self.fitnessFunction)
                          for _ in range(self.pop)]
        self.archive = []

    def evaluation_fitness(self):
        # NOTE(review): dead/broken code -- self.in_ and fitness_ are not
        # defined anywhere in this file and the method is never called.
        # Kept only to preserve the class interface; confirm before use.
        fitness_curr = []
        for i in range((self.in_).shape[0]):
            fitness_curr.append(fitness_(self.in_[i]))
        self.fitness_ = np.array(fitness_curr)

    def update_(self):
        """One generation: every particle follows a leader sampled from the
        archive grid, then the archive absorbs the moved swarm."""
        for part in self.particals:
            gbestPart = self.archive.selectLeader()
            part.update(self.w, self.c1, self.c2, gbestPart.pos)
        self.archive.update(self.particals)

    def done(self, cycle_):
        """Run cycle_ generations and return the final archive (the
        approximated Pareto front)."""
        # BUGFIX: self.mesh_div was stored but never forwarded, so the
        # Archive always used its default grid resolution.
        self.archive = Archive(self.particals, self.thresh, self.mesh_div)
        self.archive.createGrid()
        for i in range(cycle_):
            print("=" * 10, i, "=" * 10)
            self.update_()
        return self.archive
我的参数设置
# Experiment parameters: 30-variable ZDT-style problem on [0, 1]^30.
w = 0.4            # inertia weight
c1 = 1             # cognitive (pbest) coefficient
c2 = 2             # social (gbest) coefficient
particals = 200    # swarm size
gengeration = 200  # number of generations (name kept as in original)
mesh_div = 10      # grid divisions per objective dimension
thresh = 100       # archive capacity
var_num = 30       # number of decision variables
# CONSISTENCY FIX: bounds now derive from var_num instead of a hard-coded 30.
min_ = np.zeros(var_num)
max_ = np.ones(var_num)
# NOTE(review): fitness_ (the objective function, e.g. ZDT1) must be defined
# elsewhere before this script runs -- it is not defined in this file.
mopso_ = Mopso(particals, fitness_, w, c1, c2, max_, min_, thresh, mesh_div)
pareto_front = mopso_.done(gengeration)
结果
试了一下ZDT1函数 c1=1;c2=2(同样的参数又跑了几次,拟合效果一点也不稳定。。下图这次是唯一比较好的一次) 红色的是正确答案,蓝色是拟合的帕累托前沿 应该还是有问题,为什么这么短,参数的影响非常大 当我把c2改成0.5时,线是边长了,但拟合效果emmmm什么垃圾 c1=0.5;c2=1;粒子数80,迭代300次 论文结尾说他们发现算法对惯性权重十分敏感所以有了变异算子。。emmm看来有必要实现一下
推荐参数
论文里面推荐粒子数为100; 迭代次数为80~120 网格划分为30 外部存档数250 差的也蛮远的。。。 !!!:是选择全局最优时的选择压力不够大 感觉c1=1;c2=2 选择全局最优时除以数目的立方 ZDT&ZDT2 的结果 好了超级多
还有一些问题:
变异算子没有去实现,链接二的代码有实现;别的标准测试函数也还没有试验;在计算是否支配那里有重复计算的问题;写的时候越写越随意,根本没有区分是不是私有的属性,不用面向对象也完全可以。
如果变量超出了约束条件的范围的处理方法:
使变量的值等于它的上边界或者下边界速度乘(-1)往相反的方向搜索 (这个也没有按论文来实现,再改改吧,目前完全不能用啊比不上NSGA2,可能我实现的有问题。。。)
在2005年有一篇,看题目应该是拥挤度的计算方法不同
An Effective use of Crowding Distance in Multiobjective Particle Swarm Optimization
Ps. 感受到了自己辣鸡的代码水平,竟然抄还抄了这么久,加油吧。