% improvedrancaiwu.asv — MATLAB autosave file
% Combines the RAN (Resource-Allocating Network) algorithm with a
% correlation-based pruning method, and applies them to an RBF network
% augmented with direct linear input-to-output connections, for
% propylene purity prediction.
% Section one
% RAN algorithm
% main — Section one trains the RBF network online with the RAN algorithm
% (novelty test + per-sample fine tuning); section two merges/prunes
% correlated or near-constant hidden nodes and retrains the output weights.
function main()
tic
% RBF network with a linear input-to-output path
% p: 39 samples x 6 process variables (transposed so columns are samples)
p=[ 0.09132 0.03615 0.0415 -0.3056983 -0.429 -0.3424
0.24647 0.09228 0.0909 0.29356763 0.0757 0.109821
0.30593 0.13124 0.1806 -0.1730657 -0.148 -0.11433
0.04546 0.04738 0.1568 -0.1580513 0.0949 0.100019
0.19179 0.05514 0.0661 -0.2462878 -0.086 -0.11713
0.26843 0.10353 0.1346 0.46888081 0.3058 0.320772
0.23414 0.14571 0.1253 0.12413238 0.5833 0.546907
0.12411 0.05107 0.0734 0.13528976 0.1961 0.050202
0.21223 0.09518 0.0787 -0.3200746 -0.121 -0.11254
0.44223 0.10126 0.0598 -0.5987261 -0.281 -0.30444
0.10466 0.03811 0.0363 0.3780161 0.6449 0.720158
0.10348 0.04947 0.0451 -0.0591081 -0.155 -0.25771
0.1038 0.03736 0.051 -0.4548067 -0.406 -0.48903
0.33955 0.06541 0.0631 0.26631751 0.046 0.007402
0.24708 0.06499 0.071 -0.0290705 -0.137 -0.13544
0.29178 0.12776 0.1819 0.20166437 0.9176 0.578143
0.03803 0.02276 0.0012 -0.7855768 -0.987 -0.69645
0.08378 0.01404 0.2073 -0.0473601 -0.85 -0.89666
0.05505 0.01348 0.0096 -0.2675278 -0.865 -0.83182
0.08603 0.01643 0.005 -0.3048749 -0.575 -0.73226
0.16272 0.04433 0.0503 0.09753527 -0.799 -0.81446
0.30537 0.09123 0.0813 0.41089893 0.1747 0.220284
0.10243 0.03277 0.0014 -0.1932007 -0.986 -0.96686
0.04935 0.05256 0.0552 -0.428435 -0.765 -0.78969
0.216 0.1 0.108 -0.03 0.159 0.082
0.057 0.058 0.054 0.099 -0.27 -0.27
0.346 0.089 0.098 0.081 0.221 0.162
0.12 0.074 0.137 0.242 0.334 0.283
0.118 0.037 0.071 0.057 -0.03 -0.03
0.235 0.078 0.086 -0.07 -0.15 -0.16
0.174 0.057 0.086 0.221 1.153 0.038
0.083 0.014 0.02 -0.22 0.068 0.167
0.589 0.041 0.036 -0.24 -0.48 -0.46
0.092 0.026 0.026 0.335 -0.45 -0.45
0.089 0.043 0.025 -0.29 -0.84 -0.83
0.201 0.12 0.134 -0.56 0.09 -0.02
0.14 0.034 0.002 -0.59 -0.84 -0.57
0.05 0.059 0.098 0.492 -0.02 -0.04
0.13 0.04 0.026 -0.4 -0.72 -0.6
]';
% t: target value per sample (transposed to a row vector)
t=[0.1077
0.1044
0.1561
0.1615
0.1011
0.1621
0.078
0.0619
0.0405
0.0327
0.0286
0.0406
0.0145
0.0125
0.0088
0.0104
-0.326
-3.943
-0.281
-0.24
-0.256
-0.86
-0.32
-0.335
0.074
0.105
0.105
0.117
0.07
0.044
0.005
0.009
0.022
0.031
-0.09
-0.16
-0.12
-0.26
-0.2
]';
p1=p(:,1:24); % training inputs (first 24 samples)
p2=p(:,25:39); % test inputs (remaining 15 samples)
t1=t(:,1:24); % training targets
t2=t(:,25:39); % test targets
[InDim,TrainSamNum]=size(p1);
[OutDim,TrainSamNum]=size(t1);
[InDim,TestSamNum]=size(p2);
OverLapCoe=0.515; % overlap coefficient: new unit width = OverLapCoe * nearest-center distance
Dist_Max=0.7; % maximum distance resolution
Dist_Min=0.01; % minimum distance resolution
ErrLimit=0.05; % error resolution (novelty threshold on the prediction error)
Decay=0.7; % resolution decay constant
lr=0.05; % learning rate for per-sample fine tuning
MaxEpoch=200; % maximum fine-tuning iterations per sample
DistLimit=Dist_Max; % current distance resolution (decays towards Dist_Min)
b2=t1(:,1); % bias initialized from the first training target
w2=[]; % hidden-to-output weights (one column per hidden unit)
UnitCenters=[]; % hidden-unit centers (one column per unit)
SpreadConstant=[]; % hidden-unit widths (one row per unit)
UnitNum=0;
AllUnitNum=0; % history of the hidden-unit count
AllTestRSME=[]; % history of the test RMSE
tp=[ErrLimit lr MaxEpoch]; % fine-tuning parameter bundle
% Online RAN loop: starts at sample 2 because sample 1 initialized b2.
for TrainedNum=2:TrainSamNum
NewInput=p1(:,TrainedNum);
NewOutput=t1(:,TrainedNum);
NetOut=RBFNN(NewInput,UnitCenters,w2,b2,SpreadConstant);
NewErr=NewOutput-NetOut;
if (UnitNum==0),
NewDist=Dist_Max;
else
AllDist=dist(UnitCenters',NewInput);
NewDist=min(AllDist); % distance to the nearest existing center
end
if(norm(NewErr)>=ErrLimit & NewDist>=DistLimit), % novelty test: add a hidden node? (scalar operands, so `&` behaves like `&&`)
[UnitCenters,w2,SpreadConstant]=AddNewUnit(NewInput,NewErr,NewDist,UnitCenters,w2,SpreadConstant,OverLapCoe);
TrainedNum; % no-op (semicolon suppresses display)
UnitNum=UnitNum+1;
else
[UnitCenters,w2,b2]=FineTuning(NewInput,NewOutput,UnitCenters,w2,b2,SpreadConstant,tp); % each fine-tuning call processes exactly one incoming sample
end
if DistLimit>Dist_Min, % decay the distance resolution
DistLimit=DistLimit*Decay;
else
DistLimit=Dist_Min;
end
AllUnitNum=[AllUnitNum UnitNum];
TestNNOut=RBFNN(p2,UnitCenters,w2,b2,SpreadConstant);
TestRSME=sqrt(sumsqr(TestNNOut-t2)/TestSamNum) % unsuppressed: prints the test RMSE every iteration
AllTestRSME=[AllTestRSME TestRSME];
end
% Plot target curve and network output curve
TestNNOut=RBFNN(p2,UnitCenters,w2,b2,SpreadConstant)
plot(1:TestSamNum,TestNNOut,'r.-')
hold on
plot(1:TestSamNum,t2,'k.-')
xlabel('RAN')
hold on
% NOTE(review): plotting a scalar y against a vector x errors in most MATLAB
% versions — presumably 0.06*ones(1,TestSamNum) was intended; confirm.
plot(1:TestSamNum,0.06,'g.-')
hold on
plot(1:TestSamNum,0,'g.-')
hold off
UnitNum
% Plot the hidden-node-count history
[xxx,PtNum]=size(AllUnitNum); % number of recorded points (initial 0 plus one per processed sample)
figure
echo off
axis([0 PtNum 0 150])
axis on
grid
hold on
plot(1:PtNum,AllUnitNum,'b-')
xlabel('隐节点变化曲线')
% Plot the RMSE history
[xxx,PtNum]=size(AllTestRSME);
figure
echo off
axis on
grid
hold on
plot(1:PtNum,AllTestRSME,'b-')
xlabel('测试误差曲线RSME')
%% Section two
% Hidden-node merging (correlation pruning)
lr=0.002; % learning rate for output-weight retraining
maxepoch=15; % maximum training epochs
errcombine=0.5; % error level below which node merging is attempted
errgoal=0.01; % target training error
unitscombinethreshold=0.38; % correlation threshold for merging two nodes
biascombinethreshold=0.04; % variance threshold for folding a node into the bias
w2ex=[w2 b2]; % hidden-to-output weights augmented with the bias column
errhistory=[];
resizeflag=1; % flag: network size changed, so w2/b2 must be re-split from w2ex
for epoch=1:maxepoch
if(resizeflag==1),
[OutDim,UnitNum]=size(w2ex);
UnitNum=UnitNum-1;
w2=w2ex(:,1:UnitNum);
b2=w2ex(:,UnitNum+1);
resizeflag=0;
end
% Forward pass to compute the network output
hiddenout=ho(p1,UnitCenters,SpreadConstant); % each node's outputs form one row
hiddenoutex=[hiddenout' ones(TrainSamNum,1)]';
NetOut=w2ex*hiddenoutex;
% Stopping test
error=t1-NetOut;
sse=sqrt(sumsqr(error)/TrainSamNum); % NOTE(review): named sse but computed as the RMSE of the training error
% Record the training error after each weight update
errhistory=[errhistory sse];
if(sse<errcombine),
% Standard deviation (variance) of each hidden node's outputs
hiddenvar=var(hiddenout')';
% Correlation coefficients between hidden-node outputs
hiddencorr=corrcoef(hiddenout');
% Check whether any hidden nodes should be merged
[hiddenunit1,hiddenunit2]=findunittocombine(hiddencorr,...
hiddenvar,unitscombinethreshold,biascombinethreshold);
if(hiddenunit1>0),
if(hiddenunit2>0), % merge two hidden nodes
[a,b]=linearreg(hiddenout(hiddenunit1,:),...
hiddenout(hiddenunit2,:)); % linear regression: fit a and b in vj = a*vi + b
epoch
combinetype=11
drawcorrelatedunitsout(hiddenout...
(hiddenunit1,:),hiddenout(hiddenunit2,:));
[UnitCenters,SpreadConstant,w2ex]=combinetwounits(hiddenunit1,...
hiddenunit2,a,b,w2ex,UnitCenters,SpreadConstant); % arrays that must be updated appear both as arguments and return values (e.g. UnitCenters, SpreadConstant)
else % fold the hidden node into the bias
epoch
combinetype=12
drawbiasedunitout(hiddenout(hiddenunit1,:));
unitmean=mean(hiddenout(hiddenunit1,:));
[UnitCenters,SpreadConstant,w2ex]=combineunittobias...
(hiddenunit1,unitmean,w2ex,UnitCenters,SpreadConstant);
end
resizeflag=1;
continue; % after a merge, skip the gradient step and restart the epoch loop
end
end
if(sse<errgoal),break,end
% Backpropagated error
% NOTE(review): NetOut comes from a linear output layer (w2ex*hiddenoutex),
% yet delta2 multiplies by NetOut.*(1-NetOut), the logistic derivative —
% for a linear output the delta would simply be `error`; verify intended.
delta2=error.*NetOut.*(1-NetOut);
% Weight increments
hiddenoutex=[hiddenout' ones(TrainSamNum,1)]'; % recomputed (unchanged since the forward pass above)
dw2ex=delta2*hiddenoutex';
% Weight update
w2ex=w2ex+lr*dw2ex;
% Split w2ex back into w2 and b2
[c,d]=size(w2ex);
w2=w2ex(:,1:d-1);
b2=w2ex(:,d);
end
% Plot the training-error curve
figure
echo off
axis on
grid
hold on
[xx,num]=size(errhistory);
%semilogy(1:num,errhistory,'r-'); % SEMILOGY(...) is the same as PLOT(...), except a logarithmic (base 10) scale is used for the Y-axis.
plot(1:num,errhistory,'r-');
xlabel('训练误差曲线')
% Plot target curve and network output curve after pruning
figure
TestNNOut=RBFNN(p2,UnitCenters,w2,b2,SpreadConstant)
plot(1:TestSamNum,TestNNOut,'.r-')
hold on
plot(1:TestSamNum,t2,'*k-')
hold on
plot(1:TestSamNum,0.06,'g.-')
hold on
plot(1:TestSamNum,0,'g.-')
xlabel('剪枝后')
TestRSME2=sqrt(sumsqr(TestNNOut-t2)/TestSamNum)
%---------------------------------------------------------------------------------------------------------
% Below are the helper functions called from main(); each has its own
% `function` line (there are no nested functions inside main).
% Helper functions used by section two
% Hidden-layer output
% ho — response of every RBF hidden unit to a batch of input columns.
% Returns one row per hidden unit, one column per sample.
function hiddenout=ho(p1,UnitCenters,SpreadConstant)
nSamples=size(p1,2);
centerDist=dist(UnitCenters',p1); % unit-to-sample Euclidean distances
widths=repmat(SpreadConstant,1,nSamples); % one width column per sample
hiddenout=radbas(centerDist./widths); % radbas(n)=exp(-n.^2)
% Find the hidden units to merge.
% findunittocombine — returns a pair of correlated units to merge
% (u1>0, u2>0), a single low-variance unit to fold into the bias
% (u1>0, u2==0), or nothing (both zero).
function [hiddenunit1,hiddenunit2]=findunittocombine(hiddencorr,hiddenvar,...
unitscombinethreshold,biascombinethreshold)
% Search only the strict upper triangle so each pair is considered once.
upper=triu(hiddencorr)-eye(size(hiddencorr));
while true
[colMax,colArg]=max(abs(upper)); % column-wise maxima of |correlation|
[bestCorr,hiddenunit2]=max(colMax); % overall maximum and its column (unit 2)
if bestCorr<unitscombinethreshold
hiddenunit1=0;hiddenunit2=0;
break % no sufficiently correlated pair remains
end
hiddenunit1=colArg(hiddenunit2); % row of the maximum (unit 1)
if hiddenvar(hiddenunit1)>biascombinethreshold &&...
hiddenvar(hiddenunit2)>biascombinethreshold
break % both units vary enough: merge this pair
end
upper(hiddenunit1,hiddenunit2)=0; % reject this pair and keep searching
end
if hiddenunit1>0,return;end % pair found — done (return exits the function)
% Otherwise, look for a single near-constant unit to fold into the bias.
[lowestVar,flatUnit]=min(hiddenvar);
if lowestVar<biascombinethreshold
hiddenunit1=flatUnit;
hiddenunit2=0;
end
% Linear regression
% linearreg — least-squares fit of vect2 ~ a*vect1 + b (row vectors).
function [a,b]=linearreg(vect1,vect2)
n=size(vect1,2);
m1=mean(vect1);
m2=mean(vect2);
crossMoment=vect1*vect2'/n-m1*m2; % E[xy]-E[x]E[y]
selfMoment=vect1*vect1'/n-m1^2; % E[x^2]-E[x]^2
a=crossMoment/selfMoment;
b=m2-a*m1;
% Plot the outputs of two correlated hidden units over all samples.
function drawcorrelatedunitsout(unitout1,unitout2)
nPts=size(unitout1,2);
figure
echo off
axis([0 nPts 0 1])
axis on
grid
hold on
plot(1:nPts,unitout1,'b-')
plot(1:nPts,unitout2,'k-')
% Merge two hidden units.
% combinetwounits — merge unit2 into unit1 using the regression
% v2 ~ a*v1 + b: unit1's outgoing weights absorb a*w(unit2), the bias
% column absorbs b*w(unit2), then unit2 is deleted from every array.
function [UnitCenters,SpreadConstant,w2ex]=combinetwounits(hiddenunit1,hiddenunit2,a,b,w2ex,UnitCenters,SpreadConstant)
biasCol=size(w2ex,2); % last column of w2ex holds the bias weights
w2ex(:,hiddenunit1)=w2ex(:,hiddenunit1)+a*w2ex(:,hiddenunit2);
w2ex(:,biasCol)=w2ex(:,biasCol)+b*w2ex(:,hiddenunit2);
w2ex(:,hiddenunit2)=[]; % delete unit2's outgoing weight column
UnitCenters(:,hiddenunit2)=[]; % delete unit2's center
SpreadConstant(hiddenunit2,:)=[]; % delete unit2's width
% Plot the output of a single low-variance hidden unit over all samples.
function drawbiasedunitout(unitout)
nPts=size(unitout,2);
figure('position',[300 300 400 300])
echo off
axis([0 nPts 0 1])
axis on
grid
hold on
plot(1:nPts,unitout,'k-')
% Fold a hidden unit into the bias.
% combineunittobias — a near-constant unit's contribution is absorbed into
% the bias column as unitmean*w(unit), then the unit is removed everywhere.
function [UnitCenters,SpreadConstant,w2ex]=combineunittobias(hiddenunit1,unitmean,w2ex,UnitCenters,SpreadConstant)
biasCol=size(w2ex,2); % last column of w2ex holds the bias weights
w2ex(:,biasCol)=w2ex(:,biasCol)+unitmean*w2ex(:,hiddenunit1);
w2ex(:,hiddenunit1)=[]; % delete the unit's outgoing weight column
UnitCenters(:,hiddenunit1)=[]; % delete the unit's center
SpreadConstant(hiddenunit1,:)=[]; % delete the unit's width
% ----------------------------------------------------------------------------------------
% Helper functions used by section one
% Network output function
% RBFNN — forward pass for a batch of column-vector inputs. With no hidden
% units yet, the output is just the bias replicated for every sample.
function NetOut=RBFNN(NewInput,UnitCenters,w2,b2,SpreadConstant)
nHidden=size(w2,2);
nSamples=size(NewInput,2);
if nHidden==0
NetOut=repmat(b2,1,nSamples);
return
end
centerDist=dist(UnitCenters',NewInput); % unit-to-sample distances
phi=radbas(centerDist./repmat(SpreadConstant,1,nSamples)); % Gaussian activations
NetOut=w2*phi+repmat(b2,1,nSamples);
% Add a new hidden unit (RAN novelty/allocation step): the center is placed
% at the new input, the outgoing weight equals the current error, and the
% width is proportional to the distance to the nearest existing center.
function [UnitCenters,w2,SpreadConstant]=AddNewUnit(NewInput,NewErr,NewDist,UnitCenters,w2,SpreadConstant,OverLapCoe)
UnitCenters=[UnitCenters, NewInput]; % append the new center column
w2=[w2, NewErr]; % append the new outgoing weight
SpreadConstant=[SpreadConstant; OverLapCoe*NewDist]; % append the new width
% Gradient-descent fine tuning of the centers, output weights and bias for
% one incoming sample (used when the RAN novelty test does not fire).
% tp = [ErrLimit lr MaxEpoch].
function[UnitCenters,w2,b2]=FineTuning(NewInput,NewOutput,UnitCenters,w2,b2,SpreadConstant,tp)
[xxx,UnitNum]=size(UnitCenters);
% With no hidden units yet, the best fit is to move the bias onto the target.
if(UnitNum==0),b2=NewOutput;return,end
ErrLimit=tp(1); % error tolerance: stop once the residual is small enough
lr=tp(2); % learning rate
MaxEpoch=tp(3); % maximum gradient iterations for this sample
for epoch=1:MaxEpoch
AllDist=dist(UnitCenters',NewInput);
al=radbas(AllDist./SpreadConstant); % hidden-layer output; radbas(n)=exp(-n^2)
NetOut=w2*al+b2;
NewErr=NewOutput-NetOut;
if(norm(NewErr)<ErrLimit),break,end
% LMS-style updates for the output bias and weights.
b2=b2+lr*NewErr;
w2=w2+lr*NewErr*al';
for i=1:UnitNum
% Gradient of exp(-d^2/s^2) w.r.t. the center, scaled by error and weight.
% NOTE(review): NewErr*w2(i) assumes a scalar network output (OutDim==1);
% for vector outputs this product would need w2(:,i)'*NewErr — confirm.
CentInc=2*(NewInput-UnitCenters(:,i))*al(i)*NewErr*w2(i)/(SpreadConstant(i)^2);
UnitCenters(:,i)=UnitCenters(:,i)+lr*CentInc;
end
end
% (removed: non-code keyboard-shortcut help text pasted from a code-hosting
% web page — it was not part of the program and broke MATLAB syntax)