⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 whk_l.m

📁 魏海坤编著的《神经网络结构设计的理论与方法》 国防工业出版社出版
💻 M
字号:
function main()
% MAIN  Train a BP network with weight-elimination regularization on a
% nonlinear time-series identification task, prune hidden units whose
% output weights are negligible, then evaluate on a held-out test set.
%
% Requires the Neural Network Toolbox (rands, logsig, sumsqr).
AllDataNum=200;              % total number of input/output samples
TrainDataNum=100;            % first half of the samples used for training

% --- Generate the time series ---------------------------------------
u=rands(1,AllDataNum+1);     % random driving input in [-1,1]
y=zeros(1,AllDataNum+1);
for i=2:AllDataNum+1
    numerator=16*u(i-1)+8*y(i-1);
    % NOTE(review): the two squared terms below are identical (both
    % y(i-1)^2); the textbook formula may intend 4*u(i-1)^2 for one of
    % them — kept as-is to preserve the original data-generating process.
    denominator=3+4*y(i-1)^2+4*y(i-1)^2;
    append=2/10*u(i-1)+2/10*y(i-1);
    y(i)=numerator/denominator+append;
end

% --- Build all input/output pairs: [u(k-1);y(k-1)] -> y(k) -----------
% Preallocated instead of growing inside the loop.
AllDataIn=zeros(2,AllDataNum);
AllDataOut=zeros(1,AllDataNum);
for i=2:AllDataNum+1
    AllDataIn(:,i-1)=[u(i-1);y(i-1)];
    AllDataOut(i-1)=y(i);
end

TrainDataIn=AllDataIn(:,1:TrainDataNum);
TrainDataOut=AllDataOut(:,1:TrainDataNum);
TestDataIn=AllDataIn(:,TrainDataNum+1:AllDataNum);
TestDataOut=AllDataOut(:,TrainDataNum+1:AllDataNum);

% --- Network size and training hyper-parameters ----------------------
InDim=2;
OutDim=1;
HiddenUnitNum=20;

MaxEpochs=10000;
lr=0.0005;                   % learning rate
E0=0.5;                      % SSE target: training stops once reached
W0=0.1;                      % scale constant of the weight-elimination penalty
Filter=0.92;                 % low-pass coefficient for the averaged SSE
Regular=0;                   % regularization coefficient (adapted below)
RegularInc=0.5e-007;         % additive step for the coefficient
EliminateLimit=0.01;         % |output weight| below this -> unit is pruned

% --- Random initial weights ------------------------------------------
W1=0.1*rands(HiddenUnitNum,InDim);
B1=0.1*rands(HiddenUnitNum,1);
W2=0.1*rands(OutDim,HiddenUnitNum);
B2=0.1*rands(OutDim,1);

W1Ex=[W1 B1];                % biases folded in as an extra column
W2Ex=[W2 B2];

TrainDataInEx=[TrainDataIn' ones(TrainDataNum,1)]';  % constant-1 row for the bias
ErrHistory=[];
RegularHistory=Regular;
NewAveSSE=0;
PrvAveSSE=0;
PrvSSE=0;                    % BUGFIX: was never initialized before first read
for i=1:MaxEpochs            % BUGFIX: original "for i=MaxEpochs" ran one pass only
    % Forward pass
    HiddenOut=logsig(W1Ex*TrainDataInEx);
    HiddenOutEx=[HiddenOut' ones(TrainDataNum,1)]';
    NetworkOut=W2Ex*HiddenOutEx;

    % Stopping criterion
    Error=TrainDataOut-NetworkOut;
    SSE=sumsqr(Error);       % BUGFIX: was "SEE=..." while later code reads SSE
    NewAveSSE=Filter*NewAveSSE+(1-Filter)*SSE;  % exponentially averaged SSE

    % Record the training error after each weight update
    ErrHistory=[ErrHistory SSE];

    if SSE<E0,break,end

    % Back-propagated errors
    Delta2=Error;
    Delta1=W2'*Delta2.*HiddenOut.*(1-HiddenOut);

    % Gradient of the data term
    dW2Ex=Delta2*HiddenOutEx';
    dW1Ex=Delta1*TrainDataInEx';

    % Gradient of the weight-elimination penalty term
    ddW2Ex=2*W2Ex./((W0+W2Ex.^2/W0).^2);
    ddW1Ex=2*W1Ex./((W0+W1Ex.^2/W0).^2);

    % Weight update: gradient step minus regularization pull toward zero
    W1Ex=W1Ex+lr*dW1Ex-Regular*ddW1Ex;
    W2Ex=W2Ex+lr*dW2Ex-Regular*ddW2Ex;

    % Adapt the regularization coefficient from the SSE trend
    if (SSE>=PrvSSE&NewAveSSE<PrvAveSSE&SSE>E0),
        Regular=Regular-RegularInc;
        if Regular<0,Regular=0;end
    elseif (SSE>=PrvSSE&NewAveSSE>=PrvAveSSE&SSE>E0),
        Regular=Regular*0.995;
    else
        Regular=Regular+RegularInc;
    end

    % Record the regularization coefficient after each adjustment
    RegularHistory=[RegularHistory Regular];
    PrvSSE=SSE;PrvAveSSE=NewAveSSE;

    % Keep the hidden-to-output weights (bias column excluded) for pruning
    W2=W2Ex(:,1:HiddenUnitNum);
end
% Plot the learning-error curve
figure
hold on
grid
[xx,Num]=size(ErrHistory);
plot(1:Num,ErrHistory,'k-');

% Plot the RegularHistory curve
figure
hold on
grid
plot(1:Num,RegularHistory(1:Num),'k-');

% --- Prune hidden units whose output weight is negligible ------------
LastB2=W2Ex(:,HiddenUnitNum+1);
LastW1Ex=[];
LastW2=[];
for i=1:HiddenUnitNum
    if (abs(W2(i))>EliminateLimit)   % BUGFIX: was "EliminatedLimit" (undefined)
        LastW2=[LastW2 W2(:,i)];
        LastW1Ex=[LastW1Ex;W1Ex(i,:)];
    end
end

% Report the surviving hidden-layer size and the test-set error
[xx,HiddenUnitNum]=size(LastW2);
HiddenUnitNum
LastW1=LastW1Ex(:,1:InDim);
LastB1=LastW1Ex(:,InDim+1);

TestNNOut=BPNet(TestDataIn,LastW1,LastB1,LastW2,LastB2);
TestError=sumsqr(TestDataOut-TestNNOut)

function Out=BPNet(In,W1,B1,W2,B2)
% BPNET  Forward pass of a two-layer network: logsig hidden layer
% followed by a linear output layer.  In holds one sample per column;
% Out has the same number of columns as In.
[dummy,SampleNum]=size(In);
BiasRow=ones(1,SampleNum);               % broadcasts each bias across samples
HiddenAct=logsig(W1*In+B1*BiasRow);      % hidden-layer activations
Out=W2*HiddenAct+B2*BiasRow;             % linear output layer

    

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -