% bpprogram.m
%Two-layer BP (backpropagation) network training script.
%Reads the input-sample matrix `finalout1327` (one sample per column) and
%the matching target matrix `targetvalue327` from the workspace, trains an
%input-hidden-output sigmoid network with batch gradient descent and an
%adaptive learning rate, then plots the per-iteration mean squared error.
inputsamplematrix=finalout1327;
%Because the sigmoid can only approach 0/1 asymptotically, ideal targets
%should be set to 0.1/0.9 rather than exactly 0/1.
targetvalue=targetvalue327;

%Network dimensions are taken from the data instead of being hard-coded
%(originally fixed at 13 inputs, 4 hidden, 3 outputs, 27 samples).
numsamples=size(inputsamplematrix,2);  %number of training samples
numinputs=size(inputsamplematrix,1);   %input dimension
numhidden=4;                           %hidden-layer size
numoutputs=size(targetvalue,1);        %output dimension

inputweightmatrix=abs(randn(numhidden,numinputs));  %input->hidden weights
hiddenlayerthreshold=randn(numhidden,1);            %hidden thresholds, ONE per neuron
layerweightmatrix=abs(randn(numoutputs,numhidden)); %hidden->output weights
outputlayerthreshold=randn(numoutputs,1);           %output thresholds, ONE per neuron
%NOTE(review): the original replicated each threshold vector into a
%per-sample matrix and then updated every column with that sample's own
%delta, so the copies diverged and the network ended up with a different
%threshold per training sample. Thresholds are kept as per-neuron vectors
%here and updated with the batch-summed deltas.

maxiter=1000;
errorfunction=zeros(1,maxiter);  %preallocated error history
learningrate=1;                  %initial step size
lastiter=maxiter;                %iterations actually run (for plotting)

for iternum=1:maxiter
    %Forward pass: input -> hidden, sigmoid activation. net = W*x - theta.
    hiddenlayerin=inputweightmatrix*inputsamplematrix-repmat(hiddenlayerthreshold,1,numsamples);
    hiddenlayerout=1./(1+exp(-hiddenlayerin));
    %Forward pass: hidden -> output, sigmoid activation.
    outputlayerin=layerweightmatrix*hiddenlayerout-repmat(outputlayerthreshold,1,numsamples);
    outputlayerout=1./(1+exp(-outputlayerin));

    %Backward pass: generalized errors (deltas) for each layer.
    abs_error=targetvalue-outputlayerout;
    outputlayer_err=abs_error.*outputlayerout.*(1-outputlayerout);
    hiddenlayer_err=(layerweightmatrix'*outputlayer_err).*hiddenlayerout.*(1-hiddenlayerout);

    %Mean squared error over the whole training set: sum of squared
    %errors / numsamples / 2 (same formula as the original
    %ones(1,3)*square_error*ones(27,1)/27/2).
    errorfunction(iternum)=sum(sum(abs_error.^2))/numsamples/2;
    if errorfunction(iternum) < 1e-4  %early-stopping condition
        lastiter=iternum;
        break;
    end

    %Variable learning rate: if the total error decreased, grow the rate
    %by 1.05; if it grew by more than 4%, shrink it by 0.7; otherwise
    %leave it unchanged.
    if iternum > 1
        if errorfunction(iternum) < errorfunction(iternum-1)
            learningrate=1.05*learningrate;
        elseif errorfunction(iternum) > 1.04*errorfunction(iternum-1)
            learningrate=0.7*learningrate;
        end
    end

    %Batch update. With net = W*x - theta, dE/dtheta = +delta, so the
    %thresholds must be DECREASED by the summed deltas; the original code
    %added them, stepping the thresholds up the error gradient.
    layerweightmatrix=layerweightmatrix+learningrate*outputlayer_err*hiddenlayerout';
    outputlayerthreshold=outputlayerthreshold-learningrate*sum(outputlayer_err,2);
    inputweightmatrix=inputweightmatrix+learningrate*hiddenlayer_err*inputsamplematrix';
    hiddenlayerthreshold=hiddenlayerthreshold-learningrate*sum(hiddenlayer_err,2);
end
plot(errorfunction(1:lastiter))
% (removed: keyboard-shortcut help text scraped from the code-hosting page)