📄 wnns.m
字号:
clear all
%initiate of data
P=48; %numberof sample样本数
m=3;%number of input node输入节点数
n=10;%number of hidden node隐层节点数
N=3;%number of ouptut node输出节点数
%
%a(n) b(n) scale and shifting parameter matrix尺度和平移参数矩阵
%x(P,m) input matrix of P sample输入矩阵
%net(P,n) ouput of hidden node隐层输出节点
%y(P,N) output of network网络输出矩阵
%d(P,N) ideal output of network理想输出矩阵
% phi(P,n) ouput of hidden node wavelet funciton隐层节点小波变换函数
%W(N,n)weight value between ouput and hidden 隐层到输出的权值
%WW(n,m) weight value between hidden and input node输入节点到隐层的权值
x=[141 68 77
31 51 74
92 80 84
137 69 76
32 50 72
111 87 90
147 67 77
34 53 75
158 128 115
138 61 72
34 51 73
97 81 81
140 67 78
30 50 70
120 97 91
123 76 82
33 48 71
92 76 82
122 73 93
34 52 75
121 97 92
144 69 78
32 49 72
121 102 98
140 67 78
31 50 72
121 96 94
132 68 75
32 53 75
98 76 81
121 71 77
33 55 74
125 96 93
122 65 76
45 65 85
122 95 93
152 59 69
35 51 75
149 120 108
147 61 73
39 55 80
104 96 97
136 67 77
31 51 73
113 90 91
145 67 78
32 50 72
130 95 91
];
d=[1 0 0
0 1 0
0 0 1
1 0 0
0 1 0
0 0 1
1 0 0
0 1 0
0 0 1
1 0 0
0 1 0
0 0 1
1 0 0
0 1 0
0 0 1
1 0 0
0 1 0
0 0 1
1 0 0
0 1 0
0 0 1
1 0 0
0 1 0
0 0 1
1 0 0
0 1 0
0 0 1
1 0 0
0 1 0
0 0 1
1 0 0
0 1 0
0 0 1
1 0 0
0 1 0
0 0 1
1 0 0
0 1 0
0 0 1
1 0 0
0 1 0
0 0 1
1 0 0
0 1 0
0 0 1
1 0 0
0 1 0
0 0 1
];
x=mat2gray(x);
W=rand(N,n);%产生N*n均匀分布的随机矩阵
WW=rand(n,m);
a=ones(1,n);
b=ones(1,n);
%%%%%%%%%%%%%%%%%%
%EW(N,n) gradient of W隐层到输出层的权值梯度
%EWW(n,m) gradient of WW输入节点到隐层的权值梯度
%Ea(n) gradient of a小波尺度参数梯度
%Eb(n) gradient of b小波平移参数梯度
%%%%%%%%%%%%%%]
epoch=1;%初始训练次数
epo=1000;%最大训练次数
error=0.05;%初始网络误差
err=0.01;%期望网络误差
delta =1;
lin=0.5;%学习系数
while (error>=err & epoch<=epo)%将网络误差大于期望误差且训练次数小于最大训练次数时进行下面的循环语句
u=0;%u is the middle variant
%caculation of net input
for p=1:P
for j=1:n
u=0;
for k=1:m
u=u+WW(j,k)*x(p,k);
end
net(p,j)=u;%隐单元得到的输入矩阵P*n
end
end
%calculation of morlet 0r mexican wavelet output
for p=1:P
for j=1:n
u=net(p,j);
phi(p,j)=wavelet(u,a(j),b(j));
end%经小波变换后隐单元输出的矩阵P*n
end
%calculation of output of network
for p=1:P
for i=1:N
v=0;
for j=1:n
v=v+W(i,j)*phi(p,j);
end
y(p,i)=1/(1+exp(-v));%输出端输出的分类矩阵
end
end
%calculation of error of output计算实际输出与期望输出的误差
u=0;
for p=1:P
for i=1:N
u=u+(d(p,i)-y(p,i))^2;
end
end
u=u/2;
error=u; %网络误差
%calculate of gradient of network计算网络误差梯度
EW=zeros(N,n);
for k=1:n
for j=1:N
for p=1:P
EW(j,k)=EW(j,k)-(d(p,j)-y(p,j))*y(p,j)*(1-y(p,j))*phi(p,k);
end
end
end
Ea=zeros(1,n);
Eb=zeros(1,n);
for k=1:n
for p=1:P
for j=1:N
Ea(1,k)=Ea(k)+(d(p,j)-y(p,j))*y(p,j)*(1-y(p,j))*W(j,k)*diffa(net(p,k),a(k),b(k))*(net(p,k)-b(k))/a(k)^2;
end
end
end
for k=1:n
for p=1:P
for j=1:N
Eb(1,k)=Eb(k)+(d(p,j)-y(p,j))*y(p,j)*(1-y(p,j))*W(j,k)*diffb(net(p,k),a(k),b(k))/a(k);
end
end
end
for i=1:N
for j=1:n
W(i,j)=W(i,j)-lin*EW(i,j);
end
end
for i=1:n
a(i)=a(i)-lin*Ea(1,i);
b(i)=b(i)-lin*Eb(1,i);
end
% W=W-lin*EW;
% a=a-lin*Ea;
% b=b-lin*Eb;
%number of epoch increase by 1
epoch=epoch+1;
end
pic=imread('sub.tif');
pic=im2double(pic);
pic=pic(:,2:end-1,:);
pic1=pic(:,:,1);
pic2=pic(:,:,2);
pic3=pic(:,:,3);
[mm,nn]=size(pic1);
pic1=reshape(pic1,mm*nn,1);
pic2=reshape(pic2,mm*nn,1);
pic3=reshape(pic3,mm*nn,1);
s(:,1)=pic1;
s(:,2)=pic2;
s(:,3)=pic3;
P=mm*nn;
u=0;%u is the middle variant
%caculation of net input
for p=1:P
for j=1:n
u=0;
for k=1:m
u=u+WW(j,k)*s(p,k);
end
net(p,j)=u;%隐单元得到的输入矩阵P*n
end
end
%calculation of morlet 0r mexican wavelet output
for p=1:P
for j=1:n
u=net(p,j);
u=(u-b(j))/a(j);
phi(p,j)=cos(1.75*u)*exp(-u*u/2); %morlet wavelet
end%经小波变换后隐单元输出的矩阵P*n
end
%calculation of output of network
for p=1:P
for i=1:N
v=0;
for j=1:n
v=v+W(i,j)*phi(p,j);
end
y(p,i)=1/(1+exp(-v));%输出端输出的分类矩阵
end
end
for p=1:P
if y(p,1)>=y(p,2)
if y(p,1)>=y(p,3)
clas(p,1)=150;
else clas(p,1)=230;
end
else if y(p,2)>=y(p,3)
clas(p,1)=5;
else clas(p,1)=230;
end
end
end
cla=reshape(clas,mm,nn);
c=mat2gray(cla);
imshow(c,[]);
epoch
error
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -