% waveletnn_time_feature_morlet_4f2pog16q.m
%% This program simulates a wavelet neural network (WNN) for modulated-signal classification.
%% The hidden-layer activations are wavelet functions; the mother wavelet is the Morlet function.
clear all;
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% define the necessary parameters for WNN
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
global node_in node_media node_out alfa belta w u o layer_j shift_k; % % node_in corresponds to 'I' in the paper, node_media to 'J', node_out to 'K'; alfa is the learning-rate (step-size) factor
% % Read the network learning parameters and the number of nodes in each layer
% % node_in: input nodes, node_media: hidden nodes, node_out: output nodes
alfa=input('Learning-rate factor alfa = ')
belta=input('Momentum factor belta = ')
layer_j=input('Number of multiresolution levels layer_j = ') % % 3 is a reasonable first choice
shift_k=input('Translation (shift) parameter shift_k = ') % % 1 is a reasonable first choice
node_in=5;
node_media=(2*layer_j+1)*(2*shift_k+1);
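% % The hidden layer holds one wavelet node per (dilation, translation) pair,
% % with dilation index j in [-layer_j, layer_j] and translation index k in [-shift_k, shift_k],
% % hence (2*layer_j+1)*(2*shift_k+1) nodes in total.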
node_out=3;
% node_out=5;
n=5; % % total number of modulation types to be recognized
%% Initialize the network
K=10;
w=rand(node_out,node_media)/K; % % w holds the weights between the hidden (media) layer and the output layer; w(i,j) is the weight from the jth hidden node to the ith output node, uniform in (0,1)/K
u=rand(node_media,node_in)/K; % % u holds the weights between the input layer and the hidden layer
w1=w;
u1=u;
% load('e:\lcy\wnn-network\dwnn-recognize\morning-1-1.mat','w1')
% load('e:\lcy\wnn-network\dwnn-recognize\morning-1-1.mat','u1')
%
% w=w1;
% u=u1;
o=rand(1,node_out)/K; % % o stores the values of the WNN output layer
% % Set the network training parameters
sse_draw=0; % % history of the global error, initialized to zero
train_max=2000; % % maximum number of training epochs
qwsse=0.001; % % desired global error; the summed error over all n patterns must fall below this value to stop training
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% simulating data for learning
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% load('d:\lcy\AMR\design-programme-regulator\DWNN\data\learn-data-5-4f2pog16qam-20av.mat','ppp1')
load('d:\lcy\AMR\design-programme-regulator\DWNN\data\learn-data-5-4f2pog16qam-20av.mat','ppp1')
% load('d:\lcy\AMR\design-programme-regulator\DWNN\data\filter_8db_learn_4f2pog16qam_unav_1.mat','ppp1')
% load('e:\work_files\design-programme-regulator\DWNN\data\4db_learn_4f2pog16q_20av_4.mat','ppp1')
% load('d:\lcy\AMR\design-programme-regulator\DWNN\data\4db_learn_4f2pog16q_20av_4.mat','ppp1')
low=[0 0 0.1 0.05 0.6]; % % linear (min-max) normalization bounds, SNR = 8 dB
up=[35 2.5 0.45 1.2 6];
% low=[0 0.1 0.1 0.1 1.1]; % % linear normalization bounds, SNR = 5 dB
% up=[36 2.3 0.45 1.4 6.8];
% low=[0 0 0.1 0 0]'; % % linear normalization bounds, SNR = 15 dB
% up=[55 2.2 0.45 1.2 5.1]';
% low=[0 0.3 0.1 0.2 1.6]; % % feature ranges when the noise is not band-limit filtered, linear normalization, SNR = 8 dB
% up=[25 2.9 0.35 1.25 7.8];
t=up-low;
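% % Note: x./t + 1 - up./t == (x - low)./(up - low), i.e. each feature is scaled linearly to [0,1].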
for i=1:n
ppp1(i,:)=ppp1(i,:)./t+1-up./t;
end
% d=ppp1;
% for i=1:n % % alternative: normalize by dividing each component by the row sum
% for j=1:node_in
% ppp1(i,j)=ppp1(i,j)/(sum(d(i,:)));
% end
% end
% for i=1:n
% ppp1(i,:)=ppp1(i,:)/max(d(i,:));
% end
qw_o=[0 0 1 % % desired (target) output pattern for each modulation type
0 1 0
0 1 1
1 0 0
1 0 1];
% qw_o=[0 0 0 0 1 % % desired output patterns for a 5-output coding
% 0 0 0 1 0
% 0 0 1 0 0
% 0 1 0 0 0
% 1 0 0 0 0];
for train=1:train_max
sse=0; % % reset the global error
gw_l=zeros(node_media,node_out); % % gw_l and gu_l hold the partial derivatives of the global error with respect to w and u for this pass
gu_l=zeros(node_media,node_in);
for i=1:n % % present the n pattern feature vectors in turn each epoch
out=wnn_implementation(ppp1(i,:)); % % compute the classifier's actual output for this pattern (see the function sketches after the script)
sse=sse+0.5*(qw_o(i,:)-o)*(qw_o(i,:)-o)'; % % accumulate the global (batch) error
for t1=1:node_media % % partial derivatives of the current global error
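% % Decode hidden-node index t1 into a dilation index j1 in [-layer_j, layer_j]
% % and a translation index k1 in [-shift_k, shift_k].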
if mod(t1,2*shift_k+1)==0
j1=t1/(2*shift_k+1)-1-layer_j;
k1=shift_k;
else
j1=floor(t1/(2*shift_k+1))-layer_j;
k1=mod(t1,2*shift_k+1)-shift_k-1;
end
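% % gw_l(t1,:): dE/dw for the output weights fed by hidden node t1 (output error times the wavelet activation).
% % gu_l(t1,:): dE/du for the input weights of hidden node t1, back-propagated through the wavelet derivative (chain rule).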
gw_l(t1,:)=-(qw_o(i,:)-o)*wavelet_function(ppp1(i,:)*u(t1,:)',j1,k1);
gu_l(t1,:)=-(sum((qw_o(i,:)-o).*w(:,t1)'))*(2^(j1/2))*wavelet_derivative(ppp1(i,:)*u(t1,:)',j1,k1)*ppp1(i,:);
end
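% % Gradient-descent step with momentum: step = -alfa*gradient + belta*previous_step (plain gradient step on the first epoch).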
if train==1
sw_l=-alfa*gw_l;
su_l=-alfa*gu_l;
else
sw_l=-alfa*gw_l+belta*sw_l;
su_l=-alfa*gu_l+belta*su_l;
end
w=w+sw_l'; % % update the network weights
u=u+su_l;
end
sse_draw(train)=sse; % % record the global error for this epoch
% if train>=2 % % adaptive learning-rate adjustment
% if sse>sse_draw(train-1)
% alfa=alfa*0.2;
% elseif sse<sse_draw(train-1)
% alfa=alfa*1.15;
% end
% end
if sse<qwsse&&train<train_max % % stop training once the desired error is reached
disp('Number of training epochs: train =')
train
sse
break;
end
end
% Training finished
x=1:1:train; % % plot global error versus training epoch
% plot(x,sse_draw,'r--');
semilogy(x,sse_draw,'r--');
xlabel('Training epochs (x5)')
ylabel('Global error')
title('The Maximum Error is 0.001')
hold on
% disp('Output-layer weights:')
% w
% u
% % Evaluate the recognition performance of the classifier:
% Each modulation type is tested with 400 samples; their feature vectors form a 400-row, 5-column input matrix pppf.
% The pppf matrices are loaded from data_2ASK_8dB.mat, data_4ASK_8dB.mat, data_2FSK_8dB.mat, data_4FSK_8dB.mat,
% data_2PSK_8dB.mat, data_4PSK_8dB.mat, and data_16QAM_8dB.mat respectively.
% One modulation type is selected at a time, and the feature matrix from the corresponding file is loaded into ppp.
% Note: for each pass of the loop below, the test set is a 400-row, 5-column matrix ppp of feature vectors of the selected modulation type.
num_e=zeros(1,n);
out1=zeros(400,node_out,n);
o1=zeros(400,node_out,n);
for p=1:5
if p==1
% load('d:\lcy\AMR\design-programme-regulator\DWNN\data\4db-10-4fsk-400-1.mat','ppp');
load('d:\lcy\AMR\design-programme-regulator\DWNN\data\data-10-1-4fsk.mat','ppp');
elseif p==2
% load('d:\work_files\design-programme-regulator\DWNN\data\4db-10-2psk-400-1.mat','ppp');
load('d:\lcy\AMR\design-programme-regulator\DWNN\data\data-10-1-2psk.mat','ppp');
elseif p==3
% load('d:\work_files\design-programme-regulator\DWNN\data\4db-10-oqpsk-400-1.mat','ppp');
load('d:\lcy\AMR\design-programme-regulator\DWNN\data\data-10-1-oqpsk.mat','ppp');
elseif p==4
% load('d:\work_files\design-programme-regulator\DWNN\data\4db-10-gmsk-400-1.mat','ppp');
load('d:\lcy\AMR\design-programme-regulator\DWNN\data\data-10-1-gmsk.mat','ppp');
elseif p==5
% load('d:\work_files\design-programme-regulator\DWNN\data\4db-10-16qam-400-1.mat','ppp');
load('d:\lcy\AMR\design-programme-regulator\DWNN\data\data-10-1-16qam.mat','ppp');
end
% c=ppp;
for i=1:400
ppp(i,:)=ppp(i,:)./t+1-up./t;
end
% for i=1:400
% ppp(i,:)=ppp(i,:)/max(c(i,:));
% end
%
% for i=1:400 % alternative: normalize by dividing each component by the row sum
% for j=1:node_in
% ppp(i,j)=ppp(i,j)/(sum(c(i,:)));
% end
% end
% ppp=(ppp2+ppp3)/2;
for i=1:400 % % tally the recognition performance of the WNN classifier for the current modulation type
if ~isequal(size(ppp),[400,5])
disp('Invalid test sample set');
break;
end
out=wnn_implementation(ppp(i,:));
out1(i,:,p)=out;
o1(i,:,p)=o;
if out==qw_o(p,:)
num_e(p)=num_e(p)+1;
end
end
disp('Correct recognition rate of the WNN classifier for this modulation type:')
num_e/400
end
return;
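% ------------------------------------------------------------------
% The three helpers called above (wnn_implementation, wavelet_function,
% wavelet_derivative) live in separate .m files that are not shown here.
% The sketches below are a plausible reconstruction, inferred only from how
% they are called in this script and from the Morlet mother wavelet named in
% the header; the exact normalization, decision threshold, and derivative
% form are assumptions, not the original implementation. In a recent MATLAB
% release they could also be kept as local functions at the end of this script.

function out=wnn_implementation(x)
% Forward pass of the WNN for one feature vector x (1 x node_in).
% Sets the global output vector o and returns a hard 0/1 decision out.
global node_media node_out w u o layer_j shift_k;
h=zeros(1,node_media);
for t1=1:node_media
    % Same index-to-(j1,k1) decoding as in the training loop above.
    if mod(t1,2*shift_k+1)==0
        j1=t1/(2*shift_k+1)-1-layer_j;
        k1=shift_k;
    else
        j1=floor(t1/(2*shift_k+1))-layer_j;
        k1=mod(t1,2*shift_k+1)-shift_k-1;
    end
    h(t1)=wavelet_function(x*u(t1,:)',j1,k1); % hidden-layer wavelet activation
end
o=h*w'; % linear output layer: o(k) = sum_j w(k,j)*h(j)
out=double(o>=0.5); % assumed hard-decision rule for comparison with qw_o
end

function y=wavelet_function(t,j,k)
% Dilated and translated wavelet psi_{j,k}(t) = 2^(j/2)*psi(2^j*t - k),
% with the common real Morlet form psi(s) = cos(1.75*s).*exp(-s.^2/2) (assumed).
s=2^j*t-k;
y=2^(j/2)*cos(1.75*s).*exp(-s.^2/2);
end

function y=wavelet_derivative(t,j,k)
% Derivative of psi(2^j*t - k) with respect to t, i.e. psi'(s)*2^j with
% s = 2^j*t - k, so that the training-loop line
% gu_l(t1,:) = ... (2^(j1/2))*wavelet_derivative(...)*ppp1(i,:)
% matches the chain rule for d/du of 2^(j/2)*psi(2^j*(x*u') - k) (assumed).
s=2^j*t-k;
y=(-1.75*sin(1.75*s).*exp(-s.^2/2)-s.*cos(1.75*s).*exp(-s.^2/2))*2^j;
end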