📄 compet7.m
% compet7.m
%
clf reset
pausetime = 0.05;
% PROBLEM DEFINITION
%===================
peec7                        % script assumed to define the pattern matrix PE
P = normc(PE);               % normalize each input vector to unit length
% INITIALIZE NETWORK ARCHITECTURE
%================================
% Set input layer size R and neuron layer size S.
[R,Q] = size(P); S = 5;
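% For reference: the layer has S = 5 competitive neurons, and each weight row
% W(i,:) is a point in the input space (assumed 2-D here, matching the 5-by-2
% W0 below). compet returns a one-hot column vector with a 1 for the neuron
% whose net input W*p is largest and 0 elsewhere.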
%W0 = randnc(S,R);
W0 = [-0.5953 -0.7837;
      -0.2435  0.0032;
      -0.8005  0.5992;
       0.6311 -0.7735;
       0.9771 -0.1390];
%=======
% Plot the unit circle, the normalized input vectors, and the initial weights.
% (plotv draws each column of P as a line from the origin.)
plot(cos(0:.1:2*pi),sin(0:.1:2*pi),'--')
axis('equal')
hold on
plotv(P);
h = plot(W0(:,1)',W0(:,2)','+');
title('Input Vectors (lines) & Weight Vectors (+)');
xlabel('P(1,q), W(i,1)');
ylabel('P(2,q), W(i,2)');
% TRAIN THE NETWORK
%==================
% TRAINING PARAMETERS
disp_freq = 10;              % redraw the weight markers every disp_freq cycles
max_cycle = 500;             % total number of random presentations
lp.lr = 0.05;                % Kohonen learning rate
% NETWORK PARAMETERS
W = W0;                      % current weights
LW = W;                      % weights at the last redraw
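% For reference, a sketch of what the toolbox call below works out to: for the
% winning neuron i (A(i) = 1) the Kohonen rule applied by learnk reduces to
%   W(i,:) = W(i,:) + lp.lr*(P(:,q)' - W(i,:));
% i.e. the winner's weight row is nudged toward the presented input, and all
% other rows are left unchanged.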
for cycle=1:max_cycle
    % PRESENTATION PHASE
    % Pick a random input vector; compet marks the neuron with the largest
    % net input.
    q = fix(rand*Q) + 1;
    A = compet(W*P(:,q));
    % LEARNING PHASE
    i = find(A == 1);                                     % winning neuron
    dW = learnk(W,P(:,q),[],[],A,[],[],[],[],[],lp,[]);   % Kohonen update
    W = W + dW;
    % DISPLAY PROGRESS
    if rem(cycle,disp_freq) == 0
        delete(h)
        h = plot(W(:,1)',W(:,2)','+');
        LW = W;
        pause(pausetime);    % pause2 in older toolbox versions
    end
end
pause
hold off
% SUMMARIZE RESULTS
%==================
fprintf('\nFINAL NETWORK VALUES:\n');
W
fprintf('Trained for %.0f cycles.\n',max_cycle)
% Classification: YE(i,q) = 1 when the inner product of weight row i and
% input column q exceeds 0.95 (both vectors are close to unit length).
YE = hardlim(W*P-.95)
% Rescale each weight row by its own Euclidean length.
for i=1:S
    W2 = sqrt(W(i,1)^2 + W(i,2)^2);
    WE(i,:) = [W(i,1) W(i,2)]*W2;
end
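% Display the normalized patterns, the original patterns from peec7, the
% rescaled weights, and the classification matrix.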
P
PE
WE
YE
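% Expected result: each weight vector ('+') should settle near a cluster of
% the normalized inputs on the unit circle, and YE indicates which inputs
% each neuron responds to.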