% som.m
% Basic implementation of the Kohonen Map
%
% Source:
% T. Kohonen (1997). Self-Organizing Maps, 2nd. Edition,
% Springer-Verlag.
%
% NOTE: This implementation aims to be simple and direct. More powerful
% implementations of the SOM can be found in the SOMtoolbox demos.
%
% Authors: Guilherme A. Barreto
% Date: November 17th 2005
clear; clc; close all;

% Load data (one input vector per row)
load dataset1.dat;
Dw = dataset1; clear dataset1
% Number of samples and input dimensionality
[LEN_DATA DIM_INPUT] = size(Dw);

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Define size of the network %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Mx = 4;               % Number of neurons in the X-dimension
My = 4;               % Number of neurons in the Y-dimension
MAP_SIZE = [Mx My];   % Size of the SOM lattice

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Create a SOM network structure %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
sMap = som_map_struct(DIM_INPUT,'msize',MAP_SIZE,'rect','sheet');

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Different weights initialization methods %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% sMap = som_randinit(Dw, sMap);   % Random weight initialization
% sMap = som_lininit(Dw, sMap);    % Linear weight initialization
% Guard: codebook initialization below samples Mx*My distinct data vectors,
% so the data set must contain at least that many rows.
if LEN_DATA < Mx*My
    error('Not enough data vectors (%d) to initialize %d prototypes.', LEN_DATA, Mx*My);
end
I = randperm(LEN_DATA);
sMap.codebook = Dw(I(1:Mx*My),:);   % Initialize prototypes with Mx*My data vectors drawn at random
Co = som_unit_coords(sMap);         % Coordinates of the neurons on the map lattice

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Specification of some training parameters %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
si = round(max(Mx,My)/2);   % Initial neighborhood width
sf = 0.001;                 % Final neighborhood width
ei = 0.8;                   % Initial learning rate
ef = 0.001;                 % Final learning rate
Nep = 100;                  % Number of training epochs
Tmax = LEN_DATA*Nep;        % Total number of training iterations
% Schedule vectors: entry k corresponds to T = k-1, so index 1 holds the
% initial values (ei, si); the exact final values (ef, sf) sit at index
% Tmax+1 and are never used by the training loop (which indexes 1..Tmax).
T = 0:Tmax;
eta = ei*(ef/ei).^(T/Tmax);   % Exponentially decaying learning-rate schedule
sig = si*(sf/si).^(T/Tmax);   % Exponentially decaying neighborhood-width schedule
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Train Kohonen Map (TKM) %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
counter = zeros(1,Mx*My);   % Number of victories (BMU selections) per neuron
Qerr = zeros(1,Nep);        % Preallocate quantization-error history (was grown inside the loop)
for t = 1:Nep,              % loop over the epochs
    fprintf('epoch = %d\n', t);   % Show current epoch
    for tt = 1:LEN_DATA,
        % Compute distances of all prototype vectors to current input
        Di = sqrt(som_eucdist2(sMap, Dw(tt,:)));
        % Find the BMU (i.e. the one with minimum value for Di)
        [Di_min win] = min(Di);
        counter(win) = counter(win) + 1;   % Increment the number of victories of the winner
        % Global iteration index across epochs; named 'it' so it does not
        % shadow the schedule index vector T defined above
        it = (t-1)*LEN_DATA + tt;
        % Squared map-coordinate distances of every neuron to the winner
        % (vectorized: replaces the per-neuron update loop, same result)
        D2 = sum((Co - repmat(Co(win,:), Mx*My, 1)).^2, 2);
        % Gaussian neighborhood function centered on the winner
        H = exp(-0.5*D2/(sig(it)*sig(it)));
        % Update all prototypes toward the current input, weighted by H
        sMap.codebook = sMap.codebook + ...
            eta(it) * repmat(H, 1, DIM_INPUT) .* (repmat(Dw(tt,:), Mx*My, 1) - sMap.codebook);
    end
    % Quantization error per training epoch
    Qerr(t) = som_quality(sMap, Dw);
end
% Data points and prototype lattice together in input space
figure; hold on
plot(Dw(:,1), Dw(:,2), 'r+');            % training data
som_grid(sMap,'Coord',sMap.codebook)     % SOM lattice drawn at the prototype positions
%plot(sMap.codebook(:,1),sMap.codebook(:,2),'b*')
hold off
title('Prototype vectors in input space')

% Evolution of the quantization error over the training epochs
figure; plot(Qerr);
title('Quantization Error per Training Epoch')

% Victories (BMU counts) accumulated per neuron over all epochs
figure; bar(1:Mx*My, counter);
title('Victories per neuron')