% ng.m
% Basic implementation of the Neural-Gas algorithm
%
% Source:
% T. M. Martinetz, S. G. Berkovich, and K. J. Schulten.
% Neural-gas network for vector quantization and its application to time-series prediction.
% IEEE Transactions on Neural Networks, 4(4):558-569, 1993.
%
% This implementation aims to be simple and direct. More powerful
% implementations of the Neural-Gas can be found in the SOMtoolbox.
%
% Author: Guilherme A. Barreto
% Date: November 17th 2005
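%
% For reference, the per-sample update rule implemented below (from the paper
% cited above): each prototype w_i is ranked by its distance to the current
% input x (rank k_i = 0 for the closest prototype) and then moved towards x:
%   w_i <- w_i + eta(t) * exp(-k_i/lambda(t)) * (x - w_i)
% where the learning rate eta(t) and the rank-decay width lambda(t) both
% decrease exponentially from their initial to their final values.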
clear; clc; close all;
% Load data
load dataset1.dat;
Dw=dataset1; clear dataset1
% Get size of data matrix (1 input vector per row)
[LEN_DATA DIM_INPUT]=size(Dw);
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Define size of the network %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Mx = 4; % Number of neurons in the X-dimension
My = 4; % Number of neurons in the Y-dimension
MAP_SIZE = [Mx My]; % Map size (Neural-Gas ignores map topology; only the Mx*My prototypes matter)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Create a CL network structure %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
sMap = som_map_struct(DIM_INPUT,'msize',MAP_SIZE,'rect','sheet');
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Different weights initialization methods %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% sMap = som_randinit(Dw, sMap); % Random weight initialization
% sMap = som_lininit(Dw, sMap); % Linear weight initialization
I=randperm(LEN_DATA); sMap.codebook=Dw(I(1:Mx*My),:); % Select Mx*My data vectors at random
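% (This draws Mx*My distinct samples, so the dataset must contain at least Mx*My vectors.)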
Co=som_unit_coords(sMap); % Coordinates of neurons in the map (not used by the Neural-Gas update)
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Specification of some training parameters %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
li=10; %round(max(Mx,My)/2); % Initial neighborhood
lf=0.01; % Final neighborhood
ei=0.1; % Initial learning rate
ef=0.001; % Final learning rate
Nep=100; % Number of epochs
Tmax=LEN_DATA*Nep; % Maximum number of iterations
T=0:Tmax; % Time indices used to precompute the annealing schedules
lambda=li*power(lf/li,T/Tmax); % Neighborhood (rank-decay) width schedule
eta=ei*power(ef/ei,T/Tmax); % Learning rate schedule
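% Optional sanity check of the annealing schedules (uncomment to inspect):
% figure, semilogy(T,lambda,'-',T,eta,'--')
% legend('\lambda(t)','\eta(t)'), title('Annealing schedules')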
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Train the Neural-Gas network %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
for t=1:Nep, % loop for the epochs
epoch=t, % Show current epoch
for tt=1:LEN_DATA,
% Compute distances of all prototype vectors to current input
Di=sqrt(som_eucdist2(sMap,Dw(tt,:)));
% Sort Di in ascending order
[Di_ordered RANK] = sort(Di);
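% RANK(k) holds the index of the k-th closest prototype, so find(RANK==i)
% below returns the (1-based) rank of prototype "i" for the current input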
% Update the weights of all prototypes according to their distance rank
iter=(t-1)*LEN_DATA+tt; % global iteration counter across all epochs
for i=1:Mx*My,
% Find the position (rank) of neuron "i" in RANK
ki=find(RANK==i);
% Compute the corresponding rank-based weighting function
H=exp(-(ki-1)/lambda(iter));
% Update the weights of neuron "i"
sMap.codebook(i,:)=sMap.codebook(i,:) + eta(iter)*H*(Dw(tt,:)-sMap.codebook(i,:));
end
end
% Quantization error per training epoch
Qerr(t) = som_quality(sMap, Dw);
end
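% Optional post-training check (uses som_bmus from the SOM Toolbox); uncomment
% to get the index of the winning prototype for each data vector:
% bmus = som_bmus(sMap, Dw);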
% Plot prototypes and data altogether
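% (the scatter plot below assumes 2-D inputs; for higher-dimensional data only
% the first two components are shown)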
figure, plot(Dw(:,1),Dw(:,2),'+r'), hold on
plot(sMap.codebook(:,1),sMap.codebook(:,2),'b*')
title('Prototype vectors in input space'), hold off
% Plot quantization error evolution per training epoch
figure, plot(Qerr)
title('Quantization Error per Training Epoch')