steepestdescent.m
% ==========================================================
%
% Neural Networks A Classroom Approach
% Satish Kumar
% Copyright Tata McGraw Hill, 2004
%
% MATLAB code that implements the steepest descent
% learning law
% Reference: Table 5.9, Page 146
%
% ==========================================================
% PROGRAM FOR STEEPEST DESCENT LEARNING
load data.txt                            % Loads (x,d) pairs, one per row,
                                         % into the variable "data"
max_points = size(data,1);               % Number of data points
x = data(:,1)' + 0.5;                    % Inputs (shifted by 0.5)
d = data(:,2)' + 0.5;                    % Desired outputs (shifted by 0.5)
eta = 0.01;                              % Set the learning rate
R = zeros(2,2);                          % Initialize the correlation matrix
X = [ones(1,max_points); x];             % Augment inputs with a bias row
% Cross-correlation vector P = E[d*x] and target power D = E[d^2]
P = sum([d.*X(1,:); d.*X(2,:)],2)/max_points;
D = sum(d.^2)/max_points;
for k = 1:max_points
    R = R + X(:,k)*X(:,k)';              % Accumulate outer products
end
R = R/max_points;                        % Average to get R = E[x*x']
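% Both statistics admit equivalent vectorized forms (a sketch of the
% same computations): P = X*d'/max_points and R = X*X'/max_points.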
wiener = R\P;                            % Compute the Wiener solution
errormin = D - P'*(R\P);                 % Minimum mean-squared error
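% Sanity check (a sketch): the empirical MSE at the Wiener weights
% reproduces errormin, since E(w) = D - 2*w'*P + w'*R*w attains its
% minimum D - P'*inv(R)*P at w = inv(R)*P.
mse_check = mean((d - (wiener(1) + wiener(2)*x)).^2);   % == errormin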
shift1 = linspace(-12,12,21);            % Generate a grid of weight
shift2 = linspace(-9,9,21);              % offsets about the Wiener solution
shiftwts = zeros(2,21);
for i = 1:21
    shiftwts(1,i) = wiener(1) + shift1(i);
    shiftwts(2,i) = wiener(2) + shift2(i);
end
err_surf = zeros(21,21);                 % Error surface (avoids shadowing
for i = 1:21                             % the built-in "error" function);
    for j = 1:21                         % rows index w_1 and columns index
                                         % w_0, the orientation contour expects
        err_surf(j,i) = sum((d - (shiftwts(1,i) + x.*shiftwts(2,j))).^2);
    end
end
err_surf = err_surf/max_points;          % Average SSE to mean-squared error
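% Cross-check (a sketch): the surface is just the quadratic form
% E(w) = D - 2*w'*P + w'*R*w evaluated on the grid; a spot check:
wq = [shiftwts(1,1); shiftwts(2,1)];     % One grid point
Eq = D - 2*wq'*P + wq'*R*wq;             % Should equal err_surf(1,1)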
figure
plot(wiener(1),wiener(2),'*k')           % Mark the Wiener solution
hold on
[C,h] = contour(shiftwts(1,:),shiftwts(2,:),err_surf,10); % Error contours
clabel(C,h);
w = [-3.9 6.27]';                        % Initial weight vector
w0 = w;                                  % Save the starting point
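% The update below is w <- w - eta*grad E, with grad E(w) = 2*(R*w - P)
% for the quadratic cost E(w) = D - 2*w'*P + w'*R*w. A quick stability
% check (a sketch of the standard eigenvalue condition): the iteration
% converges iff 0 < eta < 1/max(eig(R)), since the weight error evolves
% as e(k+1) = (I - 2*eta*R)*e(k).
if eta >= 1/max(eig(R))
    warning('eta exceeds the stability bound 1/lambda_max(R)');
end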
for loop = 1:500                         % Perform descent for 500 iterations
    w = w + eta*(-2*(R*w-P));            % Steepest-descent update
    wts1(loop) = w(1);                   % Record the weight trajectory
    wts2(loop) = w(2);
end
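% Convergence check (a sketch): after a stable run the final weights
% should sit close to the Wiener solution.
fprintf('Distance to the Wiener solution: %g\n', norm(w - wiener));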
wts1 = [w0(1) wts1];                     % Prepend the starting point
wts2 = [w0(2) wts2];
plot(wts1,wts2,'r')                      % Overlay the descent trajectory
axis([-12 12 -10 8]);
grid on
xlabel('\itw_0');
ylabel('\itw_1');
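% Usage note: with data.txt on the MATLAB path, running this script
% marks the Wiener solution, draws ten MSE contours around it, and
% overlays the steepest-descent trajectory from w0 = [-3.9 6.27]'.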