📄 gaprbf.m
function gaprbf(min_goal, name, PTGenerator, sample_range)
% Sample: gaprbf(min_goal, dir_name, PTGenerator, sample_range)
% min_goal     = error goal for the neural network
% name         = directory to be created in the current working directory
% PTGenerator  = string name of the function that generates the P and T samples
% sample_range = possible range S(X) (size) from which the training samples are drawn
%
%%% Authors: DR. GUANG-BIN HUANG
%%% NANYANG TECHNOLOGICAL UNIVERSITY
%%% EMAIL: EGBHUANG@NTU.EDU.SG; GBHUANG@IEEE.ORG
%%% Web: http://www.ntu.edu.sg/eee/icis/cv/egbhuang.htm
%%% DATE: AUGUST 2002 (Ver 1.0)
%%% Latest Revision: November 2004
%
% For the 'Sine' example: gaprbf(1e-4, 'Sine', 'GetPTforSine', 10)

% Obtain the function handle and run the sample generator via feval
fhandle = str2func(PTGenerator);
[I, T, TestMatrix] = feval(fhandle);
mkdir(name);
FileName = strcat(name, '\', name);
DirName  = strcat(name, '\');

I_Size = size(I);                  % I: input
T_Size = size(T);                  % T: expected output
N = I_Size(1,2);                   % N: the number of training samples
Input_Dim  = I_Size(1,1);          % Input_Dim: the number of input neurons
Output_Dim = T_Size(1,1);          % Output_Dim: the number of output neurons

emin   = min_goal;                 % emin: e_min
etamax = 1.15; r = 0.999;          % etamax: epsilon_max; r: gamma
etamin = 0.04;                     % etamin: epsilon_min
kp     = 0.10;                     % kp: kappa
qq0 = 0.00001; pp0 = 0.9;          % pp0: P_0; qq0: Q_0 of the EKF
K = 0; KK = zeros(N,1);            % K: number of neurons; KK(n): number of neurons after learning the n-th observation
Rn = eye(Output_Dim, Output_Dim);  % Rn: R_n of the EKF
pp = eye(1,1);                     % pp: P_n of the EKF
epoch_cpu_time = zeros(N,1);       % records the CPU time spent on learning each input sample

% Begin the function estimation
for n = 1:N % start the sequential learning
    eta(n) = max(etamax*r^(n-1), etamin);
    epoch_cpu_time(n) = cputime;
    KK(n) = K;
    if K == 0 % the network still has no hidden neuron
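        % The first observation seeds the network: its target becomes the
        % output weight A(:,1), its input becomes the centre u(:,1), and the
        % width thelta(1) is kappa times the norm of the input. The EKF state
        % vector w stacks [A(:,k); u(:,k); thelta(k)] for each neuron k, and
        % pp is the matching error covariance.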
        K = K + 1; K, n,           % add the first neuron (K and n are echoed to the console)
        A(:,K) = T(:,n); u(:,K) = I(:,n);
        thelta(K) = kp*sqrt(I(:,n)'*I(:,n));
        if thelta(K) == 0, thelta(K) = 0.00001; end
        w(1:Output_Dim,1) = A(:,K);
        w(Output_Dim+1:Output_Dim+Input_Dim,1) = u(:,K);
        w(Output_Dim+Input_Dim+1,1) = thelta(K);
        P = pp0*eye(Output_Dim+Input_Dim+1, Output_Dim+Input_Dim+1);
        P(1:1,1:1) = pp; pp = P;
    else % the network already has neurons (K ~= 0)
        % compute the network output
        for i = 1:K
            yyy(i,1) = gaussly(I(:,n), u(:,i), thelta(i));
        end
        E(:,n) = T(:,n) - A*yyy(1:K,1);
        e(n) = sqrt(E(:,n)'*E(:,n));
        % find the nearest neuron
        nr = 1;
        for i = 1:K
            tem(i) = sqrt((I(:,n)-u(:,i))'*(I(:,n)-u(:,i)));
        end
        [tnear(n), nr] = min(tem(1:K));
        thelta(K+1) = kp*sqrt((I(:,n)-u(:,nr))'*(I(:,n)-u(:,nr))); % temporarily assigned for neuron K+1
        % growing criterion: the input must be far enough from the nearest
        % neuron and the would-be neuron must be significant enough
        if tnear(n) >= eta(n) && e(n)/Output_Dim*(1.8*thelta(K+1))^Input_Dim/(sample_range+0.0001) > min_goal
            K = K + 1; K, n,
            A(:,K) = E(:,n); u(:,K) = I(:,n);
            if thelta(K) == 0, thelta(K) = 0.00001; end
            tn = (K-1)*(Input_Dim+Output_Dim+1);
            w(tn+1:tn+Output_Dim,1) = A(:,K);
            w(tn+Output_Dim+1:tn+Output_Dim+Input_Dim,1) = u(:,K);
            w(tn+Output_Dim+Input_Dim+1,1) = thelta(K);
            P = pp0*eye(tn+Output_Dim+Input_Dim+1, tn+Output_Dim+Input_Dim+1);
            P(1:tn,1:tn) = pp; pp = P;
        elseif K > 1
            B = zeros(K*(Input_Dim+Output_Dim+1), Output_Dim); % B: A_n (gradient matrix) of the EKF
            j = (nr-1)*(Input_Dim+Output_Dim+1);
            nearest_output = gaussly(I(:,n), u(:,nr), thelta(nr));
            B(j+1:j+Output_Dim,1:Output_Dim) = nearest_output*eye(Output_Dim,Output_Dim);
            B(j+Output_Dim+1:j+Output_Dim+Input_Dim,1:Output_Dim) = 2*(I(:,n)-u(:,nr))*nearest_output*A(:,nr)'/(thelta(nr)^2);
            B(j+Output_Dim+Input_Dim+1,1:Output_Dim) = 2*(I(:,n)-u(:,nr))'*(I(:,n)-u(:,nr))*nearest_output*A(:,nr)'/(thelta(nr)^3);
            % Ki: K_n (Kalman gain) of the EKF, built from the block of pp and B
            % that belongs to the nearest neuron
            idx = j+1 : j+Output_Dim+Input_Dim+1;
            Ki = pp(:,idx)*B(idx,1:Output_Dim)*inv(Rn + B(idx,1:Output_Dim)'*pp(idx,idx)*B(idx,1:Output_Dim));
            E1 = Ki*E(:,n);
            %%%%% Kalman filter method: adjust the nearest neuron only
            j = (nr-1)*(Input_Dim+Output_Dim+1);
            w(j+1:j+Output_Dim+Input_Dim+1,1) = w(j+1:j+Output_Dim+Input_Dim+1,1) + E1(j+1:j+Output_Dim+Input_Dim+1,1);
            i = nr; j = (i-1)*(Input_Dim+Output_Dim+1);
            A(:,i) = w(j+1:j+Output_Dim,1);
            u(:,i) = w(j+Output_Dim+1:j+Output_Dim+Input_Dim,1);
            thelta(i) = w(j+Output_Dim+Input_Dim+1,1);
            if thelta(i) == 0, thelta(i) = 0.0001; end
            s = size(pp,1);
            pp = (eye(s) - Ki*B')*pp + qq0*eye(s);
            %%%%% pruning strategy: consider pruning the nearest neuron only
            if sqrt(A(:,nr)'*A(:,nr))/Output_Dim*(1.8^Input_Dim*thelta(nr)^Input_Dim/(sample_range+0.0001)) <= min_goal % +0.0001 prevents the zero-range case
                K = K - 1; K, n,
                for j = nr:K
                    A(:,j) = A(:,j+1); u(:,j) = u(:,j+1); thelta(j) = thelta(j+1);
                    tn1 = (j-1)*(Input_Dim+Output_Dim+1);
                    tn2 = j*(Input_Dim+Output_Dim+1);
                    w(tn1+1:tn1+Output_Dim+Input_Dim+1,1) = w(tn2+1:tn2+Output_Dim+Input_Dim+1,1);
                    pp(:,tn1+1:tn1+Output_Dim+Input_Dim+1) = pp(:,tn2+1:tn2+Output_Dim+Input_Dim+1);
                    pp(tn1+1:tn1+Output_Dim+Input_Dim+1,:) = pp(tn2+1:tn2+Output_Dim+Input_Dim+1,:);
                end
            end % end of pruning
            A = A(:,1:K); u = u(:,1:K); thelta = thelta(1:K);
            w  = w(1:K*(Input_Dim+Output_Dim+1),1);
            pp = pp(1:K*(Input_Dim+Output_Dim+1), 1:K*(Input_Dim+Output_Dim+1));
        end
    end
    if n > 1
        epoch_cpu_time(n) = epoch_cpu_time(n-1) + cputime - epoch_cpu_time(n);
    else
        epoch_cpu_time(n) = cputime - epoch_cpu_time(n);
    end
end % end of the sequential learning loop

training_time = epoch_cpu_time(n)
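% Evaluation: the root-mean-square error over M samples is
%   RMS = sqrt( sum_i ||t_i - y_i||^2 / (Output_Dim * M) )
% computed below first on the training set (M = N) and then on the testing
% set stored in TestMatrix.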
%% TRAINING ERROR: RMS
rms_training = 0;
for i = 1:size(I,2)
    for j = 1:K
        yyy4(j,1) = gaussly(I(:,i), u(:,j), thelta(j));
    end
    E2(:,i) = T(:,i) - A*yyy4(1:K,1);
    rms_training = rms_training + E2(:,i)'*E2(:,i);
end
rms_training = sqrt(rms_training/(Output_Dim*N))

%% TESTING ERROR: RMS
x_testing = TestMatrix.P;
t_testing = TestMatrix.T;
y3 = zeros(Output_Dim, size(x_testing,2));
E3 = zeros(Output_Dim, size(x_testing,2));
rms_testing = 0;
for i = 1:size(x_testing,2)
    for j = 1:K
        yyy5(j,1) = gaussly(x_testing(:,i), u(:,j), thelta(j));
    end
    y3(:,i) = A*yyy5(1:K,1);
    E3(:,i) = t_testing(:,i) - y3(:,i);
    rms_testing = rms_testing + E3(:,i)'*E3(:,i);
end
rms_testing = sqrt(rms_testing/(Output_Dim*size(x_testing,2)))

if Input_Dim == 1 % the testing samples and the actual output are plotted only when the input dimension is one
    figure(1); clf; hold on
    plot(TestMatrix.P, TestMatrix.T, '-b');                       % testing data
    plot(x_testing, y3, '-*r', 'LineWidth', 1, 'MarkerSize', 2);  % actual network output
    xlabel('Input'); ylabel('Output');
    saveas(1, strcat(DirName, 'Output.fig'));
end

figure(2); clf;
plot(KK);
xlabel('Number of Observations'); ylabel('Number of Hidden Neurons');

figure(3); clf;
plot(epoch_cpu_time, '-');
xlabel('Number of Observations'); ylabel('CPU Time Spent on Training (seconds)');

save(FileName, 'min_goal', 'KK', 'training_time', 'epoch_cpu_time', 'rms_training', ...
     'rms_testing', 'sample_range', 'A', 'u', 'thelta', 'I', 'T');
end % end of the main GAP-RBF function

function y = outputly(a0, A, y1)
% y1 is the k*1 output vector of the hidden layer (k is the number of hidden neurons).
% a0 is the ny*1 bias vector (ny is the dimension of the output vector y).
% A  is the ny*k weight matrix.
% y  is the ny*1 output vector.
temp = A*y1;
y = a0 + temp;
end

function y1 = gaussly(x, u, thelta)
% Gaussian radial basis activation: exp(-||x - u||^2 / thelta^2)
y1 = exp(-1*(x-u)'*(x-u)/(thelta^2));
end
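The generator function referenced by the 'Sine' example above is not part of this listing. From the way gaprbf.m calls it — [I, T, TestMatrix] = feval(fhandle), followed by reads of TestMatrix.P and TestMatrix.T — it must return the training inputs P, the targets T, and a struct of testing samples. A minimal sketch of such a generator follows; the sample counts and the input interval [0, 10] are illustrative assumptions, with only the output contract dictated by gaprbf.m.

function [P, T, TestMatrix] = GetPTforSine()
% Minimal sketch of a P/T generator for the 'Sine' example (assumed, not original).
N_train = 2000;                       % assumed number of training samples
P = 10*rand(1, N_train);              % 1-D inputs drawn from [0, 10], so S(X) = 10
T = sin(P);                           % training targets
TestMatrix.P = linspace(0, 10, 400);  % evenly spaced testing inputs (assumed count)
TestMatrix.T = sin(TestMatrix.P);     % testing targets
end

With such a generator on the MATLAB path, the call from the header comment, gaprbf(1e-4, 'Sine', 'GetPTforSine', 10), runs end to end; the last argument matches the S(X) = 10 range the sketch samples from, and the figures and results are saved into a 'Sine' subdirectory.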