
📄 gapekf_multidensity_norm1.m

📁 Fuzzy neural network for function approximation and classification
💻 MATLAB (M-file)
function gapekf_multidensity_norm1(min_goal, name, PTGenerator)
% Sample: gapekf_multidensity_norm1(min_goal, dir_name, PTGenerator)
% min_goal     = goal for the Neural Network
% name         = directory to be created in the current working directory
% PTGenerator  = string name of the function that generates the P and T
% Sample: gapekf_multidensity_norm1(1e-4, 'GetPTforSim', 'GetPTforSim')
% Obtain the function handle and execute the function by using feval.
%
%%%%    Authors: DR. HUANG GUANGBIN
%%%%    NANYANG TECHNOLOGICAL UNIVERSITY
%%%%    EMAIL: EGBHUANG@NTU.EDU.SG; GBHUANG@IEEE.ORG
%%%%    DATE: AUGUST 2002

fhandle = str2func(PTGenerator);
[I,T,TestMatrix] = feval(fhandle);
mkdir(name);
FileName = strcat(name,'\',name);
DirName  = strcat(name,'\');
I_Size = size(I);    % I: Input
T_Size = size(T);    % T: Expected output
N = I_Size(1,2);     % N: the number of training samples
Input_Dim=I_Size(1,1); Output_Dim=T_Size(1,1); % Input_Dim: the number of input neurons; Output_Dim: the number of output neurons
emin=min_goal;          % e_min
etamax=1.15;  r=0.999;  % etamax: epsilon_max;  r: gamma
etamin=0.04;            % etamin: epsilon_min
kp=0.10;                % kp: kappa
qq0=0.00001; pp0=0.9;   % pp0: P_0; qq0: Q_0 of EKF
K=0;   KK=zeros(N,1);   % K: number of neurons; KK(n): number of neurons after learning the n-th observation
Rn=eye(Output_Dim,Output_Dim);    % Rn: R_n of EKF
pp=eye(1,1);                      % pp: P_n of EKF
epoch1_cpu_time=zeros(N, 1);      % to record the CPU time spent on learning each input sample

% begin the function estimation
for n=1:N   % start the sequential learning
    eta(n)=max(etamax*r^(n-1), etamin);
    epoch_cpu_time(n)=cputime;
    KK(n)=K;
    if K==0                        % add the first neuron when there are no hidden neurons yet
        K=K+1;
        K,n,                       % add neuron; display current neuron count and sample index
        A(:,K)=T(:,n);
        u(:,K)=I(:,n);
        thelta(K)=kp*sqrt(I(:,n)'*I(:,n));
        if thelta(K)==0
            thelta(K)=0.00001;
        end
        w(1:Output_Dim,1)= A(:,K);
        w(Output_Dim+1:Output_Dim+Input_Dim,1)= u(:,K);
        w(Output_Dim+Input_Dim+1,1)=thelta(K);
        P=pp0*eye(Output_Dim+Input_Dim+1,Output_Dim+Input_Dim+1);
        P(1:1,1:1)=pp;
        pp=P;
    else                           % do the work when K~=0
        % compute the network output
        for i=1:K
            yyy(i,1)=gaussly(I(:,n),u(:,i),thelta(i));
        end
        E(:,n)= T(:,n)-A*yyy(1:K,1);
        e(n)=sqrt(E(:,n)'*E(:,n))/Output_Dim;
        tnear(n)=sqrt((I(:,n)-u(:,1))'*(I(:,n)-u(:,1)));
        nr=1;                      % find the nearest neuron
        for i=2:K
            tem=sqrt((I(:,n)-u(:,i))'*(I(:,n)-u(:,i)));
            if tem<tnear(n)
                tnear(n)=tem; nr=i;
            end
        end
        thelta(K+1)=kp*sqrt((I(:,n)-u(:,nr))'*(I(:,n)-u(:,nr)));    % temporarily assigned for neuron K+1
        growing_sig=e(n);
        theta1=0.08; mu1=0.25; theta2=0.10; mu2=0.65;   % attr 1
        growing_sig=growing_sig*((thelta(K+1)/(sqrt(2)*theta1))*exp(-(I(1,n)-mu1)^2/(2*theta1^2))+(thelta(K+1)/(sqrt(2)*theta2))*exp(-(I(1,n)-mu2)^2/(2*theta2^2)));
        theta1=0.08; mu1=0.15; theta2=0.10; mu2=0.55;   % attr 2
        growing_sig=growing_sig*((thelta(K+1)/(sqrt(2)*theta1))*exp(-(I(2,n)-mu1)^2/(2*theta1^2))+(thelta(K+1)/(sqrt(2)*theta2))*exp(-(I(2,n)-mu2)^2/(2*theta2^2)));
        growing_sig=growing_sig*sqrt(pi)*thelta(K+1);   % attr 3
        mu=0.0675; % attr 4, exponential PDF
        growing_sig=growing_sig*sqrt(pi)*thelta(K+1)/mu*exp(-(I(4,n)/mu-thelta(K+1)^2/(4*mu^2)));
        mu=0.0838; % attr 5, exponential PDF
        growing_sig=growing_sig*sqrt(pi)*thelta(K+1)/mu*exp(-(I(5,n)/mu-thelta(K+1)^2/(4*mu^2)));
        mu=0.0501; % attr 6, exponential PDF
        growing_sig=growing_sig*sqrt(pi)*thelta(K+1)/mu*exp(-(I(6,n)/mu-thelta(K+1)^2/(4*mu^2)));
        mu=0.0825; % attr 7, exponential PDF
        growing_sig=growing_sig*sqrt(pi)*thelta(K+1)/mu*exp(-(I(7,n)/mu-thelta(K+1)^2/(4*mu^2)));
        mu=0.1880; % attr 8, Rayleigh PDF
        growing_sig=growing_sig*(2/mu)^2*sqrt(pi)*(mu*thelta(K+1))^3/(2*mu^2+thelta(K+1)^2)^(3/2)*exp((I(8,n)^2)/(2*mu^2+thelta(K+1)^2));
        if tnear(n)>=eta(n) && growing_sig>min_goal   % growing criterion
            K=K+1;
            K,n,                   % add neuron; display current neuron count and sample index
            A(:,K)=E(:,n);
            u(:,K)=I(:,n);
            if thelta(K)==0
                thelta(K)=0.00001;
            end
            tn=(K-1)*(Input_Dim+Output_Dim+1);
            w(tn+1:tn+Output_Dim,1)= A(:,K);
            w(tn+Output_Dim+1:tn+Output_Dim+Input_Dim,1)= u(:,K);
            w(tn+Output_Dim+Input_Dim+1,1)=thelta(K);
            P=pp0*eye(tn+Output_Dim+Input_Dim+1,tn+Output_Dim+Input_Dim+1);
            P(1:tn,1:tn)=pp;
            pp=P;
        elseif K>1
            B=zeros(K*(Input_Dim+Output_Dim+1),Output_Dim); % B: A_n of EKF
            j=(nr-1)*(Input_Dim+Output_Dim+1);
            B(j+1:j+Output_Dim,1:Output_Dim)=gaussly(I(:,n),u(:,nr),thelta(nr))*eye(Output_Dim,Output_Dim);
            B(j+Output_Dim+1:j+Output_Dim+Input_Dim,1:Output_Dim)=2*(I(:,n)-u(:,nr))*gaussly(I(:,n),u(:,nr),thelta(nr))*A(:,nr)'/(thelta(nr)^2);
            B(j+Output_Dim+Input_Dim+1,1:Output_Dim)=2*(I(:,n)-u(:,nr))'*(I(:,n)-u(:,nr))*gaussly(I(:,n),u(:,nr),thelta(nr))*A(:,nr)'/(thelta(nr)^3);
            % Ki: K_n of EKF (Kalman gain, restricted to the nearest neuron's parameter block)
            idx=j+1:nr*(Output_Dim+Input_Dim+1);
            Ki=pp(:,idx)*B(idx,1:Output_Dim)*inv(Rn+B(idx,1:Output_Dim)'*pp(idx,idx)*B(idx,1:Output_Dim));
            E1=Ki*E(:,n);
            %%%%%   Kalman filter method: only adjust the nearest neuron
            j=(nr-1)*(Input_Dim+Output_Dim+1);
            w(j+1:j+Output_Dim+Input_Dim+1,1)=w(j+1:j+Output_Dim+Input_Dim+1,1)+E1(j+1:j+Output_Dim+Input_Dim+1,1);
            for i=1:K
                j=(i-1)*(Input_Dim+Output_Dim+1);
                A(:,i)=w(j+1:j+Output_Dim,1);
                u(:,i)=w(j+Output_Dim+1:j+Output_Dim+Input_Dim,1);
                thelta(i)=w(j+Output_Dim+Input_Dim+1,1);
                if thelta(i)==0
                    thelta(i)=0.0001;
                end
            end
            s=size(pp,1);
            pp=(eye(s)-Ki*B')*pp+qq0*eye(s);
            %%%%%   pruning strategy: prune the nearest neuron only
            pruning_sig=sqrt(A(:,nr)'*A(:,nr))/Output_Dim;
            theta1=0.08; mu1=0.25; theta2=0.10; mu2=0.65;   % attr 1
            pruning_sig=pruning_sig*((thelta(nr)/(sqrt(2)*theta1))*exp(-(u(1,nr)-mu1)^2/(2*theta1^2))+(thelta(nr)/(sqrt(2)*theta2))*exp(-(u(1,nr)-mu2)^2/(2*theta2^2)));
            theta1=0.08; mu1=0.15; theta2=0.10; mu2=0.55;   % attr 2
            pruning_sig=pruning_sig*((thelta(nr)/(sqrt(2)*theta1))*exp(-(u(2,nr)-mu1)^2/(2*theta1^2))+(thelta(nr)/(sqrt(2)*theta2))*exp(-(u(2,nr)-mu2)^2/(2*theta2^2)));
            pruning_sig=pruning_sig*sqrt(pi)*thelta(nr);    % attr 3
            mu=0.0675; % attr 4, exponential PDF
            pruning_sig=pruning_sig*sqrt(pi)*thelta(nr)/mu*exp(-(u(4,nr)/mu-thelta(nr)^2/(4*mu^2)));
            mu=0.0838; % attr 5, exponential PDF
            pruning_sig=pruning_sig*sqrt(pi)*thelta(nr)/mu*exp(-(u(5,nr)/mu-thelta(nr)^2/(4*mu^2)));
            mu=0.0501; % attr 6, exponential PDF
            pruning_sig=pruning_sig*sqrt(pi)*thelta(nr)/mu*exp(-(u(6,nr)/mu-thelta(nr)^2/(4*mu^2)));
            mu=0.0825; % attr 7, exponential PDF
            pruning_sig=pruning_sig*sqrt(pi)*thelta(nr)/mu*exp(-(u(7,nr)/mu-thelta(nr)^2/(4*mu^2)));
            mu=0.1880; % attr 8, Rayleigh PDF
            pruning_sig=pruning_sig*(2/mu)^2*sqrt(pi)*(mu*thelta(nr))^3/(2*mu^2+thelta(nr)^2)^(3/2)*exp((u(8,nr)^2)/(2*mu^2+thelta(nr)^2));
            if pruning_sig<min_goal   % pruning criterion
                K=K-1;
                K,n,               % prune neuron; display current neuron count and sample index
                for j=nr:K
                    A(:,j)=A(:,j+1);
                    u(:,j)=u(:,j+1);
                    thelta(j)=thelta(j+1);
                    tn1=(j-1)*(Input_Dim+Output_Dim+1);
                    tn2=j*(Input_Dim+Output_Dim+1);
                    w(tn1+1:tn1+Output_Dim+Input_Dim+1,1)= w(tn2+1:tn2+Output_Dim+Input_Dim+1,1);
                    pp(:,tn1+1:tn1+Output_Dim+Input_Dim+1)=pp(:,tn2+1:tn2+Output_Dim+Input_Dim+1);
                    pp(tn1+1:tn1+Output_Dim+Input_Dim+1,:)=pp(tn2+1:tn2+Output_Dim+Input_Dim+1,:);
                end
            end   % end of pruning
            A=A(:,1:K);
            u=u(:,1:K);
            thelta=thelta(1:K);
            w=w(1:K*(Input_Dim+Output_Dim+1),1);
            pp=pp(1:K*(Input_Dim+Output_Dim+1),1:K*(Input_Dim+Output_Dim+1));
        end
    end
    if n>1
        epoch_cpu_time(n)=epoch_cpu_time(n-1)+cputime-epoch_cpu_time(n);
    else
        epoch_cpu_time(n)=cputime-epoch_cpu_time(n);
    end
end % end of the sequential learning loop

epoch1_cpu_time=epoch_cpu_time(n)

%% TRAINING ERROR: RMS
rms_training=0; mae_training=0;
for i=1:size(I,2)
    for j=1:K
        yyy4(j,1)=gaussly(I(:,i),u(:,j),thelta(j));
    end
    E2(:,i)= T(:,i)-A*yyy4(1:K,1);
    rms_training=rms_training+E2(:,i)'*E2(:,i);
    mae_training=mae_training+max(abs(E2(:,i)));
end
rms_training=sqrt(rms_training/N)
mae_training=mae_training/(Output_Dim*N)

%% TESTING ERROR: RMS
x_testing=TestMatrix.P;
t_testing=TestMatrix.T;
y3=zeros(Output_Dim,size(x_testing,2));
E3=zeros(1,size(x_testing,2));   % preallocated but unused in this version
rms_testing=0; mae_testing=0;
for i=1:size(x_testing,2)
    for j=1:K
        yyy5(j,1)=gaussly(x_testing(:,i),u(:,j),thelta(j));
    end
    y3(:,i)=A*yyy5(1:K,1);
    E2(:,i)= t_testing(:,i)-y3(:,i);
    rms_testing=rms_testing+E2(:,i)'*E2(:,i);
    mae_testing=mae_testing+max(abs(E2(:,i)));
end
rms_testing=sqrt(rms_testing/size(x_testing,2))
mae_testing=mae_testing/(Output_Dim*size(x_testing,2))

if Input_Dim==1 % the testing samples and actual output are plotted only when the input dimension is one
    figure(2); clf; hold on
    plot(TestMatrix.P,TestMatrix.T,'-b');                        % testing data
    plot(x_testing, y3,'-*r', 'LineWidth', 1, 'MarkerSize', 2);  % actual output
    saveas(2,strcat(DirName,'Output.fig'));
end

figure(3); clf
plot(KK);
xlabel('Number of Observations'); ylabel('Number of Hidden Neurons');
save(FileName, 'min_goal', 'KK', 'epoch1_cpu_time', 'epoch_cpu_time', 'rms_training', 'mae_training', 'rms_testing', 'mae_testing', 'A', 'u', 'thelta', 'I', 'T');
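
The helper gaussly.m is called throughout this file but is not included in the listing. Below is a minimal sketch of what it likely computes, assuming the standard Gaussian RBF activation exp(-||x-u||^2 / thelta^2); the derivative terms assembled into B above (2*(x-u)/thelta^2 and 2*||x-u||^2/thelta^3 times the activation) are consistent with this form, but verify against the original gaussly.m before relying on it. Save it as gaussly.m on the MATLAB path.

function y = gaussly(x, u, thelta)
% GAUSSLY  Gaussian RBF activation for input x, centre u, width thelta.
% Assumed form (not part of the original listing): exp(-||x-u||^2 / thelta^2).
d = x - u;                       % displacement between input and centre
y = exp(-(d'*d)/(thelta(1)^2));  % scalar activation under the assumed width convention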
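The PTGenerator argument names a function that must return the training inputs I (Input_Dim x N), the training targets T (Output_Dim x N), and a struct TestMatrix with fields P and T holding the test inputs and targets. The sketch below is purely illustrative: the name GetPTforSim matches the sample call in the header, but the data are synthetic and the target mapping is arbitrary. Note that this multidensity variant indexes input attributes 1 through 8 in its growing and pruning significance terms, so the generator must supply at least 8 input rows.

function [I, T, TestMatrix] = GetPTforSim()
% GETPTFORSIM  Illustrative data generator (hypothetical; replace with real data).
N_train = 500;                               % number of training samples (arbitrary)
I = rand(8, N_train);                        % 8 x N training inputs in [0,1]
T = sum(I(1:2,:), 1);                        % 1 x N toy targets (arbitrary mapping)
TestMatrix.P = rand(8, 100);                 % 8 x 100 test inputs
TestMatrix.T = sum(TestMatrix.P(1:2,:), 1);  % 1 x 100 matching test targets

With such a generator on the path, the sample call from the header runs as: gapekf_multidensity_norm1(1e-4, 'GetPTforSim', 'GetPTforSim').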
