
📄 fcm_pca.m

📁 Classifying the Yeast data set with Fuzzy C-Means (FCM) and Principal Component Analysis (PCA)
💻 MATLAB
%rows of fuzzy_n.txt
%   1-463  belong to CYT  463
% 464-892  belong to NUC  429
% 893-1136 belong to MIT  244
%1137-1299 belong to ME3  163
%1300-1350 belong to ME2   51
%1351-1394 belong to ME1   44
%1395-1429 belong to EXC   35
%1430-1459 belong to VAC   30
%1460-1479 belong to POX   20
%1480-1484 belong to ERL    5
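%(these look like the class codes of the UCI Yeast data set: CYT = cytosolic,
% NUC = nuclear, MIT = mitochondrial, ME3/ME2/ME1 = membrane proteins,
% EXC = extracellular, VAC = vacuolar, POX = peroxisomal, ERL = endoplasmic reticulum lumen)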
clear;

m = 22;    %exponential (fuzziness) weighting exponent
e = 1e-2;  %termination tolerance
c = 10;    %number of classes
x = dlmread('fuzzy_n.txt', '\t'); %read sample
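%fuzzy_n.txt is assumed to be tab-separated with one sample per row,
%ordered by class as in the listing above (1484 rows in total)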
 
%form a sample from picking up a member for each class
for method=1:5   %repeat the experiment five times; 'method' also indexes the test sample of the last class (ERL)
x = dlmread('fuzzy_n.txt', '\t'); %re-read the full sample at the start of each run
s=zeros(10,1);                    %row indices of the 10 test samples (one per class)
yc=[463 429 244 163 51 44 35 30 20 5]; %number of samples in each class
%****Leave One Out Method**** pick one test sample from each class
for pickup=1:10
   if  pickup==1
      s(1)=ceil(yc(1)*rand(1,1));
   else
       s(pickup)=sum(yc(1:pickup-1))+ceil(yc(pickup)*rand(1,1));
       if pickup==10
       s(10)=sum(yc(1:9))+method ;  
       end
    end  
          
end
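%s(pickup) is a row index drawn uniformly at random from class 'pickup';
%for the last class (ERL, 5 members) the outer 'method' loop walks through
%its members deterministically instead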

%form a test sample
TDsample = x(s,:);  %****Leave One Out Method**** one test sample per class
for remove=1:10
   x(s(remove)-remove+1,:)=[];   %delete the chosen test row from the training data
end
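%the '-remove+1' offset compensates for rows already deleted: s is increasing,
%so after k deletions every later row index has shifted down by k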
yc=yc-1; %each class now has one sample fewer (moved to the test set)
yeastvalues = mapstd(x');   % Normalize data
pc = processpca(yeastvalues,0.01);    % PCA
[U,v] = cmfuzz(pc',c,m,e); %Fuzzy C-means
f=U';
maxU = max(f);
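%mapstd standardises every attribute to zero mean and unit variance, and
%processpca discards principal components that contribute less than 1% of the
%variance (both Neural Network Toolbox functions); cmfuzz is not a stock
%MATLAB function but, from its use here, appears to implement fuzzy c-means
%and to return the membership matrix U (samples x clusters), so f(i,j) is the
%membership of sample j in cluster i and maxU(j) its largest membership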
% Accuracy for c-means (assumes FCM cluster i corresponds to true class i)
acc=zeros(1,10);
for accer=1:10
   if accer==1
       acc(accer)=length(find(f(accer,1:yc(accer)) == maxU(1:yc(accer))));
   else
       acc(accer)=length(find(f(accer,(sum(yc(1:accer-1))+1):sum(yc(1:accer))) == maxU((sum(yc(1:accer-1))+1):sum(yc(1:accer))))); %count samples of class 'accer' whose top membership is cluster 'accer'
   end
end
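%note: fuzzy c-means numbers its clusters arbitrarily, so strictly the
%clusters would have to be matched to the class order before this count can
%be read as a per-class accuracy; the script assumes cluster i <-> class i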

fprintf('FCM Accuracy = %f \n',(sum(acc))/(sum(yc))*100);    %calculate accuracy

index1 = find(f(1,:) == maxU); % to be CYT
index2 = find(f(2, :) == maxU); %to be NUC
index3 = find(f(3, :) == maxU); % to be MIT
index4 = find(f(4, :) == maxU); % to be ME3
index5 = find(f(5, :) == maxU); % to be ME2
index6 = find(f(6, :) == maxU); % to be ME1
index7 = find(f(7, :) == maxU); % to be EXC
index8 = find(f(8, :) == maxU); % to be VAC
index9 = find(f(9, :) == maxU); % to be POX
index10 = find(f(10, :) == maxU); % to be ERL
Traw=x';
Dtrain=[Traw(:,index1) Traw(:,index2) Traw(:,index3) Traw(:,index4) Traw(:,index5) Traw(:,index6) Traw(:,index7) Traw(:,index8) Traw(:,index9) Traw(:,index10)]; %regroup the training samples by their FCM cluster (each sample goes to the cluster of its largest membership)
TDtrain=Dtrain';
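%TDtrain is now ordered cluster 1..10; the 'group' labels built below assume
%cluster i contains exactly yc(i) samples, i.e. that FCM roughly recovers the
%original class sizes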
% draw figure
figure; 
group = [repmat(1,yc(1),1); repmat(2,yc(2),1);repmat(3,yc(3),1);repmat(4,yc(4),1);repmat(5,yc(5),1);repmat(6,yc(6),1);repmat(7,yc(7),1);repmat(8,yc(8),1);repmat(9,yc(9),1);repmat(10,yc(10),1)];
gscatter(TDtrain(:,1),TDtrain(:,2),group,'bcgkmrybcg','o+x*^>sdph');
hold on;
% classify the test samples with the k-nearest-neighbour routine fknn (k = 3)
[predicted,memberships,numhits] = fknn(TDtrain, group, TDsample, [1 2 3 4 5 6 7 8 9 10], 3, 1, true);
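%fknn is not a stock MATLAB function (knnclassify, commented out below, would
%be the toolbox alternative); from this call it appears to take the training
%data, training labels, test data, list of class labels, k, a fuzziness
%parameter and a verbosity flag, and to return predicted labels, class
%memberships and the number of correct hits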
 %knn= knnclassify(TDsample, TDtrain, group);
%knnacc=0;
%for knnacc=1:10
%    if predicted~=knnacc
%    predicted(knnacc)=-1;
%    end
%end
fprintf('KNN Hit Number = %f \n',numhits); 
gscatter(TDsample(:,1),TDsample(:,2),predicted,'bcgkmrybcg','.');  %overlay the classified test samples
hold on;
legend('Training class 1','Training class 2','Training class 3','Training class 4','Training class 5','Training class 6','Training class 7','Training class 8','Training class 9','Training class 10', ...
        'Data in class 1','Data in class 2','Data in class 3','Data in class 4','Data in class 5','Data in class 6','Data in class 7','Data in class 8','Data in class 9','Data in class 10');
hold off; 

end
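For reference, the core clustering step can also be reproduced without the custom cmfuzz routine. The following is a minimal, hypothetical sketch assuming the stock Neural Network Toolbox (mapstd, processpca) and Fuzzy Logic Toolbox (fcm) functions and the same fuzzy_n.txt file; it uses the usual exponent of 2 rather than the 22 set above, and simply takes the cluster with the largest membership as the hard label.

% --- minimal FCM + PCA sketch with stock toolbox functions (illustration only) ---
c    = 10;                              % number of clusters
x    = dlmread('fuzzy_n.txt', '\t');    % one sample per row
xn   = mapstd(x');                      % zero mean / unit variance per attribute
pc   = processpca(xn, 0.01);            % drop components contributing <1% of the variance
opts = [2 100 1e-5 0];                  % [exponent, max iterations, min improvement, no display]
[centers, U] = fcm(pc', c, opts);       % U is a c-by-N fuzzy membership matrix
[~, label]   = max(U);                  % hard assignment: cluster with the largest membership

The leave-one-out split, the per-class count and the k-NN step of the full script above would then operate on U' and label in the same way as on f and maxU.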
