📄 test_gentleboost_model.html
字号:
positive = labels(2);ind_positive = find(labels==positive);[d , N] = size(X);[Itrain , Itest] = sampling(X , y , options);[Ncv , Ntrain] = size(Itrain);Ntest = size(Itest , 2);error_train_gentle = zeros(1 , Ncv);error_test_gentle = zeros(1 , Ncv);error_train_srng = zeros(1 , Ncv);error_test_srng = zeros(1 , Ncv);error_train_svm = zeros(1 , Ncv);error_test_svm = zeros(1 , Ncv);tptrain_gentle = zeros(Ncv , 100);fptrain_gentle = zeros(Ncv , 100);tptest_gentle = zeros(Ncv , 100);fptest_gentle = zeros(Ncv , 100);tptrain_srng = zeros(Ncv , 100);fptrain_srng = zeros(Ncv , 100);tptest_srng = zeros(Ncv , 100);fptest_srng = zeros(Ncv , 100);tptrain_svm = zeros(Ncv , 100);fptrain_svm = zeros(Ncv , 100);tptest_svm = zeros(Ncv , 100);fptest_svm = zeros(Ncv , 100);<span class="keyword">for</span> i=1:Ncv<span class="comment">% i</span><span class="comment">% drawnow</span> [Xtrain , ytrain , Xtest , ytest] = samplingset(X , y , Itrain , Itest , i); model_gentle = gentleboost_model(Xtrain , ytrain , options.T , options); [ytrain_est_gentle , fxtrain_gentle] = gentleboost_predict(Xtrain , model_gentle , options); error_train_gentle(i) = sum(ytrain_est_gentle~=ytrain)/Ntrain; [Wproto , yproto , lambda] = ini_proto(Xtrain , ytrain , options.Nproto_pclass); [Wproto_est , yproto_est , lambda_est, E_SRNG] = srng_model(Xtrain , ytrain , Wproto , yproto , lambda, options); [ytrain_est_srng , disttrain_srng] = NN_predict(Xtrain , Wproto_est , yproto_est , lambda_est , options); error_train_srng(i) = sum(ytrain_est_srng~=ytrain)/Ntrain; model_svm = svmtrain(ytrain' , Xtrain' , options.strcmd); [ytrain_est_svm , accuracy , fxtrain_svm] = svmpredict(ytrain' , Xtrain' , model_svm , [<span class="string">'-b 1'</span>]); error_train_svm(i) = sum(ytrain_est_svm'~=ytrain)/Ntrain; indextarget = find(model_svm.Label==positive); ytrain(ytrain ~=positive) = -1; ytrain(ytrain ==positive) = 1; [tptrain_gentle(i , :) , fptrain_gentle(i , :)] = basicroc(ytrain , fxtrain_gentle(ind_positive , :)); 
dktrain = min(disttrain_srng(yproto~=positive , :)); dltrain = min(disttrain_srng(yproto==positive , :)); fxtrain_srng = (dktrain - dltrain)./(dktrain + dltrain); [tptrain_srng(i , :) , fptrain_srng(i , :)] = basicroc(ytrain , fxtrain_srng); [tptrain_svm(i , :) , fptrain_svm(i , :)] = basicroc(ytrain , fxtrain_svm(: , indextarget)'); [ytest_est_gentle , fxtest_gentle] = gentleboost_predict(Xtest , model_gentle , options); error_test_gentle(i) = sum(ytest_est_gentle~=ytest)/Ntest; [ytest_est_srng , disttest_srng] = NN_predict(Xtest , Wproto_est , yproto_est , lambda_est , options); error_test_srng(i) = sum(ytest_est_srng~=ytest)/Ntest; [ytest_est_svm , accuracy , fxtest_svm] = svmpredict(ytest' , Xtest' , model_svm , [<span class="string">'-b 1'</span>]); error_test_svm(i) = sum(ytest_est_svm'~=ytest)/Ntest; ytest(ytest ~=positive) = -1; ytest(ytest ==positive) = 1; [tptest_gentle(i , :) , fptest_gentle(i , :)] = basicroc(ytest , fxtest_gentle(ind_positive , :)); dktest = min(disttest_srng(yproto~=positive , :)); dltest = min(disttest_srng(yproto==positive , :)); fxtest_srng = (dktest - dltest)./(dktest + dltest); [tptest_srng(i , :) , fptest_srng(i , :)] = basicroc(ytest , fxtest_srng); [tptest_svm(i , :) , fptest_svm(i , :)] = basicroc(ytest , fxtest_svm(: , indextarget)');<span class="keyword">end</span>disp([mean(error_train_gentle) , mean(error_train_srng) , mean(error_train_svm) ; mean(error_test_gentle) , mean(error_test_srng) , mean(error_test_svm)])fptrain_mean_gentle = mean(fptrain_gentle);tptrain_mean_gentle = mean(tptrain_gentle);auc_train_gentle = auroc(tptrain_mean_gentle', fptrain_mean_gentle');fptest_mean_gentle = mean(fptest_gentle);tptest_mean_gentle = mean(tptest_gentle);auc_test_gentle = auroc(tptest_mean_gentle', fptest_mean_gentle');fptrain_mean_srng = mean(fptrain_srng);tptrain_mean_srng = mean(tptrain_srng);auc_train_srng = auroc(tptrain_mean_srng', fptrain_mean_srng');fptest_mean_srng = mean(fptest_srng);tptest_mean_srng = 
mean(tptest_srng);auc_test_srng = auroc(tptest_mean_srng', fptest_mean_srng');fptrain_mean_svm = mean(fptrain_svm);tptrain_mean_svm = mean(tptrain_svm);auc_train_svm = auroc(tptrain_mean_svm', fptrain_mean_svm');fptest_mean_svm = mean(fptest_svm);tptest_mean_svm = mean(tptest_svm);auc_test_svm = auroc(tptest_mean_svm', fptest_mean_svm');figureplot(fptrain_mean_gentle , tptrain_mean_gentle , <span class="string">'k'</span> , fptrain_mean_srng , tptrain_mean_srng , <span class="string">'r'</span> , fptrain_mean_svm , tptrain_mean_svm , <span class="string">'b'</span> , <span class="string">'linewidth'</span> , 2)axis([-0.02 , 1.02 , -0.02 , 1.02])legend(sprintf(<span class="string">'Gentle, AUC = %5.4f'</span> , auc_train_gentle) , sprintf(<span class="string">'SRNG, AUC = %5.4f'</span> , auc_train_srng) , sprintf(<span class="string">'SVM, AUC = %5.4f'</span> , auc_train_svm))xlabel(<span class="string">'False Positive Rate'</span> , <span class="string">'fontsize'</span> , 12 , <span class="string">'fontweight'</span> , <span class="string">'bold'</span>)ylabel(<span class="string">'True Positive Rate'</span> , <span class="string">'fontsize'</span> , 12 , <span class="string">'fontweight'</span> , <span class="string">'bold'</span>)title(sprintf(<span class="string">'Train data, d = %d, positive = %d, T = %d'</span> , d , positive , options.T) , <span class="string">'fontsize'</span> , 13 , <span class="string">'fontweight'</span> , <span class="string">'bold'</span>)grid <span class="string">on</span>figureplot(fptest_mean_gentle , tptest_mean_gentle , <span class="string">'k'</span> , fptest_mean_srng , tptest_mean_srng , <span class="string">'r'</span> , fptest_mean_svm , tptest_mean_svm , <span class="string">'b'</span> , <span class="string">'linewidth'</span> , 2)axis([-0.02 , 1.02 , -0.02 , 1.02])legend(sprintf(<span class="string">'Gentle, AUC = %5.4f'</span> , auc_test_gentle) , sprintf(<span class="string">'SRNG, AUC = %5.4f'</span> , auc_test_srng) , 
sprintf(<span class="string">'SVM, AUC = %5.4f'</span> , auc_test_svm))xlabel(<span class="string">'False Positive Rate'</span> , <span class="string">'fontsize'</span> , 12 , <span class="string">'fontweight'</span> , <span class="string">'bold'</span>)ylabel(<span class="string">'True Positive Rate'</span> , <span class="string">'fontsize'</span> , 12 , <span class="string">'fontweight'</span> , <span class="string">'bold'</span>)title(sprintf(<span class="string">'Test data, d = %d, positive = %d, T = %d'</span> , d , positive , options.T) , <span class="string">'fontsize'</span> , 13 , <span class="string">'fontweight'</span> , <span class="string">'bold'</span>)grid <span class="string">on</span></pre><pre class="codeoutput"> 0.0232 0.0337 0.0162 0.0476 0.0404 0.0262</pre><img vspace="5" hspace="5" src="test_gentleboost_model_03.png"> <img vspace="5" hspace="5" src="test_gentleboost_model_04.png"> <p class="footer"><br> Published with MATLAB® 7.5<br></p> </div> <!--##### SOURCE BEGIN #####%% Example 1 : train, test errors and ROC cuves on IRIS data
%% Example 1 setup: reset workspace, load IRIS data, define training options
clear;
clc;
close all;
drawnow;

% Load IRIS features X and class labels y, then list the distinct classes
load iris
labels = unique(y);

% Sampling / training options.
% NOTE(review): method = 7 appears to select the hold-out scheme configured by
% options.holding (rho = train fraction, K = number of runs) -- confirm against
% sampling(). weaklearner = 0 selects the default weak learner of
% gentleboost_model (presumably decision stumps -- verify).
options.method      = 7;
options.holding.rho = 0.7;
options.holding.K   = 50;
options.weaklearner = 0;
options.epsi        = 0.1;
options.lambda      = 1e-2;
options.max_ite     = 1000;
options.T           = 10;    % number of boosting rounds
% Choose the second class as the "positive" class for ROC analysis and keep
% its row index into the labels vector (used to pick the matching row of fx).
positive     = labels(2);
ind_positive = find(labels == positive);

% Problem dimensions and cross-validation index sets (one row per CV run).
[d , N]          = size(X);
[Itrain , Itest] = sampling(X , y , options);
[Ncv , Ntrain]   = size(Itrain);
Ntest            = size(Itest , 2);

% Pre-allocate per-run error rates and 100-point ROC curves.
[error_train , error_test]            = deal(zeros(1 , Ncv));
[tptrain , fptrain , tptest , fptest] = deal(zeros(Ncv , 100));
% Cross-validation loop: for each hold-out split, train gentleboost, record
% train/test error rates and 100-point ROC curves for the positive class.
for i=1:Ncv
% i
% drawnow
% Extract the i-th train/test split from the precomputed index sets.
[Xtrain , ytrain , Xtest , ytest] = samplingset(X , y , Itrain , Itest , i);
% Train a gentleboost model with options.T boosting rounds.
model_gentle = gentleboost_model(Xtrain , ytrain , options.T , options);
% Predicted labels and raw scores fxtrain on the training set.
[ytrain_est , fxtrain] = gentleboost_predict(Xtrain , model_gentle , options);
% Training misclassification rate for this run.
error_train(i) = sum(ytrain_est~=ytrain)/Ntrain;
% Remap labels to {-1,+1} for ROC computation. This MUST happen after the
% error computation above, since ytrain_est uses the original label values.
% (Order matters: non-positive labels are set to -1 first, then positive to +1.)
ytrain(ytrain ~=positive) = -1;
ytrain(ytrain ==positive) = 1;
% ROC on the training set, using the score row of the positive class.
[tptrain(i , :) , fptrain(i , :)] = basicroc(ytrain , fxtrain(ind_positive , :));
% Same evaluation on the held-out test set.
[ytest_est , fxtest] = gentleboost_predict(Xtest , model_gentle , options);
error_test(i) = sum(ytest_est~=ytest)/Ntest;
% Remap test labels to {-1,+1} (again, only after the error computation).
ytest(ytest ~=positive) = -1;
ytest(ytest ==positive) = 1;
[tptest(i , :) , fptest(i , :)] = basicroc(ytest , fxtest(ind_positive , :));
end
% Report the mean train/test misclassification rates over all CV runs.
disp([mean(error_train) , mean(error_test)])

% Fold-averaged ROC curves (mean over the Ncv rows) for train and test.
fptrain_mean = mean(fptrain);
tptrain_mean = mean(tptrain);
fptest_mean  = mean(fptest);
tptest_mean  = mean(tptest);

% Areas under the averaged ROC curves.
auc_train = auroc(tptrain_mean' , fptrain_mean');
auc_test  = auroc(tptest_mean' , fptest_mean');

% Overlay the train (black) and test (red) ROC curves with their AUCs.
figure
plot(fptrain_mean , tptrain_mean , 'k' , fptest_mean , tptest_mean , 'r' , 'linewidth' , 2)
axis([-0.02 , 1.02 , -0.02 , 1.02])
legend(sprintf('Train, AUC = %5.4f' , auc_train) , sprintf('Test, AUC = %5.4f' , auc_test))
xlabel('False Positive Rate' , 'fontsize' , 12 , 'fontweight' , 'bold')
ylabel('True Positive Rate' , 'fontsize' , 12 , 'fontweight' , 'bold')
title(sprintf('Gentleboost/weak = %d, d = %d, positive = %d, T = %d' , options.weaklearner , d , positive , options.T) , 'fontsize' , 13 , 'fontweight' , 'bold')
grid on
%% Example 2 : train, test errors versus number of weak-learners on WINE data
load wine
options.method = 7;
options.holding.rho = 0.7;
options.holding.K = 50;
options.weaklearner = 0;
options.epsi = 0.1;
options.lambda = 1e-2;
options.max_ite = 1000;
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -