📄 train_test_multiple_class_al.m
function run = train_test_multiple_class_AL(X, Y, trainindex, testindex, classifier)
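% TRAIN_TEST_MULTIPLE_CLASS_AL  Multi-class active learning via error-correcting
% output codes (ECOC): each column of a coding matrix defines one binary problem,
% the binary margins are decoded with a loss function, and on every iteration the
% most uncertain test examples are moved into the training set.
%
% Inferred interface (not documented in the original source):
%   X          - feature matrix, one row per example
%   Y          - label matrix with one column per original class, values taken
%                from preprocess.ClassSet
%   trainindex - row indices of the initial labeled (training) pool
%   testindex  - row indices of the unlabeled (test) pool
%   classifier - base-learner identifier passed through to Classify
%   run        - struct of overall and per-iteration results (run.Stage_Result)
%
% Relies on the global 'preprocess' struct and on external helpers
% (GenerateCodeMatrix, Classify, CalculatePerformance, NormalizeRatio,
% AggregatePredByShot) that must be on the MATLAB path.
%
% Example (hypothetical call; the classifier name is only illustrative):
%   run = train_test_multiple_class_AL(X, Y, 1:100, 101:500, 'SVM');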
global preprocess;
% Dataset statistics
num_class = length(preprocess.ClassSet);
actual_num_class = length(preprocess.OrgClassSet);
class_set = preprocess.ClassSet;
coding_matrix = GenerateCodeMatrix(preprocess.MultiClass.CodeType, actual_num_class);
coding_len = size(coding_matrix, 2);
%test_len = num_data - splitboundary;
Y_coding_matrix = (Y == class_set(1)) * coding_matrix;
X_train = X(trainindex, :);
Y_train_coding_matrix = Y_coding_matrix(trainindex, :);
X_test = X(testindex, :);
Y_test_matrix = Y(testindex, :);
Y_test_coding_matrix = Y_coding_matrix(testindex, :);
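% Active-learning loop: in each iteration, (1) train one binary classifier per
% coding-matrix column, (2) decode the binary margins into multi-class
% predictions, (3) score the uncertainty of each test example, and (4) move the
% IncrementSize most uncertain examples from the test pool into the training set.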
for i = 1:preprocess.ActiveLearning.Iteration
    test_len = size(Y_test_coding_matrix, 1);
    Y_compute_matrix = zeros(test_len, actual_num_class);
    Y_uncertainty = zeros(test_len, 1);
    Y_compute_coding_matrix = zeros(test_len, coding_len);
    for j = 1:coding_len
        Y_train_coding = Y_train_coding_matrix(:, j);
        Y_test_coding = Y_test_coding_matrix(:, j);
        % Drop training examples whose code for this column is zero
        % (their class is not used in this binary problem)
        X_train_norm = X_train(Y_train_coding ~= 0, :);
        Y_train_coding_norm = Y_train_coding(Y_train_coding ~= 0, :);
        % Convert the +1/-1 codes back to the labels in class_set
        conv_Y_train_coding_norm = class_set(1) * (Y_train_coding_norm == 1) + class_set(2) * (Y_train_coding_norm == -1);
        conv_Y_test_coding = class_set(1) * (Y_test_coding == 1) + class_set(2) * (Y_test_coding == -1);
        [Y_compute, Y_prob] = Classify(classifier, X_train_norm, conv_Y_train_coding_norm, X_test, conv_Y_test_coding, num_class);
        CalculatePerformance(Y_compute, conv_Y_test_coding, class_set);
        % Map the predicted probability in [0, 1] to a margin-like score in [-1, 1]
        % Y_compute_coding_matrix(:, j) = Y_prob - preprocess.SVMSCutThreshold;
        Y_compute_coding_matrix(:, j) = 2 * Y_prob - 1;
    end
    % Decode: for each test example, compute the total loss between its margin
    % vector and every class code word
    for j = 1:test_len
        for k = 1:actual_num_class
            dl = Y_compute_coding_matrix(j, :) .* coding_matrix(k, :);
            switch (preprocess.MultiClass.LossFuncType)
                case 0
                    loss = 1 ./ (1 + exp(2 * dl)); % sigmoid (smoothed 0-1) loss
                case 1
                    loss = exp(-dl);               % exponential loss
                case 2
                    loss = (dl <= 1) .* (1 - dl);  % hinge loss
            end
            Y_compute_matrix(j, k) = sum(loss);    % total loss for class k's code word
        end
    end
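    % Assign each test example to the class whose code word attains the minimum
    % total loss; the winning entry is set to class_set(1), all others to class_set(2)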
    [Y_loss, Y_loss_index] = min(Y_compute_matrix, [], 2);
    for j = 1:test_len
        Y_compute_matrix(j, :) = (Y_compute_matrix(j, :) == Y_loss(j)) * class_set(1) + (Y_compute_matrix(j, :) ~= Y_loss(j)) * class_set(2);
    end
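    % Score the uncertainty of every remaining test example from its margin
    % vector; preprocess.MultiClass.ProbEstimation selects the scoring rule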
    for j = 1:test_len
        switch (preprocess.MultiClass.ProbEstimation)
            case 0
                % Loss of the winning class's code word
                dl = Y_compute_coding_matrix(j, :) .* coding_matrix(Y_loss_index(j), :);
                Y_uncertainty(j) = SumLossFunc(dl, coding_len);
            case 1
                % Loss against the all-positive plus the all-negative code word
                dl = Y_compute_coding_matrix(j, :) .* ones(1, coding_len);
                Y_uncertainty(j) = SumLossFunc(dl, coding_len);
                dl = Y_compute_coding_matrix(j, :) .* ones(1, coding_len) * -1;
                Y_uncertainty(j) = Y_uncertainty(j) + SumLossFunc(dl, coding_len);
            case 2
                % Flag examples whose margin attains the column-wise minimum absolute value
                Y_uncertainty(j) = any(abs(Y_compute_coding_matrix(j, :)) == min(abs(Y_compute_coding_matrix)));
        end
    end
    % Per-class performance on the current test pool
    for j = 1:actual_num_class
        Y_compute = Y_compute_matrix(:, j);
        Y_test = Y_test_matrix(:, j);
        [run_class.yy(j), run_class.yn(j), run_class.ny(j), run_class.nn(j), run_class.prec(j), run_class.rec(j), run_class.F1(j),...
            run_class.err(j)] = CalculatePerformance(Y_compute, Y_test, class_set);
    end
    % Collapse the per-class matrices to one class index per example
    [Y_compute, junk] = find(Y_compute_matrix');
    [Y_test, junk] = find(Y_test_matrix');
    run.Y_compute = Y_compute; run.Y_prob = Y_loss; run.Y_test = Y_test;
    % Aggregate the predictions within a shot when shot information is available
    if (preprocess.ShotAvailable == 1)
        [Y_compute_agg, Y_prob_agg, Y_test_agg] = AggregatePredByShot(Y_compute, Y_prob, Y_test, testindex);
    else
        Y_compute_agg = Y_compute; Y_test_agg = Y_test;
    end
    [junk, junk, junk, junk, run.Micro_Prec, run.Micro_Rec, run.Micro_F1, run.Err] = CalculatePerformance(Y_compute_agg, Y_test_agg, preprocess.OrgClassSet);
    run.Macro_Prec = sum(run_class.prec) / actual_num_class;
    run.Macro_Rec = sum(run_class.rec) / actual_num_class;
    run.Macro_F1 = NormalizeRatio(2 * run.Macro_Prec * run.Macro_Rec, run.Macro_Prec + run.Macro_Rec);
    %run.Micro_Prec = NormalizeRatio(sum(run_class.yy), sum(run_class.yy) + sum(run_class.ny));
    %run.Micro_Rec = NormalizeRatio(sum(run_class.yy), sum(run_class.yy) + sum(run_class.yn));
    %run.Micro_F1 = NormalizeRatio(2 * run.Micro_Prec * run.Micro_Rec, run.Micro_Prec + run.Micro_Rec);
    %run.Err = 1 - run.Micro_F1;
    RunClass(i) = run;
    fprintf('Iter %d: Train Size = %d, Error = %f\n Macro_Precision = %f, Macro_Recall = %f, Macro_F1 = %f\n Micro_Precision = %f, Micro_Recall = %f, Micro_F1 = %f\n', ...
        i, size(X_train, 1), run.Err, run.Macro_Prec, run.Macro_Rec, run.Macro_F1, run.Micro_Prec, run.Micro_Rec, run.Micro_F1);
    % Query the most uncertain test examples: append them to the training set,
    % then remove them from the test pool
    %[C, Index] = sort(abs(Y_compute_coding_matrix));
    [C, Index] = sort(-Y_uncertainty);
    for k = 1:preprocess.ActiveLearning.IncrementSize
        X_train = [X_train; X_test(Index(k), :)];
        Y_train_coding_matrix = [Y_train_coding_matrix; Y_test_coding_matrix(Index(k), :)];
    end
    Index = Index(1:preprocess.ActiveLearning.IncrementSize);
    Y_uncertainty(Index) = [];
    X_test(Index, :) = [];
    Y_test_matrix(Index, :) = [];
    Y_test_coding_matrix(Index, :) = [];
    % if (preprocess.ShotAvailable == 1)
    %     ShotInfo = preprocess.ShotInfo(testindex(Index));
    %     disp(ShotInfo');
    % end;
    testindex(Index) = [];
end
run.Stage_Result = RunClass;
function SumCertainty = SumLossFunc(dl, coding_len)
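% Sum a per-bit uncertainty score over the coding positions of the margin-times-code
% vector dl; preprocess.MultiClass.UncertaintyFuncType selects the per-bit function.
% Larger return values are treated as more uncertain by the caller.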
global preprocess;
switch (preprocess.MultiClass.UncertaintyFuncType)
    case 0
        uncertainty = 1 ./ (1 + exp(2 * dl)); % sigmoid (smoothed 0-1) loss
    case 1
        uncertainty = exp(-dl);               % exponential loss
    case 2
        uncertainty = (dl <= 1) .* (1 - dl);  % hinge loss
    case 3
        uncertainty = exp(-100 * abs(dl));    % sharply peaked around dl = 0 (minimum abs(dl))
    case 4
        % Log loss, applied element-wise and zeroed where dl <= -1
        uncertainty = zeros(size(dl));
        uncertainty(dl > -1) = log(1 ./ (1 + dl(dl > -1)));
    case 5
        uncertainty = rand(1, coding_len);    % random scores (random sampling)
end
SumCertainty = sum(uncertainty);