📄 lssvcmodoutb2_train.m
字号:
function [lssvcB, zmp, zmn, varztrnp, varztrnn, ztrain, zetanew, zetanewp, zetanewn, alpha, b, sigm,gam,muu, zeta, gameffp, gameffn] = ...
lssvcmodoutb2_train(Xtrain, Ytrain, muu, zeta, kerneltype, sigs, maxsteps, dispOpt)
% Train a Bayesian LS-SVM binary classifier with moderated (probabilistic)
% outputs: fits the LS-SVM, re-estimates the noise hyperparameters per class,
% and computes per-sample latent-output variances needed to moderate outputs.
%
% function [lssvcB, zmp, zmn, varztrnp, varztrnn, ztrain, zetanew, zetanewp, zetanewn, alpha, b, sigm,gam,muu, zeta, gameffp, gameffn] = ...
% lssvcmodoutb2_train(Xtrain, Ytrain, muu, zeta, kerneltype, sigs, maxsteps, dispOpt)
% Input -----
% Xtrain: Input data matrix (Nxd: N samples and d variables) for training
% Ytrain: class label data (Nx1, labels +1/-1) for training
% muu, zeta: initial values for the hyperparameters in Bayesian training
% kerneltype: ('rbf', 'lin', 'cwrbf', 'cwlin' or 'cwpol')
% sigs: vector of initial candidate sigmas for optimization (used in
% optSigGamLssvmBay.m), e.g. [0.5, 1, 10, 20], or []
% maxsteps: maximum runs of support vector removal in function lssvcbs
% (for use in sparseness approximation where the non-negative
% support values were set to all zero)
% if maxsteps==-1 then no sparseness approximation is applied.
% dispOpt: display details of each run of Bayesian hyperparameters during sparseness approximation
% Output -----
% lssvcB: trained model struct holding the kernel choice, support values,
% bias, hyperparameters, per-class statistics, and all quantities
% needed to compute moderated outputs on new data
% zmp, zmn: mean of the latent output of the training samples for + and - class, respectively
% varztrnp, varztrnn: the variance for each training sample resulting from
% the uncertainty in parameters for + and - class, respectively.
% Note: gam = zeta/muu (LS-SVM regularization constant).
% gam=zeta/muu;
% Default: up to 20 support-vector-removal passes in the sparseness approximation.
if nargin<7,
maxsteps=20;
end,
% Default: do not display per-run hyperparameter details.
if nargin<8,
dispOpt=0;
end,
% if kerneltype == 'rbf'
% gebr_kernel = 'RBFkernel';
% else
% gebr_kernel = 'LINkernel';
% end
% Map the kernel-type string onto the kernel function name evaluated below
% ("gebr_kernel" is Dutch for "used kernel"). The 'cw*' variants use the
% additive (componentwise) kernel implementations.
if strcmp(kerneltype,'rbf')
gebr_kernel = 'RBFkernel';
elseif strcmp(kerneltype,'lin')
gebr_kernel = 'LINkernel';
elseif strcmp(kerneltype,'cwrbf'),
gebr_kernel= 'RBFaddkernel';
elseif strcmp(kerneltype,'cwlin')|strcmp(kerneltype,'cwpol'),
gebr_kernel= 'LINaddkernel';
end
% Bayesian LS-SVM training (with optional sparseness approximation):
% returns support values alpha, bias b, kernel parameter sig2, gam = zeta/muu,
% updated muu/zeta, the evidence term L3, and effective dof gameffsps.
% NOTE(review): exact semantics of L3/gameffsps are defined in lssvcbs — confirm there.
[alpha,b,sig2,gam,muu, zeta,L3,gameffsps]=lssvcbs(Xtrain,Ytrain,kerneltype, maxsteps, dispOpt,sigs);
sigm=sqrt(sig2);
% Latent (real-valued) outputs of the trained LS-SVM on the training set itself.
[Ysimtrain] = lssvcoutput(alpha,b,sig2, Xtrain,Ytrain,Xtrain,[],kerneltype);
%if 0, [Ysimval] = lssvcoutput(alpha,b,sig2, Xtrain,Ytrain,Xval,[],kerneltype); end
ztrain = Ysimtrain;
%if 0, z = Ysimval; end
% Per-class indices, counts, and means of the latent outputs.
Mplusindex = find(Ytrain == 1); Mminindex = find(Ytrain == -1);
Nplus = length(Mplusindex); Nmin = length(Mminindex);
zmp = mean(Ysimtrain(Mplusindex)); zmn = mean(Ysimtrain(Mminindex));
% Residuals of the latent outputs about their own class mean.
e = zeros(size(Ysimtrain));
e(Mplusindex)= Ysimtrain(Mplusindex) - zmp;
e(Mminindex)= Ysimtrain(Mminindex) - zmn;
%%%%%%%%%%%%%%%%%%%%%%%%
%%% Compute the variance
%%%%%%%%%%%%%%%%%%%%%%%%
% initialization
%--------------
Ntr= length(Ytrain);
%----- Remove the validation part --- by LC
% Dead code kept for reference: per-sample posterior quantities on a
% validation set (Ysimval is only produced by the disabled call above).
if 0
Nval = length(Ysimval);
roMAP = zeros(Nval,1);
varrhoM = zeros(Nval,1);
varrhoP = zeros(Nval,1);
Py_een = zeros(Nval,1);
Py_mineen = zeros(Nval,1);
end
% Eigen-decomposition of K + eigenvectors of G from (H = alpha I + beta Gs)
%---------------------------------------------------------------------
% NOTE(review): KK0 is a precomputed global set elsewhere; for 'rbf' it is
% assumed to hold the (negated, scaled) pairwise distances so that
% exp(KK0/sig2) is the RBF Gram matrix, and otherwise an inner-product
% matrix raised elementwise to sig2 — confirm against where KK0 is built.
global KK0,
% Componentwise kernels: build the Gram matrix entry by entry via eval
% (only the lower triangle is computed; symmetry fills the rest).
if kerneltype(1:2)=='cw',
for i=1:Ntr,
for j=1:i,
KK(i,j) = eval([gebr_kernel,'(Xtrain(i,:),Xtrain(j,:),sigm)']);
KK(j,i) = KK(i,j);
end
end
else
pK=KK0;
if kerneltype=='rbf',
KK=exp(pK/sig2);
else
% Non-RBF path reuses sig2 as the elementwise exponent of the
% precomputed matrix (degree-like parameter).
sig2=sigm;
KK=pK.^sig2;
end,
end
%KK=zeros(Ntr);
%for i=1:Ntr,
% for j=1:i,
% KK(i,j) = eval([gebr_kernel,'(Xtrain(i,:),Xtrain(j,:),sigm)']);
% KK(j,i) = KK(i,j);
%end
%end
% Centering matrix Ts; KKc is the kernel matrix centered in feature space.
Ts = eye(Ntr) - 1/Ntr*ones(Ntr);
KKc = Ts*KK*Ts;
% SVD of KKc with a +0.5 diagonal shift (numerical conditioning);
% the shift is subtracted from the singular values right after.
[mul, roK, t] = svd(KKc + 0.5*eye(size(KK, 1))); clear t
roK = diag(roK); roK = roK - 0.5; index0 = find(roK <0);
% Clamp small negative eigenvalues (numerical noise) to zero.
if ~isempty(index0), roK(index0) = zeros(size(index0)); end
% Keep only the numerically significant eigenpairs.
index_eff_eig = find(roK > 100000*eps);
Neff = length(index_eff_eig);
mul = mul(:,index_eff_eig);
diag_rol_eff = abs(roK(index_eff_eig));
% Effective number of parameters from the eigenvalue spectrum.
gameff = 1 + sum( (gam*real(roK)) ./ ( 1+gam*real(roK) ) );
% Re-estimated noise precision from residuals and effective dof.
zetanew = 1/(sum( e.^2)/(Ntr - gameff));
% Split the effective dof across classes proportionally to class size.
gameffp=gameff*Nplus/Ntr;
gameffn=gameff*Nmin/Ntr;
%gameffp= 0.5 + sum( (gam*real(roK(Mplusindex))) ./ (1+gam*real(roK(Mplusindex)) ) );
%gameffn= 0.5 + sum( (gam*real(roK(Mminindex))) ./ (1+gam*real(roK(Mminindex)) ) );
% Per-class noise precision estimates.
zetanewp = 1/(sum( e(Mplusindex).^2)/(Nplus - gameffp));
zetanewn = 1/(sum( e(Mminindex).^2)/(Nmin - gameffn));
% Normalize eigenvectors so that mul_n(:,i)'*KKc*mul_n(:,i) == 1.
c = zeros(Neff,1);
mul_n = zeros(size(mul));
for i = 1:Neff,
c(i) = 1/sqrt(real(mul(:,i)'*KKc*mul(:,i)));
mul_n(:,i) = mul(:,i)*c(i);
end
% Class-conditional kernel row averages and class-mean kernel self-terms,
% used in the per-sample variance expressions below.
Mpluskernel = ones(1, Nplus)*KK(Mplusindex, :)/Nplus; Mminkernel = ones(1, Nmin)*KK(Mminindex, :)/Nmin;
Lmplus = 1/Nplus^2*ones(1, Nplus)*KK(Mplusindex, Mplusindex)*ones(Nplus, 1);
Lmmin = 1/Nmin^2*ones(1, Nmin)*KK(Mminindex, Mminindex)*ones(Nmin, 1);
% Diagonal of the posterior covariance correction in the eigenbasis.
Diagmatrix = 1/muu - 1./(zeta*diag_rol_eff+muu);
% Per-sample variance of the latent output relative to each class mean.
varztrnp = zeros(Ntr,1); varztrnn = zeros(Ntr,1);
for i = 1:Ntr,
Lparttrnp = (KK(i,:) - Mpluskernel)*Ts*mul_n;
Lparttrnn = (KK(i,:) - Mminkernel)*Ts*mul_n;
Lxm = KK(i,i)-2*sum(KK(i,Mplusindex))/Nplus+Lmplus;
Lxn = KK(i,i)-2*sum(KK(i,Mminindex))/Nmin+Lmmin;
varztrnp(i) = Lxm/muu - Lparttrnp.^2*Diagmatrix;
varztrnn(i) = Lxn/muu - Lparttrnn.^2*Diagmatrix;
end
% Pack everything a prediction routine needs into the model struct.
lssvcB.kerneltype=kerneltype;
lssvcB.Xtrain=Xtrain;
lssvcB.Ytrain=Ytrain;
lssvcB.sig2=sig2;
lssvcB.alpha=alpha;
lssvcB.b=b;
lssvcB.gameffsps=gameffsps;
lssvcB.gameff=gameff;
lssvcB.neff=Neff;
lssvcB.zetap=zetanewp;
lssvcB.zetan=zetanewn;
lssvcB.zmp=zmp;
lssvcB.zmn=zmn;
% Class priors estimated from training frequencies.
lssvcB.pip=Nplus/Ntr;
lssvcB.pin=Nmin/Ntr;
% NOTE(review): Lxm here holds only the LAST loop iteration's value
% (sample i==Ntr) — looks unintentional; confirm against the consumer.
lssvcB.Lxm=Lxm;
lssvcB.mul_n=mul_n;
lssvcB.Mpluskernel=Mpluskernel;
lssvcB.Mminkernel=Mminkernel;
lssvcB.Lmplus=Lmplus;
lssvcB.Lmmin=Lmmin;
lssvcB.muu=muu;
lssvcB.Diagmatrix=Diagmatrix;
lssvcB.ztrn=ztrain;
lssvcB.varztrnp=varztrnp;
lssvcB.varztrnn=varztrnn;
% Negative evidence term from lssvcbs (model-likelihood score).
lssvcB.ml=-L3;
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -