Re: SMO-matlab code

Posted by Diego Andres Alvarez Marin (diegoandresalvarez@gmx.net) on April 24, 2002 at 01:25:41:
: : Does anybody know where MATLAB code for the SMO algorithm can be obtained?
: Hey... take a look at the following code.
: This is a function I implemented for use with Gavin Cawley's MATLAB Support Vector Machine Toolbox
: (c) September 2000.
: Diego Andres Alvarez.
: function net = train(tutor, X, Y, C, kernel, alpha_init, bias_init)
: % Train a support vector classification network, using the sequential minimal
: % optimisation algorithm.
: %
: % net = train(tutor, x, y, net);
: % net = train(tutor, x, y, C, kernel);
: % net = train(tutor, X, Y, C, kernel, alpha_init, bias_init)
: %
: % where:
: % tutor = tutor object
: % x = training inputs
: % y = training targets (labels in {-1,+1})
: % C = Upper bound - non-separable case (optional, defaults C=Inf)
: % kernel = kernel function (optional, defaults kernel=linear)
: % net = svc object (optional)
: % alpha_init = initial Lagrange multipliers (optional, defaults to zeros)
: % bias_init = initial value of the threshold b (optional, defaults to 0)
: %
: % if kernel, alpha_init or bias_init is the string 'NOBIAS', then no
: % threshold b is used and it is fixed at 0
: % File : @quadprogsvctutor/train.m
: % Author : Diego Andres Alvarez Marin
: % Description : Part of an object-oriented implementation of Vapnik's
: % Support Vector Machine, as described in [1].
: %
: % References :
: % V.N. VAPNIK, "The Nature of Statistical Learning Theory",
: % Springer-Verlag, New York, ISBN 0-387-94559-8, 1995.
: %
: % PLATT, J. C. (1998).
: % Fast training of support vector machines using sequential minimal
: % optimization. In Schölkopf, B., Burges, C., and Smola, A. J., editors,
: % Advances in Kernel Methods: Support Vector Learning, chapter 12,
: % pages 185-208. MIT Press, Cambridge, Massachusetts.
: % History : May 15/2001 - v1.00
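: %
: % Example (not from the original post; it assumes the toolbox's linear
: % kernel constructor and the quadprogsvctutor class this file belongs to):
: %   tutor = quadprogsvctutor;
: %   net = train(tutor, X, Y, 10, linear); %soft-margin SVC with C = 10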
: if (nargin<3 | nargin>7) % check correct number of arguments first
: help svc
: return;
: end;
: if size(Y, 2) ~= 1 | ~isreal(Y)
: error('y must be a real double precision column vector');
: end
: n = size(Y, 1);
: if n ~= size(X, 1)
: error('x and y must have the same number of rows');
: end
: if nargin == 4 & isa(C, 'svc')
: net = C;
: C = get(net,'C');
: kernel = get(net,'kernel');
: else
: if nargin < 4, C = Inf; end;
: if nargin < 5, kernel = linear; end;
: end;
: NOBIAS = 0;
: switch nargin
: case 5
: if ischar(kernel) & strcmp(kernel,'NOBIAS')
: NOBIAS = 1;
: kernel = linear; %the flag took the kernel's place, so restore the default
: end;
: case 6
: if ischar(alpha_init) & strcmp(alpha_init,'NOBIAS')
: NOBIAS = 1;
: end;
: case 7
: if ischar(bias_init) & strcmp(bias_init,'NOBIAS')
: NOBIAS = 1;
: bias_init = 0; %no threshold is used, so fix it at zero
: end;
: end;
: if nargin == 7
: if n ~= size(alpha_init, 1)
: error('alpha must be a real double precision column vector with the same size as y');
: end
: if any(alpha_init < 0)
: error('the Lagrange multipliers alpha must be non-negative')
: end;
: else
: alpha_init = zeros(n,1); %initialise the Lagrange multipliers to zero
: bias_init = 0; %initialise the threshold to zero
: end;
: fprintf('\n\nSequential Minimal Optimization: SVMs for Classification\n')
: fprintf( '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n')
: tic;
: if NOBIAS
: SMO = SMOTutorNOBIAS(X, Y, C, kernel, alpha_init, bias_init);
: else
: SMO = SMOTutor(X, Y, C, kernel, alpha_init, bias_init);
: end;
: fprintf('Execution time: %4.1f seconds\n',toc);
: sv = X;
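: %note: every training point is stored as a "support vector"; points that
: %are not support vectors simply receive a zero weight below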
: w = (SMO.alpha.*Y)'; %output weights: the dual coefficients alpha_i*y_i
: net = svc(kernel, sv, w, SMO.bias, C);
: fprintf('Epochs : %d\n',SMO.epochs);
: fprintf('|w0|^2 : %f\n',sum(SMO.alpha));
: fprintf('Margin : %f\n',1/sqrt(sum(w.*w)));
: NUMSV = nonZeroLagrangeMultipliers;
: fprintf('Support Vectors : %d (%3.1f%%)\n\n',NUMSV,100*NUMSV/n);
: return;
: function RESULT = SMOTutor(x,y,C,kernel,alpha_init,bias_init)
: %Implementation of the Sequential Minimal Optimization (SMO)
: %training algorithm for Vapnik's Support Vector Machine (SVM)
: global SMO;
: [ntp,d] = size(x);
: %initialise the state variables
: SMO.epsilon = svtol(C); SMO.tolerance = KKTtol;
: SMO.x = x; SMO.y = y;
: SMO.C = C; SMO.kernel = kernel;
: SMO.alpha = alpha_init; SMO.bias = bias_init;
: SMO.ntp = ntp; %number of training points
: %CACHES:
: SMO.Kcache = evaluate(kernel,x,x); %kernel evaluations
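: %(the full ntp-by-ntp kernel matrix is cached up front, trading
: %O(ntp^2) memory for speed)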
: SMO.error = zeros(SMO.ntp,1); %error
: if ~any(SMO.alpha)
: %since every alpha(i) is zero, fwd(i) is zero as well
: SMO.error = -y;
: else
: SMO.error = fwd(1:ntp) - y;
: end;
: numChanged = 0; examineAll = 1;
: epoch = 0;
: %The loop ends when a full pass over the data produces no changes.
: %Otherwise, passes over all points alternate with passes over the
: %likely support vectors until every support vector has been found.
: while (numChanged > 0) | examineAll
: numChanged = 0;
: if examineAll
: %loop over all points
: for i = 1:ntp
: numChanged = numChanged + examineExample(i);
: end;
: else
: %loop over the candidate KKT violators
: for i = 1:ntp
: %only the non-bound points (0 < alpha < C) are examined
: if (SMO.alpha(i)>SMO.epsilon) & (SMO.alpha(i)<(SMO.C-SMO.epsilon))
: numChanged = numChanged + examineExample(i);
: end;
: end;
: end;
:
: if (examineAll == 1)
: examineAll = 0;
: elseif (numChanged == 0)
: examineAll = 1;
: end;
:
: epoch = epoch+1;
: % trerror = 1; %100*sum((error)<0)/ntp;
: % fprintf('Epoch: %d, TR Error: %g%%, numChanged: %d, alpha>0: %d, 0<alpha<C: %d \n',...
: % epoch,...
: % trerror,...
: % numChanged,...
: % nonZeroLagrangeMultipliers,...
: % nonBoundLagrangeMultipliers);
:
: %write the results (w, b, error) to disk
: end;
: SMO.epochs = epoch;
: RESULT = SMO;
: return;
: function RESULT = nonZeroLagrangeMultipliers;
: global SMO;
: RESULT = sum(SMO.alpha>SMO.epsilon);
: return;
: function RESULT = nonBoundLagrangeMultipliers;
: global SMO;
: RESULT = sum((SMO.alpha>SMO.epsilon) & (SMO.alpha<(SMO.C-SMO.epsilon)));
: return;
: function RESULT = fwd(n)
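: %evaluates the SVM output f(x(n)) = sum_i alpha(i)*y(i)*K(x(i),x(n)) - bias
: %for the requested indices n, using the cached kernel matrix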
: global SMO;
: LN = length(n);
: RESULT = -SMO.bias + sum(repmat(SMO.y,1,LN) .* repmat(SMO.alpha,1,LN) .* SMO.Kcache(:,n))';
: return;
: function RESULT = examineExample(i2)
: %The first-choice heuristic has already picked i2; examineExample now
: %searches for a second point i1 so that an optimisation step can be
: %taken over the two Lagrange multipliers
: global SMO;
: alpha2 = SMO.alpha(i2); y2 = SMO.y(i2);
: if ((alpha2 > SMO.epsilon) & (alpha2 < (SMO.C-SMO.epsilon)))
: e2 = SMO.error(i2);
: else
: e2 = fwd(i2) - y2;
: end;
: % r2 = e2*y2 = (f2 - y2)*y2 = f2*y2 - 1, since y2^2 = 1.
: % r2 < 0 if point i2 lies inside the margin band between the (-1) and
: % (+1) hyperplanes; otherwise r2 >= 0.
: r2 = e2*y2;
: %KKT conditions:
: % r2>0 and alpha2==0 (well classified)
: % r2==0 and 0<alpha2<C (support vectors at margins)
: % r2<0 and alpha2==C (support vectors between margins)
: %
: % Test the KKT conditions for the current i2 point.
: %
: % If a point is well classified, its alpha must be 0; if it lies on the
: % wrong side of its margin, its alpha must be C; and if it sits exactly
: % at the margin, its alpha must satisfy 0 < alpha2 < C.
: %take action only if i2 violates Karush-Kuhn-Tucker conditions
: if ((r2 < -SMO.tolerance) & (alpha2 < (SMO.C-SMO.epsilon))) | ...
: ((r2 > SMO.tolerance) & (alpha2 > SMO.epsilon))
: % If it doesn't violate the KKT conditions then exit; otherwise continue.
:
: %Try i2 by three ways; if successful, then immediately return 1;
: RESULT = 1;
: % First, the routine tries to find an i1 Lagrange multiplier that
: % maximizes the measure |E1-E2|: the larger this value, the bigger
: % the step taken on the dual objective function.
: % In this first test, only the non-bound support vectors are tried.
:
: POS = find((SMO.alpha > SMO.epsilon) & (SMO.alpha < (SMO.C-SMO.epsilon)));
: [MAX,i1] = max(abs(e2 - SMO.error(POS)));
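The post is cut off at this point: the rest of examineExample and the takeStep routine it calls never made it into the message. The sketch below reconstructs the missing part following the pseudocode in Platt (1998), cited in the header above; it is not Diego's original code, though it reuses the SMO global, the fwd helper and the epsilon/tolerance fields defined in the listing. Note that i1 from the max above indexes into POS, so it must be mapped back first.

%--- reconstructed continuation of examineExample (after Platt, 1998) ---
if ~isempty(POS)
i1 = POS(i1); %map the argmax back to a training-set index
if takeStep(i1, i2), RESULT = 1; return; end;
end;
%Second heuristic: try the remaining non-bound points from a random start
start = floor(rand*SMO.ntp) + 1;
for i1 = [start:SMO.ntp 1:(start-1)]
if (SMO.alpha(i1) > SMO.epsilon) & (SMO.alpha(i1) < (SMO.C-SMO.epsilon))
if takeStep(i1, i2), RESULT = 1; return; end;
end;
end;
%Third heuristic: try every point, again from a random start
start = floor(rand*SMO.ntp) + 1;
for i1 = [start:SMO.ntp 1:(start-1)]
if takeStep(i1, i2), RESULT = 1; return; end;
end;
end;
RESULT = 0; %no progress could be made for this i2
return;

function RESULT = takeStep(i1, i2)
%jointly optimises the pair (alpha(i1), alpha(i2)) analytically, then
%updates the threshold and the error cache (reconstruction after Platt, 1998)
global SMO;
RESULT = 0;
if i1 == i2, return; end;
alpha1 = SMO.alpha(i1); y1 = SMO.y(i1);
alpha2 = SMO.alpha(i2); y2 = SMO.y(i2);
if (alpha1 > SMO.epsilon) & (alpha1 < (SMO.C-SMO.epsilon))
e1 = SMO.error(i1);
else
e1 = fwd(i1) - y1;
end;
if (alpha2 > SMO.epsilon) & (alpha2 < (SMO.C-SMO.epsilon))
e2 = SMO.error(i2);
else
e2 = fwd(i2) - y2;
end;
s = y1*y2;
%ends of the feasible segment of the constraint line
if s < 0
L = max(0, alpha2-alpha1); H = min(SMO.C, SMO.C+alpha2-alpha1);
else
L = max(0, alpha1+alpha2-SMO.C); H = min(SMO.C, alpha1+alpha2);
end;
if L == H, return; end;
k11 = SMO.Kcache(i1,i1); k12 = SMO.Kcache(i1,i2); k22 = SMO.Kcache(i2,i2);
eta = 2*k12 - k11 - k22; %second derivative of the dual along the line
if eta >= 0
%degenerate case; this sketch simply skips the pair (Platt instead
%evaluates the objective at both segment ends and keeps the better one)
return;
end;
a2 = alpha2 - y2*(e1-e2)/eta; %unconstrained maximum...
a2 = min(max(L, a2), H); %...clipped to the feasible segment
if abs(a2-alpha2) < SMO.epsilon*(a2+alpha2+SMO.epsilon), return; end;
a1 = alpha1 + s*(alpha2-a2);
%thresholds that make the KKT conditions hold at x1 and x2 respectively
b1 = SMO.bias + e1 + y1*(a1-alpha1)*k11 + y2*(a2-alpha2)*k12;
b2 = SMO.bias + e2 + y1*(a1-alpha1)*k12 + y2*(a2-alpha2)*k22;
bnew = (b1+b2)/2;
%refresh the cached errors before storing the new multipliers
SMO.error = SMO.error + y1*(a1-alpha1)*SMO.Kcache(:,i1) ...
+ y2*(a2-alpha2)*SMO.Kcache(:,i2) + SMO.bias - bnew;
SMO.bias = bnew;
SMO.alpha(i1) = a1; SMO.alpha(i2) = a2;
RESULT = 1;
return;

Averaging b1 and b2 is a common simplification: when at least one multiplier ends up non-bound the two values coincide, and when both are at bound any threshold between them satisfies the KKT conditions.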