⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 re smo-matlab code.htm

📁 svm的分类和应用~~有详细的例子,非常实际和好用~~
💻 HTM
📖 第 1 页 / 共 3 页
字号:
:    if ~isempty(i1)
:       if takeStep(i1, i2, e2), return; end;
:    end;
:    
:    %The second heuristic choose any Lagrange Multiplier that is a SV and tries to optimize
:    for i1 = randperm(SMO.ntp)
:       if (SMO.alpha(i1) > SMO.epsilon) & (SMO.alpha(i1) < (SMO.C-SMO.epsilon))
:          %if a good i1 is found, optimise
:          if takeStep(i1, i2, e2), return; end;
:       end
:    end
:    
:    %if both heuristc above fail, iterate over all data set 
:    for i1 = randperm(SMO.ntp)
:       if ~((SMO.alpha(i1) > SMO.epsilon) & (SMO.alpha(i1) < (SMO.C-SMO.epsilon)))
:          if takeStep(i1, i2, e2), return; end;
:       end
:    end;
: end;   

: %no progress possible
: RESULT = 0;
: return;

: 
function RESULT = takeStep(i1, i2, e2)
% Attempt the joint optimisation of the pair of Lagrange multipliers
% (alpha(i1), alpha(i2)) as described in Platt's SMO paper.
%   i1, i2 : indexes of the two training patterns to optimise
%   e2     : prediction error of pattern i2 (fwd(i2) - y(i2)), supplied
%            by the caller so it is not recomputed here
% Returns RESULT = 1 when the multipliers were changed, 0 otherwise.
% Uses and updates the global SMO structure (alpha, bias, error cache).

global SMO;
RESULT = 0;
if (i1 == i2), return; end;

% fetch the quantities involved in the update
alpha1 = SMO.alpha(i1);          alpha2 = SMO.alpha(i2);
y1     = SMO.y(i1);              y2     = SMO.y(i2);
C      = SMO.C;                  K      = SMO.Kcache;

% compute upper and lower constraints, L and H, on multiplier a2
s   = y1*y2;
if (y1 ~= y2)
   L = max(0, alpha2-alpha1);    H = min(C, alpha2-alpha1+C);
else
   L = max(0, alpha1+alpha2-C);  H = min(C, alpha1+alpha2);
end;

if (L == H), return; end;

% error on pattern i1: taken from the cache when alpha1 is non-bound
% (the cache is only kept exact for non-bound points), else recomputed
if (alpha1 > SMO.epsilon) & (alpha1 < (C-SMO.epsilon))
   e1 = SMO.error(i1);
else
   e1 = fwd(i1) - y1;
end;

% compute eta, the second derivative of the objective along the
% constraint line
k11 = K(i1,i1);      k12 = K(i1,i2);      k22 = K(i2,i2);
eta = 2.0*k12-k11-k22;

% recompute Lagrange multiplier for pattern i2
if (eta < 0.0)
   a2 = alpha2 - y2*(e1 - e2)/eta;

   % clip a2 so that it lies between L and H
   if (a2 < L)
      a2 = L;
   elseif (a2 > H)
      a2 = H;
   end;
else
   % When eta is not negative, the objective function W should be
   % evaluated at each end of the line segment. Only those terms in the
   % objective function that depend on alpha2 need be evaluated.
   ind = find(SMO.alpha>0);

   aa2 = L;                         aa1 = alpha1 + s*(alpha2-aa2);
   Lobj = aa1 + aa2 + sum((-y1*aa1/2).*SMO.y(ind).*K(ind,i1) + (-y2*aa2/2).*SMO.y(ind).*K(ind,i2));

   aa2 = H;                         aa1 = alpha1 + s*(alpha2-aa2);
   Hobj = aa1 + aa2 + sum((-y1*aa1/2).*SMO.y(ind).*K(ind,i1) + (-y2*aa2/2).*SMO.y(ind).*K(ind,i2));

   % BUGFIX: follow Platt's pseudo-code — move to whichever segment end
   % has the larger objective, and make no change on a tie. The original
   % tested (Lobj > Hobj-epsilon) in the second branch, which chose H in
   % the tie band and left a2 unchanged when H was clearly better.
   if (Lobj > Hobj+SMO.epsilon)
      a2 = L;
   elseif (Lobj < Hobj-SMO.epsilon)
      a2 = H;
   else
      a2 = alpha2;
   end;
end;

% change in a2 too small to matter: report no progress
if (abs(a2-alpha2) < SMO.epsilon*(a2+alpha2+SMO.epsilon))
   return;
end;

% recompute Lagrange multiplier for pattern i1 (keeps the linear
% constraint y1*a1 + y2*a2 constant)
a1 = alpha1 + s*(alpha2-a2);

w1   = y1*(a1 - alpha1);         w2   = y2*(a2 - alpha2);

% update threshold to reflect change in Lagrange multipliers: b1 (b2)
% makes the KKT condition hold at i1 (i2); both are valid, and are
% averaged, when both new multipliers end up at a bound
b1   = SMO.bias + e1 + w1*k11 + w2*k12;
bold = SMO.bias;

if (a1>SMO.epsilon) & (a1<(C-SMO.epsilon))
   SMO.bias = b1;
else
   b2 = SMO.bias + e2 + w1*k12 + w2*k22;
   if (a2>SMO.epsilon) & (a2<(C-SMO.epsilon))
      SMO.bias = b2;
   else
      SMO.bias = (b1 + b2)/2;
   end;
end;

% update error cache using new Lagrange multipliers
SMO.error = SMO.error + w1*K(:,i1) + w2*K(:,i2) +  bold - SMO.bias;
SMO.error(i1) = 0.0;                SMO.error(i2) = 0.0;

% update vector of Lagrange multipliers
SMO.alpha(i1) = a1;                 SMO.alpha(i2) = a2;

%report progress made
RESULT = 1;
return;

: %*********************************************************************

function RESULT = SMOTutorNOBIAS(x,y,C,kernel,alpha_init,bias_init);
% Sequential Minimal Optimization (SMO) trainer for Vapnik's Support
% Vector Machine in the variant that keeps the bias fixed at zero.
%   x          : training patterns, one example per row
%   y          : target labels
%   C          : box constraint on the Lagrange multipliers
%   kernel     : kernel descriptor, evaluated through evaluate()
%   alpha_init : initial values for the Lagrange multipliers
%   bias_init  : unused — the bias is forced to zero
% Returns the populated global SMO structure.

global SMO;

[ntp,d] = size(x);

% set up the shared optimisation state
SMO.epsilon = svtol(C);
SMO.x = x;                       SMO.y = y;
SMO.C = C;                       SMO.kernel = kernel;
SMO.alpha = alpha_init;

fprintf('We will not use bias. Setting bias to zero.\n')
SMO.bias = 0; %Implicit bias = 0

SMO.ntp = ntp;                %number of training points

%CACHES:
SMO.Kcache = evaluate(kernel,x,x); %kernel evaluations
SMO.error = zeros(SMO.ntp,1); %error cache

% With every multiplier at zero the machine output fwd() is identically
% zero, so the initial error is just -y; otherwise evaluate the machine.
if any(SMO.alpha)
   SMO.error = fwd(1:ntp) - y;
else
   SMO.error = -y;
end;

examineAll = 1;                  numChanged = 0;
epoch      = 0;

% Alternate between sweeps over the whole data set and sweeps restricted
% to the non-bound multipliers (the likely support vectors); stop when a
% full sweep produces no change.
while examineAll | (numChanged > 0)
   numChanged = 0;
   if examineAll
      % outer loop over every training point
      for k = 1:ntp
         numChanged = numChanged + examineExampleNOBIAS(k);
      end;
   else
      % outer loop over the current non-bound (KKT-violating) points only
      for k = 1:ntp
         if (SMO.alpha(k)>SMO.epsilon) & (SMO.alpha(k)<(SMO.C-SMO.epsilon))
            numChanged = numChanged + examineExampleNOBIAS(k);
         end;
      end;
   end;

   % after a full sweep switch to non-bound sweeps; fall back to a full
   % sweep whenever a restricted sweep makes no progress
   if (examineAll == 1)
      examineAll = 0;
   elseif (numChanged == 0)
      examineAll = 1;
   end;
end;
SMO.epochs = epoch;
RESULT = SMO;
return;

function RESULT = examineExampleNOBIAS(i1)
% In the zero-bias variant there is no equality constraint coupling the
% multipliers, so no second-choice heuristic is needed: each multiplier
% is optimised on its own. Returns 1 if alpha(i1) changed, 0 otherwise.
RESULT = takeStepNOBIAS(i1);
return;

function RESULT = takeStepNOBIAS(i1)
% Single-multiplier SMO update for the zero-bias machine: with the bias
% fixed there is no linear equality constraint, so alpha(i1) can be
% optimised analytically on its own and clipped to the box [0, C].
%   i1 : index of the training pattern to optimise
% Returns RESULT = 1 when alpha(i1) was changed, 0 otherwise.
% Uses and updates the global SMO structure (alpha, error cache).
global SMO;

alpha1 = SMO.alpha(i1);
y1     = SMO.y(i1);
C      = SMO.C;
K      = SMO.Kcache;

% error on pattern i1: cached for non-bound multipliers, else recomputed
if (alpha1 > SMO.epsilon) & (alpha1 < (C-SMO.epsilon))
   e1 = SMO.error(i1);
else
   e1 = fwd(i1) - y1;
end;

% unconstrained optimum along alpha(i1), clipped to lie in [0, C]
a1 = min(max(0,alpha1 - y1*e1/K(i1,i1)),C);

% change too small to matter: report no progress
if (abs(a1-alpha1) < SMO.epsilon*(a1+alpha1+SMO.epsilon))
   RESULT = 0;
   return;
end;

% update error cache using new Lagrange multiplier
SMO.error = SMO.error + y1*(a1 - alpha1)*K(:,i1);
SMO.error(i1) = 0.0;

% update vector of Lagrange multipliers
SMO.alpha(i1) = a1;

%report progress made
RESULT = 1;
return;

</TEXTAREA> </TD></TR>
  <TR>
    <TD>Optional Link (URL): </TD>
    <TD><INPUT size=55 name=url> </TD></TR>
  <TR>
    <TD>Link Title: </TD>
    <TD><INPUT size=55 name=url_title> </TD></TR>
  <TR>
    <TD>Optional Image (URL): </TD>
    <TD><INPUT size=55 name=img> </TD></TR></TBODY></TABLE><INPUT type=submit value="Submit Follow Up"> <INPUT type=reset value=&#37325;&#32622;> 
<P>
<HR width="100%" SIZE=7>

<CENTER>[ <A 
href="http://www.kernel-machines.org/blackboard_bak/messages/454.html#followups">Follow 
Ups</A> ] [ <A 
href="http://www.kernel-machines.org/blackboard_bak/messages/454.html#postfp">Post 
Followup</A> ] [ <A 
href="http://www.kernel-machines.org/blackboard/index.html">Message Board on 
Kernel Machines</A> ] [ <A 
href="http://www.kernel-machines.org/blackboard/faq.html">FAQ</A> 
]</CENTER></FORM></BODY></HTML>

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -