
svcm_train.m

svm_increasement Matlab Program
MATLAB M-file (page 1 of 3)

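% --- Excerpt from svcm_train.m (incremental SVM training loop).  The listing
% below opens inside the debug consistency-check block and runs to the end of
% the main training loop.  It covers, in order:
%   1. debug checks: NaN and box constraints on a, the equality y'*a = 0,
%      the check that Qss*R is the identity, recomputation of the margins g,
%      the energy W, and the partition of indices into support (inds),
%      error (inde) and other (indo) vectors;
%   2. kernel-memory accounting (memcount) and verbose per-iteration output;
%   3. convergence reporting (terse/verbose, training vs. leave-one-out);
%   4. selection of the next candidate vector and the bookkeeping that starts
%      and advances the leave-one-out pass.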
        f = find(isnan(g));
        if any(f)
            fprintf('svcm_train error: g(%g) = %g\n', [f, g(f)]')
        end
        f = find(a>C);
        if any(f)
            fprintf('svcm_train error: a(%g) = %g > C\n', [f, a(f)]')
        end
        f = find(a<0);
        if any(f)
            fprintf('svcm_train error: a(%g) = %g < 0\n', [f, a(f)]')
        end
        f = inde(find(a(inde)>C|a(inde)<C));
        if any(f)
            fprintf('svcm_train error: a(%g) = %g ~= C\n', [f, a(f)]')
        end
        if abs(y'*a)>tol
            fprintf('svcm_train error: y''*a = %g ~= 0 (tol=%g)\n', y'*a, tol);
        end
        Rdiv = max(max(abs(Qss*R-diag(ones(ls+1,1)))));
        if Rdiv>tol
            if flag==2|flag==4    % support vector added
                fprintf('svcm_train error: divergence %g in R expansion (tol=%g; pivot=%g)\n',...
                        Rdiv, tol, pivot);
            elseif flag==3        % support vector removed
                fprintf('svcm_train error: divergence %g in R contraction (tol=%g)\n', Rdiv, tol);
            else                  % no support vector added or removed; strange...
                fprintf('svcm_train error: divergence %g in R ?  (tol=%g)\n', Rdiv, tol);
            end
        end
        if keepe&keepr            % only check g when Qe and Qr are readily available
            greal = Qs'*[b;a(inds)]-(1+eps);
            if le>0
                greal = greal+Qe'*a(inde);
            end
            if lr>0
                greal = greal+Qr'*a(indr);
            end
            if max(abs(greal-g))>tol
                fprintf('svcm_train error: tolerance (tol=%g) exceeded in computation of g\n', tol);
            end
        end
        f = inds(find(abs(g(inds))>tol));
        if any(f)
            fprintf('svcm_train error: g(%g) = %g ~= 0 (tol=%g)\n', [f, g(f), f*0+tol]');
        end
        f = inde(find(g(inde)>tol));
        if any(f)
            fprintf('svcm_train error: g(%g) = %g > 0 (tol=%g)\n', [f, g(f), f*0+tol]');
        end
        Wreal = 0.5*sum((g-b*y-1-eps).*a);        % energy
        if abs(Wreal-W)>tol*abs(W)
            fprintf('svcm_train error: energy W = %g ~= %g (tol=%g)\n', W, Wreal, tol);
        end
        inda = sort([indo;inds;inde]);
        if any(inda~=(1:L)')
            fprintf('svcm_train error: union [indo;inds;inde] does not equate entire set\n');
        end
        if ls~=length(inds)
            fprintf('svcm_train error: miscount in number of support vectors\n');
        end
        if le~=length(inde)
            fprintf('svcm_train error: miscount in number of error vectors\n');
        end
        if lo~=length(indo)
            fprintf('svcm_train error: miscount in number of other vectors\n');
        end
    end
    memcount = max(memcount,ls+keepe*le+keepr*lr+~(keepe&keepr));        % kernel storage
    if verbose
        fprintf('    c: %g (#%g)', y(indc)>0, indc)
        if leaveoneout
            fprintf(', margin: %6.3g, gamma: %6.3g', g(indc)+1, gammac)
        end
        fprintf(' | s: ')
        fprintf('%g', y(inds)>0)
        if ls<6
            fprintf(' (')
            fprintf('#%g', inds)
            fprintf(')')
        end
        fprintf(' | e: 0:%g, 1:%g\n', sum(y(inde)<0), sum(y(inde)>0))
    end
    if converged                        % indc finished; report
        if ~training & ~leaveoneout     % retraining or tracing back; uninteresting
            % nothing
        elseif ~terse & ~leaveoneout
            fprintf('  iteration %3g: %3g support vectors; %3g error vectors;  energy %g\n', ...
                    iter, ls, le, W)
        elseif ~terse % & leaveoneout
            fprintf('  iteration %3g: %3g leave-one-out errors', iter, lw)
            fprintf(' (# %g: margin %g)\n', indc, g(indc)+1)
        elseif ~leaveoneout % & terse
            if ls+le>la
                fprintf('+')
            elseif ls+le<la
                fprintf('-')
            else
                fprintf('=')
            end
        else % leaveoneout & terse
            if g(indc)<-1
                fprintf('x')
            else
                fprintf('o')
            end
        end
        la = ls+le;
    end
    % prepare for next iteration, if any
    indoc = indo(find(indo~=indco));       % indo other than indco (leave-one-out index, if active)
    free = a(indoc)>0|g(indoc)<0;          % candidate support/error vectors in indoc
    if any(free)
        left = indoc(free);                % candidates left, keep (re-)training
        leaveoneout = 0;                   % interrupt leave-one-out, if active
    else % ~any(free)                      % done; finish up and (re-)initiate leave-one-out
        if training                        % first time around (not re-training)
            % print out results of svm training
            if terse
                fprintf('\n\n  %g support vectors; %g error vectors; energy %g\n\n', ...
                        ls, le, W)
            else
                fprintf('\n%4g epoch kernel evaluations (%3g%% of run-time)\n',...
                        kernelcount, round(kernelcount/(ls+le)*100))
                fprintf(  '%4g epoch vectors in memory  (%3g%% of data)\n\n',...
                        memcount, round(memcount/(N+1)*100))
            end
            if visualize
                % plot a trajectory
                figure(2)
                h=image(atraj/C*length(gray));
                h2=get(h,'Parent');
                set(h2,'YDir','normal')     % 'image' normally reverts the y axis
                colormap(1-gray)
                xlabel('Iteration')
                ylabel('Coefficients \alpha_{\it{i}}')
                print -deps atraj.eps
                % plot g trajectory
                figure(3)
                gmax = max(max(abs(gtraj)));
                h=image((gtraj/gmax+1)/2*length(gray));
                h2=get(h,'Parent');
                set(h2,'YDir','normal')     % 'image' normally reverts the y axis
                grey = gray;
                redblue = min(grey,1-grey);
                redblue(:,1) = redblue(:,1)+1-grey(:,1);
                redblue(:,2) = 2*redblue(:,2);
                redblue(:,3) = redblue(:,3)+grey(:,3);
                colormap(redblue)
                xlabel('Iteration')
                ylabel('Coefficients {\it{g}_{\it{i}}}')
                print -deps gtraj.eps
                save traj atraj gtraj ctraj
            end
            if debug                        % store final result to compare with retraining later
                afinal = a;
                gfinal = g;
                Wfinal = W;
            end
            % initiate first leave-one-out
            leaveoneout = 1;
            indl = [inds;inde];             % support and error vectors (others already correct)
            indw = indl(g(indl)<-1);        % leave-one-out errors so far (g<-1) ...
            lw = length(indw);              % ... their number
            indl = indl(g(indl)>=-1);       % remove errors so far from leave-one-out stack
            indco = indl(length(indl));     % pick first leave-one-out index; top of stack ...
            left = indco;                   % ... and let indc=indco (untrain; upc=0)
            training = 0;                   % don't ever visit again!
        elseif ~leaveoneout                 % retrained or traced back
            if debug&~any(find(indo==indco))
                % traced back; compare with previously trained results
                if max(abs(a-afinal))>tol
                    fprintf('svcm_train error: final coeffs. a exceed tolerance (tol=%g)\n', tol);
                end
                if max(abs(g-gfinal))>tol
                    fprintf('svcm_train error: final derivatives g exceed tolerance (tol=%g)\n', tol);
                end
                if abs(W-Wfinal)>tol*abs(W)
                    fprintf('svcm_train error: final energy W = %g ~= %g (tol=%g)\n', Wfinal, W, tol);
                end
            end
            if any(indl)
                indco = indl(length(indl)); % leave-one-out index; top of stack
                left = indco;
                leaveoneout = 1;
            else % ~any(indl)               % finished all leave-one-outs; summarize, and done!
                continued = 0;
                ltw = sum(g<-1);            % number of training errors
                if terse
                    fprintf('\n\n  %g leave-one-out errors;', lw)
                    fprintf(' %g training errors\n\n', ltw)
                else % ~terse
                    fprintf('\n%4g training points\n', L)
                    fprintf(  '%4g support/error vectors (%g/%g)\n', la, ls, le)
                    fprintf(  '%4g leave-one-out errors  (%3.1f%%)\n', lw, lw/L*100)
                    fprintf(  '%4g training errors       (%3.1f%%)\n', ltw, ltw/L*100)
                    fprintf('\n%4g epoch kernel evaluations (%3g%% of run-time)\n',...
                            kernelcount, round(kernelcount/(ls+le)*100))
                    fprintf(  '%4g epoch vectors in memory  (%3g%% of data)\n\n',...
                            memcount, round(memcount/(N+1)*100))
                end
                if visualize                % plot g trajectory
                    figure(1)
                    hold off
                    xlabel('\alpha_{\it{c}}')
                    ylabel('\it{g_c}')
                    axis([-0.1*C, 1.1*C, -1.2, 0.2])
                    h=line([0,0,C,C],[-1,0,0,-1]);
                    set(h,'Color',[0 0 0])
                    set(h,'LineStyle',':')
                    set(h,'LineWidth',[0.2])
                    h=line([0,C],[-1,-1]);
                    set(h,'Color',[0 0 0])
                    set(h,'LineStyle','--')
                    set(h,'LineWidth',[1.0])
                    print -deps gctraj.eps
                end
            end
        else % leaveoneout                  % leave-one-out procedure
            if flag==1                      % finished leave-one-out, now trace back
                % note: already decremented indl and updated indw/lw above
                leaveoneout = 0;
                indco = 0;
            else                            % not done yet with leave-one-out ...
                left = indco;               % ... continue with indc=indco
            end
        end
    end
end
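The debug block at the top of this listing verifies the first-order (KKT) conditions that the incremental algorithm maintains for its three index sets: support vectors on the margin (inds), error vectors inside the margin (inde), and the remaining vectors (indo). As a reading aid, the sketch below restates those checks as a standalone helper using the same variable names as svcm_train.m; the function name check_kkt_partition is hypothetical and is not part of the original program.

function ok = check_kkt_partition(a, g, y, C, tol, inds, inde, indo)
% Hypothetical helper (not in svcm_train.m): verify the partition invariants
% that the debug block above checks after every incremental step.
%   support vectors (inds): 0 <= a <= C and g ~= 0   (on the margin)
%   error vectors   (inde): a == C      and g <= 0   (inside the margin)
%   other vectors   (indo): a == 0      and g >= 0   (outside the margin)
% plus the box constraint on all a and the equality constraint y'*a = 0.
ok = 1;
if any(a<-tol) | any(a>C+tol)
    ok = 0; fprintf('box constraint 0 <= a <= C violated\n');
end
if abs(y'*a)>tol
    ok = 0; fprintf('equality constraint y''*a = 0 violated\n');
end
if any(abs(g(inds))>tol)
    ok = 0; fprintf('margin condition g = 0 violated on support vectors\n');
end
if any(g(inde)>tol) | any(abs(a(inde)-C)>tol)
    ok = 0; fprintf('error-vector condition (a = C, g <= 0) violated\n');
end
if any(a(indo)>tol) | any(g(indo)<-tol)
    ok = 0; fprintf('other-vector condition (a = 0, g >= 0) violated\n');
end
if ~isequal(sort([indo; inds; inde]), (1:length(a))')
    ok = 0; fprintf('index sets do not partition the training set\n');
end

Note that in the training loop itself these conditions are only enforced up to tol, and g is only recomputed from the cached kernel blocks Qs, Qe and Qr when both are available (keepe & keepr).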

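One line in the debug block that is easy to misread is the energy check, Wreal = 0.5*sum((g-b*y-1-eps).*a). Assuming g follows the usual incremental-SVM definition g = Q*a + b*y - (1+eps), which matches how greal is assembled from Qs, Qe and Qr above, this expression equals the dual objective 0.5*a'*Q*a - (1+eps)*sum(a) that the loop minimizes. A small self-contained check with arbitrary toy values (none of the values below come from svcm_train.m; eps0 stands in for the code's eps):

% Toy verification that 0.5*sum((g - b*y - 1 - eps).*a) equals the dual
% objective, given g = Q*a + b*y - (1+eps).  All values are arbitrary.
L = 5;
y = [1; 1; -1; 1; -1];
K = gallery('moler', L);                    % any symmetric positive definite matrix
Q = (y*y').*K;                              % label-signed kernel matrix
a = [0.3; 0; 0.7; 0.2; 0.8];  b = 0.1;  eps0 = 1e-6;
g = Q*a + b*y - (1+eps0);
W1 = 0.5*sum((g - b*y - 1 - eps0).*a);      % as computed in svcm_train.m
W2 = 0.5*a'*Q*a - (1+eps0)*sum(a);          % dual objective (up to sign convention)
fprintf('difference: %g\n', abs(W1-W2));    % zero up to round-off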