% gradient_vector_linear_approx_old.m
% (Source recovered from a code-sharing page: "RankNCG algorithm
%  implementation, MATLAB source with accompanying DLL", 123 lines of M code.)
function [gradient]=gradient_vector_linear_approx_old(w,data)
% GRADIENT_VECTOR_LINEAR_APPROX_OLD Gradient vector for the nonlinear
% conjugate gradient optimizer.
%
% Approximate version based on fast erfc summation, which scales as $$O(N)$$.
%
%% Input
%
% * w    ... d x 1 weight vector
% * data ... structure describing the ranking task at hand
%            [see convert_data_to_ranking_format.m], carrying these
%            extra fields:
%
%            data.lambda  - regularization parameter
%            data.epsil   - accuracy for the fast erfc summation
%            data.delta_1 - precompute_delta_1(data)
%            data.p, data.h, data.rx, data.r, data.nn
%                         - parameters for the fast erfc summation
%
%% Output
%
% * gradient ... d x 1 gradient vector evaluated at w
%
%% Signature
%
% Author: Vikas Chandrakant Raykar
% E-Mail: vikas@cs.umd.edu
% Date: September 27, 2006
%
%% See also
%
% convert_data_to_ranking_format,  non_linear_conjugate_gradient, RankNCG_linear_train
%

%--------------------------------------------------------------------------
% Scaled projections: z{s} = (sqrt(3)/(sqrt(2)*pi)) * w' * X{s}
%--------------------------------------------------------------------------

scale=sqrt(3)/(sqrt(2)*pi);

z=cell(1,data.S);
for s=1:data.S
    z{s}=scale*w'*data.X{s};
end

%--------------------------------------------------------------------------
% Accumulate the pairwise terms delta_2 and delta_3 over all edges of the
% preference graph data.G (one row per ordered pair of classes)
%--------------------------------------------------------------------------

delta_2=zeros(data.d,1);
delta_3=zeros(data.d,1);

for edge=1:data.C
    i=data.G(edge,1);
    j=data.G(edge,2);
    delta_2=delta_2+compute_E_minus(data,z,i,j);
    delta_3=delta_3+compute_E_plus(data,z,j,i);
end

%--------------------------------------------------------------------------
% Assemble the gradient: negate the gradient of the (maximized) objective
%--------------------------------------------------------------------------

objective_grad=-data.lambda*w-data.delta_1+0.5*delta_2-0.5*delta_3;
gradient=-objective_grad;

%[gradient]=gradient_vector_linear_approx2(w,data)
%[gradient]=gradient_vector_linear_direct(w,data)
%pause

return

%--------------------------------------------------------------------------
function [E_minus_ij]=compute_E_minus(data,z,i,j)
% COMPUTE_E_MINUS Per-feature fast erfc summation for the class pair (i,j).
%
% For each of the d features, runs FastErfcSummation with the negated
% projections of classes i and j, weighting by the k-th feature row of
% data.X{i}, and sums the result into component k of the output.
%
% * data ... ranking-task structure (see caller for the required fields)
% * z    ... 1 x data.S cell array of scaled projections w'*X{s}
% * i,j  ... class indices into data.X / data.m
%
% Returns E_minus_ij, a d x 1 vector.
%
% NOTE(review): the exact roles of the (N,M,q,x,y,...) arguments are
% defined by the external FastErfcSummation routine — not visible here.

% Hoist loop invariants: class sizes and projections do not depend on k,
% so compute them once instead of on every feature iteration.
N=data.m(i);
M=data.m(j);
x=-z{i};
y=-z{j};

E_minus_ij=zeros(data.d,1);
for k=1:data.d
    % k-th feature row of class i serves as the per-point weights.
    q=data.X{i}(k,:);
    E_minus_ij(k)=sum(FastErfcSummation(N,M,q,x,y,data.p,data.h,data.rx,data.r,data.nn));
end

return

%--------------------------------------------------------------------------
function [E_plus_ji]=compute_E_plus(data,z,j,i)
% COMPUTE_E_PLUS Per-feature fast erfc summation for the class pair (j,i).
%
% Mirror of compute_E_minus, but with the (un-negated) projections of
% classes j and i, weighting by the k-th feature row of data.X{j}.
%
% * data ... ranking-task structure (see caller for the required fields)
% * z    ... 1 x data.S cell array of scaled projections w'*X{s}
% * j,i  ... class indices into data.X / data.m
%
% Returns E_plus_ji, a d x 1 vector.
%
% NOTE(review): the exact roles of the (N,M,q,x,y,...) arguments are
% defined by the external FastErfcSummation routine — not visible here.

% Hoist loop invariants: class sizes and projections do not depend on k,
% so compute them once instead of on every feature iteration.
N=data.m(j);
M=data.m(i);
x=z{j};
y=z{i};

E_plus_ji=zeros(data.d,1);
for k=1:data.d
    % k-th feature row of class j serves as the per-point weights.
    q=data.X{j}(k,:);
    E_plus_ji(k)=sum(FastErfcSummation(N,M,q,x,y,data.p,data.h,data.rx,data.r,data.nn));
end

return

%--------------------------------------------------------------------------


% (Scraped-page footer removed: keyboard-shortcut help text from the
%  hosting site, not part of the original source.)