function [x,FVAL,lambda_out,EXITFLAG,OUTPUT,GRADIENT,HESS]= ...
    nlconst(funfcn,x,lb,ub,Ain,Bin,Aeq,Beq,confcn,OPTIONS,defaultopt,...
    verbosity,gradflag,gradconstflag,hessflag,meritFunctionType,...
    fval,gval,Hval,ncineqval,nceqval,gncval,gnceqval,varargin)
%NLCONST Helper function to find the constrained minimum of a function 
%   of several variables. Called by FMINCON, FGOALATTAIN, FSEMINF and FMINIMAX.
%
%   [X,OPTIONS,LAMBDA,HESS]=NLCONST('FUN',X0,OPTIONS,lb,ub,'GRADFUN',...
%   varargin{:}) starts at X0 and finds a constrained minimum to 
%   the function which is described in FUN. FUN is a four element cell array
%   set up by PREFCNCHK.  It contains the call to the objective/constraint
%   function, the gradients of the objective/constraint functions, the
%   calling type (used by OPTEVAL), and the calling function name. 
%
%   Copyright 1990-2004 The MathWorks, Inc.
%   $Revision: 1.41.6.9 $  $Date: 2004/04/20 23:19:26 $

%
%   meritFunctionType==5 for fseminf
%                    ==1 for fminimax & fgoalattain (can use 0, but 1 is default)
%                    ==0 for fmincon
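%
%   For orientation only (a hedged, comment-only sketch, not part of this
%   file's logic): nlconst is reached through the documented solvers, e.g.
%   a typical fmincon call such as the one below eventually lands here with
%   preprocessed arguments. Option names are standard optimset options;
%   myObj and myNonlcon are hypothetical user functions.
%
%       opts = optimset('fmincon');
%       opts = optimset(opts,'Display','iter','GradObj','on','GradConstr','on');
%       [x,fval,exitflag] = fmincon(@myObj,x0,A,b,Aeq,beq,lb,ub,@myNonlcon,opts);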

% Initialize some parameters
FVAL=[]; lambda_out=[]; OUTPUT=[]; lambdaNLP = []; GRADIENT = []; 

% Handle the output
if isfield(OPTIONS,'OutputFcn')
    outputfcn = optimget(OPTIONS,'OutputFcn',defaultopt,'fast');
else
    outputfcn = defaultopt.OutputFcn;
end
if isempty(outputfcn)
  haveoutputfcn = false;
else
  haveoutputfcn = true;
  xOutputfcn = x; % Last x passed to outputfcn; has the input x's shape
end
stop = false;
% For fminimax and fgoalattain, there are 6 extra varargin elements
% preceding what the user passed in. These need to be removed from the
% varargin passed to the outputfcn. The same is true for fseminf, but
% with only 2 extra arguments.
if strcmp(funfcn{2},'fminimax') ||  strcmp(funfcn{2},'fgoalattain')
    outputfcnVarargin = varargin(7:end);
elseif strcmp(funfcn{2},'fseminf')
    outputfcnVarargin = varargin(3:end);
else
    outputfcnVarargin = varargin;
end
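
% For reference (a hedged, comment-only sketch): an OutputFcn supplied
% through OPTIONS follows the documented interface below; nlconst invokes it
% via callOutputFcn with the trimmed outputfcnVarargin above. myOutFcn is a
% hypothetical name.
%
%   function stop = myOutFcn(x,optimValues,state)
%   stop = false;
%   switch state
%       case 'iter'
%           fprintf('iter %d: f = %g\n',optimValues.iteration,optimValues.fval);
%   end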

iter = 0;
XOUT = x(:);
% numberOfVariables must be the name of this variable
numberOfVariables = length(XOUT);
SD = ones(numberOfVariables,1); 
Nlconst = 'nlconst';
bestf = Inf; 

% Make sure that the constraints Ain,Bin,Aeq,Beq are consistent.
% Only a row consistency check; the column check is done in the caller function.
if ~isempty(Aeq) && ~isequal(size(Aeq,1),length(Beq))
        error('optim:nlconst:AeqAndBeqInconsistent', ...
            'Row dimension of Aeq is inconsistent with length of beq.')
end
if ~isempty(Ain) && ~isequal(size(Ain,1),length(Bin))
        error('optim:nlconst:AinAndBinInconsistent', ...
            'Row dimension of A is inconsistent with length of b.')
end

if isempty(confcn{1})
    constflag = 0;
else
    constflag = 1;
end
stepsize = 1;
HESS=eye(numberOfVariables,numberOfVariables); % initial Hessian approximation.
done = false; 
EXITFLAG = 1;

% Get options
tolX = optimget(OPTIONS,'TolX',defaultopt,'fast');
tolFun = optimget(OPTIONS,'TolFun',defaultopt,'fast');
tolCon = optimget(OPTIONS,'TolCon',defaultopt,'fast');
DiffMinChange = optimget(OPTIONS,'DiffMinChange',defaultopt,'fast');
DiffMaxChange = optimget(OPTIONS,'DiffMaxChange',defaultopt,'fast');
if DiffMinChange >= DiffMaxChange
    error('optim:nlconst:DiffChangesInconsistent', ...
         ['DiffMinChange options parameter is %0.5g, and DiffMaxChange is %0.5g.\n' ...
          'DiffMinChange must be strictly less than DiffMaxChange.'], ...
           DiffMinChange,DiffMaxChange)  
end
DerivativeCheck = strcmp(optimget(OPTIONS,'DerivativeCheck',defaultopt,'fast'),'on');
typicalx = optimget(OPTIONS,'TypicalX',defaultopt,'fast') ;
if ischar(typicalx)
   if isequal(lower(typicalx),'ones(numberofvariables,1)')
      typicalx = ones(numberOfVariables,1);
   else
      error('optim:nlconst:InvalidTypicalX', ...
            'Option ''TypicalX'' must be a numeric value if not the default.')
   end
end
typicalx = typicalx(:); % turn to column vector
maxFunEvals = optimget(OPTIONS,'MaxFunEvals',defaultopt,'fast');
maxIter = optimget(OPTIONS,'MaxIter',defaultopt,'fast');
relLineSrchBnd = optimget(OPTIONS,'RelLineSrchBnd',[]);
relLineSrchBndDuration = optimget(OPTIONS,'RelLineSrchBndDuration',1);
hasBoundOnStep = ~isempty(relLineSrchBnd) && isfinite(relLineSrchBnd) && ...
    relLineSrchBndDuration > 0;
noStopIfFlatInfeas = strcmp(optimget(OPTIONS,'NoStopIfFlatInfeas','off'),'on');
phaseOneTotalScaling = strcmp(optimget(OPTIONS,'PhaseOneTotalScaling','off'),'on');

% In case the defaults were gathered from calling: optimset('fmincon'):
if ischar(maxFunEvals)
    if isequal(lower(maxFunEvals),'100*numberofvariables')
        maxFunEvals = 100*numberOfVariables;
    else
        error('optim:nlconst:InvalidMaxFunEvals', ...
              'Option ''MaxFunEvals'' must be an integer value if not the default.')
    end
end
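
% Comment-only illustration (assumed values, not the defaults read above):
% the tolerances and finite-difference limits used in this file come from an
% options structure built with standard optimset names, e.g.
%
%   opts = optimset('TolX',1e-10,'TolFun',1e-6,'TolCon',1e-6, ...
%                   'DiffMinChange',1e-8,'DiffMaxChange',0.1, ...
%                   'MaxFunEvals',400,'MaxIter',400);
%
% DiffMinChange must stay strictly below DiffMaxChange or the error above fires.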

% Handle bounds as linear constraints
arglb = ~isinf(lb);
lenlb=length(lb); % may be less than numberOfVariables due to old code
argub = ~isinf(ub);
lenub=length(ub);
boundmatrix = eye(max(lenub,lenlb),numberOfVariables);
if nnz(arglb) > 0     
    lbmatrix = -boundmatrix(arglb,1:numberOfVariables);% select non-Inf bounds 
    lbrhs = -lb(arglb);
else
    lbmatrix = []; lbrhs = [];
end
if nnz(argub) > 0
    ubmatrix = boundmatrix(argub,1:numberOfVariables);
    ubrhs=ub(argub);
else
    ubmatrix = []; ubrhs=[];
end 
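
% Worked example (comment only): with numberOfVariables = 2,
% lb = [0;-Inf] and ub = [Inf;2], only the finite bounds yield rows:
%
%   lbmatrix = [-1 0],  lbrhs = 0    % encodes -x(1) <= 0, i.e. x(1) >= 0
%   ubmatrix = [ 0 1],  ubrhs = 2    % encodes  x(2) <= 2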

% For fminimax and fgoalattain, an extra "slack"
% variable (gamma) is added to create the minimax/goal attain
% objective function.  Add an extra element to lb/ub so
% that gamma is unconstrained while avoiding out-of-index
% errors for lb/ub (when doing finite-differencing).
if  strcmp(funfcn{2},'fminimax') ||  strcmp(funfcn{2},'fgoalattain')
    lb(end+1) = -Inf;
    ub(end+1) = Inf;
end

% Update constraint matrix and right hand side vector with bound constraints.
A = [lbmatrix;ubmatrix;Ain];
B = [lbrhs;ubrhs;Bin];
if isempty(A)
    A = zeros(0,numberOfVariables); B=zeros(0,1);
end
if isempty(Aeq)
    Aeq = zeros(0,numberOfVariables); Beq=zeros(0,1);
end
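
% Continuing the comment-only example above, the stacked inequality system is
%
%   A = [lbmatrix; ubmatrix; Ain] = [-1 0; 0 1; Ain]
%   B = [lbrhs;    ubrhs;    Bin] = [ 0;   2;   Bin]
%
% so rows are ordered lower bounds, then upper bounds, then user linear
% inequalities; this is the same row order used when forming c below.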

% Used for semi-infinite optimization:
s = nan; POINT =[]; NEWLAMBDA =[]; LAMBDA = []; NPOINT =[]; FLAG = 2;
OLDLAMBDA = [];

x(:) = XOUT;  % Set x to have user expected size
% Compute the objective function and constraints
if strcmp(funfcn{2},'fseminf')
    f = fval;
    [ncineq,nceq,NPOINT,NEWLAMBDA,OLDLAMBDA,LOLD,s] = ...
        semicon(x,LAMBDA,NEWLAMBDA,OLDLAMBDA,POINT,FLAG,s,varargin{:});
else
    f = fval;
    nceq = nceqval; ncineq = ncineqval;  % nonlinear constraints only
end
nc = [nceq; ncineq];
c = [ Aeq*XOUT-Beq; nceq; A*XOUT-B; ncineq];

% Get information on the number and type of constraints.
non_eq = length(nceq);
non_ineq = length(ncineq);
[lin_eq,Aeqcol] = size(Aeq);
[lin_ineq,Acol] = size(A);  % includes upper and lower bounds
eq = non_eq + lin_eq;
ineq = non_ineq + lin_ineq;
ncstr = ineq + eq;
% The boolean inequalitiesExist is true if and only if there are finite
% bounds, linear inequalities, or nonlinear inequalities.
% Used only for printing indices of active inequalities at the solution.
inequalitiesExist = any(arglb) || any(argub) || size(Ain,1) > 0 || non_ineq > 0;

% Compute the initial constraint violation.
ga=[abs(c( (1:eq)' )) ; c( (eq+1:ncstr)' ) ];
if ~isempty(c)
   mg=max(ga);
else
   mg = 0;
end
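
% Comment-only numeric example: with one equality residual 0.3 and two
% inequality values [-0.2; 0.05], ga = [0.3; -0.2; 0.05] and mg = 0.3.
% Equalities contribute |c|, inequalities their signed value, so mg is the
% worst current violation (values <= 0 mean that inequality is satisfied).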

if isempty(f)
    error('optim:nlconst:InvalidFUN', ...
          'FUN must return a non-empty objective function.')
end

% Evaluate initial analytic gradients and check size.
if gradflag || gradconstflag
    if gradflag
        gf_user = gval;
    end
    if gradconstflag
        gnc_user = [gnceqval, gncval];   % Don't include A and Aeq yet
    else
        gnc_user = [];
    end
    if isempty(gnc_user) && isempty(nc)
        % Make gc compatible
        gnc = nc'; gnc_user = nc';
    end % isempty(gnc_user) & isempty(nc)
end
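
% For reference (a hedged sketch of the standard fmincon-style nonlcon
% interface; mycon is hypothetical): with user-supplied constraint gradients,
% the Jacobians are returned column-wise, one column per constraint, e.g.
%
%   function [cIneq,cEq,GCineq,GCeq] = mycon(x)
%   cIneq  = x(1)^2 + x(2)^2 - 1;    cEq  = [];
%   GCineq = [2*x(1); 2*x(2)];       GCeq = [];
%
% Note that gnc_user above stores the equality gradient columns first
% ([gnceqval, gncval]), matching how gc is assembled in the main loop.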

OLDX=XOUT;
OLDC=c; OLDNC=nc;
OLDgf=zeros(numberOfVariables,1);
gf=zeros(numberOfVariables,1);
OLDAN=zeros(ncstr,numberOfVariables);
LAMBDA=zeros(ncstr,1);
if strcmp(funfcn{2},'fseminf')
   lambdaNLP = NEWLAMBDA; 
else
   lambdaNLP = zeros(ncstr,1);
end
numFunEvals=1;
numGradEvals=1;

% Display header information.
if meritFunctionType==1
    if isequal(funfcn{2},'fgoalattain')
        header = sprintf(['\n                    Attainment                 Directional \n',...
                ' Iter   F-count       factor      Step-size     derivative    Procedure ']);
        
    else % fminimax
        header = sprintf(['\n                       Max                     Directional \n',...
                ' Iter   F-count  {F,constraints}  Step-size     derivative    Procedure ']);
    end
    formatstrFirstIter = '%5.0f  %5.0f   %12.6g                                            %s';
    formatstr = '%5.0f  %5.0f   %12.4g %12.3g    %12.3g   %s  %s';
else % fmincon or fseminf is caller
    header = sprintf(['\n                               max                   Directional   First-order \n',...
            ' Iter F-count        f(x)   constraint    Step-size   derivative   optimality Procedure ']);
    formatstrFirstIter = '%5.0f  %5.0f %12.6g %12.4g                                         %s';
    formatstr = '%5.0f  %5.0f %12.6g %12.4g %12.3g %12.3g %12.3g %s  %s';
end

how = ''; 
optimError = []; % In case we have convergence in 0th iteration, this needs a value.
%---------------------------------Main Loop-----------------------------
while ~done 
   %----------------GRADIENTS----------------
   
   if ~gradconstflag || ~gradflag || DerivativeCheck
      % Finite Difference gradients (even if just checking analytical)
      POINT = NPOINT; 
      len_nc = length(nc);
      ncstr =  lin_eq + lin_ineq + len_nc;     
      FLAG = 0; % For semi-infinite

      % Compute finite difference gradients
      %
      if DerivativeCheck || ~gradconstflag 
         [gf,gnc,NEWLAMBDA,OLDLAMBDA,s]=finitedifferences(XOUT,x,funfcn,confcn,lb,ub,f,nc, ...
                               [],DiffMinChange,DiffMaxChange,typicalx,[],'all', ...
                               LAMBDA,NEWLAMBDA,OLDLAMBDA,POINT,FLAG,s, ...
                                                  varargin{:});
         gnc = gnc'; % nlconst requires the transpose of the Jacobian
      else
         % no derivative check and user-supplied constraint gradients.             
         % Estimate objective gradient only.
         gf=finitedifferences(XOUT,x,funfcn,[],lb,ub,f,[],[], ...   
                         DiffMinChange,DiffMaxChange,typicalx,[], ...
                         'all',[],[],[],[],[],varargin{:});
      end

      % Gradient check
      if DerivativeCheck && (gradflag || gradconstflag) % analytic exists
                           
         disp('Function derivative')
         if gradflag
            gfFD = gf;
            gf = gf_user;
            
            if isa(funfcn{4},'inline')
               graderr(gfFD, gf, formula(funfcn{4}));
            else
               graderr(gfFD, gf, funfcn{4});
            end
         end
         
         if gradconstflag
            gncFD = gnc; 
            gnc = gnc_user;
            
            disp('Constraint derivative')
            if isa(confcn{4},'inline')
               graderr(gncFD, gnc, formula(confcn{4}));
            else
               graderr(gncFD, gnc, confcn{4});
            end
         end         
         DerivativeCheck = 0;
      elseif gradflag || gradconstflag
         if gradflag
            gf = gf_user;
         end
         if gradconstflag
            gnc = gnc_user;
         end
      end % DerivativeCheck == 1 &  (gradflag | gradconstflag)
      
      FLAG = 1; % For semi-infinite
      numFunEvals = numFunEvals + numberOfVariables;

   else % gradflag & gradconstflag & no DerivativeCheck
      gnc = gnc_user;
      gf = gf_user;
   end  
   
   % Now add in Aeq, and A
   if ~isempty(gnc)
      gc = [Aeq', gnc(:,1:non_eq), A', gnc(:,non_eq+1:non_ineq+non_eq)];
   elseif ~isempty(Aeq) || ~isempty(A)
      gc = [Aeq',A'];
   else
      gc = zeros(numberOfVariables,0);
   end
   AN=gc';
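   % Comment-only note: the columns of gc (rows of AN) are ordered linear
   % equalities, nonlinear equalities, bounds/linear inequalities, nonlinear
   % inequalities -- the same order as the constraint value vector c above,
   % so AN and c (and their multipliers) stay aligned row-for-row.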
   
   % Iteration 0 is handled separately below
   if iter > 0 % All but 0th iteration ----------------------------------------
       % Compute the first order KKT conditions.
       if meritFunctionType == 1 
           % don't use this stopping test for fminimax or fgoalattain
           optimError = inf;
       else
           if strcmp(funfcn{2},'fseminf'), lambdaNLP = NEWLAMBDA; end
           normgradLag = norm(gf + AN'*lambdaNLP,inf);
           normcomp = norm(lambdaNLP(eq+1:ncstr).*c(eq+1:ncstr),inf);
           if isfinite(normgradLag) && isfinite(normcomp)
               optimError = max(normgradLag, normcomp);
           else
               optimError = inf;
           end
       end
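       % In formula form (comment only): for the fmincon/fseminf path,
       %   optimError = max( ||gf + AN'*lambdaNLP||_inf ,
       %                     max over inequalities of |lambdaNLP_i * c_i| )
       % i.e. the larger of the Lagrangian-gradient norm and the
       % complementarity measure computed just above.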
       feasError  = mg;
       optimScal = 1; feasScal = 1; 
       
       % Print iteration information starting with iteration 1
       if verbosity > 1
           if meritFunctionType == 1,
               gamma = mg+f;
               CurrOutput = sprintf(formatstr,iter,numFunEvals,gamma,...
                   stepsize,gf'*SD,how,howqp); 
               disp(CurrOutput)
           else
               CurrOutput = sprintf(formatstr,iter,numFunEvals,f,mg,...
                   stepsize,gf'*SD,optimError,how,howqp); 
               disp(CurrOutput)
           end
       end
       % OutputFcn call
       if haveoutputfcn
           [xOutputfcn, optimValues, stop] = callOutputFcn(outputfcn,XOUT,xOutputfcn,'iter',iter,numFunEvals, ...
               f,mg,stepsize,gf,SD,meritFunctionType,optimError,how,howqp,outputfcnVarargin{:});
           if stop  % Stop per user request.
               [x,FVAL,lambda_out,EXITFLAG,OUTPUT,GRADIENT,HESS] = ...
                   cleanUpInterrupt(xOutputfcn,optimValues);
