ARCH = ARCH(:);
if isempty(unconditionalVariance) || (unconditionalVariance <= 0)
K = 1e-5; % A decent assumption for daily returns.
else
K = unconditionalVariance * (1 - sum([GARCH ; ARCH]));
end
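% For example (an illustrative GARCH(1,1) case, numbers hypothetical):
% with unconditionalVariance = 1e-4, GARCH = 0.8, and ARCH = 0.1, the
% expression above gives K = 1e-4 * (1 - 0.9) = 1e-5.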
%
% * * * * * Helper function for initial ARMA guesses. * * * * *
%
function [AR, MA, constant, variance] = arma0(y , R , M)
%ARMA0 Initial parameter estimates of univariate ARMA processes.
% Compute initial estimates of the auto-regressive (AR) and moving average
% (MA) coefficients of a stationary/invertible univariate ARMA time series.
% Estimates of the model constant and the variance of the innovations noise
% process are also provided. The purpose of this function is to provide
% initial coefficient estimates suitable for further refinement via maximum
% likelihood estimation.
%
% [AR , MA , Constant , Variance] = arma0(Series)
% [AR , MA , Constant , Variance] = arma0(Series , R , M)
%
% Optional Inputs: R, M
%
% Input:
% Series - Return series vector of interest. Series is a dependent stochastic
% process assumed to follow a model specification of general ARMA(R,M) form.
% The first element contains the oldest observation and the last element
% the most recent.
%
% Optional Inputs:
% R - Auto-regressive order of an ARMA(R,M) model. R is a non-negative integer
% scalar. If empty or missing, R = 0.
%
% M - Moving average order of an ARMA(R,M) model. M is a non-negative integer
% scalar. If empty or missing, M = 0.
%
% Outputs:
% AR - R by 1 vector of initial estimates of the auto-regressive parameters
% of an ARMA(R,M) model. The first element of AR is an estimate of the
% coefficient of the first lag of Series, the second element is an estimate
% of the coefficient of the second lag of Series, and so forth.
%
% MA - M by 1 vector of initial estimates of the moving average parameters of
% an ARMA(R,M) model. The first element of MA is an estimate of the
% coefficient of the first lag of the innovations noise process, the second
% element is an estimate of the coefficient of the second lag of the
% innovations noise process, and so forth.
%
% Constant - Estimate of the constant included in a general ARMA model.
%
% Variance - Estimate of the unconditional variance of the innovations noise
% process. Equivalently, it may also be viewed as the variance estimate of
% the white noise innovations under the assumption of homoskedasticity.
%
% See also GARCHSET, GARCHGET, GARCHSIM, GARCHFIT.
%
% References:
% Box, G.E.P., Jenkins, G.M., Reinsel, G.C., "Time Series Analysis:
% Forecasting and Control", 3rd edition, Prentice Hall, 1994.
% Hamilton, J.D., "Time Series Analysis", Princeton University Press, 1994.
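%
% Example (illustrative; 'returns' is a hypothetical vector of daily
% returns, oldest observation first):
%
%    [AR, MA, constant, variance] = arma0(returns, 1, 1);
%
% produces starting values for an ARMA(1,1) model suitable for
% subsequent refinement by maximum likelihood.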
%
%
% Check & scrub the observed return series matrix y(t).
%
if (nargin < 1) || isempty(y)
error(' Observed return series ''Series'' must be specified.');
else
if numel(y) == length(y) % Check for a vector (single return series).
y = y(:); % Convert to a column vector.
end
end
if (nargin >= 2) && ~isempty(R)
if length(R) ~= 1
error(' AR order ''R'' must be a scalar.');
end
if any(round(R) - R) || any(R < 0)
error(' AR order ''R'' must be a non-negative integer.')
end
else
R = 0;
end
if (nargin >= 3) && ~isempty(M)
if length(M) ~= 1
error(' MA order ''M'' must be a scalar.');
end
if any(round(M) - M) || any(M < 0)
error(' MA order ''M'' must be a non-negative integer.')
end
else
M = 0;
end
%
% Check for the simple case of no ARMA model. In this
% case just compute the sample mean and variance and exit.
%
if (R + M) == 0
AR = [];
MA = [];
constant = mean(y);
variance = var(y,1);
return
end
%
% Estimate the AR coefficients of a general ARMA(R,M) model.
%
if R > 0
%
% Compute the auto-covariance sequence of the y(t) process. Note that the
% variance (i.e., zeroth-lag auto-covariance) is found in the first element.
%
correlation = autocorr(y , R + M); % auto-correlation sequence.
variance = var(y,1);
covariance = correlation * variance; % auto-covariance sequence.
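%
% Note: this assumes AUTOCORR returns the sample auto-correlations at
% lags 0 through R+M, with the unit zeroth-lag correlation first, so
% that covariance(1) is the sample variance of y(t).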
%
% In each case below, the matrix 'C' of covariances is
% that outlined in equation A6.2.1 (page 220) of BJR.
%
if M > 0
%
% For ARMA processes, the matrix C of covariances derived from the
% estimated auto-covariance sequence is Toeplitz, but non-symmetric.
% The AR coefficients are then found by solving the modified Yule-Walker
% equations.
%
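% Written out (a sketch in lag notation; gamma(k) denotes the lag-k
% auto-covariance), the system solved below is
%
%    gamma(M+k) = AR(1)*gamma(M+k-1) + ... + AR(R)*gamma(M+k-R) ,  k = 1..R
%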
i = [M+1:-1:M-R+2];
i(i <= 0) = i(i <= 0) + 2; % covariance(k) = covariance(-k)
C = toeplitz(covariance(M+1:M+R) , covariance(i));
if R == 1
AR = covariance(M+2:M+R+1) / C;
else
AR = C \ covariance(M+2:M+R+1);
end
else
if R == 1
AR = correlation(2);
else
%
% For AR processes, the matrix C of covariances derived from the
% estimated auto-covariance sequence is Toeplitz and symmetric.
% The AR coefficients are found by solving the Yule-Walker equations.
%
C = toeplitz(covariance(1:R));
AR = C \ covariance(2:R+1);
end
end
%
% Ensure the AR process is stationary. If it's not stationary, then set all
% ARMA coefficients to 0. This ensures the subsequent optimization will
% begin with a stationary/invertible ARMA model for the conditional mean.
%
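% For example (illustrative): a single estimated coefficient AR = 1.05
% yields roots([1 ; -1.05]) = 1.05, whose magnitude is not less than 1,
% so the defaults below would be returned.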
eigenValues = roots([1 ; -AR(:)]);
if any(abs(eigenValues) >= 1)
AR = zeros(R , 1);
MA = zeros(M , 1);
constant = 0;
variance = 1e-4; % A decent assumption for daily returns.
return
end
else
AR = [];
end
%
% Filter the ARMA(R,M) input series y(t) with the estimated AR coefficients
% to obtain a pure MA process. If the input moving-average model order M is
% zero (M = 0), then the filtered output is really just a pure innovations
% process (i.e., an MA(0) process); in this case the innovations variance
% estimate is just the sample variance of the filtered output. If M > 0, then
% compute the auto-covariance sequence of the MA process and continue.
%
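% For instance (illustrative): with R = 1 and an estimated AR = 0.5, the
% call filter([1 -0.5] , 1 , y) returns x(t) = y(t) - 0.5*y(t-1), i.e.
% the series with the estimated AR dynamics removed.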
x = filter([1 -AR'] , 1 , y);
constant = mean(x);
if M == 0
variance = var(x,1);
MA = [];
return
end
c = autocorr(x , M) * var(x,1); % Covariance of an MA(M) process.
%
% Estimate the variance of the white noise innovations process e(t)
% and the MA coefficients of a general ARMA(R,M) model. The method of
% computation is that outlined in equation A6.2.4 (page 221) of BJR.
%
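% In the notation of this function (a sketch; signs follow the GARCHSET
% convention noted in the loop below rather than BJR verbatim), each
% pass computes
%
%    variance = c(1) / (1 + MA(1)^2 + ... + MA(M)^2)
%    MA(j)    = c(j+1)/variance - (MA(1)*MA(j+1) + ... + MA(M-j)*MA(M))
%
% and iterates until the MA vector stops changing.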
MA = zeros(M , 1); % Initialize MA coefficients.
MA1 = ones (M , 1); % Saved MA coefficients from previous iteration.
counter = 1; % Iteration counter.
tol = 0.05; % Convergence tolerance.
while ((norm(MA - MA1) > tol) && (counter < 100))
MA1 = MA;
%
% Estimate the variance of the innovations process e(t).
%
variance = c(1) / ([1 ; MA]' * [1 ; MA]);
if abs(variance) < tol
break
end
for j = M:-1:1
%
% Now estimate the moving-average coefficients. Note that
% the MA coefficients are the negative of those appearing
% in equation A6.2.4 (page 221) of BJR. This is due to the
% convention of entering coefficient values, via GARCHSET,
% exactly as the equation would be written.
%
MA(j) = [c(j+1) ; -MA(1:M-j)]' * [1/variance ; MA(j+1:M)];
end
counter = counter + 1;
end
%
% Test for invertibility of the noise model.
%
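% For example (illustrative): MA = 1.2 gives roots([1 ; 1.2]) = -1.2,
% which lies outside the unit circle, so the defaults below would be used.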
eigenValues = roots([1 ; MA]);
if any(abs(eigenValues) >= 1) || any(isinf(eigenValues)) || any(isnan(eigenValues))
MA = zeros(M , 1);
variance = 1e-4; % A decent assumption for daily returns.
end
%
% * * * * * Helper function for variance-covariance. * * * * *
%
function covarianceMatrix = varcov(p0 , y , R , M , P , Q , X , Fix)
%VARCOV Covariance matrix of maximum likelihood parameter estimates.
% This function uses the outer-product method to compute the error
% covariance matrix of parameters estimated by maximum likelihood. It
% is valid for maximum likelihood estimation only, and assumes that
% the peak of the log-likelihood objective function has been found
% within the interior of the allowable parameter space (i.e., no
% boundary constraints have been actively enforced).
%
% V = varcov(Parameters , Series , R , M , P , Q , X , Fix)
%
% Inputs:
% Parameters - Column vector of optimized maximum likelihood parameter
% estimates associated with fitting conditional mean and variance
% specifications to an observed return series Series. The conditional
% mean contributes the first (1 + R + M + nX) parameters (nX is the
% number of explanatory variables included in the regression component
% of the conditional mean); the conditional variance contributes the
% remaining (1 + P + Q) parameters. The length of Parameters is thus
% (2 + R + M + nX + P + Q) for a Gaussian log-likelihood function.
%
% Series - Column vector of observations of the underlying univariate return
% series of interest. Series is the response variable representing the time
% series fit to conditional mean and variance specifications. The last row
% of Series holds the most recent observation of each realization.
%
% R - Non-negative, scalar integer representing the AR-process order.
%
% M - Non-negative, scalar integer representing the MA-process order.
%
% P - Non-negative, scalar integer representing the number of lags of the
% conditional variance included in the GARCH process.
%
% Q - Non-negative, scalar integer representing the number of lags of the
% squared innovations included in the GARCH process.
%
% X - Time series regression matrix of explanatory variable(s). Typically, X
% is a regression matrix of asset returns (e.g., the return series of an
% equity index). Each column of X is an individual time series used as an
% explanatory variable in the regression component of the conditional mean.
% In each column of X, the first row contains the oldest observation and
% the last row the most recent. If empty or missing, the conditional mean
% will have no regression component.
%
% Fix - Boolean (0,1) vector the same length as Parameters. 0's indicate
% that the corresponding Parameter element has been estimated; 1's indicate
% that the corresponding Parameter element has been held fixed.
%
% Outputs:
% V - Variance-covariance matrix of the estimation errors associated with
% parameter estimates obtained by maximum likelihood estimation (MLE). The
% standard errors of the estimates are the square root of the diagonal
% elements. V is computed by the 'outer-product' method.
%
% Note:
% This is an internal helper function for GARCHFIT. No error checking is
% performed.
%
% References:
% Hamilton, J.D., "Time Series Analysis", Princeton University
% Press, 1994, pages 142-144, 660-661.
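%
% Example (illustrative): the standard errors of the estimates may be
% recovered from the returned matrix as
%
%    standardErrors = sqrt(diag(V));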
%
delta = 1e-10; % Offset for numerical differentiation.
%
% Evaluate the log-likelihood objective function at the final MLE
% parameter estimates. In contrast to the optimization which is
% interested in the scalar-valued objective function 'LLF', here
% we are interested in the individual log-likelihood components
% for each observation of y(t). The relationship between them is:
%
% LLF = -sum(g0)
%
[LLF,G,H,residuals,sigma] = garchllfn(p0 , y , R , M , P , Q , X);
g0 = -0.5 * ( log(2*pi*(sigma.^2)) + (residuals./sigma).^2 );
%
% Initialize the perturbed parameter vector and the scores matrix.
% For 'T' observations in y(t) and 'nP' parameters estimated via
% maximum likelihood, the scores array is a T-by-nP matrix.
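% In outer-product-of-gradients (OPG) form, with scores approximated by
% the finite differences computed below, the estimate is
%
%    V = inv(scores' * scores)
%
% (PINV is used for numerical robustness). Note that the difference
% quotient (g0 - gDelta)/dp has the opposite sign of the derivative,
% which is immaterial since the scores enter only through their outer
% product.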
%
pDelta = p0;
scores = zeros(length(y) , length(p0));
for j=1:length(p0)
if ~Fix(j)
pDelta(j) = p0(j) * (1+delta);
dp = delta * p0(j);
%
% Trap the case of a zero parameter value (i.e., p0(j) = 0).
%
if dp == 0
dp = delta;
pDelta(j) = dp;
end
[LLF,G,H,residuals,sigma] = garchllfn(pDelta , y , R , M , P , Q , X);
gDelta = -0.5 * ( log(2*pi*(sigma.^2)) + (residuals./sigma).^2 );
scores(:,j) = (g0 - gDelta) / dp;
pDelta(j) = p0(j);
end
end
%
% Invert the outer product of the scores matrix to get the
% variance-covariance matrix of the MLE parameter estimates.
%
covarianceMatrix = pinv(scores'*scores);