garchsim.m
preSamples = max([R M P Q]); % # of pre-sample lags needed to 'jump-start' the process.
%
% Estimate the number of samples it takes for the transients of the
% ARMA(R,M) and GARCH(P,Q) processes to die out, so the output processes
% are in (approximately) steady-state. The metric used is, granted, rather
% arbitrary: Estimate the number of samples needed for the magnitude of
% the impulse response (which begins at 1) to decay below 0.01 (i.e., 1%).
% The estimate is based on the magnitude of the largest eigenvalue of the
% auto-regressive polynomial. Note that a GARCH(P,Q) process for e(t)
% implies an ARMA(max(P,Q),P) process for e(t)^2.
%
if isempty(AR)
decayTime1 = M;
else
eigenValues = roots([1 -AR]);
decayTime1 = ceil(log(0.01) / log(max(abs(eigenValues)))) + M;
end
if isempty(ARCH) & isempty(GARCH)
decayTime2 = P;
else
AB = zeros(max([P Q]) + 1 , 2); % One row for lag 0 plus max(P,Q) lagged coefficients.
AB(1:Q+1,1) = [1 ; -ARCH(:)];
AB(1:P+1,2) = [1 ; -GARCH(:)];
polynomial = sum(AB(2:end,:),2);
eigenValues = roots([1 ; polynomial]);
decayTime2 = ceil(log(0.01) / log(max(abs(eigenValues)))) + P;
end
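%
% Illustrative example (hypothetical parameter values, not from the routine
% itself): a GARCH(1,1) model with ARCH = 0.2 and GARCH = 0.7 gives the AR
% polynomial [1 -(0.2 + 0.7)] = [1 -0.9] for the ARMA representation of
% e(t)^2, so the largest eigenvalue is 0.9 and
%
%    decayTime2 = ceil(log(0.01) / log(0.9)) + P = 44 + 1 = 45
%
% i.e., roughly 45 extra samples are generated and later stripped off so the
% variance process is (approximately) in steady-state.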
%
% Set the total number of samples generated for each path.
%
T = nSamples + preSamples + max(decayTime1 , decayTime2);
%
% Get the probability distribution of the innovations process
% and draw the i.i.d. random sequence, v(t), that drives the
% e(t) innovations sequence such that e(t) = v(t)*sqrt(h(t)).
%
distribution = garchget(spec , 'Distribution');
distribution = distribution(~isspace(distribution));
switch upper(distribution)
case 'GAUSSIAN'
randn('state' , seed);
v = randn(T , nPaths); % v(t) are i.i.d. Gaussian deviates ~ N(0,1) distributed.
otherwise
error(' Distribution of innovations must be ''Gaussian''.')
end
%
% Estimate the unconditional standard deviation of the invertible e(t) process.
%
sigma = sqrt(K / (1 - sum([GARCH(:) ; ARCH(:)])));
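%
% For example (hypothetical values): K = 1e-4, ARCH = 0.2, GARCH = 0.7 gives
% sigma = sqrt(1e-4 / (1 - 0.9)) = sqrt(1e-3), or about 0.0316. This is the
% value assigned to the pre-sample observations of e(t) below.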
%
% Construct the conditional variance parameter vector of length (1 + P + Q).
%
varianceCoefficients = [K ; GARCH(:) ; ARCH(:)]'; % GARCH(P,Q) parameters.
%
% Form the h(t) conditional variance and e(t) innovations processes.
%
% Notes:
% (1) The C-Mex routine GARCHSIMGARCH prepends PRESAMPLES observations to each
% column of the e(t) and h(t) processes, thus (temporarily) increasing their
% sizes by PRESAMPLES observations over and above what is really necessary.
% For the purpose of simulating the e(t) and h(t) processes via the GARCH
% conditional variance equation, Hamilton (top of page 667) suggests assigning
% the unconditional variance of e(t) to the first PRESAMPLES samples of h(t)
% and e^2(t). This implies that the unconditional standard deviation, SIGMA, is
% assigned to the first PRESAMPLES observations of e(t) for jump-starting the
% variance process h(t).
% (2) Notice how the first PRESAMPLES rows of the innovations matrix e(t)
% filter through the variance process h(t). In forming the h(t) process,
% lagged values of the innovations squared, e(t)^2, are actually filtered,
% and NOT the e(t) process itself.
% (3) In an effort to minimize transients, there appears to be a subtle
% discrepancy in the first PRESAMPLES rows of the e(t) and e(t)^2 processes.
% The conditional expectation of e(t) = 0, while the conditional variance of
% e(t) = h(t). When simulating the mean y(t), the first PRESAMPLES rows of
% e(t) are set to zero. When simulating the variance h(t), the first PRESAMPLES
% rows of e(t)^2 are set to the unconditional variance, SIGMA^2. This is
% analogous to working with expectations in the pre-sample, and working with
% realizations in-sample.
%
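% Illustrative sketch (an approximation of the recursion described in the
% notes above, NOT the C-Mex source itself; details of the compiled routine
% may differ):
%
%    e = zeros(T,nPaths);   h = zeros(T,nPaths);
%    e(1:preSamples,:) = sigma;       % jump-start; see note (1)
%    h(1:preSamples,:) = sigma^2;
%    for t = (preSamples + 1):T
%        h(t,:) = varianceCoefficients * [ones(1,nPaths) ; h(t-(1:P),:) ; e(t-(1:Q),:).^2];
%        e(t,:) = sqrt(h(t,:)) .* v(t,:);
%    end
%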
[e , h] = garchsimgarch(P , Q , nPaths , varianceCoefficients , sigma , v , T , preSamples);
%
% Now that the GARCH innovations e(t) have been simulated, run them through
% the linear filter to generate a conditional mean of ARMA form.
%
% Notes:
% (1) The conditional mean y(t) process could have been generated inside the
% same loop that generated the innovations process e(t), but the built-in
% FILTER function is much faster for ARMA models.
% (2) The FILTER function will NOT handle the constant 'C' included in the
% general ARMA equation. However, by viewing the intermediate y(t) values
% as deviations from the unconditional mean of y(t), we can incorporate
% the constant by simply adding the unconditional mean, AVERAGE, to the
% y(t) process after the fact.
% (3) FILTER will only work for ARMA conditional mean specifications. If an
% ARMAX specification is requested (i.e., a valid X regression matrix has
% been specified), the FILTER approach will not work.
% (4) Notice that the first PRESAMPLES rows of the innovations matrix e(t) are
% now over-written by zeros for the purpose of simulating the conditional
% mean. Since the conditional expectation of e(t) = 0, this helps minimize
% transients. See note (1) above regarding the C-Mex simulation of e(t)
% and h(t).
%
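% Expanding on note (2): writing the stationary ARMA model in deviation form,
%
%    y(t) - average = AR(1)*(y(t-1) - average) + ... + AR(R)*(y(t-R) - average)
%                     + e(t) + MA(1)*e(t-1) + ... + MA(M)*e(t-M)
%
% with average = C / (1 - sum(AR)), shows that filtering e(t) through
% [1 MA] / [1 -AR] and then adding 'average' reproduces the model with the
% constant C included.
%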
if nargout >= 3 % Simulate y(t) only if it's requested.
average = C / (1 - sum(AR)); % Unconditional mean of the stationary y(t) process.
if isempty(X) % ARMA(R,M) form for conditional mean.
y = filter([1 MA] , [1 -AR] , [zeros(preSamples,nPaths) ; e(preSamples+1:end,:)]) + average;
y = y((T - nSamples + 1):T , :);
else % ARMAX(R,M,N) form for conditional mean.
if (R + M) == 0 % Handle a simple regression as a special case.
y = C + X((end - nSamples + 1):end , :) * regress';
y = y(:,ones(nPaths,1)) + e((T - nSamples + 1):T , :);
else
%
% Construct ARMA portion of the conditional mean
% parameter vector of length (1 + R + (1 + M)).
%
meanCoefficients = [C ; AR(:) ; [1 ; MA(:)]]';
%
% Modify the unconditional mean to incorporate the regressors, and assign
% it to the first PRESAMPLES rows of the y(t) process to help reduce
% transients. Also, overwrite the first PRESAMPLES rows of e(t) with zeros
% (the expectation of e(t) = 0), and pre-pend the sample mean of X if
% insufficient observations of the regressors exist to cover the transient
% start-up period.
%
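% As an illustration, the regressors shift the unconditional mean to
%
%    E[y] = (C + regress * E[X]') / (1 - sum(AR))
%
% which is approximated below by substituting the sample mean of X for E[X].
%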
meanX = mean(X);
average = average + (regress * meanX') / (1 - sum(AR));
y = [average(ones(preSamples,1)) ; zeros(T-preSamples,1)];
y = y(:,ones(nPaths,1));
e = [zeros(preSamples,nPaths) ; e(preSamples+1:end,:)];
rowsX = size(X,1);
if T > rowsX
X = [meanX(ones(T - rowsX , 1),:); X]; % Pre-pend the sample mean so we have T samples.
else
X = X(rowsX - (T - 1):end , :); % Retain only the most recent T samples.
end
for t = (preSamples + 1):T
meanData = [ones(1,nPaths) ; y(t-(1:R),:) ; e(t-(0:M),:)];
y(t,:) = meanCoefficients * meanData + regress * X(t,:).';
end
y = y((T - nSamples + 1):T , :); % Strip off transients.
end
end
end
%
% Since we have pre-pended PRESAMPLES samples to the output processes to
% compensate for transients and pre-sample effects, strip the start-up
% period and retain only the last 'nSamples' samples.
%
e = e((T - nSamples + 1):T , :);
h = h((T - nSamples + 1):T , :);
%
% Since h(t) is the conditional variance, convert it to a standard deviation.
%
h = sqrt(h);