qrd_lsl.m
function [Wo, ee, gamma] = qrd_lsl(u, d, M, verbose, lambda, delta)
% function [Wo, ee, gamma] = qrd_lsl(u, d, M, verbose, lambda, delta)
%
% qrd_lsl.m - apply the standard QR-decomposition-based LSL algorithm
% using angle-normalized errors to predict/estimate complex-valued
% processes; written for MATLAB 4.0
%
% For efficiency, the index-shifting functions should be replaced with
% their corresponding macro definitions in ms.m and ns.m.
%
% Reference: Ch. 15 of Haykin, _Adaptive Filter Theory_, 3rd ed., 1996
%
% Input parameters:
%   u       : vector of inputs
%   d       : vector of desired outputs
%   M       : final order of predictor
%   verbose : set to 1 for interactive processing
%   lambda  : the initial value of the forgetting factor
%   delta   : the initial value for the prediction matrices
%
% Output parameters:
%   Wo    : row-wise matrix of Hermitian-transposed weights
%           at each iteration
%   ee    : row vector of a posteriori prediction errors Y - xp
%           for final prediction order M+1
%   gamma : row vector of conversion factors for final
%           prediction order M+1
%
% Copyright (c) 1994-2001, Paul Yee.

% Nout = size(Xi, 2);
Nout = 1;
% maximum number of timesteps that can be predicted
N = min(length(u), length(d));
Wo = [];

% prediction initialization
F = zeros(ms(M), ns(N));
F(ms(0):ms(M), ns(0)) = delta * ones(M+1, 1);
B = zeros(ms(M), ns(N));
B(ms(0):ms(M), ns(-1)) = delta * ones(M+1, 1);
B(ms(M), ns(0)) = delta;
cb = zeros(ms(M), ns(N));
sb = cb;
cf = cb;
sf = cb;
e = zeros(ms(M+1), ns(N));
ee = e;
pfc = zeros(ms(M-1), ns(N));
pbc = pfc;
pc = pfc;
pfc(ms(0):ms(M-1), ns(0)) = zeros(M, 1);
pbc(ms(0):ms(M-1), ns(0)) = zeros(M, 1);
pc(ms(1):ms(M), ns(0)) = zeros(M, 1);
gamma_root = zeros(ms(M+1), ns(N));
gamma_root(ms(0), ns(1):ns(N)) = ones(1, N);
lambda_root = sqrt(lambda);

% set size of reflection coefficients
kappa_f = zeros(ms(M), ns(N));
kappa_b = zeros(ms(M), ns(N));
kappa = zeros(ms(M), ns(N));

for n = 1:N,
  % data initialization
  ef(ms(0), ns(n)) = u(n);
  eb(ms(0), ns(n)) = u(n);
  e(ms(0), ns(n)) = d(n);
  gamma_root(ms(0), ns(n)) = 1;
  for m = 1:M,
    % predictions
    B(ms(m-1), ns(n-1)) = lambda * B(ms(m-1), ns(n-2)) + ...
        abs(eb(ms(m-1), ns(n-1)))^2;
    cb(ms(m-1), ns(n-1)) = lambda_root * sqrt(B(ms(m-1), ns(n-2)) / ...
        B(ms(m-1), ns(n-1)));
    sb(ms(m-1), ns(n-1)) = eb(ms(m-1), ns(n-1)) / sqrt(B(ms(m-1), ns(n-1)));
    pfc(ms(m-1), ns(n)) = cb(ms(m-1), ns(n-1)) * lambda_root * ...
        pfc(ms(m-1), ns(n-1)) + conj(sb(ms(m-1), ns(n-1))) * ef(ms(m-1), ns(n));
    ef(ms(m), ns(n)) = cb(ms(m-1), ns(n-1)) * ef(ms(m-1), ns(n)) - ...
        sb(ms(m-1), ns(n-1)) * lambda_root * pfc(ms(m-1), ns(n-1));
    gamma_root(ms(m), ns(n-1)) = cb(ms(m-1), ns(n-1)) * ...
        gamma_root(ms(m-1), ns(n-1));
    kappa_f(ms(m), ns(n)) = -conj(pfc(ms(m-1), ns(n))) / ...
        sqrt(B(ms(m-1), ns(n-1)));
    F(ms(m-1), ns(n)) = lambda * F(ms(m-1), ns(n-1)) + abs(ef(ms(m-1), ns(n)))^2;
    cf(ms(m-1), ns(n)) = lambda_root * sqrt(F(ms(m-1), ns(n-1)) / ...
        F(ms(m-1), ns(n)));
    sf(ms(m-1), ns(n)) = ef(ms(m-1), ns(n)) / sqrt(F(ms(m-1), ns(n)));
    pbc(ms(m-1), ns(n)) = cf(ms(m-1), ns(n)) * lambda_root * pbc(ms(m-1), ns(n-1)) + ...
        conj(sf(ms(m-1), ns(n))) * eb(ms(m-1), ns(n-1));
    eb(ms(m), ns(n)) = cf(ms(m-1), ns(n)) * eb(ms(m-1), ns(n-1)) - ...
        sf(ms(m-1), ns(n)) * lambda_root * pbc(ms(m-1), ns(n-1));
    kappa_b(ms(m), ns(n)) = -conj(pbc(ms(m-1), ns(n))) / sqrt(F(ms(m-1), ns(n)));
  end; % for m
end; % for n

for n = 1:N,
  for m = 1:M,
    % filtering
    pc(ms(m-1), ns(n)) = cb(ms(m-1), ns(n)) * lambda_root * pc(ms(m-1), ns(n-1)) + ...
        conj(sb(ms(m-1), ns(n))) * e(ms(m-1), ns(n));
    e(ms(m), ns(n)) = cb(ms(m-1), ns(n)) * e(ms(m-1), ns(n)) - sb(ms(m-1), ns(n)) * ...
        lambda_root * pc(ms(m-1), ns(n-1));
    ee(ms(m), ns(n)) = gamma_root(ms(m), ns(n)) * e(ms(m), ns(n));
  end; % for m
end; % for n

for n = 1:N,
  % handle the terminal case m = M separately, as in Sayed and Kailath, Table 5
  B(ms(M), ns(n)) = lambda * B(ms(M), ns(n-1)) + abs(eb(ms(M), ns(n)))^2;
  cb(ms(M), ns(n)) = lambda_root * sqrt(B(ms(M), ns(n-1)) / B(ms(M), ns(n)));
  sb(ms(M), ns(n)) = eb(ms(M), ns(n)) / sqrt(B(ms(M), ns(n)));
  e(ms(M+1), ns(n)) = cb(ms(M), ns(n)) * e(ms(M), ns(n)) - sb(ms(M), ns(n)) * ...
      lambda_root * pc(ms(M), ns(n-1));
  pc(ms(M), ns(n)) = cb(ms(M), ns(n)) * lambda_root * pc(ms(M), ns(n-1)) + ...
      conj(sb(ms(M), ns(n))) * e(ms(M), ns(n));
  gamma_root(ms(M+1), ns(n)) = cb(ms(M), ns(n)) * gamma_root(ms(M), ns(n));
  ee(ms(M+1), ns(n)) = gamma_root(ms(M+1), ns(n)) * e(ms(M+1), ns(n));
  for m = 1:M,
    B(ms(m-1), ns(N)) = lambda * B(ms(m-1), ns(N-2)) + abs(eb(ms(m-1), ns(N)))^2;
    % joint-process regression coefficient
    % required to compute weight vector
    kappa(ms(m), ns(n)) = conj(pc(ms(m-1), ns(n))) / sqrt(B(ms(m), ns(n)));
  end; % for m
end; % for n

an = zeros(M, M+1);
cn = an;
cn1 = cn;
L_M = eye(M+1, M+1);
for n = 1:N,
  an(1, 1:2) = [1 kappa_f(ms(1), ns(n))];
  cn(1, 1:2) = [kappa_b(ms(1), ns(n)) 1];
  for m = 2:M,
    % compute coefficients of forward and backward prediction-error
    % filters for later use in computation of weight vector. Note
    % that we propagate an and cn in row vector form.
    an(m, 1:(m+1)) = [an(m-1, 1:m) 0] + kappa_f(ms(m), ns(n)) * [0 cn1(m-1, 1:m)];
    cn(m, 1:(m+1)) = [0 cn1(m-1, 1:m)] + kappa_b(ms(m), ns(n)) * [an(m-1, 1:m) 0];
  end; % for m
  for m = 2:(M+1),
    L_M(m, 1:(m-1)) = conj(cn(m-1, 1:(m-1)));
  end; % for m
  % note that we store Hermitian-transposed weights
  % row-wise in Wo
  Wo = [Wo ; kappa(ms(0):ms(M), ns(n))' * L_M];
  cn1 = cn;
end; % for n

% resize variables before return
Wo = Wo(1:(N-1), :)';
ee = ee(ms(M), ns(1):ns(N-1));
gamma = gamma_root(ms(M), ns(1):ns(N-1)).^2;
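The index-shifting helpers ms.m and ns.m referenced in the header are not part of this listing. Judging from how the arrays are dimensioned and indexed (order indices start at 0 and time indices reach back to -1, while MATLAB arrays are 1-based), one-line definitions consistent with this file would look like the sketch below. These bodies are inferred from usage, not the original macros, and under MATLAB 4.0 each function would live in its own file:

function i = ms(m)
% ms.m - shift a 0-based order index m to a 1-based row index (inferred)
i = m + 1;

function i = ns(n)
% ns.m - shift a time index n (as low as -1) to a 1-based column index (inferred)
i = n + 2;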
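For reference, a call might look like the following. The signal model, the predictor order, and the values of lambda and delta are illustrative choices, not taken from the original author:

% identify a simple complex-valued FIR channel with a 4th-order lattice
j = sqrt(-1);
N = 500;
u = randn(1, N) + j * randn(1, N);   % complex white input process
d = filter([0 0.5 -0.25], 1, u);     % desired response (example system)
[Wo, ee, gamma] = qrd_lsl(u, d, 4, 0, 0.99, 0.01);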