📄 kernel_reduce_merge.m
function g = kernel_reduce_merge(g, N, adjustP)
% Matt Ridley's algorithm: conserves the mean; optionally adjusts the variance at the end
% Based on the GMM smallest-weight clustering algorithm as described in:
% Mike West, "Approximating Posterior Distributions by Mixtures", 1992.
% Questions regarding Mahalanobis distance:
% - why kernel covariance rather than 2*kernel or ensemble covariance
% - why weighted rather than simple Mahalanobis
%
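% Merge cost implemented below: with i the smallest-weight kernel, each
% remaining kernel j is scored by
%     cost(j) = (w_i*w_j / (w_i + w_j)) * (x_j - x_i)' * inv(g.P) * (x_j - x_i)
% and i is merged with the j minimising this weight-scaled Mahalanobis
% distance, so light, mutually close components are absorbed first.
%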
if size(g.x,2) <= N, return, end

% Record the original second moment so it can be restored after merging
if nargin == 3 && adjustP ~= 0      % && short-circuits when adjustP is not supplied
    [x1,P1,w1] = kernel_to_gaussian(g);
end
% Do merging
while size(g.x,2) > N
    % Select the minimum-weight kernel
    wmin = min(g.w);
    i = find(g.w == wmin);
    if length(i) > 1                % break ties at random
        idx = ceil(rand(1)*length(i));
        i = i(idx);
    end

    % Find its nearest neighbour (weight-scaled Mahalanobis distance)
    w = wmin*g.w ./ (wmin + g.w);
    v = g.x - repcol(g.x(:,i), size(g.x,2));
    M = distance_mahalanobis(v, g.P) .* w;
    M(i) = NaN;                     % exclude the selected kernel itself
    [Mmin, j] = min(M);

    % Merge component j into component i (weighted mean conserves the mixture mean)
    g.x(:,i) = (g.w(i)*g.x(:,i) + g.w(j)*g.x(:,j)) / (g.w(i)+g.w(j));
    g.w(i) = g.w(i) + g.w(j);

    % Remove element j
    keep = true(size(g.w));
    keep(j) = false;
    g.x = g.x(:,keep);
    g.w = g.w(keep);
end
% Adjust the kernel covariance to conserve the original second moment
if nargin == 3 && adjustP ~= 0
    [x2,P2,w2] = kernel_to_gaussian(g);
    g.P = g.P + P1 - P2;
    chol(g.P);      % errors if the adjusted covariance is not positive definite
    % Alternative: apply the adjustment only if P1-P2 is positive definite
end
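
% ----------------------------------------------------------------------------
% NOTE: the local functions below are not part of the original file. They are
% minimal sketches of the helpers this code assumes (kernel_to_gaussian,
% repcol, distance_mahalanobis), included only so the listing is
% self-contained; the real toolbox implementations may differ.
%
% Example call (hypothetical kernel set): g.x is a DxK matrix of kernel means,
% g.w a 1xK weight vector, g.P the shared DxD kernel covariance. Reduce to at
% most 50 components and rescale the covariance to conserve the second moment:
%   g = kernel_reduce_merge(g, 50, 1);
% ----------------------------------------------------------------------------

function [x, P, w] = kernel_to_gaussian(g)
% Moment-match the kernel mixture with a single Gaussian (sketch). The third
% output is assumed to be the total weight.
w  = sum(g.w);
wn = g.w(:) / w;                    % normalised weights (column)
x  = g.x * wn;                      % mixture mean
d  = g.x - repcol(x, size(g.x,2));  % deviations of the kernel means
P  = g.P + d * diag(wn) * d';       % shared kernel covariance + spread of means

function xr = repcol(x, n)
% Replicate the column vector x into a matrix with n identical columns.
xr = repmat(x, 1, n);

function d2 = distance_mahalanobis(v, P)
% Squared Mahalanobis distance of each column of v under covariance P.
S  = P \ v;
d2 = sum(v .* S, 1);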