; mlinmix_err.pro
      mlinmix_posdef_invert, Tk_inv0
      Tk_inv[*,*,k,i] = Tk_inv0
   endfor
   U_inv0 = U[*,*,i]
   mlinmix_posdef_invert, U_inv0
   U_inv[*,*,i] = U_inv0
endfor
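; NOTE (annotation, not in the original source): MLINMIX_POSDEF_INVERT is
; assumed to invert a positive-definite matrix in place (e.g. via Cholesky
; factorization). The inverses Tk_inv and U_inv are cached here so the
; Gibbs updates below need not re-invert the same matrices each iteration.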
repeat begin
   for i = 0, nchains - 1 do begin ;do Markov chains one at-a-time
      W_inv = W[*,*,i]
      mlinmix_posdef_invert, W_inv
      ;do Gibbs sampler
      if ncens gt 0 then begin
         ;first get new values of the censored y
         for j = 0, ncens - 1 do begin
            next = 0
            repeat ygibbs[cens[j]] = eta[cens[j],i] + $
               sqrt(yvar[cens[j]]) * randomn(seed) $
            until ygibbs[cens[j]] le y[cens[j]]
         endfor
      endif
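      ; NOTE (annotation, not in the original source): the REPEAT loop above
      ; is rejection sampling from a normal truncated above at the detection
      ; limit: draw ygibbs ~ N(eta, yvar) and keep the first draw with
      ; ygibbs le y[cens[j]]. This is simple but slow when the limit sits
      ; far below the mean; an inverse-CDF draw is an alternative there.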
      ;need new values of Xi and Eta for the Gibbs sampler;
      ;first draw Xi|mu,covar,x, one covariate at a time
      for j = 0, np - 1 do begin
         case j of
            0 : inactive = indgen(np - 1) + 1L
            np - 1 : inactive = indgen(np - 1)
            else : inactive = [indgen(j), indgen(np - j - 1) + j + 1]
         endcase
         xstar[*,j] = x[*,j]
         xstar[*,inactive] = x[*,inactive] - xi[*,inactive,i]
         zstar = [[ygibbs - eta[*,i]], [xstar]]
         zmu = total(xyvar_inv[*,*,j+1] * zstar, 2)
         for k = 0, ngauss - 1 do begin ;do one Gaussian at-a-time
            gk = where(Glabel[*,i] eq k, ngk)
            if ngk gt 0 then begin
               mustar[gk,j] = mu[j,k,i]
               for l = 0, np - 2 do mustar[gk,inactive[l]] = $
                  mu[inactive[l],k,i] - xi[gk,inactive[l],i]
               mmu = Tk_inv[*,j,k,i] ## mustar[gk,*]
               etamu = eta[gk,i] - alpha[i] - beta[inactive,i] ## xi[gk,inactive,i]
               xihvar = 1d / (xyvar_inv[gk,j+1,j+1] + Tk_inv[j,j,k,i] + $
                  beta[j,i]^2 / sigsqr[i])
               xihat = xihvar * (zmu[gk] + mmu + beta[j,i] * etamu / sigsqr[i])
               xi[gk,j,i] = xihat + sqrt(xihvar) * randomn(seed, ngk) ;one deviate per group member
            endif
         endfor
      endfor
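      ; NOTE (annotation, not in the original source): xi_j given everything
      ; else is normal, with precisions adding across the three sources of
      ; information (measurement, mixture prior, regression):
      ;
      ;    1/xihvar = 1/sigma_xj^2 + (Tk^-1)_jj + beta_j^2 / sigsqr
      ;    xihat    = xihvar * ( zmu + mmu + beta_j * etamu / sigsqr )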
      ;now draw Eta|Xi,alpha,beta,sigsqr,y
      zstar = [[ygibbs], [x - xi[*,*,i]]]
      zmu = total(xyvar_inv[*,*,0] * zstar, 2)
      ximu = (alpha[i] + beta[*,i] ## xi[*,*,i]) / sigsqr[i]
      etahvar = 1d / (xyvar_inv[*,0,0] + 1d / sigsqr[i])
      etahat = etahvar * (zmu + ximu)
      eta[*,i] = etahat + sqrt(etahvar) * randomn(seed, nx)
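      ; NOTE (annotation, not in the original source): the same conjugate
      ; pattern as the Xi update: Eta | Xi, y is normal with precision
      ; 1/sigma_y^2 + 1/sigsqr, and all nx sources update in one
      ; vectorized draw.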
      ;now draw new class labels
      if ngauss eq 1 then Glabel[*,i] = 0 else begin
         ;get the unnormalized probability that each source came from
         ;Gaussian k, given xi
         for k = 0, ngauss - 1 do begin
            xicent = xi[*,*,i] - mu[*,k,i] ## replicate(1, nx)
            gamma[0,k] = $
               pi[k,i] / ((2d*!pi)^(np/2d) * sqrt(determ(T[*,*,k,i], /double))) * $
               exp(-0.5 * total(xicent * (Tk_inv[*,*,k,i] ## xicent), 2))
         endfor
         norm = total(gamma, 2)
         for j = 0, nx - 1 do begin
            ;normalized probability that the j-th data point is from the
            ;k-th Gaussian, given the observed data
            gamma0 = reform(gamma[j,*]) / norm[j]
            Gjk = multinom(1, gamma0, seed=seed)
            Glabel[j,i] = where(Gjk eq 1)
         endfor
      endelse
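      ; NOTE (annotation, not in the original source): gamma[*,k] is
      ; pi_k * N(xi | mu_k, T_k), whose multivariate normal normalization is
      ; (2*!pi)^(-np/2) * |T_k|^(-1/2), hence the SQRT around DETERM.
      ; MULTINOM is assumed to return a single one-of-K multinomial draw
      ; with cell probabilities gamma0.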
      ;; now draw new values of alpha, beta, and sigsqr
      ;first do alpha,beta|Xi,Eta,sigsqr
      Xmat[*,1:*] = xi[*,*,i]
      hatmat = matrix_multiply(Xmat, Xmat, /atranspose)
      Vcoef = hatmat
      choldc, hatmat, P, /double ;solve by Cholesky decomposition
      coefhat = cholsol( hatmat, P, eta[*,i] ## transpose(Xmat), /double )
      mlinmix_posdef_invert, Vcoef
      Vcoef = Vcoef * sigsqr[i]
      coef = coefhat + mrandomn(seed, Vcoef)
      alpha[i] = coef[0]
      beta[*,i] = coef[1:*]
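      ; NOTE (annotation, not in the original source): this is the standard
      ; conjugate draw for the regression coefficients,
      ;
      ;    (alpha,beta) | Xi, Eta, sigsqr ~ N( (X'X)^-1 X'Eta, sigsqr*(X'X)^-1 ),
      ;
      ; with the mean solved from the Cholesky factors (CHOLDC/CHOLSOL) and
      ; the scatter added by MRANDOMN, assumed to draw one multivariate
      ; normal deviate with covariance Vcoef.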
      ;now draw sigsqr|Xi,Eta,alpha,beta from a
      ;scaled inverse chi-square density
      resid = eta[*,i] - alpha[i] - beta[*,i] ## xi[*,*,i]
      ssqr = total( resid^2 ) / (nx - 2d)
      sigsqr[i] = ssqr * (nx - 2d) / randomchi(seed, nx - 2)
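      ; NOTE (annotation, not in the original source): with nu = nx - 2 this
      ; is sigsqr ~ nu * ssqr / chi2_nu, i.e. a scaled inverse chi-square
      ; draw; RANDOMCHI is assumed to return a chi-square deviate with the
      ; given degrees of freedom.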
      ;; now do the mixture model parameters, psi = (pi,mu,tausqr)
      for k = 0, ngauss - 1 do begin
         gk = where(Glabel[*,i] eq k, ngk)
         nk[k] = ngk
         if ngk gt 0 then begin
            ;get mu|Xi,G,tausqr,mu0,U
            muvar = U_inv[*,*,i] + ngk * Tk_inv[*,*,k,i]
            mlinmix_posdef_invert, muvar
            xibar = total(xi[gk,*,i], 1) / ngk
            muhat = (mu0[*,i] ## U_inv[*,*,i] + $
               ngk * (xibar ## Tk_inv[*,*,k,i])) ## muvar
            mu[*,k,i] = muhat + mrandomn(seed, muvar)
         endif else mu[*,k,i] = mu0[*,i] + mrandomn(seed, U[*,*,i])
         ;get T|Xi,G,mu,W,nut
         nuk = ngk + nut
         if ngk gt 0 then begin
            xicent = xi[gk,*,i] - mu[*,k,i] ## replicate(1d, ngk)
            Smat = W[*,*,i] + xicent ## transpose(xicent)
            Smat_inv = Smat
            mlinmix_posdef_invert, Smat_inv
         endif else begin
            Smat = W[*,*,i]
            Smat_inv = W_inv
         endelse
         Tmat = randomwish(seed, nuk, Smat_inv)
         Tk_inv[*,*,k,i] = Tmat
         mlinmix_posdef_invert, Tmat
         T[*,*,k,i] = Tmat
      endfor
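      ; NOTE (annotation, not in the original source): both updates are
      ; conjugate: mu_k | rest ~ N( muhat, (U^-1 + n_k*Tk^-1)^-1 ), and
      ; Tk^-1 | rest ~ Wishart(n_k + nut, Smat^-1), so Tk itself is an
      ; inverse-Wishart draw; RANDOMWISH is assumed to return a Wishart
      ; matrix with the given degrees of freedom and scale matrix.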
      ;get pi|G
      if ngauss eq 1 then pi[*,i] = 1d else $
         pi[*,i] = randomdir(seed, nk + 1)
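      ; NOTE (annotation, not in the original source): under a uniform
      ; Dirichlet(1,...,1) prior the group fractions update as
      ; pi | G ~ Dirichlet(n_1 + 1, ..., n_K + 1); RANDOMDIR is assumed to
      ; return one Dirichlet draw with those parameters.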
      ;; now, finally, update the prior parameters
      ;first update the mean of the Gaussian centroids
      mu0[*,i] = ngauss eq 1 ? mu[*,0,i] + mrandomn(seed, U[*,*,i]) : $
         total(mu[*,*,i], 2) / ngauss + mrandomn(seed, U[*,*,i] / ngauss)
      ;update the centroid covariance matrix, U
      nu = ngauss + nuu
      mucent = ngauss eq 1 ? transpose(mu[*,0,i] - mu0[*,i]) : $
         transpose(mu[*,*,i]) - mu0[*,i] ## replicate(1d, ngauss)
      Uhat = W[*,*,i] + mucent ## transpose(mucent)
      mlinmix_posdef_invert, Uhat
      Umat = randomwish(seed, nu, Uhat)
      U_inv[*,*,i] = Umat
      mlinmix_posdef_invert, Umat
      U[*,*,i] = Umat
      ;update the common scale matrix, W
      nuw = (ngauss + 2) * np + 1
      What = ngauss eq 1 ? U_inv[*,*,i] + Tk_inv[*,*,0,i] : $
         U_inv[*,*,i] + total(Tk_inv[*,*,*,i], 3)
      mlinmix_posdef_invert, What
      W[*,*,i] = randomwish(seed, nuw, What)
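      ; NOTE (annotation, not in the original source): the hyperparameters
      ; receive the same conjugate treatment: mu0 is normal about the mean
      ; of the centroids, while U^-1 and W are Wishart draws whose scale
      ; matrices accumulate the centroid scatter and the summed Tk^-1,
      ; respectively.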
   endfor
   ;save the Markov chains
   if iter eq 0 then begin
      alphag = alpha
      betag = beta[*]
      sigsqrg = sigsqr
      pig = pi[*]
      mug = mu[*]
      Tg = T[*]
      mu0g = mu0[*]
      Ug = U[*]
      Wg = W[*]
   endif else begin
      alphag = [alphag, alpha]
      betag = [betag, beta[*]]
      sigsqrg = [sigsqrg, sigsqr]
      pig = [pig, pi[*]]
      mug = [mug, mu[*]]
      Tg = [Tg, T[*]]
      mu0g = [mu0g, mu0[*]]
      Ug = [Ug, U[*]]
      Wg = [Wg, W[*]]
   endelse
   iter = iter + 1L
   ;check for convergence
   if iter ge 4 then begin
      Bvar = dblarr(npar) ;between-chain variance
      Wvar = dblarr(npar) ;within-chain variance
      ndraw = n_elements(alphag) / nchains
      psi = dblarr(npar, nchains, ndraw)
      psi[0,*,*] = reform(alphag, nchains, ndraw)
      psi[1:np,*,*] = reform(betag, np, nchains, ndraw)
      psi[np+1,*,*] = alog(reform(sigsqrg, nchains, ndraw))
      psi = psi[*,*,(ndraw+1)/2:*] ;discard the first half as burn-in
      ndraw = ndraw / 2
      ;calculate the between- and within-sequence variances
      for j = 0, npar - 1 do begin
         psibarj = total( psi[j,*,*], 3 ) / ndraw
         psibar = mean(psibarj)
         sjsqr = 0d
         for i = 0, nchains - 1 do $
            sjsqr = sjsqr + total( (psi[j, i, *] - psibarj[i])^2 ) / (ndraw - 1.0)
         Bvar[j] = ndraw / (nchains - 1.0) * total( (psibarj - psibar)^2 )
         Wvar[j] = sjsqr / nchains
      endfor
      varplus = (1.0 - 1d / ndraw) * Wvar + Bvar / ndraw
      Rhat = sqrt( varplus / Wvar ) ;potential scale reduction factor
   endif
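   ; NOTE (annotation, not in the original source): this is the
   ; Gelman-Rubin diagnostic. With B the between-chain and W the
   ; within-chain variance of a parameter over n retained draws,
   ;
   ;    Rhat = sqrt( ( (1 - 1/n)*W + B/n ) / W ),
   ;
   ; which approaches 1 from above as the chains mix; the 1.1 threshold
   ; below is the conventional stopping rule.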
   if iter eq checkiter then begin
      ;checkpoint reached, assess convergence
      if (total( (Rhat le 1.1) ) eq npar and iter ge miniter) or $
         iter ge maxiter then convergence = 1 $
      else begin
         if not silent then begin
            print, 'Iteration: ', iter
            print, 'Rhat Values (ALPHA, BETA, SIGSQR) : '
            print, Rhat
         endif
         checkiter = checkiter + 100L
      endelse
   endif
endrep until convergence
ndraw = n_elements(alphag) / nchains
alphag = reform(alphag, nchains, ndraw)
betag = reform(betag, np, nchains, ndraw)
sigsqrg = reform(sigsqrg, nchains, ndraw)
pig = reform(pig, ngauss, nchains, ndraw)
mug = reform(mug, np, ngauss, nchains, ndraw)
Tg = reform(Tg, np, np, ngauss, nchains, ndraw)
mu0g = reform(mu0g, np, nchains, ndraw)
Ug = reform(Ug, np, np, nchains, ndraw)
Wg = reform(Wg, np, np, nchains, ndraw)
;only keep the second half of the Markov chains
alphag = alphag[*,(ndraw+1)/2:*]
betag = betag[*,*,(ndraw+1)/2:*]
sigsqrg = sigsqrg[*,(ndraw+1)/2:*]
pig = pig[*,*,(ndraw+1)/2:*]
mug = mug[*,*,*,(ndraw+1)/2:*]
Tg = Tg[*,*,*,*,(ndraw+1)/2:*]
mu0g = mu0g[*,*,(ndraw+1)/2:*]
Ug = Ug[*,*,*,(ndraw+1)/2:*]
Wg = Wg[*,*,*,(ndraw+1)/2:*]
if not silent then begin
   print, 'Iteration: ', iter
   print, 'Rhat Values (ALPHA, BETA, SIGSQR) : ', Rhat
endif
;save posterior draws in a structure
ndraw = ndraw / 2
if ngauss gt 1 then $
   post = {alpha:0d, beta:dblarr(np), sigsqr:0d, pi:dblarr(ngauss), mu:dblarr(np,ngauss), $
           T:dblarr(np,np,ngauss), mu0:dblarr(np), U:dblarr(np,np), W:dblarr(np,np), $
           ximean:dblarr(np), xivar:dblarr(np,np), xicorr:dblarr(np,np), corr:dblarr(np), $
           pcorr:dblarr(np)} $
else $
   post = {alpha:0d, beta:dblarr(np), sigsqr:0d, pi:0d, mu:dblarr(np), $
           T:dblarr(np,np), mu0:dblarr(np), U:dblarr(np,np), W:dblarr(np,np), $
           ximean:dblarr(np), xivar:dblarr(np,np), xicorr:dblarr(np,np), corr:dblarr(np), $
           pcorr:dblarr(np)}
post = replicate(post, ndraw * nchains)
post.alpha = alphag[*]
post.beta = reform(betag, np, ndraw * nchains)
post.sigsqr = sigsqrg[*]
if ngauss gt 1 then begin
   post.pi = reform(pig, ngauss, ndraw * nchains)
   post.mu = reform(mug, np, ngauss, ndraw * nchains)
   post.T = reform(Tg, np, np, ngauss, ndraw * nchains)
endif else begin
   post.pi = reform(pig, ndraw * nchains)
   post.mu = reform(mug, np, ndraw * nchains)
   post.T = reform(Tg, np, np, ndraw * nchains)
endelse
post.mu0 = reform(mu0g, np, ndraw * nchains)
post.U = reform(Ug, np, np, ndraw * nchains)
post.W = reform(Wg, np, np, ndraw * nchains)
;get posterior draws for the moments of the distribution
if not silent then print, 'Getting Posterior Draws for Various Moments...'
corrmat = dblarr(np+1,np+1)
for i = 0, ndraw * nchains - 1 do begin
   ;mean of Xi
   post[i].ximean = ngauss gt 1 ? post[i].pi ## post[i].mu : post[i].mu
   ;covariance matrix of Xi
   if ngauss eq 1 then post[i].xivar = post[i].T else begin
      for k = 0, ngauss - 1 do post[i].xivar = post[i].xivar + $
         post[i].pi[k] * (post[i].T[*,*,k] + transpose(post[i].mu[*,k]) ## post[i].mu[*,k])
      post[i].xivar = post[i].xivar - transpose(post[i].ximean) ## post[i].ximean
   endelse
   xivar = post[i].xivar
   ;variance of Eta
   etavar = post[i].beta ## post[i].xivar ## transpose(post[i].beta) + post[i].sigsqr
   ;correlation coefficients between Eta and each Xi
   post[i].corr = post[i].beta ## post[i].xivar / $
      sqrt( etavar[0] * post[i].xivar[diag] )
   ;correlation matrix of the covariates
   post[i].xicorr = xivar * ( transpose(1d / sqrt(xivar[diag])) ## (1d / sqrt(xivar[diag])) )
   ;now get the partial correlations; need the full correlation matrix first
   corrmat[0,0] = 1d
   corrmat[1:*,0] = post[i].corr
   corrmat[0,1:*] = post[i].corr
   corrmat[1:*,1:*] = post[i].xicorr
   mlinmix_posdef_invert, corrmat
   post[i].pcorr = -1d * corrmat[1:*,0] / sqrt(corrmat[0,0] * corrmat[diag2[1:*]])
endfor
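; NOTE (annotation, not in the original source): the partial correlations
; come from the inverse of the full correlation matrix C of (Eta, Xi):
;
;    pcorr_j = -C^-1[j,0] / sqrt( C^-1[0,0] * C^-1[j,j] ),
;
; i.e. the correlation between Eta and Xi_j with the remaining covariates
; held fixed. DIAG and DIAG2 are assumed to be diagonal index arrays for
; the np x np and (np+1) x (np+1) matrices, defined earlier in the full
; routine.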
return
end
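; Example (annotation, not in the original source): a minimal, hedged
; sketch of a call to this routine. The procedure header is not part of
; this excerpt, so the signature and keywords below are assumptions based
; on the variables used above:
;
;    nx = 200
;    seed = 42L
;    xtrue = randomn(seed, nx, 2)                 ;two latent covariates
;    coef = [0.5d, -0.3d]
;    ytrue = 1.0 + reform(coef ## xtrue)
;    xvar = replicate(0.04d, nx, 2)               ;measurement variances
;    yvar = replicate(0.09d, nx)
;    x = xtrue + sqrt(xvar) * randomn(seed, nx, 2)
;    y = ytrue + sqrt(yvar) * randomn(seed, nx)
;    mlinmix_err, x, y, post, xvar=xvar, yvar=yvar, ngauss=2 ;assumed keywords
;    print, median(post.alpha), median(post.sigsqr)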