% pls_analysis.m
boot_samples,new_num_boot,1);
reorder(:,p) = reorderp(:,p);
for c=1:k % recalc stdmat
stdmat(c,:) = std(stacked_behavdata(reorder((1+ ...
(n*(c-1))+span):(n*c+span), p), :));
end % scanloop
end % while
end % if ~is_boot_samples
% now we can use this valid reorder matrix to generate
% behav_p & data_p, and then to calculate the datamat
% correlations (datamatcorrs)
%
behav_p = stacked_behavdata(reorder(:,p),:);
data_p = stacked_datamat(reorder(:,p),:);
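% rri_corr_maps correlates each behavior measure in behav_p with each
% column of data_p within every one of the k conditions (n subjects per
% condition), producing the stacked correlation maps that feed the svd.
%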
if num_groups == 1
data = rri_corr_maps(behav_p, data_p, n, k);
else
data = rri_corr_maps(behav_p(1+span:n*k+span,:), ...
data_p(1+span:n*k+span,:), n, k);
end
case 4
if num_groups == 1
% meanmat = rri_task_mean(data_p,n);
% Tdata = meanmat-(ones(k,1)*mean(meanmat));
Tdata = rri_task_mean(data_p,n)-ones(k,1)*mean(data_p); % pet, erp & multiblock style
else
% meanmat = rri_task_mean(data_p(1+span:n*k+span,:),n);
% Tdata = meanmat-(ones(k,1)*mean(meanmat));
Tdata = rri_task_mean(data_p(1+span:n*k+span,:),n)-ones(k,1)*mean(data_p(1+span:n*k+span,:)); % pet, erp & multiblock style
end
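% Tdata is the k-by-column matrix of condition means for this group,
% centred by subtracting the group grand mean from every row
% ("pet, erp & multiblock style" mean-centring).
%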
if ~is_boot_samples
% the code below is mainly trying to find a valid
% reorder matrix
% init the badbehav array to 0; it records the behav data
% made bad by a bad re-order. This variable is for display only.
%
badbehav = zeros(k, size(stacked_behavdata,2));
% Check for an upcoming NaN and re-sample if necessary.
% This only happens in behavior analysis, because the
% 'xcor' inside 'corr_maps' divides by a 'stdev'; if that
% standard deviation is 0, it causes a division-by-zero
% problem.
% Since this happens very rarely, speed is not affected much.
%
% For behavpls_boot, we also need to account for multiple
% scans and behavs
%
for c=1:k % traverse all conditions in this group
stdmat(c,:) = std(stacked_behavdata(reorder((1+ ...
(n*(c-1))+span):(n*c+span), p), :));
end % scanloop
% now, check to see if any are zero
%
while sum(stdmat(:)==0)>0
countnewtotal = countnewtotal + 1;
% keep track of scan & behav that force a resample
%
badbehav(find(stdmat(:)==0)) = ...
badbehav(find(stdmat(:)==0)) + 1;
badbeh{g,countnewtotal} = badbehav; % save instead of disp
% num_boot is just an arbitrary cap used to prevent an
% infinite loop
%
if countnewtotal > num_boot
disp('Please check behavior data');
breakon=1;
break;
end
reorderp= rri_boot_order(num_subj_lst,k,1, ...
0,min_subj_per_group,is_boot_samples, ...
boot_samples,new_num_boot,1);
reorder(:,p) = reorderp(:,p);
for c=1:k % recalc stdmat
stdmat(c,:) = std(stacked_behavdata(reorder((1+ ...
(n*(c-1))+span):(n*c+span), p), :));
end % scanloop
end % while
end % if ~is_boot_samples
% now we can use this valid reorder matrix to generate
% behav_p & data_p, and then to calculate the datamat
% correlations (datamatcorrs)
%
behav_p = stacked_behavdata(reorder(:,p),:);
data_p = stacked_datamat(reorder(:,p),:);
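% For the behavior block of the multiblock analysis, only the conditions
% selected in bscan appear to be correlated (rri_corr_maps_notall),
% whereas the task block above uses all k conditions.
%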
if num_groups == 1
Bdata = rri_corr_maps_notall(behav_p, data_p, n, bscan);
else
Bdata = rri_corr_maps_notall(behav_p(1+span:n*k+span,:), ...
data_p(1+span:n*k+span,:), n, bscan);
end
TBdata = [Tdata; Bdata];
data = [normalize(Tdata,2); normalize(Bdata,2)];
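% Multiblock stacking: the task-mean block (Tdata) and the behavior
% correlation block (Bdata) are each scaled with the toolbox normalize
% helper before being stacked, so both blocks enter the svd on a
% comparable scale.
%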
end % switch
if isempty(single_cond_lst) | g==1
stacked_data_p = [stacked_data_p; data];
if method == 1 | method == 2
stacked_smeanmat_p = [stacked_smeanmat_p; smeanmat_p];
end
end
end % for num_groups
if method == 2 | method == 5
crossblock = normalize(stacked_designdata)'*stacked_data_p;
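% For these non-rotated methods the bootstrap saliences come straight from
% projecting the normalized design matrix onto the resampled data block;
% no svd or Procrustes rotation is needed.
%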
if method == 5
[brainsctmp, behavsctmp, bcorr] = ...
rri_get_behavscores(data_p, behav_p, ...
normalize(crossblock'), ...
stacked_designdata, k, num_subj_lst);
distrib(:, :, p+1) = bcorr;
elseif method == 2
tmp_usc2 = stacked_smeanmat_p * normalize(crossblock');
tmp_orig_usc = [];
first = 1;
last = 0;
for i = 1:length(num_subj_lst)
last = last + k*num_subj_lst(i);
tmp_orig_usc = [tmp_orig_usc; rri_task_mean(tmp_usc2(first:last,:), num_subj_lst(i))];
first = last + 1;
end
distrib(:, :, p+1) = tmp_orig_usc;
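% distrib collects, for each bootstrap sample, the condition-mean brain
% scores (usc) per group; slice 1 presumably holds the original-sample
% values, and the percentile CIs below are read from slices 2:num_boot+1.
%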
end
u_sq = u_sq + (crossblock.^2)';
u_sum = u_sum + crossblock';
else % method == 1, 3 or 4
% Singular Value Decomposition
%
[r c] = size(stacked_data_p);
if r <= c
[pu, sboot, pv] = svd(stacked_data_p',0);
else
[pv, sboot, pu] = svd(stacked_data_p,0);
end
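% The transpose keeps the larger (e.g. voxel) dimension as rows, so the
% economy-size svd never forms a huge column-by-column matrix; in either
% branch pu holds the column (brain) saliences of stacked_data_p and pv
% the row (task/behavior) saliences.
%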
% rotate pv to align with the original v
%
rotatemat = rri_bootprocrust(v, pv);
% rescale the vectors
%
pu = pu * sboot * rotatemat;
pv = pv * sboot * rotatemat;
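% The Procrustes rotation (together with the rescaling by the bootstrap
% singular values) corrects for arbitrary sign flips and axis rotations of
% the latent variables across bootstrap samples, keeping pu/pv comparable
% with the original u/v.
%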
if method == 3 | method == 4
[brainsctmp, behavsctmp, bcorr] = ...
rri_get_behavscores(data_p, behav_p, ...
normalize(pu), normalize(pv), k, num_subj_lst);
distrib(:, :, p+1) = bcorr;
% elseif method == 4
% distrib(:, :, p+1) = pv;
elseif method == 1
tmp_usc2 = stacked_smeanmat_p * normalize(pu);
tmp_orig_usc = [];
first = 1;
last = 0;
for i = 1:length(num_subj_lst)
last = last + k*num_subj_lst(i);
tmp_orig_usc = [tmp_orig_usc; rri_task_mean(tmp_usc2(first:last,:), num_subj_lst(i))];
first = last + 1;
end
distrib(:, :, p+1) = tmp_orig_usc;
end
u_sum = u_sum + pu;
u_sq = u_sq + pu.^2;
v_sum = v_sum + pv;
v_sq = v_sq + pv.^2;
end
if pcntacc > 70
fprintf('\n');
pcntacc = 0;
end
end % for num_boot
fprintf('\n');
result.boot_result.num_boot = num_boot;
result.boot_result.num_LowVariability_behav_boots = num_LowVariability_behav_boots;
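% u_sum/u_sq and v_sum/v_sq are running sums of the bootstrap saliences
% and of their squares, so the standard error below is the usual
%   SE = sqrt( (sum(x.^2) - (sum(x)).^2/N) / N )
% computed without storing every bootstrap result.  The divisors differ by
% method because for methods 2-5 the original (un-resampled) saliences are
% presumably already included in the running sums (hence num_boot+1).
%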
switch method
case 1
u_sum2 = (u_sum.^2) / (num_boot);
v_sum2 = (v_sum.^2) / (num_boot);
u_se = sqrt(abs(u_sq-u_sum2) / (num_boot-1));
v_se = sqrt(abs(v_sq-v_sum2) / (num_boot-1));
case 2
% calculate standard error
%
u_sum2 = (u_sum.^2) / (num_boot+1);
v_sum2 = (v_sum.^2) / (num_boot+1);
u_se = sqrt(abs(u_sq-u_sum2) / num_boot);
v_se = sqrt(abs(v_sq-v_sum2) / num_boot);
case {3, 4, 5}
u_sum2 = (u_sum.^2) / (num_boot+1);
v_sum2 = (v_sum.^2) / (num_boot+1);
% compute standard errors - standard deviation of bootstrap sample
% since original sample is part of bootstrap, divide by number of
% bootstrap iterations rather than number of bootstraps minus 1
%
% abs() below keeps rounding error in the subtraction from producing
% negative (and hence complex) values; the disabled branch used ceil()
% for the same purpose
%
if 0 % method == 3
u_se = sqrt((ceil(u_sq)-ceil(u_sum2))/(num_boot));
v_se = sqrt((ceil(v_sq)-ceil(v_sum2))/(num_boot));
else
u_se = sqrt(abs(u_sq-u_sum2)/(num_boot));
v_se = sqrt(abs(v_sq-v_sum2)/(num_boot));
end
end % switch method
% now compare the original unstandardized saliences
% with the bootstrap saliences, using the requested
% confidence interval (Clim)
%
ul=Clim;
ll=100-Clim;
% e.g. Clim = 95 gives alpha = 0.05, i.e. 0.025 in each tail (two-tailed)
%
ClimNi = 0.5*(1-(Clim*0.01));
% loop to calculate upper and lower CI limits
%
for r=1:r1 % [r1 c1] is size of lvcorrs
for c=1:c1
ulcorr(r,c)=percentile(distrib(r,c,2:num_boot+1),ul);
llcorr(r,c)=percentile(distrib(r,c,2:num_boot+1),ll);
if method == 3 | method == 4 | method == 5
prop(r,c)=length( find(distrib(r,c,2:num_boot+1) ...
<= orig_corr(r,c)) ) / num_boot;
elseif method == 1 | method == 2
prop(r,c)=length( find(distrib(r,c,2:num_boot+1) ...
<= orig_usc(r,c)) ) / num_boot;
end
if prop(r,c)==1 | prop(r,c)==0 % cumulative_gaussian_inv is undefined at 0 and 1
llcorr_adj(r,c)=NaN;
ulcorr_adj(r,c)=NaN;
else
% adjusted confidence intervals, in case the bootstrap
% distribution is extremely skewed: use the normal
% inverse of prop as the starting point for the adjustment
%
ni=cumulative_gaussian_inv(prop(r,c));
% 1st part of recalculating the upper conf limit;
% this evaluates to +1.96 for a 95% CI
%
uli=(2*ni) + cumulative_gaussian_inv(1-ClimNi);
% 1st part of recalculating the lower conf limit,
% e.g. -1.96 for a 95% CI
%
lli=(2*ni) + cumulative_gaussian_inv(ClimNi);
ncdf_lli=cumulative_gaussian(lli)*100; % percentile for lower bounds
ncdf_uli=cumulative_gaussian(uli)*100; % percentile for upper bounds
% new percentile
%
llcorr_adj(r,c)=(percentile(distrib(r,c,2:num_boot+1), ncdf_lli));
ulcorr_adj(r,c)=(percentile(distrib(r,c,2:num_boot+1), ncdf_uli));
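% e.g. with Clim = 95 and an unbiased bootstrap distribution (prop = 0.5):
% ni = 0, lli = -1.96, uli = +1.96, so ncdf_lli = 2.5 and ncdf_uli = 97.5,
% and the adjusted interval reduces to the plain 2.5th/97.5th percentile
% interval.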
end % if
end % for c
end % for r
if method == 3 | method == 4 | method == 5
result.boot_result.orig_corr = orig_corr;
result.boot_result.ulcorr = ulcorr;
result.boot_result.llcorr = llcorr;
result.boot_result.ulcorr_adj = ulcorr_adj;
result.boot_result.llcorr_adj = llcorr_adj;
result.boot_result.badbeh = badbeh;
result.boot_result.countnewtotal = countnewtotal;
elseif method == 1 | method == 2
result.boot_result.orig_usc = orig_usc;
result.boot_result.ulusc = ulcorr;
result.boot_result.llusc = llcorr;
result.boot_result.ulusc_adj = ulcorr_adj;
result.boot_result.llusc_adj = llcorr_adj;
end
result.boot_result.prop = prop;
result.boot_result.distrib = distrib;
result.boot_result.bootsamp = reorder;
% result.boot_result.u_sum2 = u_sum2;
% result.boot_result.v_sum2 = v_sum2;
if method == 2 | method == 5
% result.boot_result.u = u;
% result.boot_result.v = v;
else
% result.boot_result.orig_u = original_u;
% result.boot_result.orig_v = original_v;
end
% check for zero standard errors - replace with ones
%
test_zeros=find(u_se<=0);
if ~isempty(test_zeros)
u_se(test_zeros)=1;
end
test_zeros_v=find(v_se<=0);
if ~isempty(test_zeros_v)
v_se(test_zeros_v)=1;
end
if method == 2 | method == 5
compare_u = u ./ u_se;
compare_v = v ./ v_se;
else
compare_u = original_u ./ u_se;
compare_v = original_v ./ v_se;
end
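% compare_u / compare_v are the bootstrap ratios (salience divided by its
% bootstrap standard error); under approximate normality they are commonly
% read like z-scores, e.g. |ratio| > ~2 as reliable at roughly the 95% level.
%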
% for zero standard errors - replace bootstrap ratios with zero
% since the ratio makes no sense anyway
%
if ~isempty(test_zeros)
compare_u(test_zeros)=0;
end
if ~isempty(test_zeros_v)
compare_v(test_zeros_v)=0;
end
result.boot_result.compare_u = compare_u;
result.boot_result.compare_v = compare_v;
result.boot_result.u_se = u_se;
result.boot_result.v_se = v_se;
result.boot_result.zero_u_se = test_zeros;
result.boot_result.zero_v_se = test_zeros_v;
end % if boot
%--------------------------------------------------------------------------