⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 spmbdk.m

📁 有监督自组织映射-偏最小二乘算法(A supervised self-organising map–partial least squares algorithm),可以用于多变量数据的回归分析
💻 M
字号:
function [MLKP] = SetParamsModel
%SETPARAMSMODEL  Set parameters for 'MakeModel'.
%
%   [MLKP] = SetParamsModel returns a struct MLKP holding every
%   configuration option consumed by the MakeModel pipeline
%   (problem type, training method, data handling, scaling,
%   SOM learning rates/radii and SOMPLS/KPLS options).
%
% Problem types:
%   Cls: classification problems (supervised)
%   Reg: regression problems (supervised)
%   Map: mapping problems (unsupervised)
MLKP.ProblemType='Cls';
% define training method: 'CPN', 'XYF', 'BDK', 'SKN', 'LVQ', 'AKH', 'KOH'
MLKP.TrainingMethod='BDK';
% use strcmpi, not '==': element-wise string comparison errors out when
% the two strings differ in length
if strcmpi(MLKP.TrainingMethod, 'KOH')
    MLKP.ProblemType='Map';
end
% determines how the original data-file is treated:
% Orig: take Xtr, Ytr, Xtest and Ytest as defined in the data-file
%       (for comparison with experiments reported elsewhere)
% Make: constructs Xtr, Ytr, Xtest and Ytest from the master matrices X and Y
%       (for crossvalidation purposes)
MLKP.DataSet='Orig';
% defines whether training and test set are constructed (for 'Cls' class-wise):
% Random: divide data randomly (statistically sound)
% Disdis: divide data based on distribution of the distances between the objects in X
% KStone: divide data based on the Kennard-Stone algorithm
%
% !!! this parameter is ignored if MLKP.DataSet is set to 'Orig'
MLKP.CreateDataSet='Random';
% defines filename of the original data set
MLKP.DataFile='Data/iris';
% defines filename containing XMap and YMap as starting point for learning process
% if filename is empty, network weights are randomly initialised
%
% !!! for LVQ: if filename is empty, CPN learning is invoked as pre-stage instead
MLKP.MapFile='';
% type of scaling for X and/or Y variables
%
% 'Raw': original data format (no scaling)
% 'Uni': unit length scaling per row
% 'RnR': range scaling (0, 1] per row
% 'RnC': range scaling [0, 1] per column
% 'Mnc': mean centering per column
% 'Aut': auto-scaling per column
% 'SNV': standard normal variate scaling per row
% 'Log': logarithmic scaling
% 'Exp': exponential scaling
MLKP.ScaleTypeX = 'Raw';
MLKP.ScaleTypeY = 'Raw';
% if ScaleBack = 'Y', afterwards the data are transformed back
MLKP.ScaleBack = 'N';
% learning parameters for 'BDK', 'XYF', 'SKN', 'LVQ',
%                         'KOH', 'AKH' and 'CPN' network
%
% number of iterations: 1 iteration (epoch) deals with all objects in the training set
MLKP.MaxIter=100;
% defines whether the model is built with a fixed random initialisation of the
% data sets X and Y (select 'Y') or for modelling with cross-validation (select 'N')
%
% !!! this parameter is ignored if 'MLKP.DataSet' is set to 'Orig'
MLKP.FixedRandomData='N';
% starting point random generator for initialisation of training and test sets
MLKP.RandomInitData=19;
% defines whether the map weights are initialised by a fixed random sequence
MLKP.FixedRandomWeights='Y';
% starting point random generator for initialisation map weights
MLKP.RandomInitWeights=171;
% way of weight initialisation of X and Y map:
%
% Random: random fraction of variable range around mean value
% Select: initial scaled weights given by random selection of objects in training set X
MLKP.InitMode='Random';
% limits of the learning rates for the input and output maps
MLKP.AlphaMinX=0.001;
MLKP.AlphaMaxX=0.1;
MLKP.AlphaMinY=0.001;
MLKP.AlphaMaxY=0.1;
% dimensions Nrow x Ncol for input and output maps
MLKP.Nrow=6;
% default, square maps are constructed
MLKP.Ncol=MLKP.Nrow;
% set maximum and minimum radius of the neighbourhood function
MLKP.RadiusMinX=0;
MLKP.RadiusMaxX=max(1,floor(MLKP.Nrow/2)-1);
MLKP.RadiusMinY=0;
MLKP.RadiusMaxY=max(1,floor(MLKP.Ncol/2)-1);
% distance measures for X and Y:
%
% 'Euc': Euclidean distance
% 'Abs': Sum of absolute values of the differences between elements of two vectors
% 'Tan': Tanimoto distance
% 'Cor': Correlation value
MLKP.DistTypeX='Euc';
MLKP.DistTypeY='Tan';
% regression targets are continuous, so Euclidean distance is forced on Y
% (strcmpi: case-insensitive and safe for strings of unequal length)
if strcmpi(MLKP.ProblemType, 'Reg')
    MLKP.DistTypeY='Euc';
end
% relative weighting of X,Y during training in XYF and MLX training mode
MLKP.DistWeightXYFStart=0.75;
MLKP.DistWeightXYFEnd=0.50;
% invoke a linear or exponential decay of the weights during formation of XMap and YMap
MLKP.WeightDecay='N';
MLKP.WeightDecayMode='Exp';
% cut-off determines where the decay factor becomes equal to 0
MLKP.WeightDecayCutOff=1.0;
% for decay: rescaling factor of weights varies during training from 1-exp(-Fstart) to 1-exp(-Fend)
MLKP.WeightDecayFactorStart=1;
MLKP.WeightDecayFactorEnd=24;
% determines if degree of correlation between object and map during learning
% phase is taken into account (adaptive learning rate)
MLKP.AdaptLearn='Y';
%
%!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
% The following parameters are necessary for SOMPLS and KPLS
%!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
%
% the mode PLS is used in SOMPLS:
%
% SIMPLS: invoke the SIMPLS algorithm including mean centering of the data (fast)
% NIPALS: use the straight NIPALS algorithm for PLS2 (slightly more accurate)
MLKP.PLSMode='NIPALS';
% determines whether all outputs are optimised at once or individually
% All: all output variables in one pass
% Ind: each output variable separately per modeling pass
%
% !!! only applicable for regression problems
MLKP.OptMode='All';
% determines projection procedure for SOMPLS
% Xmap: kernel is constructed based on the unit weights in the XMap
% Kpls: kernel is constructed according to KPLS
MLKP.ProcMode='Xmap';
% determines the maximum number of latent variables taken into account
% during the SOMPLS optimization procedure; if set to 0 the maximum number
% of LVs is determined by the size of the data set
MLKP.MaxLatentVar=64;
% determines whether KStone is performed on the data for SOMPLS / KPLS crossvalidation
MLKP.CrossValDataSelection='N';
%
% add some internal system configuration parameters
% (SetSystemConfiguration is defined elsewhere in the project)
%
[MLKP] = SetSystemConfiguration(MLKP);

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -