/*----------------------------------------------------------------------
  cluster2.c -- cluster set parameter/update functions (excerpt)
  (The original web-viewer banner lines were not part of the source.)
----------------------------------------------------------------------*/
/* NOTE(review): the lines below are the tail of cls_method(), whose  */
/* head (signature and declarations of i, t, c) lies above this      */
/* excerpt and is not visible here.                                  */
  assert(clset                  /* check the function arguments */
  &&    ((method & CLS_METHOD)   >= CLS_GRADIENT)
  &&    ((method & CLS_METHOD)   <= CLS_BACKPROP)
  &&    ((method & CLS_MODIFIER) >= CLS_NONE)
  &&    ((method & CLS_MODIFIER) <= CLS_QUICK));
  clset->method = method;       /* note the parameter update method */
  method &= CLS_MODIFIER;       /* get the update modifier */
  if (method > CLS_EXPAND) {    /* if one of the higher methods */
    t = (method == CLS_ADAPTIVE) ? 1 : 0;
    for (c = clset->cls +(i = clset->clscnt); --i >= 0; ) {
      --c;                      /* traverse the clusters */
      mat_init(c->chv, MAT_FULL|MAT_VECTOR|MAT_VALUE, &t);
      mat_init(c->grv, MAT_FULL|MAT_VECTOR|MAT_ZERO, NULL);
    }                           /* initialize the change matrix */
  }                             /* and the buffer matrix */
}  /* cls_method() */

/*--------------------------------------------------------------------*/

void cls_regular (CLSET *clset, const double *params)
{                               /* --- set regularization parameters */
  /* Copies the five regularization parameters into the cluster set. */
  /* A NULL 'params' leaves the current parameters unchanged.        */
  /* Note: params must point to at least 5 doubles — not checked.    */
  assert(clset);                /* check the function arguments */
  if (!params) return;          /* if regularization parameters */
  clset->regps[0] = params[0];  /* are given, copy them */
  clset->regps[1] = params[1];  /* to the cluster set */
  clset->regps[2] = params[2];
  clset->regps[3] = params[3];
  clset->regps[4] = params[4];
}  /* cls_regular() */

/*--------------------------------------------------------------------*/

void cls_lrate (CLSET *clset, const double *rates, const double *decays)
{                               /* --- set learning rate parameters */
  /* Sets three learning rates and/or three decay parameters.       */
  /* Either pointer may be NULL to leave that group unchanged; each */
  /* non-NULL pointer must reference at least 3 doubles.            */
  assert(clset);                /* check the function arguments */
  if (rates) {                  /* if learning rates are given */
    clset->rates[0] = rates[0];
    clset->rates[1] = rates[1];
    clset->rates[2] = rates[2];
  }                             /* copy them to the cluster set */
  if (decays) {                 /* if decay parameters are given */
    clset->decay[0] = decays[0];
    clset->decay[1] = decays[1];
    clset->decay[2] = decays[2];
  }                             /* copy them to the cluster set */
}  /* cls_lrate() */

/*--------------------------------------------------------------------*/

void cls_factors (CLSET *clset, double growth, double shrink)
{                               /* --- set the update factors */
  /* Sets growth/shrink factors, clamping invalid values to the     */
  /* neutral factor 1: growth must be > 1, shrink must lie in (0,1).*/
  /* maxfac = growth/(growth+1) is derived from the growth factor.  */
  assert(clset);                /* check the function argument */
  clset->growth = (growth > 1) ? growth : 1;
  clset->shrink = ((shrink > 0) && (shrink < 1)) ? shrink : 1;
  clset->maxfac = clset->growth /(clset->growth +1);
}  /* cls_factors() */

/*--------------------------------------------------------------------*/

void cls_limits (CLSET *clset, double minchg, double maxchg)
{                               /* --- set the update limits */
  /* Sets the minimum and maximum change limits for an update step; */
  /* non-positive arguments select the defaults 0 and 2, resp.      */
  assert(clset);                /* check the function argument */
  clset->minchg = (minchg > 0) ? minchg : 0;
  clset->maxchg = (maxchg > 0) ? maxchg : 2;
}  /* cls_limits() */

/*--------------------------------------------------------------------*/

int cls_aggr (CLSET *clset, const double *vec, double weight)
{                               /* --- aggregate a data vector */
  /* Computes the membership degrees of 'vec' (via cls_exec) and    */
  /* accumulates the vector, weighted by 'weight' and the degrees   */
  /* of membership, into the per-cluster sample matrices c->smp.    */
  /* Returns the index of the best (closest/winning) cluster.       */
  int      i, n;                /* cluster index, loop variable */
  int      clrn;                /* flag for competitive learning */
  CLUSTER  *c;                  /* to traverse the clusters */
  MATADDFN *add;                /* aggregation function */
  double   x;                   /* adaptation exponent */
  double   msd;                 /* membership degree */

  assert(clset);                /* check the function arguments */
  i = cls_exec(clset,vec,NULL); /* compute degrees of membership */
  vec = clset->vec;             /* use the (preprocessed) vector
                                   stored in the cluster set */
  /* get the aggregation function depending on the cluster type */
  if      (clset->type & CLS_COVARS)          add = mat_addmpx;
  else if (clset->type & (CLS_VARS|CLS_SIZE)) add = mat_addsvx;
  else                                        add = mat_addvec;
  /* --- alternating estimation and competitive learning --- */
  /* Aggregate the data vectors for the update step. */
  clrn = ((clset->method & CLS_METHOD) == CLS_COMPLRN);
  if (((clset->method & CLS_METHOD) == CLS_ALTOPT) || clrn) {
    x = clset->msexp;           /* get the adaptation exponent */
    if (clset->norm == CLS_HARD) {  /* if hard/crisp clustering */
      c = clset->cls +i;        /* get the cluster to assign to */
      if (clrn) vec = c->dif;   /* use diff. vector for comp. learn. */
      add(c->smp, vec, weight); /* sum the weighted data vector */
    }
    else if (x >= 1) {          /* if fuzzy/probabilistic clustering */
      for (c = clset->cls +(n = clset->clscnt); --n >= 0; ) {
        msd = (--c)->msd;       /* traverse the clusters */
        if (msd <= 0) continue; /* skip cluster with zero membership */
        if      (x == 2) msd *= msd;  /* fast path for exponent 2 */
        else if (x != 1) msd = pow(msd, x);
        msd *= weight;          /* compute the data point weight */
        if (clrn) vec = c->dif; /* use diff. vector for comp. learn. */
        add(c->smp, vec, msd);  /* sum the data vector weighted */
      }                         /* with the degree of membership */
    }
    else {                      /* if alt. fuzzifier clustering */
      x = (1-x) /(1+x);         /* compute transformation parameter */
      for (c = clset->cls +(n = clset->clscnt); --n >= 0; ) {
        msd = (--c)->msd;       /* traverse the clusters */
        msd = (x *(msd-1) +1) *msd *weight;
        /* (!(msd > 0)) also catches NaN, unlike (msd <= 0) */
        if (!(msd > 0)) continue;   /* skip zero membership degrees */
        if (clrn) vec = c->dif; /* use diff. vector for comp. learn. */
        add(c->smp, vec, msd);  /* compute the data point weight and */
      }                         /* sum the data vector weighted */
    }                           /* with the degree of membership */
  }
  /* --- gradient based update --- */
  else {                        /* if (method == CLS_GRADIENT) */
    /* ... to be done ... */
  }
  return i;                     /* return index of best cluster */
}  /* cls_aggr() */

/*--------------------------------------------------------------------*/

void cls_bkprop (CLSET *clset, const double *errs)
{                               /* --- backpropagate errors */
  /* Accumulates error gradients for all clusters: 'errs' holds one */
  /* error value per cluster (errs[i] for cluster i). The gradient  */
  /* factor t is the negated error times the derivative of the      */
  /* radial function (clset->drvfn) at the squared distance c->d2.  */
  int     i;                    /* loop variable */
  CLUSTER *c;                   /* to traverse the clusters */
  double  *d, *b, t;            /* buffers for computations */

  assert(clset && errs);        /* check the function arguments */
  b = clset->buf;               /* get buffer for computations */
  for (c = clset->cls +(i = clset->clscnt); --i >= 0; ) {
    d = (--c)->dif;             /* traverse the clusters */
    t = -errs[i] *clset->drvfn(c->d2, clset->rfnps, c->msd);
    if (clset->type & CLS_COVARS) {
      mat_addmp(c->smp, d, t);  /* if adaptable covariances, */
      mat_mulmv(b, c->inv, d);  /* compute derivative terms */
      d = b;                    /* and get the buffered result */
    }
    else if (clset->type & CLS_VARS) {
      mat_addsv(c->smp, d, t);  /* if adaptable variances, */
      mat_muldv(b, c->inv, d);  /* compute derivative terms */
      d = b;                    /* and get the buffered result */
    }
    else if (clset->type & CLS_SIZE) {  /* if adaptable size */
      mat_inc(c->smp, 0, 0, t *c->d2);
      t *= mat_get(c->inv,0,0); /* sum the variance gradients, */
    }                           /* include the inverse variance */
    vec_add(c->sum, clset->incnt, c->sum, -2 *t, d);
  }                             /* sum the center gradients */
}  /* cls_bkprop() */

/*--------------------------------------------------------------------*/

double cls_update (CLSET *clset, int conly)
{                               /* --- update a set of clusters */
  /* Carries out one full parameter update step: dispatches to the  */
  /* method-specific update, applies the neural-network inspired    */
  /* modifier and the regularizations, rescales the clusters, and   */
  /* returns the maximal change of a cluster center coordinate      */
  /* (or of the variances if centers are fixed at the origin).      */
  /* If 'conly' is nonzero, only the centers are updated (cluster   */
  /* type is temporarily forced to CLS_CENTER and restored after).  */
  int     i, m;                 /* loop variable, buffers */
  int     type;                 /* buffer for the cluster type */
  CLUSTER *c;                   /* to traverse the clusters */
  double  max = 0, t;           /* maximal change of a center coord. */

  assert(clset);                /* check the function argument */
  m    = clset->method & CLS_METHOD;
  type = clset->type;           /* get update method and cluster type */
  if (conly) clset->type = CLS_CENTER;
  /* --- compute new cluster parameters --- */
  clset->bkprop = (m == CLS_BACKPROP);
  switch (m) {                  /* according to the update method */
    case CLS_ALTOPT  : _altopt  (clset); break;
    case CLS_COMPLRN : _complrn (clset); break;
    case CLS_BACKPROP: _backprop(clset); break;
    default          : _gradient(clset); break;
  }                             /* call the approp. update function */
  /* After these calls c->cov contains the new (unscaled) covariance */
  /* matrix, while c->smp is the old covariance matrix. Analogously, */
  /* c->ctr is the new cluster center, while c->sum is the old.      */
  i = ((clset->fwexp > 0) && (clset->fwexp != 1))
   || (clset->regps[3] < -1) || (clset->regps[3] > 0);
  if (clset->method & CLS_MODIFIER)
    _neural(clset, i);          /* neural network inspired update */
  /* The neural network inspired update is carried out before any */
  /* regularization, because its execution may destroy effects of */
  /* a regularization, but not all methods can be executed again. */
  /* (That is, some regularization methods are not idempotent.)   */
  /* --- regularization and scaling --- */
  cls_vars(clset, i);           /* compute the isotropic variances */
  if ((clset->type & (CLS_VARS|CLS_COVARS)) && i
  &&  ((clset->fwexp <= 0) || (clset->fwexp >= 1)))
    _regshape(clset);           /* regularize shape (axes lengths) */
  if ((clset->type & CLS_SIZE) && !(clset->type & CLS_JOINT)
  &&  ((clset->fwexp <= 0) || (clset->fwexp == 1)))
    _regsize(clset);            /* regularize the cluster sizes */
  /* For a shape and size regularization only the isotropic variances */
  /* needs to be known, hence gauging and resizing can be done later. */
  cls_gauge(clset);             /* measure the cluster sizes */
  cls_resize(clset, 1);         /* and resize the clusters */
  if ((clset->type & CLS_WEIGHT) && !(clset->type & CLS_JOINT)
  &&  ((clset->regps[4] < -1) || (clset->regps[4] > 0)))
    _regwgts(clset);            /* regularize the cluster weights */
  if (clset->method & (CLS_ORIGIN|CLS_UNIT))
    _regctrs(clset);            /* regularize the cluster centers */
  /* --- determine maximal change --- */
  c = clset->cls +(i = clset->clscnt);
  if (clset->method & CLS_ORIGIN) {  /* if centers fixed at origin */
    while (--i >= 0) {
      --c;                      /* traverse the clusters */
      t = mat_diffx(c->cov, c->smp, MAT_DIAG);
      if (t > max) max = t;     /* compare the variances and */
    }                           /* determine the maximal change */
  }
  else {                        /* if centers not fixed at origin */
    while (--i >= 0) {
      --c;                      /* traverse the clusters */
      t = vec_diff(c->ctr, c->sum, clset->incnt);
      if (t > max) max = t;     /* compare the cluster centers and */
    }                           /* determine the maximal change */
  }                             /* of a center coordinate */
  /* --- finalize update --- */
  if (conly) clset->type = type;/* restore the cluster type */
  cls_ftwgts(clset);            /* compute the feature weights */
  cls_invert(clset);            /* and the inverse matrices */
  if ((clset->type & CLS_JOINT)
  &&  (clset->type & (CLS_COVARS|CLS_VARS|CLS_SIZE)))
    cls_distrib(clset);         /* distribute joint (co)variances */
  cls_reinit(clset);            /* reinit. aggregation matrices */
  clset->setup = 1;             /* cluster set has been set up */
  return max;                   /* return the maximal change */
}  /* cls_update() */

/*--------------------------------------------------------------------*/

void cls_sort (CLSET *clset)
{                               /* --- sort clusters by their centers */
  /* Sorts the cluster array in place by center vector (vec_cmp     */
  /* order) using a shellsort: insertion sort over a decreasing     */
  /* sequence of increments h, with h updated as (h*5-1)/11.        */
  int     i, s, d, n;           /* loop variables, indices */
  int     h;                    /* increment for index */
  CLUSTER x;                    /* exchange buffer */

  assert(clset);                /* check the function argument */
  n = clset->clscnt;            /* get the number of clusters */
  for (h = n; h > 1; ) {        /* traverse the increments */
    h = (h < 5) ? 1 : ((h *5 -1) /11);
    for (i = h-1; ++i < n; ) {  /* traverse the clusters */
      x = clset->cls[d = i];    /* note the cluster to insert and do */
      do {                      /* insertion sort with increment h */
        s = d-h;                /* to find the insertion position */
        if (vec_cmp(clset->cls[s].ctr, x.ctr, clset->incnt) <= 0)
          break;                /* shift up all clusters that must */
        clset->cls[d] = clset->cls[s];  /* follow the current one */
      } while ((d = s) >= h);   /* while another shift is possible */
      clset->cls[d] = x;        /* store the cluster to insert */
    }                           /* at the insertion position */
  }
}  /* cls_sort() */
/* (end of source excerpt -- the trailing lines here were code-viewer
   UI text (keyboard-shortcut help), not part of cluster2.c) */