lmbc_core.c
          }
          else
            mu=(mu>=pDp_eL2)? pDp_eL2 : mu; /* pDp_eL2 is the new pDp_eL2 */
#else
          mu=(mu>=pDp_eL2)? pDp_eL2 : mu; /* pDp_eL2 is the new pDp_eL2 */
#endif

          nu=2;

          for(i=0 ; i<m; ++i) /* update p's estimate */
            p[i]=pDp[i];

          for(i=0; i<n; ++i) /* update e and ||e||_2 */
            e[i]=hx[i];
          p_eL2=pDp_eL2;
          ++nLMsteps;
          gprevtaken=0;
          break;
        }
      }
      else{ /* the augmented linear system could not be solved, increase mu */
        mu*=nu;
        nu2=nu<<1; // 2*nu;
        if(nu2<=nu){ /* nu has wrapped around (overflown). Thanks to Frank Jordan for spotting this case */
          stop=5;
          break;
        }
        nu=nu2;

        for(i=0; i<m; ++i) /* restore diagonal J^T J entries */
          jacTjac[i*m+i]=diag_jacTjac[i];

        continue; /* solve again with increased nu */
      }

      /* if this point is reached, the LM step did not reduce the error;
       * see if it is a descent direction
       */

      /* negate jacTe (i.e. g) & compute g^T * Dp */
      for(i=0, jacTeDp=0.0; i<m; ++i){
        jacTe[i]=-jacTe[i];
        jacTeDp+=jacTe[i]*Dp[i];
      }

      if(jacTeDp<=-rho*pow(Dp_L2, _POW_/CNST(2.0))){
        /* Dp is a descent direction; do a line search along it */
        int mxtake, iretcd;
        LM_REAL stepmx;

        tmp=(LM_REAL)sqrt(p_L2); stepmx=CNST(1e3)*( (tmp>=CNST(1.0))? tmp : CNST(1.0) );

#if 1 /* use Schnabel's backtracking line search; it requires fewer "func" evaluations */
        LNSRCH(m, p, p_eL2, jacTe, Dp, alpha, pDp, &pDp_eL2, func, fstate,
               &mxtake, &iretcd, stepmx, steptl, NULL); /* NOTE: LNSRCH() updates hx */
        if(iretcd!=0) goto gradproj; /* rather inelegant but effective way to handle LNSRCH() failures... */
#else /* use the simpler (but slower!) line search described by Kanzow */
        for(t=tini; t>tmin; t*=beta){
          for(i=0; i<m; ++i){
            pDp[i]=p[i] + t*Dp[i];
            //pDp[i]=__MEDIAN3(lb[i], pDp[i], ub[i]); /* project to feasible set */
          }

          (*func)(pDp, hx, m, n, adata); ++nfev; /* evaluate function at p + t*Dp */
          for(i=0, pDp_eL2=0.0; i<n; ++i){ /* compute ||e(pDp)||_2 */
            hx[i]=tmp=x[i]-hx[i];
            pDp_eL2+=tmp*tmp;
          }
          //if(CNST(0.5)*pDp_eL2<=CNST(0.5)*p_eL2 + t*alpha*jacTeDp) break;
          if(pDp_eL2<=p_eL2 + CNST(2.0)*t*alpha*jacTeDp) break;
        }
#endif
        ++nLSsteps;
        gprevtaken=0;

        /* NOTE: new estimate for p is in pDp, associated error in hx and its norm in pDp_eL2.
         * These values are used below to update their corresponding variables
         */
      }
      else{
gradproj: /* Note that this point can also be reached via a goto when LNSRCH() fails */

        /* jacTe is a descent direction; make a projected gradient step */

        /* if the previous step was along the gradient descent, try to use the t employed in that step */
        /* compute ||g|| */
        for(i=0, tmp=0.0; i<m; ++i)
          tmp+=jacTe[i]*jacTe[i];
        tmp=(LM_REAL)sqrt(tmp);
        tmp=CNST(100.0)/(CNST(1.0)+tmp);
        t0=(tmp<=tini)? tmp : tini; /* guard against poor scaling & large steps; see (3.50) in C.T. Kelley's book */

        for(t=(gprevtaken)? t : t0; t>tming; t*=beta){
          for(i=0; i<m; ++i)
            pDp[i]=p[i] - t*jacTe[i];
          BOXPROJECT(pDp, lb, ub, m); /* project to feasible set */
          for(i=0; i<m; ++i)
            Dp[i]=pDp[i]-p[i];

          (*func)(pDp, hx, m, n, adata); ++nfev; /* evaluate function at p - t*g */
          for(i=0, pDp_eL2=0.0; i<n; ++i){ /* compute ||e(pDp)||_2 */
            hx[i]=tmp=x[i]-hx[i];
            pDp_eL2+=tmp*tmp;
          }
          for(i=0, tmp=0.0; i<m; ++i) /* compute ||g^T * Dp|| */
            tmp+=jacTe[i]*Dp[i];

          if(gprevtaken && pDp_eL2<=p_eL2 + CNST(2.0)*CNST(0.99999)*tmp){ /* starting t too small */
            t=t0;
            gprevtaken=0;
            continue;
          }
          //if(CNST(0.5)*pDp_eL2<=CNST(0.5)*p_eL2 + alpha*tmp) break;
          if(pDp_eL2<=p_eL2 + CNST(2.0)*alpha*tmp) break;
        }

        ++nPGsteps;
        gprevtaken=1;
        /* NOTE: new estimate for p is in pDp, associated error in hx and its norm in pDp_eL2 */
      }

      /* update using computed values */
      for(i=0, Dp_L2=0.0; i<m; ++i){
        tmp=pDp[i]-p[i];
        Dp_L2+=tmp*tmp;
      }
      //Dp_L2=sqrt(Dp_L2);

      if(Dp_L2<=eps2_sq*p_L2){ /* relative change in p is small, stop */
        stop=2;
        break;
      }

      for(i=0 ; i<m; ++i) /* update p's estimate */
        p[i]=pDp[i];

      for(i=0; i<n; ++i) /* update e and ||e||_2 */
        e[i]=hx[i];
      p_eL2=pDp_eL2;
      break;
    } /* inner loop */
  }

  if(k>=itmax) stop=3;

  for(i=0; i<m; ++i) /* restore diagonal J^T J entries */
    jacTjac[i*m+i]=diag_jacTjac[i];

  if(info){
    info[0]=init_p_eL2;
    info[1]=p_eL2;
    info[2]=jacTe_inf;
    info[3]=Dp_L2;
    for(i=0, tmp=LM_REAL_MIN; i<m; ++i)
      if(tmp<jacTjac[i*m+i]) tmp=jacTjac[i*m+i];
    info[4]=mu/tmp;
    info[5]=(LM_REAL)k;
    info[6]=(LM_REAL)stop;
    info[7]=(LM_REAL)nfev;
    info[8]=(LM_REAL)njev;
  }

  /* covariance matrix */
  if(covar){
    LEVMAR_COVAR(jacTjac, covar, p_eL2, m, n);
  }

  if(freework) free(work);

#if 0
printf("%d LM steps, %d line search, %d projected gradient\n", nLMsteps, nLSsteps, nPGsteps);
#endif

  return (stop!=4)? k : -1;
}

/* following struct & LMBC_DIF_XXX functions won't be necessary if a true secant
 * version of LEVMAR_BC_DIF() is implemented...
 */

struct LMBC_DIF_DATA{
  void (*func)(LM_REAL *p, LM_REAL *hx, int m, int n, void *adata);
  LM_REAL *hx, *hxx;
  void *adata;
  LM_REAL delta;
};

void LMBC_DIF_FUNC(LM_REAL *p, LM_REAL *hx, int m, int n, void *data)
{
struct LMBC_DIF_DATA *dta=(struct LMBC_DIF_DATA *)data;

  /* call user-supplied function passing it the user-supplied data */
  (*(dta->func))(p, hx, m, n, dta->adata);
}

void LMBC_DIF_JACF(LM_REAL *p, LM_REAL *jac, int m, int n, void *data)
{
struct LMBC_DIF_DATA *dta=(struct LMBC_DIF_DATA *)data;

  /* evaluate user-supplied function at p */
  (*(dta->func))(p, dta->hx, m, n, dta->adata);

  FDIF_FORW_JAC_APPROX(dta->func, p, dta->hx, dta->hxx, dta->delta, jac, m, n, dta->adata);
}


/* No jacobian version of the LEVMAR_BC_DER() function above: the jacobian is approximated with
 * the aid of finite differences (forward or central, see the comment for the opts argument)
 * Ideally, this function should be implemented with a secant approach. Currently, it just calls
 * LEVMAR_BC_DER()
 */
int LEVMAR_BC_DIF(
  void (*func)(LM_REAL *p, LM_REAL *hx, int m, int n, void *adata), /* functional relation describing measurements.
                                                                     * A p \in R^m yields a \hat{x} \in R^n
                                                                     */
  LM_REAL *p,          /* I/O: initial parameter estimates. On output has the estimated solution */
  LM_REAL *x,          /* I: measurement vector */
  int m,               /* I: parameter vector dimension (i.e. #unknowns) */
  int n,               /* I: measurement vector dimension */
  LM_REAL *lb,         /* I: vector of lower bounds. If NULL, no lower bounds apply */
  LM_REAL *ub,         /* I: vector of upper bounds. If NULL, no upper bounds apply */
  int itmax,           /* I: maximum number of iterations */
  LM_REAL opts[5],     /* I: opts[0-4] = minim. options [\mu, \epsilon1, \epsilon2, \epsilon3, \delta]. Respectively the
                        * scale factor for initial \mu, stopping thresholds for ||J^T e||_inf, ||Dp||_2 and ||e||_2 and
                        * the step used in difference approximation to the jacobian. Set to NULL for defaults to be used.
                        * If \delta<0, the jacobian is approximated with central differences which are more accurate
                        * (but slower!) compared to the forward differences employed by default.
                        */
  LM_REAL info[LM_INFO_SZ],
                       /* O: information regarding the minimization. Set to NULL if don't care
                        * info[0]= ||e||_2 at initial p.
                        * info[1-4]=[ ||e||_2, ||J^T e||_inf, ||Dp||_2, mu/max[J^T J]_ii ], all computed at estimated p.
                        * info[5]= # iterations,
                        * info[6]=reason for terminating: 1 - stopped by small gradient J^T e
                        *                                 2 - stopped by small Dp
                        *                                 3 - stopped by itmax
                        *                                 4 - singular matrix. Restart from current p with increased mu
                        *                                 5 - no further error reduction is possible. Restart with increased mu
                        *                                 6 - stopped by small ||e||_2
                        * info[7]= # function evaluations
                        * info[8]= # jacobian evaluations
                        */
  LM_REAL *work,       /* working memory, allocate if NULL */
  LM_REAL *covar,      /* O: Covariance matrix corresponding to LS solution; mxm. Set to NULL if not needed. */
  void *adata)         /* pointer to possibly additional data, passed uninterpreted to func.
                        * Set to NULL if not needed
                        */
{
struct LMBC_DIF_DATA data;
int ret;

  //fprintf(stderr, RCAT("\nWarning: current implementation of ", LEVMAR_BC_DIF) "() does not use a secant approach!\n\n");

  data.func=func;
  data.hx=(LM_REAL *)malloc(2*n*sizeof(LM_REAL)); /* allocate a big chunk in one step */
  if(!data.hx){
    fprintf(stderr, LCAT(LEVMAR_BC_DIF, "(): memory allocation request failed\n"));
    exit(1);
  }
  data.hxx=data.hx+n;
  data.adata=adata;
  data.delta=(opts)? FABS(opts[4]) : (LM_REAL)LM_DIFF_DELTA; // no central differences here...

  ret=LEVMAR_BC_DER(LMBC_DIF_FUNC, LMBC_DIF_JACF, p, x, m, n, lb, ub, itmax, opts, info, work, covar, (void *)&data);

  if(info) /* correct the number of function calls */
    info[7]+=info[8]*(m+1); /* each jacobian evaluation costs m+1 function calls */

  free(data.hx);

  return ret;
}

/* undefine everything. THIS MUST REMAIN AT THE END OF THE FILE */
#undef FUNC_STATE
#undef LNSRCH
#undef BOXPROJECT
#undef BOXCHECK
#undef LEVMAR_BC_DER
#undef LMBC_DIF_DATA
#undef LMBC_DIF_FUNC
#undef LMBC_DIF_JACF
#undef LEVMAR_BC_DIF
#undef FDIF_FORW_JAC_APPROX
#undef FDIF_CENT_JAC_APPROX
#undef LEVMAR_COVAR
#undef TRANS_MAT_MAT_MULT
#undef AX_EQ_B_LU
#undef AX_EQ_B_CHOL
#undef AX_EQ_B_QR
#undef AX_EQ_B_QRLS
#undef AX_EQ_B_SVD
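
Usage example (illustrative only). The sketch below shows one plausible way to drive the finite-difference, box-constrained routine documented above. It is not part of lmbc_core.c: it assumes the double-precision build of levmar, in which LEVMAR_BC_DIF expands to dlevmar_bc_dif and the public declarations live in levmar.h (older releases ship the header as lm.h, and some newer releases insert an extra scaling-vector argument between ub and itmax). The model, data, bounds and option values are invented for the example.

/* example_bc_fit.c -- hypothetical driver, not part of lmbc_core.c.
 * Fits x[i] ~= p[0]*exp(-p[1]*i) subject to box constraints, using the
 * finite-difference Jacobian variant (no analytic Jacobian required). */
#include <stdio.h>
#include <math.h>
#include <levmar.h> /* assumed header name; older releases use lm.h */

/* model: hx[i] = p[0]*exp(-p[1]*i), i = 0..n-1 */
static void expfunc(double *p, double *hx, int m, int n, void *adata)
{
int i;

  (void)m; (void)adata; /* unused here */
  for(i=0; i<n; ++i)
    hx[i]=p[0]*exp(-p[1]*i);
}

int main(void)
{
const int m=2, n=40;
double p[2]={1.0, 0.0};                     /* initial estimate */
double lb[2]={0.0, 0.0}, ub[2]={10.0, 5.0}; /* box constraints lb <= p <= ub */
double x[40], opts[5], info[LM_INFO_SZ];
int i, niter;

  for(i=0; i<n; ++i) /* synthetic, noise-free measurements generated with p=(5.0, 0.1) */
    x[i]=5.0*exp(-0.1*i);

  opts[0]=LM_INIT_MU;    /* scale factor for the initial mu */
  opts[1]=1E-15;         /* stopping threshold for ||J^T e||_inf */
  opts[2]=1E-15;         /* stopping threshold for ||Dp||_2 */
  opts[3]=1E-20;         /* stopping threshold for ||e||_2 */
  opts[4]=LM_DIFF_DELTA; /* finite-difference step; note this routine uses forward differences only */

  /* dlevmar_bc_dif is the double-precision instance of LEVMAR_BC_DIF above */
  niter=dlevmar_bc_dif(expfunc, p, x, m, n, lb, ub, 1000, opts, info, NULL, NULL, NULL);

  printf("%d iterations, p=(%g, %g), ||e||_2: %g -> %g, stop reason %g\n",
         niter, p[0], p[1], info[0], info[1], info[6]);

  return 0;
}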