
📄 eigensolver.c

📁 Photonic-crystal simulation software developed at MIT
💻 C
📖 Page 1 of 2
               constraint(X, constraint_data);

          if (flags & EIGS_PROJECT_PRECONDITIONING) {
               /* Operate projection P = (1 - Y U Yt) on X: */
               evectmatrix_XtY(symYtD, Y, X, S2);  /* symYtD = Yt X */
               sqmatrix_AeBC(S1, U, 0, symYtD, 0);
               evectmatrix_XpaYS(X, -1.0, Y, S1, 0);
          }

          /* Now, for the case of EIGS_ORTHOGONAL_PRECONDITIONER, we
             need to use G as scratch space in order to avoid the need
             for an extra column bundle.  Before that, we need to do
             any computations that we need with G.  (Yes, we're
             playing tricksy games here, but isn't it fun?) */

          mpi_assert_equal(traceGtX = SCALAR_RE(evectmatrix_traceXtY(G, X)));

          if (usingConjugateGradient) {
               if (use_polak_ribiere) {
                    /* assign G = G - prev_G and copy prev_G = G in the
                       same loop.  We can't use the BLAS routines because
                       we would then need an extra n x p array. */
                    for (i = 0; i < Y.n * Y.p; ++i) {
                         scalar g = G.data[i];
                         ACCUMULATE_DIFF(G.data[i], prev_G.data[i]);
                         prev_G.data[i] = g;
                    }
                    gamma_numerator = SCALAR_RE(evectmatrix_traceXtY(G, X));
               }
               else /* otherwise, use Fletcher-Reeves (ignore prev_G) */
                    gamma_numerator = traceGtX;
               mpi_assert_equal(gamma_numerator);
          }

          /* The motivation for the following code came from a trick I
             noticed in Sleijpen and Van der Vorst, "A Jacobi-Davidson
             iteration method for linear eigenvalue problems," SIAM
             J. Matrix Anal. Appl. 17, 401-425 (April 1996).  (The
             motivation in our case comes from the fact that if you
             look at the Hessian matrix of the problem, it has a
             projection operator just as in the above reference, and
             so we should use the same technique to invert it.)  So far,
             though, the hoped-for savings haven't materialized; maybe
             we need a better preconditioner first. */
          if (flags & EIGS_ORTHOGONAL_PRECONDITIONER) {
               real traceGtX_delta; /* change in traceGtX when we update X */

               /* set G = precondition(Y): */
               if (K != NULL)
                    K(Y, G, Kdata, Y, NULL, YtY);
               else
                    evectmatrix_copy(G, Y);  /* preconditioner is identity */

               /* let X = KG - KY S3t, where S3 is chosen so that YtX = 0:
                  S3 = (YtKG)t / (YtKY).  Recall that, at this point,
                  X holds KG and G holds KY.  K is assumed Hermitian. */
               evectmatrix_XtY(S1, Y, G, S2);
               sqmatrix_invert(S1, 0, S2);  /* S1 = 1 / (YtKY) */
               evectmatrix_XtY(S2, X, Y, S3);  /* S2 = GtKY = (YtKG)t */
               sqmatrix_AeBC(S3, S2, 0, S1, 1);
               evectmatrix_XpaYS(X, -1.0, G, S3, 1);

               /* Update traceGtX and gamma_numerator.  The update
                  for gamma_numerator isn't really right in the case
                  of Polak-Ribiere; it amounts to doing a weird combination
                  of P-R and Fletcher-Reeves...what will happen?  (To
                  do the right thing, I think we would need an extra
                  column bundle.) */
               traceGtX_delta = -SCALAR_RE(sqmatrix_traceAtB(S3, S2));
               traceGtX += traceGtX_delta;
               if (usingConjugateGradient)
                    gamma_numerator += traceGtX_delta;
          }

          /* In conjugate-gradient, the minimization direction D is
             a combination of X with the previous search directions.
             Otherwise, we just have D = X. */
          if (usingConjugateGradient) {
               real gamma;

               if (prev_traceGtX == 0.0)
                    gamma = 0.0;
               else
                    gamma = gamma_numerator / prev_traceGtX;

               if ((flags & EIGS_DYNAMIC_RESET_CG) &&
                   2.0 * convergence_history[iteration % EIG_HISTORY_SIZE] >=
                   convergence_history[(iteration+1) % EIG_HISTORY_SIZE]) {
                    gamma = 0.0;
                    if (flags & EIGS_VERBOSE)
                         mpi_one_printf("    dynamically resetting CG direction...\n");
                    for (i = 1; i < EIG_HISTORY_SIZE; ++i)
                         convergence_history[(iteration+i) % EIG_HISTORY_SIZE]
                              = 10000.0;
               }

               if ((flags & EIGS_RESET_CG) &&
                   (iteration + 1) % CG_RESET_ITERS == 0) {
                    /* periodically forget previous search directions,
                       and just use D = X */
                    gamma = 0.0;
                    if (flags & EIGS_VERBOSE)
                         mpi_one_printf("    resetting CG direction...\n");
               }

               mpi_assert_equal(gamma * d_scale);
               evectmatrix_aXpbY(gamma * d_scale, D, 1.0, X);
          }

          d_scale = 1.0;

          /* Minimize the trace along Y + lambda*D: */

          if (!use_linmin) {
               real dE, E2, d2E, t, d_norm;

               /* Here, we do an approximate line minimization along D
                  by evaluating dE (the derivative) at the current point,
                  and the trace E2 at a second point, and then approximating
                  the second derivative d2E by finite differences.  Then,
                  we use one step of Newton's method on the derivative.
                  This has the advantage of requiring two fewer O(np^2)
                  matrix multiplications compared to the exact linmin. */

               d_norm = sqrt(SCALAR_RE(evectmatrix_traceXtY(D,D)) / Y.p);
               mpi_assert_equal(d_norm);

               /* dE = 2 * tr Gt D.  (Use prev_G instead of G so that
                  it works even when we are using Polak-Ribiere.) */
               dE = 2.0 * SCALAR_RE(evectmatrix_traceXtY(prev_G, D)) / d_norm;

               /* shift Y by prev_theta along D, in the downhill direction: */
               t = dE < 0 ? -fabs(prev_theta) : fabs(prev_theta);
               evectmatrix_aXpbY(1.0, Y, t / d_norm, D);

               evectmatrix_XtX(U, Y, S2);
               sqmatrix_invert(U, 1, S2);  /* U = 1 / (Yt Y) */
               A(Y, G, Adata, 1, X); /* G = AY; X is scratch */
               evectmatrix_XtY(S1, Y, G, S2);  /* S1 = Yt A Y */

               E2 = SCALAR_RE(sqmatrix_traceAtB(S1, U));
               mpi_assert_equal(E2);

               /* Get finite-difference approximation for the 2nd derivative
                  of the trace.  Equivalently, fit to a quadratic of the
                  form:  E(theta) = E + dE theta + 1/2 d2E theta^2 */
               d2E = (E2 - E - dE * t) / (0.5 * t * t);
               theta = -dE/d2E;

               /* If the 2nd derivative is negative, or a big shift
                  in the trace is predicted (compared to the previous
                  iteration), then this approximate line minimization is
                  probably not very good; switch back to the exact
                  line minimization.  Hopefully, we won't have to
                  abort like this very often, as it wastes operations. */
               if (d2E < 0 || -0.5*dE*theta > 20.0 * fabs(E-prev_E)) {
                    if (flags & EIGS_FORCE_APPROX_LINMIN) {
                         if (flags & EIGS_VERBOSE)
                              mpi_one_printf("    using previous stepsize\n");
                    }
                    else {
                         if (flags & EIGS_VERBOSE)
                              mpi_one_printf("    switching back to exact "
                                             "line minimization\n");
                         use_linmin = 1;
                         evectmatrix_aXpbY(1.0, Y, -t / d_norm, D);
                         prev_theta = atan(prev_theta); /* convert to angle */
                         /* don't do this again: */
                         flags |= EIGS_FORCE_EXACT_LINMIN;
                    }
               }
               else {
                    /* Shift Y by theta, hopefully minimizing the trace: */
                    evectmatrix_aXpbY(1.0, Y, (theta - t) / d_norm, D);
               }
          }
          if (use_linmin) {
               real dE, d2E;

               d_scale = sqrt(SCALAR_RE(evectmatrix_traceXtY(D, D)) / Y.p);
               mpi_assert_equal(d_scale);
               blasglue_rscal(Y.p * Y.n, 1/d_scale, D.data, 1);

               A(D, G, Adata, 0, X); /* G = A D; X is scratch */
               evectmatrix_XtX(DtD, D, S2);
               evectmatrix_XtY(DtAD, D, G, S2);

               evectmatrix_XtY(S1, Y, D, S2);
               sqmatrix_symmetrize(symYtD, S1);

               evectmatrix_XtY(S1, Y, G, S2);
               sqmatrix_symmetrize(symYtAD, S1);

               sqmatrix_AeBC(S1, U, 0, symYtD, 1);
               dE = 2.0 * (SCALAR_RE(sqmatrix_traceAtB(U, symYtAD)) -
                           SCALAR_RE(sqmatrix_traceAtB(YtAYU, S1)));

               sqmatrix_copy(S2, DtD);
               sqmatrix_ApaBC(S2, -4.0, symYtD, 0, S1, 0);
               sqmatrix_AeBC(S3, symYtAD, 0, S1, 0);
               sqmatrix_AeBC(S1, U, 0, S2, 1);
               d2E = 2.0 * (SCALAR_RE(sqmatrix_traceAtB(U, DtAD)) -
                            SCALAR_RE(sqmatrix_traceAtB(YtAYU, S1)) -
                            4.0 * SCALAR_RE(sqmatrix_traceAtB(U, S3)));

               /* this is just Newton-Raphson to find a root of
                  the first derivative: */
               theta = -dE/d2E;

               if (d2E < 0) {
                    if (flags & EIGS_VERBOSE)
                         mpi_one_printf("    near maximum in trace\n");
                    theta = dE > 0 ? -fabs(prev_theta) : fabs(prev_theta);
               }
               else if (-0.5*dE*theta > 2.0 * fabs(E-prev_E)) {
                    if (flags & EIGS_VERBOSE)
                         mpi_one_printf("    large trace change predicted "
                                        "(%g%%)\n", -0.5*dE*theta/E * 100.0);
               }
               if (fabs(theta) >= K_PI) {
                    if (flags & EIGS_VERBOSE)
                         mpi_one_printf("    large theta (%g)\n", theta);
                    theta = dE > 0 ? -fabs(prev_theta) : fabs(prev_theta);
               }

               /* Set S1 to YtAYU * YtY = YtAY for use in linmin.
                  (tfd.YtAY == S1). */
               sqmatrix_AeBC(S1, YtAYU, 0, YtY, 1);

               mpi_assert_equal(theta);
               {
                    double new_E, new_dE;
                    TIME_OP(time_linmin,
                            theta = linmin(&new_E, &new_dE, theta, E, dE,
                                           0.1, MIN2(tolerance, 1e-6), 1e-14,
                                           0, dE > 0 ? -K_PI : K_PI,
                                           trace_func, &tfd,
                                           flags & EIGS_VERBOSE));
                    linmin_improvement =
                         fabs(E - new_E) * 2.0 / fabs(E + new_E);
               }
               mpi_assert_equal(theta);
               CHECK(fabs(theta) <= K_PI, "converged out of bounds!");

               /* Shift Y to new location minimizing the trace along D: */
               evectmatrix_aXpbY(cos(theta), Y, sin(theta), D);
          }

          /* In exact arithmetic, we don't need to do this, but in practice
             it is probably a good idea to keep errors from adding up and
             eventually violating the constraints. */
          if (constraint)
               constraint(Y, constraint_data);

          prev_traceGtX = traceGtX;
          prev_theta = theta;
          prev_E = E;

          /* Finally, we use the times for the various operations to
             help us pick an algorithm for the next iteration: */
          {
               real t_exact, t_approx;
               t_exact = EXACT_LINMIN_TIME(time_AZ, time_KZ, time_ZtW,
                                           time_ZS, time_ZtZ, time_linmin);
               t_approx = APPROX_LINMIN_TIME(time_AZ, time_KZ, time_ZtW,
                                             time_ZS, time_ZtZ);
               if (flags & EIGS_PROJECT_PRECONDITIONING) {
                    t_exact += time_ZtW + time_ZS;
                    t_approx += time_ZtW + time_ZS;
               }

               /* Sum the times over the processors so that all the
                  processors compare the same, average times. */
               mpi_allreduce_1(&t_exact,
                               real, SCALAR_MPI_TYPE, MPI_SUM, MPI_COMM_WORLD);
               mpi_allreduce_1(&t_approx,
                               real, SCALAR_MPI_TYPE, MPI_SUM, MPI_COMM_WORLD);

               if (!(flags & EIGS_FORCE_EXACT_LINMIN) &&
                   linmin_improvement > 0 &&
                   linmin_improvement <= APPROX_LINMIN_IMPROVEMENT_THRESHOLD &&
                   t_exact > t_approx * APPROX_LINMIN_SLOWDOWN_GUESS) {
                    if ((flags & EIGS_VERBOSE) && use_linmin)
                         mpi_one_printf("    switching to approximate "
                                "line minimization (decrease time by %g%%)\n",
                                (t_exact - t_approx) * 100.0 / t_exact);
                    use_linmin = 0;
               }
               else if (!(flags & EIGS_FORCE_APPROX_LINMIN)) {
                    if ((flags & EIGS_VERBOSE) && !use_linmin)
                         mpi_one_printf("    switching back to exact "
                                        "line minimization\n");
                    use_linmin = 1;
                    prev_theta = atan(prev_theta); /* convert to angle */
               }
          }
     } while (++iteration < EIGENSOLVER_MAX_ITERATIONS);

     CHECK(iteration < EIGENSOLVER_MAX_ITERATIONS,
           "failure to converge after "
           STRINGIZE(EIGENSOLVER_MAX_ITERATIONS)
           " iterations");

     evectmatrix_XtX(U, Y, S2);
     sqmatrix_invert(U, 1, S2);
     eigensolver_get_eigenvals_aux(Y, eigenvals, A, Adata,
                                   X, G, U, S1, S2);

     *num_iterations = iteration;

     destroy_sqmatrix(S3);
     destroy_sqmatrix(S2);
     destroy_sqmatrix(S1);
     destroy_sqmatrix(symYtD);
     destroy_sqmatrix(DtD);
     destroy_sqmatrix(U);
     destroy_sqmatrix(YtY);
     destroy_sqmatrix(symYtAD);
     destroy_sqmatrix(DtAD);
     destroy_sqmatrix(YtAYU);
}
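Editor's note on the EIGS_PROJECT_PRECONDITIONING step above: it applies the projection P = (1 - Y U Yt) to the preconditioned gradient X, where U approximates inverse(Yt Y). The following is a minimal standalone sketch of the same three-step projection for a single real vector, written with plain loops instead of MPB's evectmatrix/sqmatrix routines; all names here (project_out_span, ytx, uytx) are hypothetical illustrations, not part of the MPB API.

#include <stddef.h>

/* x <- (I - Y U Yt) x, for an n-by-p matrix Y stored row-major
   (Y[i*p + j] is component i of column j) and a precomputed p-by-p
   matrix U = inverse(Yt Y).  ytx and uytx are caller-supplied
   scratch arrays of length p. */
void project_out_span(double *x, const double *Y, const double *U,
                      size_t n, size_t p, double *ytx, double *uytx)
{
     size_t i, j, k;

     for (j = 0; j < p; ++j) {          /* ytx = Yt x */
          double s = 0.0;
          for (i = 0; i < n; ++i)
               s += Y[i*p + j] * x[i];
          ytx[j] = s;
     }
     for (j = 0; j < p; ++j) {          /* uytx = U ytx */
          double s = 0.0;
          for (k = 0; k < p; ++k)
               s += U[j*p + k] * ytx[k];
          uytx[j] = s;
     }
     for (i = 0; i < n; ++i) {          /* x -= Y uytx */
          double s = 0.0;
          for (j = 0; j < p; ++j)
               s += Y[i*p + j] * uytx[j];
          x[i] -= s;
     }
}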
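The usingConjugateGradient block chooses the CG mixing factor gamma as a ratio of gradient inner products: Fletcher-Reeves reuses tr(Gt X) directly, while Polak-Ribiere first overwrites G with G - prev_G so the numerator becomes tr((G - prev_G)t X). A hedged sketch of the two rules for ordinary real vectors follows; in the eigensolver itself the inner product is Re tr(Gt X) over column bundles with X the preconditioned gradient, and these function names are illustrative only.

#include <stddef.h>

static double dot(const double *a, const double *b, size_t n)
{
     double s = 0.0;
     size_t i;
     for (i = 0; i < n; ++i)
          s += a[i] * b[i];
     return s;
}

/* Fletcher-Reeves: numerator is <g, g>; prev_num is the previous
   iteration's numerator.  Returns 0 before any history exists. */
double gamma_fletcher_reeves(const double *g, size_t n, double prev_num)
{
     return prev_num == 0.0 ? 0.0 : dot(g, g, n) / prev_num;
}

/* Polak-Ribiere: numerator is <g - g_prev, g>, which discards stale
   search directions faster when the gradient changes character. */
double gamma_polak_ribiere(const double *g, const double *g_prev,
                           size_t n, double prev_num)
{
     double num = 0.0;
     size_t i;
     for (i = 0; i < n; ++i)
          num += (g[i] - g_prev[i]) * g[i];
     return prev_num == 0.0 ? 0.0 : num / prev_num;
}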
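The EIGS_DYNAMIC_RESET_CG test reads a small ring buffer of recent convergence figures: slot iteration % EIG_HISTORY_SIZE holds the newest entry and slot (iteration+1) % EIG_HISTORY_SIZE the oldest, so the test asks whether convergence has at least halved over the last window of iterations. A minimal sketch of that indexing, with a made-up history size and fake data:

#include <stdio.h>

#define HISTORY_SIZE 5  /* stand-in for EIG_HISTORY_SIZE */

int main(void)
{
     double history[HISTORY_SIZE] = { 0 };
     int iteration;

     for (iteration = 0; iteration < 12; ++iteration) {
          double convergence = 1.0 / (1 + iteration); /* fake data */
          history[iteration % HISTORY_SIZE] = convergence;
          if (iteration >= HISTORY_SIZE - 1) {
               double newest = history[iteration % HISTORY_SIZE];
               double oldest = history[(iteration + 1) % HISTORY_SIZE];
               /* same shape as the EIGS_DYNAMIC_RESET_CG test: reset
                  when convergence hasn't halved over the window */
               if (2.0 * newest >= oldest)
                    printf("iter %d: would reset CG direction\n", iteration);
          }
     }
     return 0;
}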
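The !use_linmin branch fits the trace to a quadratic E(theta) = E + dE*theta + 1/2 d2E*theta^2 using one derivative and one extra trace evaluation, then takes a single Newton step on the derivative. Here is a self-contained numeric check of that fit; the quadratic and its coefficients are made up for illustration.

#include <stdio.h>

int main(void)
{
     double E = 10.0, dE = -4.0; /* value and slope at theta = 0 */
     double t = 0.5;             /* trial step along the direction */
     double E2, d2E, theta;

     /* pretend the true function is E(x) = 10 - 4x + 3x^2,
        so the trace measured at the trial point is: */
     E2 = E + dE * t + 0.5 * 6.0 * t * t;

     /* finite-difference curvature from the three known quantities */
     d2E = (E2 - E - dE * t) / (0.5 * t * t);

     /* one Newton step on E'(theta) = dE + d2E*theta = 0 */
     theta = -dE / d2E;

     /* recovers d2E = 6 and theta = 2/3, the true minimum */
     printf("d2E = %g, theta = %g\n", d2E, theta);
     return 0;
}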
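Finally, the exact line minimization rescales D to unit trace norm and shifts Y to Y*cos(theta) + D*sin(theta). When Y and D are orthonormal in the relevant inner product, this rotation form keeps Y normalized for any theta, which is, roughly speaking, why theta can be searched on a fixed bounded interval around zero. A toy two-component check of the norm-preservation property (values arbitrary):

#include <math.h>
#include <stdio.h>

int main(void)
{
     double y[2] = { 1.0, 0.0 };   /* stand-in for a normalized Y column */
     double d[2] = { 0.0, 1.0 };   /* orthonormal search direction */
     double theta = 0.3, z[2];
     int i;

     for (i = 0; i < 2; ++i)
          z[i] = cos(theta) * y[i] + sin(theta) * d[i];

     /* cos^2 + sin^2 = 1, so the norm stays exactly 1 */
     printf("norm(z) = %g\n", sqrt(z[0]*z[0] + z[1]*z[1]));
     return 0;
}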
