⭐ 欢迎来到虫虫下载站! | 📦 资源下载 📁 资源专辑 ℹ️ 关于我们
⭐ 虫虫下载站

📄 krylov.py

📁 利用C
💻 PY
📖 第 1 页 / 共 2 页
字号:
        alpha = rr/inner(rs,BAp)        s     = r-alpha*BAp        As    = A*s        BAs   = B*As        w     = inner(BAs,s)/inner(BAs,BAs)        x    += alpha*p+w*s        r     = s -w*BAs        rrn   = inner(r,rs)        beta  = (rrn/rr)*(alpha/w)        rr    = rrn        p     = r+beta*(p-w*BAp)        iter += 1    debug("precondBiCGStab finished, iter: %d, ||e||=%e" % (iter,sqrt(inner(r,r))))    return (x,iter)"""Conjugate Gradients Method for the normal equations"""def CGN_AA(A, x, b, tolerance=1.0E-05, relativeconv=False):    """    CGN_AA(B,A,x,b): Solve Ax = b with the conjugate    gradient method applied to the normal equation.    @param A: Matrix or discrete operator. Must support A*x to operate on x.    @param x: Anything that A can operate on, as well as support inner product    and multiplication/addition/subtraction with scalar and something of the    same type    @param b: Right-hand side, probably the same type as x.    @param tolerance: Convergence criterion    @type tolerance: float    @param relativeconv: Control relative vs. absolute convergence criterion.    That is, if relativeconv is True, ||r||_2/||r_init||_2 is used as    convergence criterion, else just ||r||_2 is used.  Actually ||r||_2^2 is    used, since that save a few cycles.    @type relativeconv: bool    @return:  the solution x.    """    """    Allocate some memory    """    r_a   = 1.0*x    p_aa  = 1.0*x    r = b - A*x    bnrm2=sqrt(inner(b,b))    A.prod(r,r_a,True)    rho=inner(r_a,r_a)    rho1=rho    p = 1.0*r_a    # Used to compute an estimate of the condition number    alpha_v=[]    beta_v=[]    iter=0    error   = sqrt(rho)/bnrm2    debug("error0="+str(error), 0)    while error>=tolerance:        p_a     = A*p        A.prod(p_a,p_aa)        alpha   = rho/inner(p,p_aa)        x       = x + alpha*p        r       = b-A*x        A.prod(r,r_a,True)        rho     = inner(r_a,r_a)        beta    = rho/rho1        error   = sqrt(rho)/bnrm2        debug("error = " + str(error), 0)        alpha_v.append(alpha)        beta_v.append(beta)        rho1    = rho        p       = r_a+beta*p        iter   += 1    debug("CGN_ABBA finished, iter: %d, ||e||=%e" % (iter,error))    return (x,iter,alpha_v,beta_v)def CGN_ABBA(B, A, x, b, tolerance=1.0E-05, relativeconv=False):    """    CGN_ABBA(B,A,x,b): Solve Ax = b with the preconditioned conjugate    gradient method applied to the normal equation.    @param B: Preconditioner supporting the __mul__ operator for operating on    x.    @param A: Matrix or discrete operator. Must support A*x to operate on x.    @param x: Anything that A can operate on, as well as support inner product    and multiplication/addition/subtraction with scalar and something of the    same type    @param b: Right-hand side, probably the same type as x.    @param tolerance: Convergence criterion    @type tolerance: float    @param relativeconv: Control relative vs. absolute convergence criterion.    That is, if relativeconv is True, ||r||_2/||r_init||_2 is used as    convergence criterion, else just ||r||_2 is used.  Actually ||r||_2^2 is    used, since that save a few cycles.    @type relativeconv: bool    @return:  the solution x.    """    """    Allocate some memory    """    r_bb   = 1.0*x    r_abb  = 1.0*x    p_bba  = 1.0*x    p_abba = 1.0*x    r = b - A*x    bnrm2 = sqrt(inner(b,b))    r_b = B*r    r_bb = B*r_b #Should be transposed    A.data[0][0].prod(r_bb[0],r_abb[0])    A.prod(r_bb,r_abb,True)    rho=inner(r_abb,r_abb)    rho1=rho    p=r_abb.copy()    # Used to compute an estimate of the condition number    alpha_v=[]    beta_v=[]    iter=0    error  = sqrt(rho)/bnrm2    while error>=tolerance:        p_a     = A*p        p_ba    = B*p_a;        p_bba   = B*p_ba        A.prod(p_bba,p_abba)        alpha   = rho/inner(p,p_abba)        x       = x + alpha*p        r       = b-A*x        r_b     = B*r        r_bb    = B*r_b        A.prod(r_bb,r_abb,True)        rho     = inner(r_abb,r_abb)        beta    = rho/rho1        error   = sqrt(rho)/bnrm2        debug("i = ", error)        alpha_v.append(alpha)        beta_v.append(beta)        rho1    = rho        p       = r_abb+beta*p        iter   += 1    debug("CGN_ABBA finished, iter: %d, ||e||=%e" % (iter,error))    return (x,iter,alpha_v,beta_v)def CGN_BABA(B, A, x, b, tolerance=1.0E-05, relativeconv=False):    """    CGN_BABA(B,A,x,b): Solve Ax = b with the preconditioned conjugate    gradient method applied to the normal equation.    @param B: Preconditioner supporting the __mul__ operator for operating on    x.    @param A: Matrix or discrete operator. Must support A*x to operate on x.    @param x: Anything that A can operate on, as well as support inner product    and multiplication/addition/subtraction with scalar and something of the    same type    @param b: Right-hand side, probably the same type as x.    @param tolerance: Convergence criterion    @type tolerance: float    @param relativeconv: Control relative vs. absolute convergence criterion.    That is, if relativeconv is True, ||r||_2/||r_init||_2 is used as    convergence criterion, else just ||r||_2 is used.  Actually ||r||_2^2 is    used, since that save a few cycles.    @type relativeconv: bool    @return:  the solution x.    """    # Is this correct?? Should we not transpose the preconditoner somewhere??    """    Allocate some memory    """    r_ab   = 1.0*x    p_aba  = 1.0*x    r      = b - A*x    bnrm2  = sqrt(inner(b,b))    r_b    = B*r    A.prod(r_b,r_ab,True)    r_bab  = B*r_ab    rho     = inner(r_bab,r_ab)    rho1    = rho    p       = r_bab.copy()    # Used to compute an estimate of the condition number    alpha_v=[]    beta_v=[]    iter=0    error   = sqrt(rho)/bnrm2    while error>=tolerance:        p_a     = A*p        p_ba    = B*p_a;        A.prod(p_ba,p_aba,True)        p_baba  = B*p_aba        alpha   = rho/inner(p,p_aba)        x       = x + alpha*p        r       = b-A*x        r_b     = B*r        A.prod(r_b,r_ab,True)        r_bab   = A*r_ab        rho     = inner(r_bab,r_ab)        beta    = rho/rho1        error   = sqrt(rho)/bnrm2        alpha_v.append(alpha)        beta_v.append(beta)        rho1    = rho        p       = r_bab+beta*p        iter   += 1    debug("CGN_BABA finished, iter: %d, ||e||=%e" % (iter,error))    return (x,iter,alpha_v,beta_v)def precondRconjgrad(B, A, x, b, tolerance=1.0E-05, relativeconv=False):    """    conjgrad(B,A,x,b): Solve Ax = b with the right preconditioned conjugate gradient method.    @param B: Preconditioner supporting the __mul__ operator for operating on    x.    @param A: Matrix or discrete operator. Must support A*x to operate on x.    @param x: Anything that A can operate on, as well as support inner product    and multiplication/addition/subtraction with scalar and something of the    same type    @param b: Right-hand side, probably the same type as x.    @param tolerance: Convergence criterion    @type tolerance: float    @param relativeconv: Control relative vs. absolute convergence criterion.    That is, if relativeconv is True, ||r||_2/||r_init||_2 is used as    convergence criterion, else just ||r||_2 is used.  Actually ||r||_2^2 is    used, since that save a few cycles.    @type relativeconv: bool    @return:  the solution x.    """    r0 = b - A*x    r = 1.0*r0    z = B*r    q = z.copy()    rho = rho0 = inner(z, r)    if relativeconv:        tolerance *= sqrt(inner(B*b,b))    alpha_v=[]    beta_v=[]    iter=0    while sqrt(fabs(rho0)) > tolerance:        Aq=A*q        alpha = rho0/inner(Aq,q)        x += alpha*q        r -= alpha*Aq        z  = B*r        rho = inner(z, r)        beta = rho/rho0        q = z+beta*q        rho0 = rho        alpha_v.append(alpha)        beta_v.append(beta)        iter += 1    return (x,iter,alpha_v,beta_v)def Richardson(A, x, b, tau=1, tolerance=1.0E-05, relativeconv=False, maxiter=1000, info=False):    print "b ", b.inner(b)#    b.disp()    print "x ", x.inner(x)    r = b - A*x    print "r ", r.inner(r)    rho = rho0 = inner(r, r)    if relativeconv:        tolerance *= sqrt(inner(b,b))    print "tolerance ", tolerance    iter = 0    while sqrt(rho) > tolerance and iter <= maxiter:        x += tau*r        r = b - A*x        rho = inner(r,r)        print "iteration ", iter, " rho= ",  sqrt(rho)        iter += 1    return x def precRichardson(B, A, x, b, tau=1, tolerance=1.0E-05, relativeconv=False, maxiter=1000, info=False):    print "b ", b.inner(b)    print "x ", x.inner(x)    r = b - A*x    print "r ", r.inner(r)    s = B*r    rho = rho0 = inner(r, r)    if relativeconv:        tolerance *= sqrt(inner(b,b))    print "tolerance ", tolerance    iter = 0    while sqrt(rho) > tolerance and iter <= maxiter:        x += tau*s        r = b - A*x        s = B*r         rho = inner(r,r)        print "iteration ", iter, " rho= ",  sqrt(rho)        iter += 1    return x 

⌨️ 快捷键说明

复制代码 Ctrl + C
搜索代码 Ctrl + F
全屏模式 F11
切换主题 Ctrl + Shift + D
显示快捷键 ?
增大字号 Ctrl + =
减小字号 Ctrl + -