📄 cd_core.f90
#endif

      call t_startf('c_core')

!$omp parallel do private(i, j, k, uxx, vxx, iord, jord)

      do k=kfirst,klast        ! This is the main parallel loop.

         if( k <= km/8 ) then
            iord = 1
            jord = 1
         else
            iord = iord_c
            jord = jord_c
         endif

         call vpol5(u(1,jfirst,k), v(1,jfirst,k), im, jm,       &
                    coslon, sinlon, cosl5, sinl5, jfirst, jlast)

         do j=js1gc,jn1gcp1
            do i=1,im
               uxx(i,j) = u(i,j,k)
            enddo
         enddo

         do j=js1gcp1,jn1gc
            do i=1,im
               vxx(i,j) = v(i,j,k)
            enddo
         enddo

!-----------------------------------------------------------------
! Call the vertical independent part of the dynamics on the C-grid
!-----------------------------------------------------------------

         call c_sw(uxx(1,jfirst-ng_c), vxx(1,jfirst-ng_c-1), pt(1,jfirst-ng_c,k),  &
                   delp(1,jfirst,k), uc(1,jfirst-ng_c,k),          &
                   vc(1,jfirst-2,k), ptc(1,jfirst,k),              &
                   delpf(1,jfirst-ng_c,k), ptk(1,jfirst,k),        &
                   cosp, acosp, cose, coslon, sinlon,              &
                   cosl5, sinl5, dxdt, dxe, dtdx2,                 &
                   dtdx4, dtxe5, rdxe, dycp, dydt, dtdy5,          &
                   cye, fc, ifax, trigs, dc(1,js2g0),              &
                   sc, zt_c, tiny, rcap, im,                       &
                   jm, jfirst, jlast, ng_c, ng_d,                  &
                   ng_s, js2g0, jn2g0, js2gc, jn1gc,               &
                   iord, jord )
      enddo
      call t_stopf('c_core')

! MPI note: uc, vc, ptk, and ptc computed within the above k-loop from jfirst to jlast
! Needed by D-core: uc(jfirst-ng_d:jlast+ng_d), vc(jfirst:jlast+1)

      call t_startf('c_geop')

#if defined( SPMD )
      if (twod_decomp .eq. 1) then
        if (geopkc16) then
!
! Stay in yz space and use semi-global z communications and 16-byte reals
!
          call geopk16(ptop, pe, ptk, pkcc, wzc, hs, ptc, 0, im, jm, km,  &
                       jfirst, jlast, 1, im, cp, akap, kfirst, klast)
!
! Geopk does not need j ghost zones of pkc and wz
!
!$omp parallel do private(i, j, k)
          do k = kfirst, klast+1
            do j = jfirst, jlast
              do i = 1, im
                pkc(i,j,k) = pkcc(i,j,k)
                wz(i,j,k)  = wzc(i,j,k)
              enddo
            enddo
          enddo

        else
!
! Transpose to xy decomposition
!
          call redistributestart(inter_ijk, .true., ptk)
          call redistributefinish(inter_ijk, .true., delpxy)
          call redistributestart(inter_ijk, .true., ptc)
          call redistributefinish(inter_ijk, .true., ptxy)

          call geopk(ptop, pexy, delpxy, pkxy, wzxy, hsxy, ptxy, 0, im, jm, km,  &
                     jfirstxy, jlastxy, ifirstxy, ilastxy,                       &
                     cp, akap, nx, 0)
!
! Transpose back to yz decomposition.
! delpxy, ptxy and pexy are not output quantities on this call.
! pkkp and wzkp are holding arrays, whose specific z-dimensions
! are required by Pilgrim.
!
          call redistributestart(inter_ijkp, .false., pkxy)
          call redistributefinish(inter_ijkp, .false., pkkp)
          call redistributestart(inter_ijkp, .false., wzxy)
          call redistributefinish(inter_ijkp, .false., wzkp)

!$omp parallel do private(i, j, k)
          do k = kfirst, klastp
            do j = jfirst, jlast
              do i = 1, im
                pkc(i,j,k) = pkkp(i,j,k)
                wz(i,j,k)  = wzkp(i,j,k)
              enddo
            enddo
          enddo

          if (npr_z .gt. 1) then
!
! Fill in klast+1
!
            incount = 0
            outcount = 0
            if (kfirst .gt. 1) then
              incount = 0
              call bufferpack3d(pkc, 1, im, jfirst-1, jlast+1, kfirst, klast+1,  &
                                1, im, jfirst, jlast, kfirst, kfirst,            &
                                buff_s(incount+1))
              incount = incount + im * (jlast-jfirst+1)
              call bufferpack3d(wz, 1, im, jfirst-1, jlast+1, kfirst, klast+1,   &
                                1, im, jfirst, jlast, kfirst, kfirst,            &
                                buff_s(incount+1))
              incount = incount + im * (jlast-jfirst+1)
            endif
            if (klast .lt. km) then
              outcount = 2 * im * (jlast - jfirst + 1)
            endif
            call mp_barrier()
            call mp_send(iam-npr_y, iam+npr_y, incount, outcount, buff_s, buff_r)
            call mp_barrier()
            call mp_recv(iam+npr_y, outcount, buff_r)

            if (klast .lt. km) then
              outcount = 0
              call bufferunpack3d( pkc, 1, im, jfirst-1, jlast+1, kfirst, klast+1,  &
                                   1, im, jfirst, jlast, klast+1, klast+1,          &
                                   buff_r(outcount+1))
              outcount = outcount + im * (jlast - jfirst + 1)
              call bufferunpack3d( wz, 1, im, jfirst-1, jlast+1, kfirst, klast+1,   &
                                   1, im, jfirst, jlast, klast+1, klast+1,          &
                                   buff_r(outcount+1) )
            endif

          endif    ! npr_z .gt. 1

        endif      ! geopkc16

      else
#endif

        if (geopkc16) then
!
! Use 16-byte reals (for compatibility with 2-D decomposition)
!
          call geopk16(ptop, pe, ptk, pkcc, wzc, hs, ptc, 0, im, jm, km,  &
                       jfirst, jlast, 1, im, cp, akap, 1, km)
        else
!
! Use 8-byte reals (standard)
!
          call geopk(ptop, pe, ptk, pkcc, wzc, hs, ptc, 0, im, jm, km,  &
                     jfirst, jlast, 1, im,                              &
                     cp, akap, nx, 0)
        endif
!
! Geopk does not need j ghost zones of pkc and wz
!
!$omp parallel do private(i, j, k)
        do k = kfirst, klast+1
          do j = jfirst, jlast
            do i = 1, im
              pkc(i,j,k) = pkcc(i,j,k)
              wz(i,j,k)  = wzc(i,j,k)
            enddo
          enddo
        enddo

#if defined( SPMD )
      endif
#endif
      call t_stopf('c_geop')

! Upon exit from geopk, the quantities pe, pkc and wz will have been
! updated at klast+1

#if defined( SPMD )
      call t_startf('send_pkc&wz')
!
! pkc & wz need to be ghosted only at jfirst-1
! mp_send2_n() and mp_recv2_s() are specifically designed for ghosting pkc & wz
!
      call mp_send2_n(im, jm, jfirst, jlast, kfirst, klast+1, 1, 1, pkc, wz)
      call t_stopf('send_pkc&wz')
#endif

      call t_startf('uc_comp')

#if defined( HIGH_P )
      call t_startf('highp2')
      call highp2(pkc, wz, wz3, wzz, dpt,         &
                  im, jm, km, jfirst, jlast,      &
                  kfirst, klast, klastp, nx )
      call t_stopf('highp2')

!$omp parallel do private(i, j, k, p1d)
      do k=kfirst,klast
         do j=js2g0,jn2g0
            do i=1,im
               p1d(i) = pkc(i,j,k+1) - pkc(i,j,k)
            enddo
            uc(1,j,k) = uc(1,j,k) + dtdx2(j) / (p1d(1)+p1d(im)) *        &
                        (dpt(im,j,k)-dpt(1,j,k)-wz3(1,j,k)+wz3(1,j,k+1))
            do i=2,im
               uc(i,j,k) = uc(i,j,k) + dtdx2(j) / (p1d(i)+p1d(i-1)) *    &
                           (dpt(i-1,j,k)-dpt(i,j,k)-wz3(i,j,k)+wz3(i,j,k+1))
            enddo
         enddo        ! j-loop

         call pft2d(uc(1,js2g0,k), sc(js2g0), dc(1,js2g0), im,   &
                    jn2g0-js2g0+1, ifax, trigs )
      enddo

#else
! Beware k+1 references directly below (AAM)
!
!$omp parallel do private(i, j, k, p1d)
      do k=kfirst,klast
         do j=js2g0,jn2g0
            do i=1,im
               p1d(i) = pkc(i,j,k+1) - pkc(i,j,k)
            enddo
! i=1
            uc(1,j,k) = uc(1,j,k) + dtdx2(j) * (                              &
                        (wz(im,j,k+1)-wz(1,j,k))*(pkc(1,j,k+1)-pkc(im,j,k))   &
                      + (wz(im,j,k)-wz(1,j,k+1))*(pkc(im,j,k+1)-pkc(1,j,k)))  &
                        / (p1d(1)+p1d(im))
            do i=2,im
               uc(i,j,k) = uc(i,j,k) + dtdx2(j) * (                                 &
                           (wz(i-1,j,k+1)-wz(i,j,k))*(pkc(i,j,k+1)-pkc(i-1,j,k))    &
                         + (wz(i-1,j,k)-wz(i,j,k+1))*(pkc(i-1,j,k+1)-pkc(i,j,k)))   &
                           / (p1d(i)+p1d(i-1))
            enddo
         enddo

         call pft2d(uc(1,js2g0,k), sc(js2g0), dc(1,js2g0), im,   &
                    jn2g0-js2g0+1, ifax, trigs )
      enddo
#endif
      call t_stopf('uc_comp')

#if defined( SPMD )
      call t_startf('recv_pkc&wz')
      call mp_barrier()
      call mp_recv2_s(im, jm, jfirst, jlast, kfirst, klast+1, 1, 1, pkc, wz)
      call mp_send3d_ns(im, jm, jfirst, jlast, kfirst, klast,   &
                        ng_d, ng_d, uc, 1 )
      call t_stopf('recv_pkc&wz')
#endif

      call t_startf('vc_comp')
!
! Beware k+1 references directly below (AAM)
!
!$omp parallel do private(i, j, k, wk1)
! pkc and wz need only to be ghosted at jfirst-1
      do k=kfirst,klast
         do j=js1g1,jlast
            do i=1,im
               wk1(i,j) = pkc(i,j,k+1) - pkc(i,j,k)
            enddo
         enddo

         do j=js2g0,jlast
            do i=1,im
               vc(i,j,k) = vc(i,j,k) + dtdy5/(wk1(i,j)+wk1(i,j-1)) *             &
#if defined ( HIGH_P )
                           ( dpt(i,j-1,k)-dpt(i,j,k)-wzz(i,j,k)+wzz(i,j,k+1) )
#else
                           ( (wz(i,j-1,k+1)-wz(i,j,k))*(pkc(i,j,k+1)-pkc(i,j-1,k))    &
                           + (wz(i,j-1,k)-wz(i,j,k+1))*(pkc(i,j-1,k+1)-pkc(i,j,k)) )
#endif
            enddo
         enddo

         call pft2d(vc(1,js2g0,k), se(js2g0), de(1,js2g0), im,   &
                    jlast-js2g0+1, ifax, trigs )
      enddo
      call t_stopf('vc_comp')

#if defined( SPMD )
      call t_startf('c_mpi_3')
      call mp_barrier()
      call mp_recv3d_ns(im, jm, jfirst, jlast, kfirst, klast,   &
                        ng_d, ng_d, uc, 1)
! vc only needs to be ghosted at jlast+1
      call mp_send_s(im, jm, jfirst, jlast, kfirst, klast, 2, 2, vc)
      call mp_barrier()
      call mp_recv_n(im, jm, jfirst, jlast, kfirst, klast, 2, 2, vc)
      call t_stopf('c_mpi_3')
#endif

      call t_startf('d_core')

!$omp parallel do private(i, j, k, iord, jord, uxx, vxx)
      do k=kfirst,klast

         if( k <= km/8 ) then
            if( k == 1 ) then
               iord = 1
               jord = 1
            else
               iord = min(2, iord_d)
               jord = min(2, jord_d)
            endif
         else
            iord = iord_d
            jord = jord_d
         endif

         do j=js1gd,jn1gd
            do i=1,im
               uxx(i,j) = u(i,j,k)
               vxx(i,j) = v(i,j,k)
            enddo
         enddo

!-----------------------------------------------------------------
! Call the vertical independent part of the dynamics on the D-grid
!-----------------------------------------------------------------

         call d_sw( uxx(1,jfirst-ng_d), vxx(1,jfirst-ng_d),          &
                    uc(1,jfirst-ng_d,k), vc(1,jfirst-2,k),           &
                    pt(1,jfirst-ng_d,k), delp(1,jfirst,k),           &
                    delpf(1,jfirst-ng_d,k), cx3(1,jfirst-ng_d,k),    &
                    cy3(1,jfirst,k), mfx(1,jfirst,k),                &
                    mfy(1,jfirst,k), cdx(js2g0,k), cdy(js2g0,k),     &
                    dtdx, dtdxe, dtxe5, txe5, dyce, rdx, cy,         &
                    dx, f0(jfirst-ng_d), js2g0, jn1g1, im, jm,       &
                    jfirst, jlast, ng_d, nq, iord,                   &
                    jord, zt_d, rcap, tiny, dtdy,                    &
                    dtdy5, tdy5, rdy, cosp, acosp, cose,             &
                    sinlon, coslon, sinl5, cosl5 )
      enddo
      call t_stopf('d_core')

      call t_startf('d_geop')

#if defined( SPMD )
      if (twod_decomp .eq. 1) then
        if (geopkd16) then
!
! Stay in yz space and use semi-global z communications and 16-byte reals
!
          call geopk16(ptop, pe, delp, pkcc, wzc, hs, pt, ng_d, im, jm, km,  &
                       jfirst, jlast, 1, im, cp, akap, kfirst, klast)
!
! Geopk does not need j ghost zones of pkc and wz
!
!$omp parallel do private(i, j, k)
          do k = kfirst, klast+1
            do j = jfirst, jlast
              do i = 1, im
                pkc(i,j,k) = pkcc(i,j,k)
                wz(i,j,k)  = wzc(i,j,k)
              enddo
            enddo
          enddo

        else
!
! Transpose to xy decomposition
!
          call redistributestart(inter_ijk, .true., delp)
          call redistributefinish(inter_ijk, .true., delpxy)
!
! Temporary solution to redistribute the unghosted version of PT
! Art: can we phase out the redistribute of PT, by using on GEOPK16?
!
!$omp parallel do private(i,j,k)
          do k=kfirst,klast
            do j=jfirst,jlast
              do i=1,im