# nb_kernel400_x86_64_sse2.s  (viewer-added title line, commented out so it cannot break assembly)

# (extraction note) Code-viewer chrome, translated from Chinese:
# "From GROMACS ('the most famous and fastest molecular simulation software')
#  — assembly source, 1,214 lines total, page 1 of 3; font-size control."
# This chunk therefore contains only the first ~third of the kernel; the
# inner-loop force update and function epilogue continue on pages 2-3.
#### $Id: nb_kernel400_x86_64_sse2.s,v 1.4.2.3 2006/09/22 08:40:37 lindahl Exp $#### Gromacs 4.0                         Copyright (c) 1991-2003 ## David van der Spoel, Erik Lindahl#### This program is free software; you can redistribute it and/or## modify it under the terms of the GNU General Public License## as published by the Free Software Foundation; either version 2## of the License, or (at your option) any later version.#### To help us fund GROMACS development, we humbly ask that you cite## the research papers on the package. Check out http://www.gromacs.org## ## And Hey:## Gnomes, ROck Monsters And Chili Sauce##.globl nb_kernel400_x86_64_sse2.globl _nb_kernel400_x86_64_sse2nb_kernel400_x86_64_sse2:       _nb_kernel400_x86_64_sse2:      ##      Room for return address and rbp (16 bytes).set nb400_fshift, 16.set nb400_gid, 24.set nb400_pos, 32.set nb400_faction, 40.set nb400_charge, 48.set nb400_p_facel, 56.set nb400_argkrf, 64.set nb400_argcrf, 72.set nb400_Vc, 80.set nb400_type, 88.set nb400_p_ntype, 96.set nb400_vdwparam, 104.set nb400_Vvdw, 112.set nb400_p_tabscale, 120.set nb400_VFtab, 128.set nb400_invsqrta, 136.set nb400_dvda, 144.set nb400_p_gbtabscale, 152.set nb400_GBtab, 160.set nb400_p_nthreads, 168.set nb400_count, 176.set nb400_mtx, 184.set nb400_outeriter, 192.set nb400_inneriter, 200.set nb400_work, 208        ## stack offsets for local variables          ## bottom of stack is cache-aligned for sse2 use .set nb400_ix, 0.set nb400_iy, 16.set nb400_iz, 32.set nb400_iq, 48.set nb400_dx, 64.set nb400_dy, 80.set nb400_dz, 96.set nb400_two, 112.set nb400_gbtsc, 128.set nb400_qq, 144.set nb400_r, 160.set nb400_vctot, 176.set nb400_fix, 192.set nb400_fiy, 208.set nb400_fiz, 224.set nb400_half, 240.set nb400_three, 256.set nb400_isai, 272.set nb400_isaprod, 288.set nb400_dvdasum, 304.set nb400_gbscale, 320.set nb400_nri, 336.set nb400_iinr, 344.set nb400_jindex, 352.set nb400_jjnr, 360.set nb400_shift, 368.set nb400_shiftvec, 376.set nb400_facel, 
384.set nb400_innerjjnr, 392.set nb400_is3, 400.set nb400_ii3, 404.set nb400_ii, 408.set nb400_innerk, 412.set nb400_n, 416.set nb400_nn1, 420.set nb400_nouter, 424.set nb400_ninner, 428        push %rbp        movq %rsp,%rbp        push %rbx        emms        push %r12        push %r13        push %r14        push %r15        subq $440,%rsp          ## local variable stack space (n*16+8)        ## zero 32-bit iteration counters        movl $0,%eax        movl %eax,nb400_nouter(%rsp)        movl %eax,nb400_ninner(%rsp)        movl (%rdi),%edi        movl %edi,nb400_nri(%rsp)        movq %rsi,nb400_iinr(%rsp)        movq %rdx,nb400_jindex(%rsp)        movq %rcx,nb400_jjnr(%rsp)        movq %r8,nb400_shift(%rsp)        movq %r9,nb400_shiftvec(%rsp)        movq nb400_p_facel(%rbp),%rsi        movsd (%rsi),%xmm0        movsd %xmm0,nb400_facel(%rsp)        movq nb400_p_gbtabscale(%rbp),%rbx        movsd (%rbx),%xmm4        shufpd $0,%xmm4,%xmm4        movapd %xmm4,nb400_gbtsc(%rsp)        ## create constant floating-point factors on stack        movl $0x00000000,%eax   ## lower half of double half IEEE (hex)        movl $0x3fe00000,%ebx        movl %eax,nb400_half(%rsp)        movl %ebx,nb400_half+4(%rsp)        movsd nb400_half(%rsp),%xmm1        shufpd $0,%xmm1,%xmm1  ## splat to all elements        movapd %xmm1,%xmm3        addpd  %xmm3,%xmm3      ## one        movapd %xmm3,%xmm2        addpd  %xmm2,%xmm2      ## two        addpd  %xmm2,%xmm3      ## three        movapd %xmm1,nb400_half(%rsp)        movapd %xmm2,nb400_two(%rsp)        movapd %xmm3,nb400_three(%rsp)_nb_kernel400_x86_64_sse2.nb400_threadloop:         movq  nb400_count(%rbp),%rsi            ## pointer to sync counter        movl  (%rsi),%eax_nb_kernel400_x86_64_sse2.nb400_spinlock:         movl  %eax,%ebx                         ## ebx=*count=nn0        addl  $1,%ebx                          ## ebx=nn1=nn0+10        lock         cmpxchgl %ebx,(%rsi)                    ## write nn1 to *counter,          
                                      ## if it hasnt changed.                                                ## or reread *counter to eax.        pause                                   ## -> better p4 performance        jnz _nb_kernel400_x86_64_sse2.nb400_spinlock        ## if(nn1>nri) nn1=nri        movl nb400_nri(%rsp),%ecx        movl %ecx,%edx        subl %ebx,%ecx        cmovlel %edx,%ebx                       ## if(nn1>nri) nn1=nri        ## Cleared the spinlock if we got here.        ## eax contains nn0, ebx contains nn1.        movl %eax,nb400_n(%rsp)        movl %ebx,nb400_nn1(%rsp)        subl %eax,%ebx                          ## calc number of outer lists        movl %eax,%esi                          ## copy n to esi        jg  _nb_kernel400_x86_64_sse2.nb400_outerstart        jmp _nb_kernel400_x86_64_sse2.nb400_end_nb_kernel400_x86_64_sse2.nb400_outerstart:         ## ebx contains number of outer iterations        addl nb400_nouter(%rsp),%ebx        movl %ebx,nb400_nouter(%rsp)_nb_kernel400_x86_64_sse2.nb400_outer:         movq  nb400_shift(%rsp),%rax        ## rax = pointer into shift[]         movl  (%rax,%rsi,4),%ebx        ## rbx=shift[n]         lea  (%rbx,%rbx,2),%rbx    ## rbx=3*is         movl  %ebx,nb400_is3(%rsp)      ## store is3         movq  nb400_shiftvec(%rsp),%rax     ## rax = base of shiftvec[]         movsd (%rax,%rbx,8),%xmm0        movsd 8(%rax,%rbx,8),%xmm1        movsd 16(%rax,%rbx,8),%xmm2        movq  nb400_iinr(%rsp),%rcx         ## rcx = pointer into iinr[]                movl  (%rcx,%rsi,4),%ebx    ## ebx =ii         movl  %ebx,nb400_ii(%rsp)        movq  nb400_charge(%rbp),%rdx        movsd (%rdx,%rbx,8),%xmm3        mulsd nb400_facel(%rsp),%xmm3        shufpd $0,%xmm3,%xmm3        movq  nb400_invsqrta(%rbp),%rdx         ## load invsqrta[ii]        movsd (%rdx,%rbx,8),%xmm4        shufpd $0,%xmm4,%xmm4        lea  (%rbx,%rbx,2),%rbx        ## rbx = 3*ii=ii3         movq  nb400_pos(%rbp),%rax      ## rax = base of pos[]     
     addsd (%rax,%rbx,8),%xmm0        addsd 8(%rax,%rbx,8),%xmm1        addsd 16(%rax,%rbx,8),%xmm2        movapd %xmm3,nb400_iq(%rsp)        movapd %xmm4,nb400_isai(%rsp)        shufpd $0,%xmm0,%xmm0        shufpd $0,%xmm1,%xmm1        shufpd $0,%xmm2,%xmm2        movapd %xmm0,nb400_ix(%rsp)        movapd %xmm1,nb400_iy(%rsp)        movapd %xmm2,nb400_iz(%rsp)        movl  %ebx,nb400_ii3(%rsp)        ## clear vctot and i forces         xorpd %xmm4,%xmm4        movapd %xmm4,%xmm8        movapd %xmm4,%xmm12        movapd %xmm4,%xmm13        movapd %xmm4,%xmm14        movapd %xmm4,%xmm15        movq  nb400_jindex(%rsp),%rax        movl  (%rax,%rsi,4),%ecx             ## jindex[n]         movl  4(%rax,%rsi,4),%edx            ## jindex[n+1]         subl  %ecx,%edx              ## number of innerloop atoms         movq  nb400_pos(%rbp),%rsi        movq  nb400_faction(%rbp),%rdi        movq  nb400_jjnr(%rsp),%rax        shll  $2,%ecx        addq  %rcx,%rax        movq  %rax,nb400_innerjjnr(%rsp)       ## pointer to jjnr[nj0]         movl  %edx,%ecx        subl  $2,%edx        addl  nb400_ninner(%rsp),%ecx        movl  %ecx,nb400_ninner(%rsp)        addl  $0,%edx        movl  %edx,nb400_innerk(%rsp)      ## number of innerloop atoms         jge   _nb_kernel400_x86_64_sse2.nb400_unroll_loop        jmp   _nb_kernel400_x86_64_sse2.nb400_checksingle_nb_kernel400_x86_64_sse2.nb400_unroll_loop:         ## twice unrolled innerloop here         movq  nb400_innerjjnr(%rsp),%rdx     ## pointer to jjnr[k]         movl  (%rdx),%r12d        movl  4(%rdx),%r13d        addq $8,nb400_innerjjnr(%rsp)                   ## advance pointer (unrolled 2)         movq nb400_pos(%rbp),%rsi               ## base of pos[]         lea  (%r12,%r12,2),%r8     ## j3         lea  (%r13,%r13,2),%r9        ## move two coordinates to xmm4-xmm6        movlpd (%rsi,%r8,8),%xmm4        movlpd 8(%rsi,%r8,8),%xmm5        movlpd 16(%rsi,%r8,8),%xmm6        movhpd (%rsi,%r9,8),%xmm4        movhpd 
8(%rsi,%r9,8),%xmm5        movhpd 16(%rsi,%r9,8),%xmm6        ## calc dr         subpd nb400_ix(%rsp),%xmm4        subpd nb400_iy(%rsp),%xmm5        subpd nb400_iz(%rsp),%xmm6        ## store dr         movapd %xmm4,%xmm9        movapd %xmm5,%xmm10        movapd %xmm6,%xmm11        ## square it         mulpd %xmm4,%xmm4        mulpd %xmm5,%xmm5        mulpd %xmm6,%xmm6        addpd %xmm5,%xmm4        addpd %xmm6,%xmm4        ## rsq in xmm4         movq nb400_invsqrta(%rbp),%rsi        movlpd (%rsi,%r12,8),%xmm3        cvtpd2ps %xmm4,%xmm5        rsqrtps %xmm5,%xmm5        cvtps2pd %xmm5,%xmm2    ## lu in low xmm2         movhpd (%rsi,%r13,8),%xmm3        mulpd  nb400_isai(%rsp),%xmm3        movapd %xmm3,nb400_isaprod(%rsp)    movapd %xmm3,%xmm6        mulpd nb400_gbtsc(%rsp),%xmm3        movapd %xmm3,nb400_gbscale(%rsp)        ## lookup seed in xmm2         movapd %xmm2,%xmm5      ## copy of lu         mulpd %xmm2,%xmm2       ## lu*lu         movapd nb400_three(%rsp),%xmm1        mulpd %xmm4,%xmm2       ## rsq*lu*lu                            movapd nb400_half(%rsp),%xmm0        subpd %xmm2,%xmm1       ## 30-rsq*lu*lu         mulpd %xmm5,%xmm1        mulpd %xmm0,%xmm1       ## xmm0=iter1 of rinv (new lu)         movq nb400_charge(%rbp),%rsi     ## base of charge[]         movlpd (%rsi,%r12,8),%xmm3        movapd %xmm1,%xmm5      ## copy of lu         mulpd %xmm1,%xmm1       ## lu*lu         movapd nb400_three(%rsp),%xmm2        mulpd %xmm4,%xmm1       ## rsq*lu*lu                            movapd nb400_half(%rsp),%xmm0        subpd %xmm1,%xmm2       ## 30-rsq*lu*lu         mulpd %xmm5,%xmm2        mulpd %xmm2,%xmm0       ## xmm0=iter2 of rinv (new lu)         mulpd %xmm0,%xmm4       ## xmm4=r     mulpd  nb400_iq(%rsp),%xmm6        movhpd (%rsi,%r13,8),%xmm3        mulpd  %xmm6,%xmm3        movapd %xmm3,nb400_qq(%rsp)        movapd %xmm4,nb400_r(%rsp)        mulpd nb400_gbscale(%rsp),%xmm4        cvttpd2pi %xmm4,%mm6    ## mm6 = lu idx         cvtpi2pd %mm6,%xmm5   
     subpd %xmm5,%xmm4        movapd %xmm4,%xmm1      ## xmm1=eps         pslld $2,%mm6           ## idx *= 4         movq nb400_GBtab(%rbp),%rsi        movd %mm6,%r10d        psrlq $32,%mm6        movd %mm6,%r11d         ## indices in r10/r11        movapd (%rsi,%r10,8),%xmm4      ## Y1 F1                movapd (%rsi,%r11,8),%xmm3      ## Y2 F2         movapd %xmm4,%xmm5        unpcklpd %xmm3,%xmm4    ## Y1 Y2         unpckhpd %xmm3,%xmm5    ## F1 F2         movapd 16(%rsi,%r10,8),%xmm6    ## G1 H1                movapd 16(%rsi,%r11,8),%xmm3    ## G2 H2         movapd %xmm6,%xmm7        unpcklpd %xmm3,%xmm6    ## G1 G2         unpckhpd %xmm3,%xmm7    ## H1 H2         ## coulomb table ready, in xmm4-xmm7                    mulpd  %xmm1,%xmm7      ## xmm7=Heps        mulpd  %xmm1,%xmm6      ## xmm6=Geps         mulpd  %xmm1,%xmm7      ## xmm7=Heps2         addpd  %xmm6,%xmm5        addpd  %xmm7,%xmm5      ## xmm5=Fp              addpd  %xmm7,%xmm7      ## two*Heps2         movapd nb400_qq(%rsp),%xmm3        addpd  %xmm6,%xmm7        addpd  %xmm5,%xmm7 ## xmm7=FF         mulpd  %xmm1,%xmm5 ## xmm5=eps*Fp         addpd  %xmm4,%xmm5 ## xmm5=VV         mulpd  %xmm3,%xmm5 ## vcoul=qq*VV          mulpd  %xmm7,%xmm3 ## fijC=FF*qq         movq nb400_dvda(%rbp),%rsi        ## Calculate dVda        xorpd %xmm7,%xmm7        mulpd nb400_gbscale(%rsp),%xmm3        movapd %xmm3,%xmm6        mulpd  nb400_r(%rsp),%xmm6        addpd  %xmm5,%xmm6    ## update vctot        addpd  %xmm5,%xmm12        ## xmm6=(vcoul+fijC*r)        subpd  %xmm6,%xmm7        movapd %xmm7,%xmm6        ## update dvdasum        addpd  %xmm7,%xmm8        ## update j atoms dvdaj        movhlps %xmm6,%xmm7        addsd  (%rsi,%r12,8),%xmm6        addsd  (%rsi,%r13,8),%xmm7        movsd  %xmm6,(%rsi,%r12,8)        movsd  %xmm7,(%rsi,%r13,8)        ## the fj's - start by accumulating forces from memory     movq nb400_faction(%rbp),%rdi        movlpd (%rdi,%r8,8),%xmm5        movlpd 8(%rdi,%r8,8),%xmm6        
movlpd 16(%rdi,%r8,8),%xmm7        movhpd (%rdi,%r9,8),%xmm5        movhpd 8(%rdi,%r9,8),%xmm6        movhpd 16(%rdi,%r9,8),%xmm7

# (extraction note) Trailing code-viewer chrome, translated from Chinese —
# keyboard-shortcut help: copy Ctrl+C, search Ctrl+F, fullscreen F11,
# larger font Ctrl+=, smaller font Ctrl+-, show shortcuts '?'.
# Not part of the original GROMACS source file.