nb_kernel133_x86_64_sse2.s

From GROMACS ("the most famous and fastest molecular simulation software") · assembly source · 2,141 lines total · page 1 of 5

S
2,141
Font size
#### $Id: nb_kernel133_x86_64_sse2.s,v 1.4.2.3 2006/09/22 08:40:33 lindahl Exp $#### Gromacs 4.0                         Copyright (c) 1991-2003 ## David van der Spoel, Erik Lindahl#### This program is free software; you can redistribute it and/or## modify it under the terms of the GNU General Public License## as published by the Free Software Foundation; either version 2## of the License, or (at your option) any later version.#### To help us fund GROMACS development, we humbly ask that you cite## the research papers on the package. Check out http://www.gromacs.org## ## And Hey:## Gnomes, ROck Monsters And Chili Sauce##.globl nb_kernel133_x86_64_sse2.globl _nb_kernel133_x86_64_sse2nb_kernel133_x86_64_sse2:       _nb_kernel133_x86_64_sse2:      ##      Room for return address and rbp (16 bytes).set nb133_fshift, 16.set nb133_gid, 24.set nb133_pos, 32.set nb133_faction, 40.set nb133_charge, 48.set nb133_p_facel, 56.set nb133_argkrf, 64.set nb133_argcrf, 72.set nb133_Vc, 80.set nb133_type, 88.set nb133_p_ntype, 96.set nb133_vdwparam, 104.set nb133_Vvdw, 112.set nb133_p_tabscale, 120.set nb133_VFtab, 128.set nb133_invsqrta, 136.set nb133_dvda, 144.set nb133_p_gbtabscale, 152.set nb133_GBtab, 160.set nb133_p_nthreads, 168.set nb133_count, 176.set nb133_mtx, 184.set nb133_outeriter, 192.set nb133_inneriter, 200.set nb133_work, 208        ## stack offsets for local variables          ## bottom of stack is cache-aligned for sse2 use .set nb133_ixO, 0.set nb133_iyO, 16.set nb133_izO, 32.set nb133_ixH1, 48.set nb133_iyH1, 64.set nb133_izH1, 80.set nb133_ixH2, 96.set nb133_iyH2, 112.set nb133_izH2, 128.set nb133_ixM, 144.set nb133_iyM, 160.set nb133_izM, 176.set nb133_iqH, 192.set nb133_iqM, 208.set nb133_dxO, 224.set nb133_dyO, 240.set nb133_dzO, 256.set nb133_dxH1, 272.set nb133_dyH1, 288.set nb133_dzH1, 304.set nb133_dxH2, 320.set nb133_dyH2, 336.set nb133_dzH2, 352.set nb133_dxM, 368.set nb133_dyM, 384.set nb133_dzM, 400.set nb133_qqH, 416.set nb133_qqM, 432.set nb133_c6, 
448.set nb133_c12, 464.set nb133_tsc, 480.set nb133_fstmp, 496.set nb133_vctot, 512.set nb133_Vvdwtot, 528.set nb133_fixO, 544.set nb133_fiyO, 560.set nb133_fizO, 576.set nb133_fixH1, 592.set nb133_fiyH1, 608.set nb133_fizH1, 624.set nb133_fixH2, 640.set nb133_fiyH2, 656.set nb133_fizH2, 672.set nb133_fixM, 688.set nb133_fiyM, 704.set nb133_fizM, 720.set nb133_fjx, 736.set nb133_fjy, 752.set nb133_fjz, 768.set nb133_half, 784.set nb133_three, 800.set nb133_two, 816.set nb133_rinvH1, 832.set nb133_rinvH2, 848.set nb133_rinvM, 864.set nb133_krsqH1, 880.set nb133_krsqH2, 896.set nb133_krsqM, 912.set nb133_krf, 928.set nb133_crf, 944.set nb133_rsqO, 960.set nb133_facel, 976.set nb133_iinr, 992.set nb133_jindex, 1000.set nb133_jjnr, 1008.set nb133_shift, 1016.set nb133_shiftvec, 1024.set nb133_innerjjnr, 1032.set nb133_is3, 1040.set nb133_ii3, 1044.set nb133_nri, 1048.set nb133_ntia, 1052.set nb133_innerk, 1056.set nb133_n, 1060.set nb133_nn1, 1064.set nb133_nouter, 1068.set nb133_ninner, 1072        push %rbp        movq %rsp,%rbp        push %rbx        emms        push %r12        push %r13        push %r14        push %r15        subq $1080,%rsp         ## local variable stack space (n*16+8)        ## zero 32-bit iteration counters        movl $0,%eax        movl %eax,nb133_nouter(%rsp)        movl %eax,nb133_ninner(%rsp)        movl (%rdi),%edi        movl %edi,nb133_nri(%rsp)        movq %rsi,nb133_iinr(%rsp)        movq %rdx,nb133_jindex(%rsp)        movq %rcx,nb133_jjnr(%rsp)        movq %r8,nb133_shift(%rsp)        movq %r9,nb133_shiftvec(%rsp)        movq nb133_p_facel(%rbp),%rsi        movsd (%rsi),%xmm0        movsd %xmm0,nb133_facel(%rsp)        movq nb133_p_tabscale(%rbp),%rax        movsd (%rax),%xmm3        shufpd $0,%xmm3,%xmm3        movapd %xmm3,nb133_tsc(%rsp)        ## create constant floating-point factors on stack        movl $0x00000000,%eax   ## lower half of double half IEEE (hex)        movl $0x3fe00000,%ebx        movl %eax,nb133_half(%rsp)   
     movl %ebx,nb133_half+4(%rsp)        movsd nb133_half(%rsp),%xmm1        shufpd $0,%xmm1,%xmm1  ## splat to all elements        movapd %xmm1,%xmm3        addpd  %xmm3,%xmm3      ## one        movapd %xmm3,%xmm2        addpd  %xmm2,%xmm2      ## two        addpd  %xmm2,%xmm3      ## three        movapd %xmm1,nb133_half(%rsp)        movapd %xmm2,nb133_two(%rsp)        movapd %xmm3,nb133_three(%rsp)        ## assume we have at least one i particle - start directly         movq  nb133_iinr(%rsp),%rcx         ## rcx = pointer into iinr[]                movl  (%rcx),%ebx           ## ebx =ii         movq  nb133_charge(%rbp),%rdx        movsd 8(%rdx,%rbx,8),%xmm3        movsd 24(%rdx,%rbx,8),%xmm4        movsd nb133_facel(%rsp),%xmm5        mulsd  %xmm5,%xmm3        mulsd  %xmm5,%xmm4        shufpd $0,%xmm3,%xmm3        shufpd $0,%xmm4,%xmm4        movapd %xmm3,nb133_iqH(%rsp)        movapd %xmm4,nb133_iqM(%rsp)        movq  nb133_type(%rbp),%rdx        movl  (%rdx,%rbx,4),%ecx        shll  %ecx        movq nb133_p_ntype(%rbp),%rdi        imull (%rdi),%ecx     ## rcx = ntia = 2*ntype*type[ii0]         movl  %ecx,nb133_ntia(%rsp)_nb_kernel133_x86_64_sse2.nb133_threadloop:         movq  nb133_count(%rbp),%rsi            ## pointer to sync counter        movl  (%rsi),%eax_nb_kernel133_x86_64_sse2.nb133_spinlock:         movl  %eax,%ebx                         ## ebx=*count=nn0        addl  $1,%ebx                          ## ebx=nn1=nn0+10        lock         cmpxchgl %ebx,(%rsi)                    ## write nn1 to *counter,                                                ## if it hasnt changed.                                                ## or reread *counter to eax.        
pause                                   ## -> better p4 performance        jnz _nb_kernel133_x86_64_sse2.nb133_spinlock        ## if(nn1>nri) nn1=nri        movl nb133_nri(%rsp),%ecx        movl %ecx,%edx        subl %ebx,%ecx        cmovlel %edx,%ebx                       ## if(nn1>nri) nn1=nri        ## Cleared the spinlock if we got here.        ## eax contains nn0, ebx contains nn1.        movl %eax,nb133_n(%rsp)        movl %ebx,nb133_nn1(%rsp)        subl %eax,%ebx                          ## calc number of outer lists        movl %eax,%esi                          ## copy n to esi        jg  _nb_kernel133_x86_64_sse2.nb133_outerstart        jmp _nb_kernel133_x86_64_sse2.nb133_end_nb_kernel133_x86_64_sse2.nb133_outerstart:         ## ebx contains number of outer iterations        addl nb133_nouter(%rsp),%ebx        movl %ebx,nb133_nouter(%rsp)_nb_kernel133_x86_64_sse2.nb133_outer:         movq  nb133_shift(%rsp),%rax        ## eax = pointer into shift[]         movl  (%rax,%rsi,4),%ebx        ## ebx=shift[n]         lea  (%rbx,%rbx,2),%rbx    ## rbx=3*is         movl  %ebx,nb133_is3(%rsp)      ## store is3         movq  nb133_shiftvec(%rsp),%rax     ## eax = base of shiftvec[]         movsd (%rax,%rbx,8),%xmm0        movsd 8(%rax,%rbx,8),%xmm1        movsd 16(%rax,%rbx,8),%xmm2        movq  nb133_iinr(%rsp),%rcx         ## ecx = pointer into iinr[]                movl  (%rcx,%rsi,4),%ebx    ## ebx =ii         movapd %xmm0,%xmm3        movapd %xmm1,%xmm4        movapd %xmm2,%xmm5        movapd %xmm0,%xmm6        movapd %xmm1,%xmm7        lea  (%rbx,%rbx,2),%rbx        ## rbx = 3*ii=ii3         movq  nb133_pos(%rbp),%rax      ## eax = base of pos[]          movl  %ebx,nb133_ii3(%rsp)        addsd (%rax,%rbx,8),%xmm3       ## ox        addsd 8(%rax,%rbx,8),%xmm4      ## oy        addsd 16(%rax,%rbx,8),%xmm5     ## oz           addsd 24(%rax,%rbx,8),%xmm6     ## h1x        addsd 32(%rax,%rbx,8),%xmm7     ## h1y        shufpd $0,%xmm3,%xmm3        shufpd 
$0,%xmm4,%xmm4        shufpd $0,%xmm5,%xmm5        shufpd $0,%xmm6,%xmm6        shufpd $0,%xmm7,%xmm7        movapd %xmm3,nb133_ixO(%rsp)        movapd %xmm4,nb133_iyO(%rsp)        movapd %xmm5,nb133_izO(%rsp)        movapd %xmm6,nb133_ixH1(%rsp)        movapd %xmm7,nb133_iyH1(%rsp)        movsd %xmm2,%xmm6        movsd %xmm0,%xmm3        movsd %xmm1,%xmm4        movsd %xmm2,%xmm5        addsd 40(%rax,%rbx,8),%xmm6    ## h1z        addsd 48(%rax,%rbx,8),%xmm0    ## h2x        addsd 56(%rax,%rbx,8),%xmm1    ## h2y        addsd 64(%rax,%rbx,8),%xmm2    ## h2z        addsd 72(%rax,%rbx,8),%xmm3    ## mx        addsd 80(%rax,%rbx,8),%xmm4    ## my        addsd 88(%rax,%rbx,8),%xmm5    ## mz        shufpd $0,%xmm6,%xmm6        shufpd $0,%xmm0,%xmm0        shufpd $0,%xmm1,%xmm1        shufpd $0,%xmm2,%xmm2        shufpd $0,%xmm3,%xmm3        shufpd $0,%xmm4,%xmm4        shufpd $0,%xmm5,%xmm5        movapd %xmm6,nb133_izH1(%rsp)        movapd %xmm0,nb133_ixH2(%rsp)        movapd %xmm1,nb133_iyH2(%rsp)        movapd %xmm2,nb133_izH2(%rsp)        movapd %xmm3,nb133_ixM(%rsp)        movapd %xmm4,nb133_iyM(%rsp)        movapd %xmm5,nb133_izM(%rsp)        ## clear vctot and i forces         xorpd %xmm4,%xmm4        movapd %xmm4,nb133_vctot(%rsp)        movapd %xmm4,nb133_Vvdwtot(%rsp)        movapd %xmm4,nb133_fixO(%rsp)        movapd %xmm4,nb133_fiyO(%rsp)        movapd %xmm4,nb133_fizO(%rsp)        movapd %xmm4,nb133_fixH1(%rsp)        movapd %xmm4,nb133_fiyH1(%rsp)        movapd %xmm4,nb133_fizH1(%rsp)        movapd %xmm4,nb133_fixH2(%rsp)        movapd %xmm4,nb133_fiyH2(%rsp)        movapd %xmm4,nb133_fizH2(%rsp)        movapd %xmm4,nb133_fixM(%rsp)        movapd %xmm4,nb133_fiyM(%rsp)        movapd %xmm4,nb133_fizM(%rsp)        movq  nb133_jindex(%rsp),%rax        movl  (%rax,%rsi,4),%ecx             ## jindex[n]         movl  4(%rax,%rsi,4),%edx            ## jindex[n+1]         subl  %ecx,%edx              ## number of innerloop atoms         movq  nb133_pos(%rbp),%rsi  
      movq  nb133_faction(%rbp),%rdi        movq  nb133_jjnr(%rsp),%rax        shll  $2,%ecx        addq  %rcx,%rax        movq  %rax,nb133_innerjjnr(%rsp)       ## pointer to jjnr[nj0]         movl  %edx,%ecx        subl  $2,%edx        addl  nb133_ninner(%rsp),%ecx        movl  %ecx,nb133_ninner(%rsp)        addl  $0,%edx        movl  %edx,nb133_innerk(%rsp)      ## number of innerloop atoms         jge   _nb_kernel133_x86_64_sse2.nb133_unroll_loop        jmp   _nb_kernel133_x86_64_sse2.nb133_checksingle_nb_kernel133_x86_64_sse2.nb133_unroll_loop:         ## twice unrolled innerloop here         movq  nb133_innerjjnr(%rsp),%rdx       ## pointer to jjnr[k]         movl  (%rdx),%eax        movl  4(%rdx),%ebx        addq $8,nb133_innerjjnr(%rsp)                   ## advance pointer (unrolled 2)         movq nb133_charge(%rbp),%rsi     ## base of charge[]         movlpd (%rsi,%rax,8),%xmm3        movhpd (%rsi,%rbx,8),%xmm3        movapd %xmm3,%xmm4        mulpd  nb133_iqM(%rsp),%xmm3        mulpd  nb133_iqH(%rsp),%xmm4        movapd  %xmm3,nb133_qqM(%rsp)        movapd  %xmm4,nb133_qqH(%rsp)        movq nb133_type(%rbp),%rsi        movl (%rsi,%rax,4),%r8d        movl (%rsi,%rbx,4),%r9d        movq nb133_vdwparam(%rbp),%rsi        shll %r8d        shll %r9d        movl nb133_ntia(%rsp),%edi        addl %edi,%r8d        addl %edi,%r9d        movlpd (%rsi,%r8,8),%xmm6       ## c6a        movlpd (%rsi,%r9,8),%xmm7       ## c6b        movhpd 8(%rsi,%r8,8),%xmm6      ## c6a c12a         movhpd 8(%rsi,%r9,8),%xmm7      ## c6b c12b         movapd %xmm6,%xmm4        unpcklpd %xmm7,%xmm4        unpckhpd %xmm7,%xmm6        movapd %xmm4,nb133_c6(%rsp)        movapd %xmm6,nb133_c12(%rsp)        movq nb133_pos(%rbp),%rsi        ## base of pos[]         lea  (%rax,%rax,2),%rax     ## replace jnr with j3         lea  (%rbx,%rbx,2),%rbx        ## move j coordinates to local temp variables     movlpd (%rsi,%rax,8),%xmm0    movlpd 8(%rsi,%rax,8),%xmm1    movlpd 16(%rsi,%rax,8),%xmm2    
movhpd (%rsi,%rbx,8),%xmm0    movhpd 8(%rsi,%rbx,8),%xmm1    movhpd 16(%rsi,%rbx,8),%xmm2    ## xmm0 = jx    ## xmm1 = jy    ## xmm2 = jz    ## O interaction    ## copy to xmm3-xmm5    movapd %xmm0,%xmm3    movapd %xmm1,%xmm4    movapd %xmm2,%xmm5    subpd nb133_ixO(%rsp),%xmm3    subpd nb133_iyO(%rsp),%xmm4    subpd nb133_izO(%rsp),%xmm5    movapd %xmm3,nb133_dxO(%rsp)    movapd %xmm4,nb133_dyO(%rsp)    movapd %xmm5,nb133_dzO(%rsp)        mulpd  %xmm3,%xmm3        mulpd  %xmm4,%xmm4        mulpd  %xmm5,%xmm5        addpd  %xmm4,%xmm3        addpd  %xmm5,%xmm3    ## xmm3=rsq    cvtpd2ps %xmm3,%xmm15    rsqrtps %xmm15,%xmm15    cvtps2pd %xmm15,%xmm15    ## lu in low xmm2     ## lookup seed in xmm15    movapd %xmm15,%xmm5      ## copy of lu     mulpd %xmm15,%xmm15       ## lu*lu     movapd nb133_three(%rsp),%xmm7    mulpd %xmm3,%xmm15       ## rsq*lu*lu                        movapd nb133_half(%rsp),%xmm6    subpd %xmm15,%xmm7       ## 30-rsq*lu*lu 

⌨️ Keyboard shortcuts

Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?