nb_kernel314_x86_64_sse2.s

From "the most famous and fastest molecular simulation software" · .s assembly source · 2,365 lines total · page 1 of 5

S
2,365
Font size
#### $Id: nb_kernel314_x86_64_sse2.s,v 1.4.2.3 2006/09/22 08:40:37 lindahl Exp $#### Gromacs 4.0                         Copyright (c) 1991-2003 ## David van der Spoel, Erik Lindahl#### This program is free software; you can redistribute it and/or## modify it under the terms of the GNU General Public License## as published by the Free Software Foundation; either version 2## of the License, or (at your option) any later version.#### To help us fund GROMACS development, we humbly ask that you cite## the research papers on the package. Check out http://www.gromacs.org## ## And Hey:## Gnomes, ROck Monsters And Chili Sauce##.globl nb_kernel314_x86_64_sse2.globl _nb_kernel314_x86_64_sse2nb_kernel314_x86_64_sse2:       _nb_kernel314_x86_64_sse2:      ##      Room for return address and rbp (16 bytes).set nb314_fshift, 16.set nb314_gid, 24.set nb314_pos, 32.set nb314_faction, 40.set nb314_charge, 48.set nb314_p_facel, 56.set nb314_argkrf, 64.set nb314_argcrf, 72.set nb314_Vc, 80.set nb314_type, 88.set nb314_p_ntype, 96.set nb314_vdwparam, 104.set nb314_Vvdw, 112.set nb314_p_tabscale, 120.set nb314_VFtab, 128.set nb314_invsqrta, 136.set nb314_dvda, 144.set nb314_p_gbtabscale, 152.set nb314_GBtab, 160.set nb314_p_nthreads, 168.set nb314_count, 176.set nb314_mtx, 184.set nb314_outeriter, 192.set nb314_inneriter, 200.set nb314_work, 208        ## stack offsets for local variables          ## bottom of stack is cache-aligned for sse2 use .set nb314_ixO, 0.set nb314_iyO, 16.set nb314_izO, 32.set nb314_ixH1, 48.set nb314_iyH1, 64.set nb314_izH1, 80.set nb314_ixH2, 96.set nb314_iyH2, 112.set nb314_izH2, 128.set nb314_ixM, 144.set nb314_iyM, 160.set nb314_izM, 176.set nb314_jxO, 192.set nb314_jyO, 208.set nb314_jzO, 224.set nb314_jxH1, 240.set nb314_jyH1, 256.set nb314_jzH1, 272.set nb314_jxH2, 288.set nb314_jyH2, 304.set nb314_jzH2, 320.set nb314_jxM, 336.set nb314_jyM, 352.set nb314_jzM, 368.set nb314_dxOO, 384.set nb314_dyOO, 400.set nb314_dzOO, 416.set nb314_dxH1H1, 432.set 
nb314_dyH1H1, 448.set nb314_dzH1H1, 464.set nb314_dxH1H2, 480.set nb314_dyH1H2, 496.set nb314_dzH1H2, 512.set nb314_dxH1M, 528.set nb314_dyH1M, 544.set nb314_dzH1M, 560.set nb314_dxH2H1, 576.set nb314_dyH2H1, 592.set nb314_dzH2H1, 608.set nb314_dxH2H2, 624.set nb314_dyH2H2, 640.set nb314_dzH2H2, 656.set nb314_dxH2M, 672.set nb314_dyH2M, 688.set nb314_dzH2M, 704.set nb314_dxMH1, 720.set nb314_dyMH1, 736.set nb314_dzMH1, 752.set nb314_dxMH2, 768.set nb314_dyMH2, 784.set nb314_dzMH2, 800.set nb314_dxMM, 816.set nb314_dyMM, 832.set nb314_dzMM, 848.set nb314_qqMM, 864.set nb314_qqMH, 880.set nb314_qqHH, 896.set nb314_two, 912.set nb314_tsc, 928.set nb314_c6, 944.set nb314_c12, 960.set nb314_vctot, 976.set nb314_Vvdwtot, 992.set nb314_fixO, 1008.set nb314_fiyO, 1024.set nb314_fizO, 1040.set nb314_fixH1, 1056.set nb314_fiyH1, 1072.set nb314_fizH1, 1088.set nb314_fixH2, 1104.set nb314_fiyH2, 1120.set nb314_fizH2, 1136.set nb314_fixM, 1152.set nb314_fiyM, 1168.set nb314_fizM, 1184.set nb314_fjxO, 1200.set nb314_fjyO, 1216.set nb314_fjzO, 1232.set nb314_fjxH1, 1248.set nb314_fjyH1, 1264.set nb314_fjzH1, 1280.set nb314_fjxH2, 1296.set nb314_fjyH2, 1312.set nb314_fjzH2, 1328.set nb314_epsH1, 1344.set nb314_epsH2, 1360.set nb314_epsM, 1376.set nb314_half, 1392.set nb314_three, 1408.set nb314_six, 1424.set nb314_twelve, 1440.set nb314_rsqOO, 1456.set nb314_rsqH1H1, 1472.set nb314_rsqH1H2, 1488.set nb314_rsqH1M, 1504.set nb314_rsqH2H1, 1520.set nb314_rsqH2H2, 1536.set nb314_rsqH2M, 1552.set nb314_rsqMH1, 1568.set nb314_rsqMH2, 1584.set nb314_rsqMM, 1600.set nb314_rinvsqOO, 1616.set nb314_rinvH1H1, 1632.set nb314_rinvH1H2, 1648.set nb314_rinvH1M, 1664.set nb314_rinvH2H1, 1680.set nb314_rinvH2H2, 1696.set nb314_rinvH2M, 1712.set nb314_rinvMH1, 1728.set nb314_rinvMH2, 1744.set nb314_rinvMM, 1760.set nb314_is3, 1776.set nb314_ii3, 1780.set nb314_nri, 1784.set nb314_iinr, 1792.set nb314_jindex, 1800.set nb314_jjnr, 1808.set nb314_shift, 1816.set nb314_shiftvec, 1824.set nb314_facel, 
1832.set nb314_innerjjnr, 1840.set nb314_innerk, 1848.set nb314_n, 1852.set nb314_nn1, 1856.set nb314_nouter, 1860.set nb314_ninner, 1864        push %rbp        movq %rsp,%rbp        push %rbx        emms        push %r12        push %r13        push %r14        push %r15        subq $1880,%rsp         ## local variable stack space (n*16+8)        ## zero 32-bit iteration counters        movl $0,%eax        movl %eax,nb314_nouter(%rsp)        movl %eax,nb314_ninner(%rsp)        movl (%rdi),%edi        movl %edi,nb314_nri(%rsp)        movq %rsi,nb314_iinr(%rsp)        movq %rdx,nb314_jindex(%rsp)        movq %rcx,nb314_jjnr(%rsp)        movq %r8,nb314_shift(%rsp)        movq %r9,nb314_shiftvec(%rsp)        movq nb314_p_facel(%rbp),%rsi        movsd (%rsi),%xmm0        movsd %xmm0,nb314_facel(%rsp)        movq nb314_p_tabscale(%rbp),%rax        movsd (%rax),%xmm3        shufpd $0,%xmm3,%xmm3        movapd %xmm3,nb314_tsc(%rsp)        ## create constant floating-point factors on stack        movl $0x00000000,%eax   ## lower half of double half IEEE (hex)        movl $0x3fe00000,%ebx        movl %eax,nb314_half(%rsp)        movl %ebx,nb314_half+4(%rsp)        movsd nb314_half(%rsp),%xmm1        shufpd $0,%xmm1,%xmm1  ## splat to all elements        movapd %xmm1,%xmm3        addpd  %xmm3,%xmm3      ## one        movapd %xmm3,%xmm2        addpd  %xmm2,%xmm2      ## two        addpd  %xmm2,%xmm3      ## three        movapd %xmm3,%xmm4        addpd  %xmm4,%xmm4      ## six        movapd %xmm4,%xmm5        addpd  %xmm5,%xmm5      ## twelve        movapd %xmm1,nb314_half(%rsp)        movapd %xmm2,nb314_two(%rsp)        movapd %xmm3,nb314_three(%rsp)        movapd %xmm4,nb314_six(%rsp)        movapd %xmm5,nb314_twelve(%rsp)        ## assume we have at least one i particle - start directly         movq  nb314_iinr(%rsp),%rcx         ## rcx = pointer into iinr[]                movl  (%rcx),%ebx           ## ebx =ii         movq  nb314_charge(%rbp),%rdx        movsd 
24(%rdx,%rbx,8),%xmm3        movsd %xmm3,%xmm4        movsd 8(%rdx,%rbx,8),%xmm5        movq nb314_p_facel(%rbp),%rsi        movsd (%rsi),%xmm0        movsd nb314_facel(%rsp),%xmm6        mulsd  %xmm3,%xmm3        mulsd  %xmm5,%xmm4        mulsd  %xmm5,%xmm5        mulsd  %xmm6,%xmm3        mulsd  %xmm6,%xmm4        mulsd  %xmm6,%xmm5        shufpd $0,%xmm3,%xmm3        shufpd $0,%xmm4,%xmm4        shufpd $0,%xmm5,%xmm5        movapd %xmm3,nb314_qqMM(%rsp)        movapd %xmm4,nb314_qqMH(%rsp)        movapd %xmm5,nb314_qqHH(%rsp)        xorpd %xmm0,%xmm0        movq  nb314_type(%rbp),%rdx        movl  (%rdx,%rbx,4),%ecx        shll  %ecx        movl  %ecx,%edx        movq nb314_p_ntype(%rbp),%rdi        imull (%rdi),%ecx     ## rcx = ntia = 2*ntype*type[ii0]         addl  %ecx,%edx        movq  nb314_vdwparam(%rbp),%rax        movlpd (%rax,%rdx,8),%xmm0        movlpd 8(%rax,%rdx,8),%xmm1        shufpd $0,%xmm0,%xmm0        shufpd $0,%xmm1,%xmm1        movapd %xmm0,nb314_c6(%rsp)        movapd %xmm1,nb314_c12(%rsp)_nb_kernel314_x86_64_sse2.nb314_threadloop:         movq  nb314_count(%rbp),%rsi            ## pointer to sync counter        movl  (%rsi),%eax_nb_kernel314_x86_64_sse2.nb314_spinlock:         movl  %eax,%ebx                         ## ebx=*count=nn0        addl  $1,%ebx                          ## ebx=nn1=nn0+10        lock         cmpxchgl %ebx,(%rsi)                    ## write nn1 to *counter,                                                ## if it hasnt changed.                                                ## or reread *counter to eax.        pause                                   ## -> better p4 performance        jnz _nb_kernel314_x86_64_sse2.nb314_spinlock        ## if(nn1>nri) nn1=nri        movl nb314_nri(%rsp),%ecx        movl %ecx,%edx        subl %ebx,%ecx        cmovlel %edx,%ebx                       ## if(nn1>nri) nn1=nri        ## Cleared the spinlock if we got here.        ## eax contains nn0, ebx contains nn1.        
movl %eax,nb314_n(%rsp)        movl %ebx,nb314_nn1(%rsp)        subl %eax,%ebx                          ## calc number of outer lists        movl %eax,%esi                          ## copy n to esi        jg  _nb_kernel314_x86_64_sse2.nb314_outerstart        jmp _nb_kernel314_x86_64_sse2.nb314_end_nb_kernel314_x86_64_sse2.nb314_outerstart:         ## ebx contains number of outer iterations        addl nb314_nouter(%rsp),%ebx        movl %ebx,nb314_nouter(%rsp)_nb_kernel314_x86_64_sse2.nb314_outer:         movq  nb314_shift(%rsp),%rax        ## rax = pointer into shift[]         movl  (%rax,%rsi,4),%ebx        ## rbx=shift[n]         lea  (%rbx,%rbx,2),%rbx    ## rbx=3*is         movl  %ebx,nb314_is3(%rsp)      ## store is3         movq  nb314_shiftvec(%rsp),%rax     ## rax = base of shiftvec[]         movsd (%rax,%rbx,8),%xmm0        movsd 8(%rax,%rbx,8),%xmm1        movsd 16(%rax,%rbx,8),%xmm2        movq  nb314_iinr(%rsp),%rcx         ## rcx = pointer into iinr[]                movl  (%rcx,%rsi,4),%ebx    ## ebx =ii         movapd %xmm0,%xmm3        movapd %xmm1,%xmm4        movapd %xmm2,%xmm5        movapd %xmm0,%xmm6        movapd %xmm1,%xmm7        lea  (%rbx,%rbx,2),%rbx        ## rbx = 3*ii=ii3         movq  nb314_pos(%rbp),%rax      ## rax = base of pos[]          movl  %ebx,nb314_ii3(%rsp)        addsd (%rax,%rbx,8),%xmm3       ## ox        addsd 8(%rax,%rbx,8),%xmm4      ## oy        addsd 16(%rax,%rbx,8),%xmm5     ## oz           addsd 24(%rax,%rbx,8),%xmm6     ## h1x        addsd 32(%rax,%rbx,8),%xmm7     ## h1y        shufpd $0,%xmm3,%xmm3        shufpd $0,%xmm4,%xmm4        shufpd $0,%xmm5,%xmm5        shufpd $0,%xmm6,%xmm6        shufpd $0,%xmm7,%xmm7        movapd %xmm3,nb314_ixO(%rsp)        movapd %xmm4,nb314_iyO(%rsp)        movapd %xmm5,nb314_izO(%rsp)        movapd %xmm6,nb314_ixH1(%rsp)        movapd %xmm7,nb314_iyH1(%rsp)        movsd %xmm2,%xmm6        movsd %xmm0,%xmm3        movsd %xmm1,%xmm4        movsd %xmm2,%xmm5        addsd 
40(%rax,%rbx,8),%xmm6    ## h1z        addsd 48(%rax,%rbx,8),%xmm0    ## h2x        addsd 56(%rax,%rbx,8),%xmm1    ## h2y        addsd 64(%rax,%rbx,8),%xmm2    ## h2z        addsd 72(%rax,%rbx,8),%xmm3    ## mx        addsd 80(%rax,%rbx,8),%xmm4    ## my        addsd 88(%rax,%rbx,8),%xmm5    ## mz        shufpd $0,%xmm6,%xmm6        shufpd $0,%xmm0,%xmm0        shufpd $0,%xmm1,%xmm1        shufpd $0,%xmm2,%xmm2        shufpd $0,%xmm3,%xmm3        shufpd $0,%xmm4,%xmm4        shufpd $0,%xmm5,%xmm5        movapd %xmm6,nb314_izH1(%rsp)        movapd %xmm0,nb314_ixH2(%rsp)        movapd %xmm1,nb314_iyH2(%rsp)        movapd %xmm2,nb314_izH2(%rsp)        movapd %xmm3,nb314_ixM(%rsp)        movapd %xmm4,nb314_iyM(%rsp)        movapd %xmm5,nb314_izM(%rsp)        ## clear vctot and i forces         xorpd %xmm4,%xmm4        movapd %xmm4,nb314_vctot(%rsp)        movapd %xmm4,nb314_Vvdwtot(%rsp)        movapd %xmm4,nb314_fixO(%rsp)        movapd %xmm4,nb314_fiyO(%rsp)        movapd %xmm4,nb314_fizO(%rsp)        movapd %xmm4,nb314_fixH1(%rsp)        movapd %xmm4,nb314_fiyH1(%rsp)        movapd %xmm4,nb314_fizH1(%rsp)        movapd %xmm4,nb314_fixH2(%rsp)        movapd %xmm4,nb314_fiyH2(%rsp)        movapd %xmm4,nb314_fizH2(%rsp)        movapd %xmm4,nb314_fixM(%rsp)        movapd %xmm4,nb314_fiyM(%rsp)        movapd %xmm4,nb314_fizM(%rsp)        movq  nb314_jindex(%rsp),%rax        movl  (%rax,%rsi,4),%ecx             ## jindex[n]         movl  4(%rax,%rsi,4),%edx            ## jindex[n+1]         subl  %ecx,%edx              ## number of innerloop atoms         movq  nb314_pos(%rbp),%rsi        movq  nb314_faction(%rbp),%rdi        movq  nb314_jjnr(%rsp),%rax        shll  $2,%ecx        addq  %rcx,%rax        movq  %rax,nb314_innerjjnr(%rsp)       ## pointer to jjnr[nj0]         movl  %edx,%ecx        subl  $2,%edx        addl  nb314_ninner(%rsp),%ecx        movl  %ecx,nb314_ninner(%rsp)        addl  $0,%edx        movl  %edx,nb314_innerk(%rsp)      ## number of innerloop atoms 
        jge   _nb_kernel314_x86_64_sse2.nb314_unroll_loop        jmp   _nb_kernel314_x86_64_sse2.nb314_checksingle_nb_kernel314_x86_64_sse2.nb314_unroll_loop:         ## twice unrolled innerloop here         movq  nb314_innerjjnr(%rsp),%rdx       ## pointer to jjnr[k]         movl  (%rdx),%eax        movl  4(%rdx),%ebx        addq $8,nb314_innerjjnr(%rsp)            ## advance pointer (unrolled 2)         movq nb314_pos(%rbp),%rsi        ## base of pos[]         lea  (%rax,%rax,2),%rax     ## replace jnr with j3         lea  (%rbx,%rbx,2),%rbx        ## load j O coordinates    movlpd (%rsi,%rax,8),%xmm4    movlpd 8(%rsi,%rax,8),%xmm5    movlpd 16(%rsi,%rax,8),%xmm6    movhpd (%rsi,%rbx,8),%xmm4    movhpd 8(%rsi,%rbx,8),%xmm5    movhpd 16(%rsi,%rbx,8),%xmm6    ## xmm4 = Ox    ## xmm5 = Oy    ## xmm6 = Oz    subpd nb314_ixO(%rsp),%xmm4    subpd nb314_iyO(%rsp),%xmm5    subpd nb314_izO(%rsp),%xmm6    ## store dx/dy/dz    movapd %xmm4,%xmm9    movapd %xmm5,%xmm10    movapd %xmm6,%xmm11        ## square it         mulpd %xmm4,%xmm4        mulpd %xmm5,%xmm5        mulpd %xmm6,%xmm6        addpd %xmm5,%xmm4        addpd %xmm6,%xmm4       ## rsq in xmm4         cvtpd2ps %xmm4,%xmm6        rcpps %xmm6,%xmm6        cvtps2pd %xmm6,%xmm6    ## lu in low xmm6         ## 1/x lookup seed in xmm6         movapd nb314_two(%rsp),%xmm0        movapd %xmm4,%xmm5        mulpd %xmm6,%xmm4       ## lu*rsq         subpd %xmm4,%xmm0       ## 2-lu*rsq         mulpd %xmm0,%xmm6       ## (new lu)         movapd nb314_two(%rsp),%xmm0        mulpd %xmm6,%xmm5       ## lu*rsq         subpd %xmm5,%xmm0       ## 2-lu*rsq         mulpd %xmm6,%xmm0       ## xmm0=rinvsq         movapd %xmm0,%xmm1        mulpd  %xmm0,%xmm1        mulpd  %xmm0,%xmm1      ## xmm1=rinvsix         movapd %xmm1,%xmm2

⌨️ Keyboard shortcuts

Copy code: Ctrl + C
Search code: Ctrl + F
Full-screen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?