nb_kernel201_x86_64_sse2.s

From "the most famous and fastest molecular simulation software" (GROMACS) · assembly (.s) source · 1,714 lines total · page 1 of 4

S
1,714
字号
##
## $Id: nb_kernel201_x86_64_sse2.s,v 1.4.2.3 2006/09/22 08:40:33 lindahl Exp $
##
## Gromacs 4.0                         Copyright (c) 1991-2003
## David van der Spoel, Erik Lindahl
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## To help us fund GROMACS development, we humbly ask that you cite
## the research papers on the package. Check out http://www.gromacs.org
##
## And Hey:
## Gnomes, ROck Monsters And Chili Sauce
##

## NOTE(review): this chunk was scraped with all newlines stripped; the line
## breaks below are reconstructed.  Every code token is unchanged from the
## scrape; only comments were added/translated.
##
## Nonbonded kernel 201 (AT&T/GAS syntax, x86-64, packed-double SSE2).
## The i-particle has three interaction sites (O, H1, H2) and the
## electrostatics use reaction-field constants krf/crf (loaded below); the
## inner loop over j atoms is unrolled twice using both SSE2 lanes.
## NOTE(review): inferred from the visible charge loads (charge[ii],
## charge[ii+1]), the krf/crf setup, and the stack-variable names -- confirm
## against the kernel's C prototype in the GROMACS 4.0 sources.

.globl nb_kernel201_x86_64_sse2
.globl _nb_kernel201_x86_64_sse2
nb_kernel201_x86_64_sse2:
_nb_kernel201_x86_64_sse2:

##      Room for return address and rbp (16 bytes)
## Offsets (relative to %rbp after the prologue) of the stack-passed
## arguments, i.e. C arguments 7 and up; the first six arrive in
## rdi,rsi,rdx,rcx,r8,r9 and are spilled to locals just below.
.set nb201_fshift, 16
.set nb201_gid, 24
.set nb201_pos, 32
.set nb201_faction, 40
.set nb201_charge, 48
.set nb201_p_facel, 56
.set nb201_argkrf, 64
.set nb201_argcrf, 72
.set nb201_Vc, 80
.set nb201_type, 88
.set nb201_p_ntype, 96
.set nb201_vdwparam, 104
.set nb201_Vvdw, 112
.set nb201_p_tabscale, 120
.set nb201_VFtab, 128
.set nb201_invsqrta, 136
.set nb201_dvda, 144
.set nb201_p_gbtabscale, 152
.set nb201_GBtab, 160
.set nb201_p_nthreads, 168
.set nb201_count, 176
.set nb201_mtx, 184
.set nb201_outeriter, 192
.set nb201_inneriter, 200
.set nb201_work, 208

        ## stack offsets for local variables
        ## bottom of stack is cache-aligned for sse2 use
        ## (16-byte slots so each holds one packed-double xmm value)
.set nb201_ixO, 0
.set nb201_iyO, 16
.set nb201_izO, 32
.set nb201_ixH1, 48
.set nb201_iyH1, 64
.set nb201_izH1, 80
.set nb201_ixH2, 96
.set nb201_iyH2, 112
.set nb201_izH2, 128
.set nb201_iqO, 144
.set nb201_iqH, 160
.set nb201_dxO, 176
.set nb201_dyO, 192
.set nb201_dzO, 208
.set nb201_dxH1, 224
.set nb201_dyH1, 240
.set nb201_dzH1, 256
.set nb201_dxH2, 272
.set nb201_dyH2, 288
.set nb201_dzH2, 304
.set nb201_qqO, 320
.set nb201_qqH, 336
.set nb201_vctot, 352
.set nb201_fixO, 384
.set nb201_fiyO, 400
.set nb201_fizO, 416
.set nb201_fixH1, 432
.set nb201_fiyH1, 448
.set nb201_fizH1, 464
.set nb201_fixH2, 480
.set nb201_fiyH2, 496
.set nb201_fizH2, 512
.set nb201_fjx, 528
.set nb201_fjy, 544
.set nb201_fjz, 560
.set nb201_half, 576
.set nb201_three, 592
.set nb201_two, 608
.set nb201_krf, 624
.set nb201_crf, 640
.set nb201_krsqO, 656
.set nb201_krsqH1, 672
.set nb201_krsqH2, 688
.set nb201_nri, 704
.set nb201_iinr, 712
.set nb201_jindex, 720
.set nb201_jjnr, 728
.set nb201_shift, 736
.set nb201_shiftvec, 744
.set nb201_facel, 752
.set nb201_innerjjnr, 760
.set nb201_is3, 768
.set nb201_ii3, 772
.set nb201_innerk, 776
.set nb201_n, 780
.set nb201_nn1, 784
.set nb201_nouter, 788
.set nb201_ninner, 792

        ## prologue: save callee-saved registers; %rbp anchors the stack args
        push %rbp
        movq %rsp,%rbp
        push %rbx
        emms                    ## clear MMX/x87 state before SSE2 work
        push %r12
        push %r13
        push %r14
        push %r15

        subq $808,%rsp          ## local variable stack space (n*16+8)

        ## zero 32-bit iteration counters
        movl $0,%eax
        movl %eax,nb201_nouter(%rsp)
        movl %eax,nb201_ninner(%rsp)

        ## spill register arguments into named stack slots
        movl (%rdi),%edi        ## nri = *(arg1)
        movl %edi,nb201_nri(%rsp)
        movq %rsi,nb201_iinr(%rsp)
        movq %rdx,nb201_jindex(%rsp)
        movq %rcx,nb201_jjnr(%rsp)
        movq %r8,nb201_shift(%rsp)
        movq %r9,nb201_shiftvec(%rsp)
        movq nb201_p_facel(%rbp),%rsi
        movsd (%rsi),%xmm0
        movsd %xmm0,nb201_facel(%rsp)

        ## load reaction-field constants krf/crf and splat to both lanes
        movq nb201_argkrf(%rbp),%rsi
        movq nb201_argcrf(%rbp),%rdi
        movsd (%rsi),%xmm1
        movsd (%rdi),%xmm2
        shufpd $0,%xmm1,%xmm1
        shufpd $0,%xmm2,%xmm2
        movapd %xmm1,nb201_krf(%rsp)
        movapd %xmm2,nb201_crf(%rsp)

        ## create constant floating-point factors on stack
        movl $0x00000000,%eax   ## lower half of double half IEEE (hex)
        movl $0x3fe00000,%ebx   ## upper half: 0x3FE0000000000000 == 0.5
        movl %eax,nb201_half(%rsp)
        movl %ebx,nb201_half+4(%rsp)
        movsd nb201_half(%rsp),%xmm1
        shufpd $0,%xmm1,%xmm1  ## splat to all elements
        movapd %xmm1,%xmm3
        addpd  %xmm3,%xmm3      ## one
        movapd %xmm3,%xmm2
        addpd  %xmm2,%xmm2      ## two
        addpd  %xmm2,%xmm3      ## three
        movapd %xmm1,nb201_half(%rsp)
        movapd %xmm2,nb201_two(%rsp)
        movapd %xmm3,nb201_three(%rsp)

        ## assume we have at least one i particle - start directly
        ## pre-scale the two distinct i charges (O and H) by facel
        movq  nb201_iinr(%rsp),%rcx         ## rcx = pointer into iinr[]
        movl  (%rcx),%ebx                   ## ebx =ii
        movq  nb201_charge(%rbp),%rdx
        movsd (%rdx,%rbx,8),%xmm3           ## charge[ii]   (O site)
        movsd 8(%rdx,%rbx,8),%xmm4          ## charge[ii+1] (H site)
        movsd nb201_facel(%rsp),%xmm5
        mulsd  %xmm5,%xmm3                  ## iqO = facel*charge[ii]
        mulsd  %xmm5,%xmm4                  ## iqH = facel*charge[ii+1]
        shufpd $0,%xmm3,%xmm3
        shufpd $0,%xmm4,%xmm4
        movapd %xmm3,nb201_iqO(%rsp)
        movapd %xmm4,nb201_iqH(%rsp)

_nb_kernel201_x86_64_sse2.nb201_threadloop:
        ## atomically claim the next outer-loop range [nn0,nn1) from the
        ## shared counter so multiple threads divide the i-particle list
        movq  nb201_count(%rbp),%rsi            ## pointer to sync counter
        movl  (%rsi),%eax
_nb_kernel201_x86_64_sse2.nb201_spinlock:
        movl  %eax,%ebx                         ## ebx=*count=nn0
        addl  $1,%ebx                           ## ebx=nn1=nn0+1
        lock
        cmpxchgl %ebx,(%rsi)                    ## write nn1 to *counter,
                                                ## if it hasnt changed.
                                                ## or reread *counter to eax.
        pause                                   ## -> better p4 performance
        jnz _nb_kernel201_x86_64_sse2.nb201_spinlock
        ## if(nn1>nri) nn1=nri
        movl nb201_nri(%rsp),%ecx
        movl %ecx,%edx
        subl %ebx,%ecx
        cmovlel %edx,%ebx                       ## if(nn1>nri) nn1=nri
        ## Cleared the spinlock if we got here.
        ## eax contains nn0, ebx contains nn1.
        movl %eax,nb201_n(%rsp)
        movl %ebx,nb201_nn1(%rsp)
        subl %eax,%ebx                          ## calc number of outer lists
        movl %eax,%esi                          ## copy n to esi (mov keeps flags)
        jg  _nb_kernel201_x86_64_sse2.nb201_outerstart    ## jg tests nn1-nn0 > 0
        jmp _nb_kernel201_x86_64_sse2.nb201_end

_nb_kernel201_x86_64_sse2.nb201_outerstart:
        ## ebx contains number of outer iterations
        addl nb201_nouter(%rsp),%ebx
        movl %ebx,nb201_nouter(%rsp)

_nb_kernel201_x86_64_sse2.nb201_outer:
        ## load the periodic shift vector for this i list
        movq  nb201_shift(%rsp),%rax        ## rax = pointer into shift[]
        movl  (%rax,%rsi,4),%ebx            ## rbx=shift[n]
        lea  (%rbx,%rbx,2),%rbx             ## rbx=3*is
        movl  %ebx,nb201_is3(%rsp)          ## store is3

        movq  nb201_shiftvec(%rsp),%rax     ## rax = base of shiftvec[]
        movsd (%rax,%rbx,8),%xmm0
        movsd 8(%rax,%rbx,8),%xmm1
        movsd 16(%rax,%rbx,8),%xmm2

        movq  nb201_iinr(%rsp),%rcx         ## rcx = pointer into iinr[]
        movl  (%rcx,%rsi,4),%ebx            ## ebx =ii

        ## shifted i coordinates: O site first, splat across both lanes
        movapd %xmm0,%xmm3
        movapd %xmm1,%xmm4
        movapd %xmm2,%xmm5
        lea  (%rbx,%rbx,2),%rbx        ## rbx = 3*ii=ii3
        movq  nb201_pos(%rbp),%rax     ## rax = base of pos[]
        movl  %ebx,nb201_ii3(%rsp)

        addsd (%rax,%rbx,8),%xmm3
        addsd 8(%rax,%rbx,8),%xmm4
        addsd 16(%rax,%rbx,8),%xmm5
        shufpd $0,%xmm3,%xmm3
        shufpd $0,%xmm4,%xmm4
        shufpd $0,%xmm5,%xmm5
        movapd %xmm3,nb201_ixO(%rsp)
        movapd %xmm4,nb201_iyO(%rsp)
        movapd %xmm5,nb201_izO(%rsp)

        ## shifted H1 (offsets 24..40) and H2 (offsets 48..64) coordinates
        movsd %xmm0,%xmm3
        movsd %xmm1,%xmm4
        movsd %xmm2,%xmm5
        addsd 24(%rax,%rbx,8),%xmm0
        addsd 32(%rax,%rbx,8),%xmm1
        addsd 40(%rax,%rbx,8),%xmm2
        addsd 48(%rax,%rbx,8),%xmm3
        addsd 56(%rax,%rbx,8),%xmm4
        addsd 64(%rax,%rbx,8),%xmm5
        shufpd $0,%xmm0,%xmm0
        shufpd $0,%xmm1,%xmm1
        shufpd $0,%xmm2,%xmm2
        shufpd $0,%xmm3,%xmm3
        shufpd $0,%xmm4,%xmm4
        shufpd $0,%xmm5,%xmm5
        movapd %xmm0,nb201_ixH1(%rsp)
        movapd %xmm1,nb201_iyH1(%rsp)
        movapd %xmm2,nb201_izH1(%rsp)
        movapd %xmm3,nb201_ixH2(%rsp)
        movapd %xmm4,nb201_iyH2(%rsp)
        movapd %xmm5,nb201_izH2(%rsp)

        ## clear vctot and i forces
        xorpd %xmm4,%xmm4
        movapd %xmm4,nb201_vctot(%rsp)
        movapd %xmm4,nb201_fixO(%rsp)
        movapd %xmm4,nb201_fiyO(%rsp)
        movapd %xmm4,nb201_fizO(%rsp)
        movapd %xmm4,nb201_fixH1(%rsp)
        movapd %xmm4,nb201_fiyH1(%rsp)
        movapd %xmm4,nb201_fizH1(%rsp)
        movapd %xmm4,nb201_fixH2(%rsp)
        movapd %xmm4,nb201_fiyH2(%rsp)
        movapd %xmm4,nb201_fizH2(%rsp)

        ## set up the inner (j) loop bounds from jindex[n]..jindex[n+1]
        movq  nb201_jindex(%rsp),%rax
        movl  (%rax,%rsi,4),%ecx             ## jindex[n]
        movl  4(%rax,%rsi,4),%edx            ## jindex[n+1]
        subl  %ecx,%edx              ## number of innerloop atoms

        movq  nb201_pos(%rbp),%rsi
        movq  nb201_faction(%rbp),%rdi
        movq  nb201_jjnr(%rsp),%rax
        shll  $2,%ecx
        addq  %rcx,%rax
        movq  %rax,nb201_innerjjnr(%rsp)       ## pointer to jjnr[nj0]
        movl  %edx,%ecx
        subl  $2,%edx
        addl  nb201_ninner(%rsp),%ecx
        movl  %ecx,nb201_ninner(%rsp)
        addl  $0,%edx                ## re-test edx: the addl above clobbered the subl flags
        movl  %edx,nb201_innerk(%rsp)      ## number of innerloop atoms
        jge   _nb_kernel201_x86_64_sse2.nb201_unroll_loop  ## >=2 j atoms left
        jmp   _nb_kernel201_x86_64_sse2.nb201_checksingle

_nb_kernel201_x86_64_sse2.nb201_unroll_loop:
        ## twice unrolled innerloop here
        movq  nb201_innerjjnr(%rsp),%rdx       ## pointer to jjnr[k]
        movl  (%rdx),%eax
        movl  4(%rdx),%ebx
        addq $8,nb201_innerjjnr(%rsp)                   ## advance pointer (unrolled 2)

        ## j charges: one in each lane, scaled by iqO resp. iqH
        movq nb201_charge(%rbp),%rsi     ## base of charge[]
        movlpd (%rsi,%rax,8),%xmm3
        movhpd (%rsi,%rbx,8),%xmm3
        movapd %xmm3,%xmm4
        mulpd  nb201_iqO(%rsp),%xmm3
        mulpd  nb201_iqH(%rsp),%xmm4
        movapd  %xmm3,nb201_qqO(%rsp)
        movapd  %xmm4,nb201_qqH(%rsp)

        movq nb201_pos(%rbp),%rsi        ## base of pos[]
        lea  (%rax,%rax,2),%rax     ## replace jnr with j3
        lea  (%rbx,%rbx,2),%rbx

        ## move j coordinates to local temp variables
    movlpd (%rsi,%rax,8),%xmm0
    movlpd 8(%rsi,%rax,8),%xmm1
    movlpd 16(%rsi,%rax,8),%xmm2
    movhpd (%rsi,%rbx,8),%xmm0
    movhpd 8(%rsi,%rbx,8),%xmm1
    movhpd 16(%rsi,%rbx,8),%xmm2

    ## xmm0 = jx
    ## xmm1 = jy
    ## xmm2 = jz

    ## displacement vectors dx = xj - xi for O, H1, H2 simultaneously
    movapd %xmm0,%xmm3
    movapd %xmm1,%xmm4
    movapd %xmm2,%xmm5
    movapd %xmm0,%xmm6
    movapd %xmm1,%xmm7
    movapd %xmm2,%xmm8

    subpd nb201_ixO(%rsp),%xmm0
    subpd nb201_iyO(%rsp),%xmm1
    subpd nb201_izO(%rsp),%xmm2
    subpd nb201_ixH1(%rsp),%xmm3
    subpd nb201_iyH1(%rsp),%xmm4
    subpd nb201_izH1(%rsp),%xmm5
    subpd nb201_ixH2(%rsp),%xmm6
    subpd nb201_iyH2(%rsp),%xmm7
    subpd nb201_izH2(%rsp),%xmm8

        ## save displacements for the force calculation, square in place
        movapd %xmm0,nb201_dxO(%rsp)
        movapd %xmm1,nb201_dyO(%rsp)
        movapd %xmm2,nb201_dzO(%rsp)
        mulpd  %xmm0,%xmm0
        mulpd  %xmm1,%xmm1
        mulpd  %xmm2,%xmm2
        movapd %xmm3,nb201_dxH1(%rsp)
        movapd %xmm4,nb201_dyH1(%rsp)
        movapd %xmm5,nb201_dzH1(%rsp)
        mulpd  %xmm3,%xmm3
        mulpd  %xmm4,%xmm4
        mulpd  %xmm5,%xmm5
        movapd %xmm6,nb201_dxH2(%rsp)
        movapd %xmm7,nb201_dyH2(%rsp)
        movapd %xmm8,nb201_dzH2(%rsp)
        mulpd  %xmm6,%xmm6
        mulpd  %xmm7,%xmm7
        mulpd  %xmm8,%xmm8

        ## rsq = dx^2+dy^2+dz^2: xmm0=rsqO, xmm3=rsqH1, xmm6=rsqH2
        addpd  %xmm1,%xmm0
        addpd  %xmm2,%xmm0
        addpd  %xmm4,%xmm3
        addpd  %xmm5,%xmm3
    addpd  %xmm7,%xmm6
    addpd  %xmm8,%xmm6

        ## start doing invsqrt for j atoms
        ## single-precision rsqrtps gives a seed, refined below in double
        ## with Newton-Raphson: lu' = half*lu*(three - rsq*lu*lu)
    cvtpd2ps %xmm0,%xmm1
    cvtpd2ps %xmm3,%xmm4
    cvtpd2ps %xmm6,%xmm7
        rsqrtps %xmm1,%xmm1
        rsqrtps %xmm4,%xmm4
    rsqrtps %xmm7,%xmm7
    cvtps2pd %xmm1,%xmm1
    cvtps2pd %xmm4,%xmm4
    cvtps2pd %xmm7,%xmm7

        movapd  %xmm1,%xmm2
        movapd  %xmm4,%xmm5
    movapd  %xmm7,%xmm8

        mulpd   %xmm1,%xmm1 ## lu*lu
        mulpd   %xmm4,%xmm4 ## lu*lu
    mulpd   %xmm7,%xmm7 ## lu*lu

        movapd  nb201_three(%rsp),%xmm9
        movapd  %xmm9,%xmm10
    movapd  %xmm9,%xmm11

        mulpd   %xmm0,%xmm1 ## rsq*lu*lu
        mulpd   %xmm3,%xmm4 ## rsq*lu*lu
    mulpd   %xmm6,%xmm7 ## rsq*lu*lu

        subpd   %xmm1,%xmm9
        subpd   %xmm4,%xmm10
    subpd   %xmm7,%xmm11 ## 3-rsq*lu*lu

        mulpd   %xmm2,%xmm9
        mulpd   %xmm5,%xmm10
    mulpd   %xmm8,%xmm11 ## lu*(3-rsq*lu*lu)

        movapd  nb201_half(%rsp),%xmm15
        mulpd   %xmm15,%xmm9 ## first iteration for rinvO
        mulpd   %xmm15,%xmm10 ## first iteration for rinvH1
    mulpd   %xmm15,%xmm11 ## first iteration for rinvH2

    ## second iteration step
        movapd  %xmm9,%xmm2
        movapd  %xmm10,%xmm5
    movapd  %xmm11,%xmm8

        mulpd   %xmm2,%xmm2 ## lu*lu
        mulpd   %xmm5,%xmm5 ## lu*lu

⌨️ Keyboard shortcuts

Copy code: Ctrl + C
Search code: Ctrl + F
Fullscreen mode: F11
Increase font size: Ctrl + =
Decrease font size: Ctrl + -
Show shortcuts: ?