nb_kernel200_x86_64_sse2.s
From "the most famous and fastest molecular simulation software" · .s assembly source · 979 lines total · page 1 of 3
##
## $Id: nb_kernel200_x86_64_sse2.s,v 1.4.2.3 2006/09/22 08:40:33 lindahl Exp $
##
## Gromacs 4.0                         Copyright (c) 1991-2003
## David van der Spoel, Erik Lindahl
##
## This program is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License
## as published by the Free Software Foundation; either version 2
## of the License, or (at your option) any later version.
##
## To help us fund GROMACS development, we humbly ask that you cite
## the research papers on the package. Check out http://www.gromacs.org
##
## And Hey:
## Gnomes, ROck Monsters And Chili Sauce
##

.globl nb_kernel200_x86_64_sse2
.globl _nb_kernel200_x86_64_sse2
nb_kernel200_x86_64_sse2:
_nb_kernel200_x86_64_sse2:
## Room for return address and rbp (16 bytes)
.set nb200_fshift, 16
.set nb200_gid, 24
.set nb200_pos, 32
.set nb200_faction, 40
.set nb200_charge, 48
.set nb200_p_facel, 56
.set nb200_argkrf, 64
.set nb200_argcrf, 72
.set nb200_Vc, 80
.set nb200_type, 88
.set nb200_p_ntype, 96
.set nb200_vdwparam, 104
.set nb200_Vvdw, 112
.set nb200_p_tabscale, 120
.set nb200_VFtab, 128
.set nb200_invsqrta, 136
.set nb200_dvda, 144
.set nb200_p_gbtabscale, 152
.set nb200_GBtab, 160
.set nb200_p_nthreads, 168
.set nb200_count, 176
.set nb200_mtx, 184
.set nb200_outeriter, 192
.set nb200_inneriter, 200
.set nb200_work, 208

## stack offsets for local variables
## bottom of stack is cache-aligned for sse2 use
.set nb200_ix, 0
.set nb200_iy, 16
.set nb200_iz, 32
.set nb200_iq, 48
.set nb200_dx, 64
.set nb200_dy, 80
.set nb200_dz, 96
.set nb200_vctot, 112
.set nb200_fix, 128
.set nb200_fiy, 144
.set nb200_fiz, 160
.set nb200_half, 176
.set nb200_three, 192
.set nb200_two, 208
.set nb200_krf, 224
.set nb200_crf, 240
.set nb200_is3, 256
.set nb200_ii3, 260
.set nb200_nri, 264
.set nb200_iinr, 272
.set nb200_jindex, 280
.set nb200_jjnr, 288
.set nb200_shift, 296
.set nb200_shiftvec, 304
.set nb200_facel, 312
.set nb200_innerjjnr, 320
.set nb200_innerk, 328
.set nb200_n, 332
.set nb200_nn1, 336
.set nb200_nouter, 340
.set nb200_ninner, 344

        push %rbp
        movq %rsp,%rbp
        push %rbx
        emms

        push %r12
        push %r13
        push %r14
        push %r15

        subq $360,%rsp          ## local variable stack space (n*16+8)

        ## zero 32-bit iteration counters
        movl $0,%eax
        movl %eax,nb200_nouter(%rsp)
        movl %eax,nb200_ninner(%rsp)

        movl (%rdi),%edi
        movl %edi,nb200_nri(%rsp)
        movq %rsi,nb200_iinr(%rsp)
        movq %rdx,nb200_jindex(%rsp)
        movq %rcx,nb200_jjnr(%rsp)
        movq %r8,nb200_shift(%rsp)
        movq %r9,nb200_shiftvec(%rsp)
        movq nb200_p_facel(%rbp),%rsi
        movsd (%rsi),%xmm0
        movsd %xmm0,nb200_facel(%rsp)

        movq nb200_argkrf(%rbp),%rsi
        movq nb200_argcrf(%rbp),%rdi
        movsd (%rsi),%xmm1
        movsd (%rdi),%xmm2
        shufpd $0,%xmm1,%xmm1
        shufpd $0,%xmm2,%xmm2
        movapd %xmm1,nb200_krf(%rsp)
        movapd %xmm2,nb200_crf(%rsp)

        ## create constant floating-point factors on stack
        movl $0x00000000,%eax   ## lower half of double half IEEE (hex)
        movl $0x3fe00000,%ebx
        movl %eax,nb200_half(%rsp)
        movl %ebx,nb200_half+4(%rsp)
        movsd nb200_half(%rsp),%xmm1
        shufpd $0,%xmm1,%xmm1   ## splat to all elements
        movapd %xmm1,%xmm3
        addpd  %xmm3,%xmm3      ## one
        movapd %xmm3,%xmm2
        addpd  %xmm2,%xmm2      ## two
        addpd  %xmm2,%xmm3      ## three
        movapd %xmm1,nb200_half(%rsp)
        movapd %xmm2,nb200_two(%rsp)
        movapd %xmm3,nb200_three(%rsp)

_nb_kernel200_x86_64_sse2.nb200_threadloop:
        movq  nb200_count(%rbp),%rsi    ## pointer to sync counter
        movl  (%rsi),%eax
_nb_kernel200_x86_64_sse2.nb200_spinlock:
        movl  %eax,%ebx                 ## ebx=*count=nn0
        addl  $1,%ebx                   ## ebx=nn1=nn0+1
        lock cmpxchgl %ebx,(%rsi)       ## write nn1 to *counter,
                                        ## if it hasn't changed.
                                        ## or reread *counter to eax.
        pause                           ## -> better p4 performance
        jnz _nb_kernel200_x86_64_sse2.nb200_spinlock

        ## if(nn1>nri) nn1=nri
        movl nb200_nri(%rsp),%ecx
        movl %ecx,%edx
        subl %ebx,%ecx
        cmovlel %edx,%ebx               ## if(nn1>nri) nn1=nri
        ## Cleared the spinlock if we got here.
        ## eax contains nn0, ebx contains nn1.
        movl %eax,nb200_n(%rsp)
        movl %ebx,nb200_nn1(%rsp)
        subl %eax,%ebx                  ## calc number of outer lists
        movl %eax,%esi                  ## copy n to esi
        jg  _nb_kernel200_x86_64_sse2.nb200_outerstart
        jmp _nb_kernel200_x86_64_sse2.nb200_end

_nb_kernel200_x86_64_sse2.nb200_outerstart:
        ## ebx contains number of outer iterations
        addl nb200_nouter(%rsp),%ebx
        movl %ebx,nb200_nouter(%rsp)

_nb_kernel200_x86_64_sse2.nb200_outer:
        movq nb200_shift(%rsp),%rax     ## rax = pointer into shift[]
        movl (%rax,%rsi,4),%ebx         ## rbx=shift[n]

        lea (%rbx,%rbx,2),%rbx          ## rbx=3*is
        movl %ebx,nb200_is3(%rsp)       ## store is3

        movq nb200_shiftvec(%rsp),%rax  ## rax = base of shiftvec[]

        movsd (%rax,%rbx,8),%xmm0
        movsd 8(%rax,%rbx,8),%xmm1
        movsd 16(%rax,%rbx,8),%xmm2

        movq nb200_iinr(%rsp),%rcx      ## rcx = pointer into iinr[]
        movl (%rcx,%rsi,4),%ebx         ## ebx=ii

        movq nb200_charge(%rbp),%rdx
        movsd (%rdx,%rbx,8),%xmm3
        mulsd nb200_facel(%rsp),%xmm3
        shufpd $0,%xmm3,%xmm3

        lea (%rbx,%rbx,2),%rbx          ## rbx=3*ii=ii3
        movq nb200_pos(%rbp),%rax       ## rax = base of pos[]

        addsd (%rax,%rbx,8),%xmm0
        addsd 8(%rax,%rbx,8),%xmm1
        addsd 16(%rax,%rbx,8),%xmm2

        movapd %xmm3,nb200_iq(%rsp)

        shufpd $0,%xmm0,%xmm0
        shufpd $0,%xmm1,%xmm1
        shufpd $0,%xmm2,%xmm2

        movapd %xmm0,nb200_ix(%rsp)
        movapd %xmm1,nb200_iy(%rsp)
        movapd %xmm2,nb200_iz(%rsp)

        movl %ebx,nb200_ii3(%rsp)

        ## clear vctot (xmm12) and i forces (xmm13-xmm15)
        xorpd %xmm12,%xmm12
        movapd %xmm12,%xmm13
        movapd %xmm12,%xmm14
        movapd %xmm12,%xmm15

        movq nb200_jindex(%rsp),%rax
        movl (%rax,%rsi,4),%ecx         ## jindex[n]
        movl 4(%rax,%rsi,4),%edx        ## jindex[n+1]
        subl %ecx,%edx                  ## number of innerloop atoms

        movq nb200_pos(%rbp),%rsi
        movq nb200_faction(%rbp),%rdi
        movq nb200_jjnr(%rsp),%rax
        shll $2,%ecx
        addq %rcx,%rax
        movq %rax,nb200_innerjjnr(%rsp) ## pointer to jjnr[nj0]
        movl %edx,%ecx
        subl $2,%edx
        addl nb200_ninner(%rsp),%ecx
        movl %ecx,nb200_ninner(%rsp)
        addl $0,%edx
        movl %edx,nb200_innerk(%rsp)    ## number of innerloop atoms
        jge _nb_kernel200_x86_64_sse2.nb200_unroll_loop
        jmp _nb_kernel200_x86_64_sse2.nb200_checksingle

_nb_kernel200_x86_64_sse2.nb200_unroll_loop:
        ## twice unrolled innerloop here
        movq nb200_innerjjnr(%rsp),%rdx ## pointer to jjnr[k]
        movl (%rdx),%r8d
        movl 4(%rdx),%r9d

        addq $8,nb200_innerjjnr(%rsp)   ## advance pointer (unrolled 2)

        lea (%r8,%r8,2),%rax            ## replace jnr with j3
        lea (%r9,%r9,2),%rbx

        movq nb200_pos(%rbp),%rsi       ## base of pos[]

        ## move two coordinates to xmm4-xmm6
        movlpd (%rsi,%rax,8),%xmm4
        movlpd 8(%rsi,%rax,8),%xmm5
        movlpd 16(%rsi,%rax,8),%xmm6
        movhpd (%rsi,%rbx,8),%xmm4
        movhpd 8(%rsi,%rbx,8),%xmm5
        movhpd 16(%rsi,%rbx,8),%xmm6

        ## calc dr
        subpd nb200_ix(%rsp),%xmm4
        subpd nb200_iy(%rsp),%xmm5
        subpd nb200_iz(%rsp),%xmm6

        ## store dr
        movapd %xmm4,%xmm9
        movapd %xmm5,%xmm10
        movapd %xmm6,%xmm11

        movq nb200_charge(%rbp),%rsi    ## base of charge[]

        ## square it
        mulpd %xmm4,%xmm4
        mulpd %xmm5,%xmm5
        mulpd %xmm6,%xmm6
        addpd %xmm5,%xmm4
        addpd %xmm6,%xmm4               ## rsq in xmm4

        movlpd (%rsi,%r8,8),%xmm3

        cvtpd2ps %xmm4,%xmm5
        rsqrtps %xmm5,%xmm5
        cvtps2pd %xmm5,%xmm2            ## lu in low xmm2

        movhpd (%rsi,%r9,8),%xmm3

        movapd nb200_krf(%rsp),%xmm7

        ## lookup seed in xmm2
        movapd %xmm2,%xmm5              ## copy of lu
        mulpd %xmm2,%xmm2               ## lu*lu
        movapd nb200_three(%rsp),%xmm1
        mulpd %xmm4,%xmm7               ## krsq
        mulpd %xmm4,%xmm2               ## rsq*lu*lu
        movapd nb200_half(%rsp),%xmm0
        subpd %xmm2,%xmm1               ## 3.0-rsq*lu*lu
        mulpd %xmm5,%xmm1
        mulpd %xmm0,%xmm1               ## xmm1=iter1 of rinv (new lu)

        mulpd nb200_iq(%rsp),%xmm3      ## qq
        movapd %xmm1,%xmm5              ## copy of lu
        mulpd %xmm1,%xmm1               ## lu*lu
        movapd nb200_three(%rsp),%xmm2
        mulpd %xmm4,%xmm1               ## rsq*lu*lu
        movapd nb200_half(%rsp),%xmm0
        subpd %xmm1,%xmm2               ## 3.0-rsq*lu*lu
        mulpd %xmm5,%xmm2
        mulpd %xmm2,%xmm0               ## xmm0=rinv
        movapd %xmm0,%xmm4
        mulpd %xmm4,%xmm4               ## xmm4=rinvsq

        movapd %xmm0,%xmm6
        addpd %xmm7,%xmm6               ## xmm6=rinv+krsq

        movapd %xmm4,%xmm1
        subpd nb200_crf(%rsp),%xmm6
        mulpd %xmm3,%xmm6               ## xmm6=vcoul=qq*(rinv+krsq-crf)
        movq nb200_faction(%rbp),%rdi

        addpd %xmm7,%xmm7

        subpd %xmm7,%xmm0
        mulpd %xmm0,%xmm3
        mulpd %xmm3,%xmm4               ## xmm4=total fscal

        ## increment vctot
        addpd %xmm6,%xmm12

        mulpd %xmm4,%xmm9
        mulpd %xmm4,%xmm10
        mulpd %xmm4,%xmm11

        ## the fj's - start by accumulating forces from memory
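The page-1 listing stops here, partway through the twice-unrolled inner loop. A few pieces of the code above are worth unpacking. First, the nb200_threadloop/nb200_spinlock block hands out outer-loop work units to threads by atomically incrementing a shared counter with lock cmpxchgl and clamping the result to nri. A minimal C11-atomics sketch of that pattern (illustrative only, not part of the GROMACS sources; the helper name grab_outer_index is made up):

    #include <stdatomic.h>

    /* Grab the next outer-loop index range [nn0, nn1) from a shared counter.
       Mirrors the lock cmpxchgl retry loop in nb200_threadloop: each attempt
       proposes nn1 = nn0 + 1 and retries if another thread changed the counter. */
    static int grab_outer_index(atomic_int *count, int nri, int *nn0, int *nn1)
    {
        int old = atomic_load(count);
        int next;
        do {
            next = old + 1;                              /* ebx = nn1 = nn0 + 1 */
        } while (!atomic_compare_exchange_weak(count, &old, next));
        *nn0 = old;
        *nn1 = (next > nri) ? nri : next;                /* if (nn1 > nri) nn1 = nri */
        return *nn0 < nri;                               /* nonzero while work remains */
    }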
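Second, inside the unrolled loop 1/sqrt(rsq) is built from a single-precision rsqrtps seed (roughly 12 bits) that is then refined with two Newton-Raphson iterations in double precision, using the 0.5 and 3.0 constants stored at nb200_half and nb200_three. The scalar math of those two iterations, as a C sketch (illustrative, not GROMACS code):

    /* Newton-Raphson refinement of an approximate 1/sqrt(rsq):
       lu_new = 0.5 * lu * (3.0 - rsq * lu * lu).
       Each step roughly doubles the number of correct bits, so two steps
       take the ~12-bit rsqrtps seed to roughly 48 bits, which is what the
       double-precision kernel relies on. */
    static double refine_rinv(double rsq, float seed)
    {
        double lu = (double)seed;                 /* cvtps2pd of the rsqrtps result */
        lu = 0.5 * lu * (3.0 - rsq * lu * lu);    /* iteration 1 */
        lu = 0.5 * lu * (3.0 - rsq * lu * lu);    /* iteration 2: rinv */
        return lu;
    }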
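Finally, the arithmetic after the second iteration evaluates the reaction-field Coulomb interaction for both j atoms at once: the energy vcoul = qq*(rinv + krf*rsq - crf) is accumulated into vctot (xmm12), and the scalar force fscal = qq*(rinv - 2*krf*rsq)*rinvsq multiplies dx, dy and dz. Per pair, that amounts to the following (C sketch of what the SSE2 code computes, not the GROMACS reference kernel itself; rf_coulomb_pair is a made-up name):

    /* Reaction-field Coulomb for one i-j pair.
       qq = facel * q_i * q_j; krf and crf are the reaction-field constants.
       Returns the pair energy; *fscal gets the factor applied to dx, dy, dz. */
    static double rf_coulomb_pair(double qq, double rsq, double rinv,
                                  double krf, double crf, double *fscal)
    {
        double krsq   = krf * rsq;                     /* xmm7: krsq */
        double vcoul  = qq * (rinv + krsq - crf);      /* xmm6: vcoul */
        double rinvsq = rinv * rinv;                   /* xmm4: rinvsq */
        *fscal = qq * (rinv - 2.0 * krsq) * rinvsq;    /* xmm4: total fscal */
        return vcoul;
    }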