;# nb_kernel410_x86_64_sse2.intel_syntax.s
;# From GROMACS (molecular simulation package) -- SSE2 x86-64 assembly kernel.
;# NOTE(review): this is chunk 1 of 3 of a 1,490-line file; the kernel body
;# is truncated at the end of this chunk.
;#;# $Id: nb_kernel410_x86_64_sse2.intel_syntax.s,v 1.1.2.2 2006/09/22 08:40:37 lindahl Exp $;#;# Gromacs 4.0 Copyright (c) 1991-2003 ;# David van der Spoel, Erik Lindahl;#;# This program is free software; you can redistribute it and/or;# modify it under the terms of the GNU General Public License;# as published by the Free Software Foundation; either version 2;# of the License, or (at your option) any later version.;#;# To help us fund GROMACS development, we humbly ask that you cite;# the research papers on the package. Check out http://www.gromacs.org;# ;# And Hey:;# Gnomes, ROck Monsters And Chili Sauce;#;# These files require GNU binutils 2.10 or later, since we;# use intel syntax for portability, or a recent version ;# of NASM that understands Extended 3DNow and SSE2 instructions.;# (NASM is normally only used with MS Visual C++).;# Since NASM and gnu as disagree on some definitions and use ;# completely different preprocessing options I have to introduce a;# trick: NASM uses ';' for comments, while gnu as uses '#' on x86.;# Gnu as treats ';' as a line break, i.e. ignores it. This is the;# reason why all comments need both symbols...;# The source is written for GNU as, with intel syntax. When you use;# NASM we redefine a couple of things. 
;# The false if-statement around the following code is seen by GNU as,
;# but NASM doesn't see it, so the code inside is read by NASM but not gcc.
; .if 0    # block below only read by NASM
%define .section section
%define .long dd
%define .align align
%define .globl global
;# NASM only wants 'dword', not 'dword ptr'.
%define ptr
%macro .equiv 2
   %1 equ %2
%endmacro
; .endif   # End of NASM-specific block
; .intel_syntax noprefix   # Line only read by gnu as

;#---------------------------------------------------------------------
;# nb_kernel410_x86_64_sse2 -- GROMACS nonbonded kernel 410
;# Generalized-Born electrostatics (tabulated) + Lennard-Jones, doubles,
;# two j-atoms per iteration via SSE2 packed-double arithmetic.
;# ABI: SysV AMD64. The first six pointer args arrive in rdi,rsi,rdx,rcx,r8,r9
;# and are spilled below; the remaining args are read from the caller's stack
;# through rbp at the nb410_* offsets defined here.
;#---------------------------------------------------------------------
.globl nb_kernel410_x86_64_sse2
.globl _nb_kernel410_x86_64_sse2
nb_kernel410_x86_64_sse2:
_nb_kernel410_x86_64_sse2:

;# Offsets of stack-passed arguments relative to rbp.
;# Room for return address and rbp (16 bytes).
.equiv          nb410_fshift,           16
.equiv          nb410_gid,              24
.equiv          nb410_pos,              32
.equiv          nb410_faction,          40
.equiv          nb410_charge,           48
.equiv          nb410_p_facel,          56
.equiv          nb410_argkrf,           64
.equiv          nb410_argcrf,           72
.equiv          nb410_Vc,               80
.equiv          nb410_type,             88
.equiv          nb410_p_ntype,          96
.equiv          nb410_vdwparam,         104
.equiv          nb410_Vvdw,             112
.equiv          nb410_p_tabscale,       120
.equiv          nb410_VFtab,            128
.equiv          nb410_invsqrta,         136
.equiv          nb410_dvda,             144
.equiv          nb410_p_gbtabscale,     152
.equiv          nb410_GBtab,            160
.equiv          nb410_p_nthreads,       168
.equiv          nb410_count,            176
.equiv          nb410_mtx,              184
.equiv          nb410_outeriter,        192
.equiv          nb410_inneriter,        200
.equiv          nb410_work,             208

;# stack offsets for local variables
;# bottom of stack is cache-aligned for sse2 use
.equiv          nb410_ix,               0
.equiv          nb410_iy,               16
.equiv          nb410_iz,               32
.equiv          nb410_iq,               48
.equiv          nb410_dx,               64
.equiv          nb410_dy,               80
.equiv          nb410_dz,               96
.equiv          nb410_two,              112
.equiv          nb410_six,              128
.equiv          nb410_twelve,           144
.equiv          nb410_gbtsc,            160
.equiv          nb410_qq,               176
.equiv          nb410_c6,               192
.equiv          nb410_c12,              208
.equiv          nb410_fscal,            224
.equiv          nb410_vctot,            240
.equiv          nb410_Vvdwtot,          256
.equiv          nb410_fix,              272
.equiv          nb410_fiy,              288
.equiv          nb410_fiz,              304
.equiv          nb410_half,             320
.equiv          nb410_three,            336
.equiv          nb410_r,                352
.equiv          nb410_isai,             368
.equiv          nb410_isaprod,          384
.equiv          nb410_dvdasum,          400
.equiv          nb410_gbscale,          416
.equiv          nb410_nri,              432
.equiv          nb410_iinr,             440
.equiv          nb410_jindex,           448
.equiv          nb410_jjnr,             456
.equiv          nb410_shift,            464
.equiv          nb410_shiftvec,         472
.equiv          nb410_facel,            480
.equiv          nb410_innerjjnr,        488
.equiv          nb410_ii,               496
.equiv          nb410_is3,              500
.equiv          nb410_ii3,              504
.equiv          nb410_ntia,             508
.equiv          nb410_innerk,           512
.equiv          nb410_n,                516
.equiv          nb410_nn1,              520
.equiv          nb410_ntype,            524
.equiv          nb410_nouter,           528
.equiv          nb410_ninner,           532

        push    rbp
        mov     rbp, rsp
        push    rbx
        emms                            ;# clear MMX state (kernel uses mm6 for table indices)
        push    r12
        push    r13
        push    r14
        push    r15
        sub     rsp, 552                ;# local variable stack space (n*16+8)

        ;# zero 32-bit iteration counters
        mov     eax, 0
        mov     [rsp + nb410_nouter], eax
        mov     [rsp + nb410_ninner], eax

        ;# spill register-passed arguments to locals
        mov     edi, [rdi]              ;# nri = *p_nri
        mov     [rsp + nb410_nri], edi
        mov     [rsp + nb410_iinr], rsi
        mov     [rsp + nb410_jindex], rdx
        mov     [rsp + nb410_jjnr], rcx
        mov     [rsp + nb410_shift], r8
        mov     [rsp + nb410_shiftvec], r9
        mov     rdi, [rbp + nb410_p_ntype]
        mov     edi, [rdi]
        mov     [rsp + nb410_ntype], edi
        mov     rsi, [rbp + nb410_p_facel]
        movsd   xmm0, [rsi]
        movsd   [rsp + nb410_facel], xmm0

        mov     rbx, [rbp + nb410_p_gbtabscale]
        movsd   xmm4, [rbx]
        shufpd  xmm4, xmm4, 0           ;# splat GB table scale to both lanes
        movapd  [rsp + nb410_gbtsc], xmm4

        ;# create constant floating-point factors on stack
        mov     eax, 0x00000000         ;# lower half of double 0.5 (IEEE hex)
        mov     ebx, 0x3fe00000         ;# upper half of double 0.5
        mov     [rsp + nb410_half], eax
        mov     [rsp + nb410_half+4], ebx
        movsd   xmm1, [rsp + nb410_half]
        shufpd  xmm1, xmm1, 0           ;# splat to all elements
        movapd  xmm3, xmm1
        addpd   xmm3, xmm3              ;# one
        movapd  xmm2, xmm3
        addpd   xmm2, xmm2              ;# two
        addpd   xmm3, xmm2              ;# three
        movapd  xmm4, xmm3
        addpd   xmm4, xmm4              ;# six
        movapd  xmm5, xmm4
        addpd   xmm5, xmm5              ;# twelve
        movapd  [rsp + nb410_half], xmm1
        movapd  [rsp + nb410_two], xmm2
        movapd  [rsp + nb410_three], xmm3
        movapd  [rsp + nb410_six], xmm4
        movapd  [rsp + nb410_twelve], xmm5

.nb410_threadloop:
        mov     rsi, [rbp + nb410_count]  ;# pointer to sync counter
        mov     eax, [rsi]
.nb410_spinlock:
        mov     ebx, eax                ;# ebx = *count = nn0
        add     ebx, 1                  ;# ebx = nn1 = nn0+1
        ;# BUGFIX: was 'cmpxchg [esi], ebx' -- a 32-bit address register in
        ;# 64-bit code truncates the counter pointer held in rsi.
        lock cmpxchg [rsi], ebx         ;# write nn1 to *counter if it hasn't
                                        ;# changed, else reread *counter to eax
        pause                           ;# -> better P4 spin-wait performance
        jnz     .nb410_spinlock         ;# CAS failed: retry with re-read counter

        ;# if(nn1>nri) nn1=nri  -- clamp the claimed range to the list count
        mov     ecx, [rsp + nb410_nri]
        mov     edx, ecx
        sub     ecx, ebx                ;# ecx = nri - nn1 (sets flags for cmovle)
        cmovle  ebx, edx                ;# if(nn1>nri) nn1=nri
        ;# Cleared the spinlock if we got here.
        ;# eax contains nn0, ebx contains nn1.
        mov     [rsp + nb410_n], eax
        mov     [rsp + nb410_nn1], ebx
        sub     ebx, eax                ;# calc number of outer lists (flags used by jg below)
        mov     esi, eax                ;# copy n to esi (mov preserves flags)
        jg      .nb410_outerstart       ;# at least one outer iteration to do
        jmp     .nb410_end

.nb410_outerstart:
        ;# ebx contains number of outer iterations
        add     ebx, [rsp + nb410_nouter]
        mov     [rsp + nb410_nouter], ebx

.nb410_outer:
        ;# load periodic shift vector for this i-list
        mov     rax, [rsp + nb410_shift]      ;# rax = pointer into shift[]
        mov     ebx, [rax+rsi*4]              ;# ebx = shift[n]
        lea     rbx, [rbx + rbx*2]            ;# rbx = 3*is
        mov     [rsp + nb410_is3],ebx         ;# store is3
        mov     rax, [rsp + nb410_shiftvec]   ;# rax = base of shiftvec[]
        movsd   xmm0, [rax + rbx*8]           ;# shift x
        movsd   xmm1, [rax + rbx*8 + 8]       ;# shift y
        movsd   xmm2, [rax + rbx*8 + 16]      ;# shift z

        mov     rcx, [rsp + nb410_iinr]       ;# rcx = pointer into iinr[]
        mov     ebx, [rcx+rsi*4]              ;# ebx = ii
        mov     [rsp + nb410_ii], ebx

        ;# iq = facel * charge[ii], splatted to both lanes
        mov     rdx, [rbp + nb410_charge]
        movsd   xmm3, [rdx + rbx*8]
        mulsd   xmm3, [rsp + nb410_facel]
        shufpd  xmm3, xmm3, 0

        mov     rdx, [rbp + nb410_invsqrta]   ;# load invsqrta[ii] (GB screening)
        movsd   xmm4, [rdx + rbx*8]
        shufpd  xmm4, xmm4, 0

        ;# ntia = 2 * ntype * type[ii]: row offset into vdwparam
        mov     rdx, [rbp + nb410_type]
        mov     edx, [rdx + rbx*4]
        imul    edx, [rsp + nb410_ntype]
        shl     edx, 1
        mov     [rsp + nb410_ntia], edx

        ;# i coordinates plus shift, splatted to both lanes
        lea     rbx, [rbx + rbx*2]            ;# rbx = 3*ii = ii3
        mov     rax, [rbp + nb410_pos]        ;# rax = base of pos[]
        addsd   xmm0, [rax + rbx*8]
        addsd   xmm1, [rax + rbx*8 + 8]
        addsd   xmm2, [rax + rbx*8 + 16]
        movapd  [rsp + nb410_iq], xmm3
        movapd  [rsp + nb410_isai], xmm4
        shufpd  xmm0, xmm0, 0
        shufpd  xmm1, xmm1, 0
        shufpd  xmm2, xmm2, 0
        movapd  [rsp + nb410_ix], xmm0
        movapd  [rsp + nb410_iy], xmm1
        movapd  [rsp + nb410_iz], xmm2
        mov     [rsp + nb410_ii3], ebx

        ;# clear vctot (xmm12) and i forces (xmm13..xmm15) plus stack accumulators
        xorpd   xmm13, xmm13
        movapd  xmm12, xmm13
        movapd  [rsp + nb410_Vvdwtot], xmm13
        movapd  [rsp + nb410_dvdasum], xmm13
        movapd  xmm14, xmm13
        movapd  xmm15, xmm13

        ;# j-list bounds for this i atom
        mov     rax, [rsp + nb410_jindex]
        mov     ecx, [rax + rsi*4]            ;# jindex[n]
        mov     edx, [rax + rsi*4 + 4]        ;# jindex[n+1]
        sub     edx, ecx                      ;# edx = number of innerloop atoms
        mov     rsi, [rbp + nb410_pos]
        mov     rdi, [rbp + nb410_faction]
        mov     rax, [rsp + nb410_jjnr]
        shl     ecx, 2
        add     rax, rcx
        mov     [rsp + nb410_innerjjnr], rax  ;# pointer to jjnr[nj0]
        mov     ecx, edx
        sub     edx, 2                        ;# innerk-2: >=0 while a full pair remains
        add     ecx, [rsp + nb410_ninner]
        mov     [rsp + nb410_ninner], ecx
        add     edx, 0                        ;# refresh flags on edx (movs above preserve them)
        mov     [rsp + nb410_innerk], edx     ;# number of innerloop atoms
        jge     .nb410_unroll_loop
        jmp     .nb410_checksingle

.nb410_unroll_loop:
        ;# twice-unrolled innerloop: j1 in low lane, j2 in high lane of each xmm
        mov     rdx, [rsp + nb410_innerjjnr]  ;# pointer to jjnr[k]
        mov     r14d, [rdx]                   ;# r14 = jnr1
        mov     r15d, [rdx + 4]               ;# r15 = jnr2
        add     qword ptr [rsp + nb410_innerjjnr], 8  ;# advance pointer (unrolled 2)
        mov     rsi, [rbp + nb410_pos]        ;# base of pos[]
        lea     r10, [r14 + r14*2]            ;# replace jnr with j3 (coordinate index)
        lea     r11, [r15 + r15*2]

        ;# move two coordinates to xmm4-xmm6 (x/y/z, lanes = j1/j2)
        movlpd  xmm4, [rsi + r10*8]
        movlpd  xmm5, [rsi + r10*8 + 8]
        movlpd  xmm6, [rsi + r10*8 + 16]
        movhpd  xmm4, [rsi + r11*8]
        movhpd  xmm5, [rsi + r11*8 + 8]
        movhpd  xmm6, [rsi + r11*8 + 16]

        ;# calc dr = xj - xi(shifted)
        subpd   xmm4, [rsp + nb410_ix]
        subpd   xmm5, [rsp + nb410_iy]
        subpd   xmm6, [rsp + nb410_iz]

        ;# store dr for the force projection later in the loop
        movapd  [rsp + nb410_dx], xmm4
        movapd  [rsp + nb410_dy], xmm5
        movapd  [rsp + nb410_dz], xmm6

        ;# load isaj (Born screening factors for the two j atoms)
        mov     rsi, [rbp + nb410_invsqrta]

        ;# square it: rsq = dx^2 + dy^2 + dz^2
        mulpd   xmm4,xmm4
        mulpd   xmm5,xmm5
        mulpd   xmm6,xmm6
        addpd   xmm4, xmm5
        addpd   xmm4, xmm6              ;# rsq in xmm4
        movlpd  xmm3, [rsi + r14*8]
        movhpd  xmm3, [rsi + r15*8]

        mov     rdi, [rbp + nb410_type]
        mov     r8d, [rdi + r14*4]      ;# r8 = type[jnr1]
        mov     r9d, [rdi + r15*4]      ;# r9 = type[jnr2]

        ;# single-precision rsqrt seed, widened back to double
        cvtpd2ps xmm5, xmm4
        rsqrtps xmm5, xmm5
        cvtps2pd xmm2, xmm5             ;# lu (approx 1/sqrt(rsq)) in xmm2

        mulpd   xmm3, [rsp + nb410_isai]
        movapd  [rsp + nb410_isaprod], xmm3   ;# isaprod = isai*isaj

        ;# Newton-Raphson iteration 1 on the rsqrt seed in xmm2
        movapd  xmm5, xmm2              ;# copy of lu
        mulpd   xmm2, xmm2              ;# lu*lu
        movapd  xmm1, [rsp + nb410_three]
        mulpd   xmm2, xmm4              ;# rsq*lu*lu
        movapd  xmm0, [rsp + nb410_half]
        subpd   xmm1, xmm2              ;# 3.0 - rsq*lu*lu
        mulpd   xmm1, xmm5
        mulpd   xmm1, xmm0              ;# xmm1 = iter1 of rinv (new lu)

        movapd  xmm6, xmm3
        mulpd   xmm6, [rsp + nb410_gbtsc]
        movapd  [rsp + nb410_gbscale], xmm6   ;# gbscale = isaprod * gbtabscale

        ;# Newton-Raphson iteration 2
        movapd  xmm5, xmm1              ;# copy of lu
        mulpd   xmm1, xmm1              ;# lu*lu
        movapd  xmm2, [rsp + nb410_three]
        mulpd   xmm1, xmm4              ;# rsq*lu*lu
        movapd  xmm0, [rsp + nb410_half]
        subpd   xmm2, xmm1              ;# 3.0 - rsq*lu*lu
        mulpd   xmm2, xmm5
        mulpd   xmm0, xmm2              ;# xmm0 = rinv (full double precision)

        ;# qq = (isaprod*iq) * charge[j]  (GB-scaled charge product)
        mulpd   xmm3, [rsp + nb410_iq]
        mov     rsi, [rbp + nb410_charge]     ;# base of charge[]
        movlpd  xmm6, [rsi + r14*8]
        movhpd  xmm6, [rsi + r15*8]
        mulpd   xmm6, xmm3
        movapd  [rsp + nb410_qq], xmm6

        mulpd   xmm4, xmm0              ;# xmm4 = r = rsq*rinv
        movapd  [rsp + nb410_r], xmm4
        mulpd   xmm4, [rsp + nb410_gbscale]   ;# scaled table coordinate
        mov     edi, [rsp + nb410_ntia]
        cvttpd2pi mm6, xmm4             ;# mm6 = lu idx (truncated table indices; MMX)
        shl     r8d, 1
        shl     r9d, 1
        add     r8d, edi                ;# r8/r9 = 2*type[j] + ntia: vdwparam offsets
        add     r9d, edi
        cvtpi2pd xmm5, mm6
        subpd   xmm4, xmm5
        movapd  xmm1, xmm4              ;# xmm1 = eps (fractional table coordinate)
        movapd  xmm2, xmm1
        mulpd   xmm2, xmm2              ;# xmm2 = eps2
        mov     rdi, [rbp + nb410_vdwparam]
        pslld   mm6, 2                  ;# idx *= 4 (4 doubles Y,F,G,H per table point)
        mov     rsi, [rbp + nb410_GBtab]
        movd    r12d, mm6
        psrlq   mm6, 32
        movd    r13d, mm6               ;# table indices for j1/j2 in r12/r13

        ;# c6/c12 for both j atoms: lanes built from [2*type+ntia] and +8
        movlpd  xmm6, [rdi + r8*8]
        movlpd  xmm7, [rdi + r8*8 + 8]

        ;# powers of rinv for LJ while table loads are in flight
        movapd  xmm9, xmm0              ;# rinv
        mulpd   xmm9, xmm9              ;# rinvsq
        movapd  xmm10, xmm9             ;# rinvsq
        mulpd   xmm10, xmm10            ;# rinv4
        mulpd   xmm10, xmm9             ;# rinv6
        movapd  xmm11, xmm10
        mulpd   xmm11, xmm11            ;# rinv12
        movhpd  xmm6, [rdi + r9*8]      ;# xmm6 = c6 (j1,j2)
        movhpd  xmm7, [rdi + r9*8 + 8]  ;# xmm7 = c12 (j1,j2)

        ;# load GB table data and transpose to Y/F pairs
        movapd  xmm4, [rsi + r12*8]     ;# Y1 F1
        movapd  xmm3, [rsi + r13*8]     ;# Y2 F2
        movapd  xmm5, xmm4
        unpcklpd xmm4, xmm3             ;# Y1 Y2
        unpckhpd xmm5, xmm3             ;# F1 F2

        mulpd   xmm10, xmm6             ;# Vvdw6  = c6*rinv6
        mulpd   xmm11, xmm7             ;# Vvdw12 = c12*rinv12
        movapd  xmm9, xmm11             ;# xmm9 repurposed: keep Vvdw12 for the force
        subpd   xmm11, xmm10            ;# Vvdw = Vvdw12 - Vvdw6
        ;# add potential to Vvdwtot
        addpd   xmm11, [rsp + nb410_Vvdwtot]
        movapd  [rsp + nb410_Vvdwtot], xmm11

        movapd  xmm6, [rsi + r12*8 + 16]  ;# G1 H1
        movapd  xmm3, [rsi + r13*8 + 16]  ;# G2 H2
        movapd  xmm7, xmm6
        unpcklpd xmm6, xmm3             ;# G1 G2
        unpckhpd xmm7, xmm3             ;# H1 H2
        ;# coulomb table ready, in xmm4-xmm7
        ;# cubic-spline evaluation: V = Y + eps*(F + eps*(G + eps*H))
        mulpd   xmm7, xmm1              ;# xmm7 = Heps
        mulpd   xmm6, xmm1              ;# xmm6 = Geps
        mulpd   xmm7, xmm1              ;# xmm7 = Heps2
        addpd   xmm5, xmm6
        addpd   xmm5, xmm7              ;# xmm5 = Fp
        mulpd   xmm7, [rsp + nb410_two] ;# two*Heps2
        movapd  xmm3, [rsp + nb410_qq]
        addpd   xmm7, xmm6
        addpd   xmm7, xmm5              ;# xmm7 = FF (dV/d(eps))
        mulpd   xmm5, xmm1              ;# xmm5 = eps*Fp
        addpd   xmm5, xmm4              ;# xmm5 = VV
        mulpd   xmm5, xmm3              ;# vcoul = qq*VV
        mulpd   xmm3, xmm7              ;# fijC  = FF*qq

        ;# LJ forces
        mulpd   xmm10, [rsp + nb410_six]     ;# 6*Vvdw6
        mulpd   xmm9, [rsp + nb410_twelve]   ;# 12*Vvdw12
        subpd   xmm9, xmm10
        mulpd   xmm9, xmm0              ;# (12*Vvdw12 - 6*Vvdw6)*rinv
        mov     rsi, [rbp + nb410_dvda]

        ;# Calculate dVda: -(vcoul + fijC*r) per pair
        xorpd   xmm7, xmm7
        mulpd   xmm3, [rsp + nb410_gbscale]
        movapd  xmm6, xmm3
        mulpd   xmm6, [rsp + nb410_r]
        addpd   xmm6, xmm5              ;# xmm6 = (vcoul + fijC*r)
        ;# update vctot
        addpd   xmm12, xmm5
        subpd   xmm7, xmm6              ;# xmm7 = -(vcoul + fijC*r)
        movapd  xmm6, xmm7

        mov     rdi, [rbp + nb410_faction]
        ;# the fj's - start by accumulating forces from memory
        movlpd  xmm2, [rdi + r10*8]
        movlpd  xmm4, [rdi + r10*8 + 8]
        movlpd  xmm5, [rdi + r10*8 + 16]

        ;# update dvdasum (i-atom accumulator)
        addpd   xmm7, [rsp + nb410_dvdasum]
        movapd  [rsp + nb410_dvdasum], xmm7

        ;# update j atoms' dvdaj: add each lane to dvda[jnr]
        movhlps xmm7, xmm6              ;# xmm7 = j2 contribution
        addsd   xmm6, [rsi + r14*8]
        addsd   xmm7, [rsi + r15*8]
        movsd   [rsi + r14*8], xmm6
        ;# NOTE(review): chunk ends here mid-loop -- the store of xmm7 to
        ;# dvda[jnr2], the force projection/accumulation, the single-atom
        ;# epilogue and .nb410_end are in the next chunk of the file.
;# --- end of chunk 1/3: web-viewer UI text (keyboard-shortcut help) removed;
;# the kernel body continues in the next chunk of the file ---