;# nb_kernel304_x86_64_sse2.intel_syntax.s
;# NOTE(review): this is a web-viewer capture of page 1 of 5 of a 2,410-line
;# GROMACS source file; the original line breaks were lost in extraction and
;# the kernel body continues beyond the end of this excerpt.
;#;# $Id: nb_kernel304_x86_64_sse2.intel_syntax.s,v 1.1.2.2 2006/09/22 08:40:36 lindahl Exp $;#;# Gromacs 4.0 Copyright (c) 1991-2003 ;# David van der Spoel, Erik Lindahl;#;# This program is free software; you can redistribute it and/or;# modify it under the terms of the GNU General Public License;# as published by the Free Software Foundation; either version 2;# of the License, or (at your option) any later version.;#;# To help us fund GROMACS development, we humbly ask that you cite;# the research papers on the package. Check out http://www.gromacs.org;# ;# And Hey:;# Gnomes, ROck Monsters And Chili Sauce;#;# These files require GNU binutils 2.10 or later, since we;# use intel syntax for portability, or a recent version ;# of NASM that understands Extended 3DNow and SSE2 instructions.;# (NASM is normally only used with MS Visual C++).;# Since NASM and gnu as disagree on some definitions and use ;# completely different preprocessing options I have to introduce a;# trick: NASM uses ';' for comments, while gnu as uses '#' on x86.;# Gnu as treats ';' as a line break, i.e. ignores it. This is the;# reason why all comments need both symbols...;# The source is written for GNU as, with intel syntax. When you use;# NASM we redefine a couple of things. 
The false if-statement around ;# the following code is seen by GNU as, but NASM doesn't see it, so ;# the code inside is read by NASM but not gcc.; .if 0 # block below only read by NASM%define .section section%define .long dd%define .align align%define .globl global;# NASM only wants 'dword', not 'dword ptr'.%define ptr.equiv .equiv 2 %1 equ %2%endmacro; .endif # End of NASM-specific block; .intel_syntax noprefix # Line only read by gnu as .globl nb_kernel304_x86_64_sse2.globl _nb_kernel304_x86_64_sse2nb_kernel304_x86_64_sse2: _nb_kernel304_x86_64_sse2: ;# Room for return address and rbp (16 bytes).equiv nb304_fshift, 16.equiv nb304_gid, 24.equiv nb304_pos, 32.equiv nb304_faction, 40.equiv nb304_charge, 48.equiv nb304_p_facel, 56.equiv nb304_argkrf, 64.equiv nb304_argcrf, 72.equiv nb304_Vc, 80.equiv nb304_type, 88.equiv nb304_p_ntype, 96.equiv nb304_vdwparam, 104.equiv nb304_Vvdw, 112.equiv nb304_p_tabscale, 120.equiv nb304_VFtab, 128.equiv nb304_invsqrta, 136.equiv nb304_dvda, 144.equiv nb304_p_gbtabscale, 152.equiv nb304_GBtab, 160.equiv nb304_p_nthreads, 168.equiv nb304_count, 176.equiv nb304_mtx, 184.equiv nb304_outeriter, 192.equiv nb304_inneriter, 200.equiv nb304_work, 208 ;# stack offsets for local variables ;# bottom of stack is cache-aligned for sse2 use .equiv nb304_ixM, 0.equiv nb304_iyM, 16.equiv nb304_izM, 32.equiv nb304_ixH1, 48.equiv nb304_iyH1, 64.equiv nb304_izH1, 80.equiv nb304_ixH2, 96.equiv nb304_iyH2, 112.equiv nb304_izH2, 128.equiv nb304_jxM, 144.equiv nb304_jyM, 160.equiv nb304_jzM, 176.equiv nb304_jxH1, 192.equiv nb304_jyH1, 208.equiv nb304_jzH1, 224.equiv nb304_jxH2, 240.equiv nb304_jyH2, 256.equiv nb304_jzH2, 272.equiv nb304_dxMM, 288.equiv nb304_dyMM, 304.equiv nb304_dzMM, 320.equiv nb304_dxMH1, 336.equiv nb304_dyMH1, 352.equiv nb304_dzMH1, 368.equiv nb304_dxMH2, 384.equiv nb304_dyMH2, 400.equiv nb304_dzMH2, 416.equiv nb304_dxH1M, 432.equiv nb304_dyH1M, 448.equiv nb304_dzH1M, 464.equiv nb304_dxH1H1, 480.equiv nb304_dyH1H1, 496.equiv 
nb304_dzH1H1, 512.equiv nb304_dxH1H2, 528.equiv nb304_dyH1H2, 544.equiv nb304_dzH1H2, 560.equiv nb304_dxH2M, 576.equiv nb304_dyH2M, 592.equiv nb304_dzH2M, 608.equiv nb304_dxH2H1, 624.equiv nb304_dyH2H1, 640.equiv nb304_dzH2H1, 656.equiv nb304_dxH2H2, 672.equiv nb304_dyH2H2, 688.equiv nb304_dzH2H2, 704.equiv nb304_qqMM, 720.equiv nb304_qqMH, 736.equiv nb304_qqHH, 752.equiv nb304_two, 768.equiv nb304_tsc, 784.equiv nb304_vctot, 800.equiv nb304_fixM, 816.equiv nb304_fiyM, 832.equiv nb304_fizM, 848.equiv nb304_fixH1, 864.equiv nb304_fiyH1, 880.equiv nb304_fizH1, 896.equiv nb304_fixH2, 912.equiv nb304_fiyH2, 928.equiv nb304_fizH2, 944.equiv nb304_epsH1, 960.equiv nb304_epsH2, 976.equiv nb304_epsM, 992.equiv nb304_fjxH1, 1008.equiv nb304_fjyH1, 1024.equiv nb304_fjzH1, 1040.equiv nb304_fjxH2, 1056.equiv nb304_fjyH2, 1072.equiv nb304_fjzH2, 1088.equiv nb304_half, 1104.equiv nb304_three, 1120.equiv nb304_rsqMM, 1136.equiv nb304_rsqMH1, 1152.equiv nb304_rsqMH2, 1168.equiv nb304_rsqH1M, 1184.equiv nb304_rsqH1H1, 1200.equiv nb304_rsqH1H2, 1216.equiv nb304_rsqH2M, 1232.equiv nb304_rsqH2H1, 1248.equiv nb304_rsqH2H2, 1264.equiv nb304_rinvMM, 1280.equiv nb304_rinvMH1, 1296.equiv nb304_rinvMH2, 1312.equiv nb304_rinvH1M, 1328.equiv nb304_rinvH1H1, 1344.equiv nb304_rinvH1H2, 1360.equiv nb304_rinvH2M, 1376.equiv nb304_rinvH2H1, 1392.equiv nb304_rinvH2H2, 1408.equiv nb304_is3, 1424.equiv nb304_ii3, 1428.equiv nb304_nri, 1432.equiv nb304_iinr, 1440.equiv nb304_jindex, 1448.equiv nb304_jjnr, 1456.equiv nb304_shift, 1464.equiv nb304_shiftvec, 1472.equiv nb304_facel, 1480.equiv nb304_innerjjnr, 1488.equiv nb304_innerk, 1496.equiv nb304_n, 1500.equiv nb304_nn1, 1504.equiv nb304_nouter, 1508.equiv nb304_ninner, 1512 push rbp mov rbp, rsp push rbx emms push r12 push r13 push r14 push r15 sub rsp, 1528 ;# local variable stack space (n*16+8) ;# zero 32-bit iteration counters mov eax, 0 mov [rsp + nb304_nouter], eax mov [rsp + nb304_ninner], eax mov edi, [rdi] mov [rsp + nb304_nri], edi mov [rsp 
+ nb304_iinr], rsi mov [rsp + nb304_jindex], rdx mov [rsp + nb304_jjnr], rcx mov [rsp + nb304_shift], r8 mov [rsp + nb304_shiftvec], r9 mov rsi, [rbp + nb304_p_facel] movsd xmm0, [rsi] movsd [rsp + nb304_facel], xmm0 mov rax, [rbp + nb304_p_tabscale] movsd xmm3, [rax] shufpd xmm3, xmm3, 0 movapd [rsp + nb304_tsc], xmm3 ;# create constant floating-point factors on stack mov eax, 0x00000000 ;# lower half of double half IEEE (hex) mov ebx, 0x3fe00000 mov [rsp + nb304_half], eax mov [rsp + nb304_half+4], ebx movsd xmm1, [rsp + nb304_half] shufpd xmm1, xmm1, 0 ;# splat to all elements movapd xmm3, xmm1 addpd xmm3, xmm3 ;# one movapd xmm2, xmm3 addpd xmm2, xmm2 ;# two addpd xmm3, xmm2 ;# three movapd [rsp + nb304_half], xmm1 movapd [rsp + nb304_two], xmm2 movapd [rsp + nb304_three], xmm3 ;# assume we have at least one i particle - start directly mov rcx, [rsp + nb304_iinr] ;# rcx = pointer into iinr[] mov ebx, [rcx] ;# ebx =ii mov rdx, [rbp + nb304_charge] movsd xmm3, [rdx + rbx*8 + 24] movsd xmm4, xmm3 movsd xmm5, [rdx + rbx*8 + 8] mov rsi, [rbp + nb304_p_facel] movsd xmm0, [rsi] movsd xmm6, [rsp + nb304_facel] mulsd xmm3, xmm3 mulsd xmm4, xmm5 mulsd xmm5, xmm5 mulsd xmm3, xmm6 mulsd xmm4, xmm6 mulsd xmm5, xmm6 shufpd xmm3, xmm3, 0 shufpd xmm4, xmm4, 0 shufpd xmm5, xmm5, 0 movapd [rsp + nb304_qqMM], xmm3 movapd [rsp + nb304_qqMH], xmm4 movapd [rsp + nb304_qqHH], xmm5 .nb304_threadloop: mov rsi, [rbp + nb304_count] ;# pointer to sync counter mov eax, [rsi].nb304_spinlock: mov ebx, eax ;# ebx=*count=nn0 add ebx, 1 ;# ebx=nn1=nn0+10 lock cmpxchg [esi], ebx ;# write nn1 to *counter, ;# if it hasnt changed. ;# or reread *counter to eax. pause ;# -> better p4 performance jnz .nb304_spinlock ;# if(nn1>nri) nn1=nri mov ecx, [rsp + nb304_nri] mov edx, ecx sub ecx, ebx cmovle ebx, edx ;# if(nn1>nri) nn1=nri ;# Cleared the spinlock if we got here. ;# eax contains nn0, ebx contains nn1. 
mov [rsp + nb304_n], eax mov [rsp + nb304_nn1], ebx sub ebx, eax ;# calc number of outer lists mov esi, eax ;# copy n to esi jg .nb304_outerstart jmp .nb304_end.nb304_outerstart: ;# ebx contains number of outer iterations add ebx, [rsp + nb304_nouter] mov [rsp + nb304_nouter], ebx.nb304_outer: mov rax, [rsp + nb304_shift] ;# rax = pointer into shift[] mov ebx, [rax +rsi*4] ;# rbx=shift[n] lea rbx, [rbx + rbx*2] ;# rbx=3*is mov [rsp + nb304_is3],ebx ;# store is3 mov rax, [rsp + nb304_shiftvec] ;# rax = base of shiftvec[] movsd xmm0, [rax + rbx*8] movsd xmm1, [rax + rbx*8 + 8] movsd xmm2, [rax + rbx*8 + 16] mov rcx, [rsp + nb304_iinr] ;# rcx = pointer into iinr[] mov ebx, [rcx + rsi*4] ;# ebx =ii lea rbx, [rbx + rbx*2] ;# rbx = 3*ii=ii3 mov rax, [rbp + nb304_pos] ;# rax = base of pos[] mov [rsp + nb304_ii3], ebx movapd xmm3, xmm0 movapd xmm3, xmm0 movapd xmm4, xmm1 movapd xmm5, xmm2 addsd xmm3, [rax + rbx*8 + 24] addsd xmm4, [rax + rbx*8 + 32] addsd xmm5, [rax + rbx*8 + 40] shufpd xmm3, xmm3, 0 shufpd xmm4, xmm4, 0 shufpd xmm5, xmm5, 0 movapd [rsp + nb304_ixH1], xmm3 movapd [rsp + nb304_iyH1], xmm4 movapd [rsp + nb304_izH1], xmm5 movsd xmm3, xmm0 movsd xmm4, xmm1 movsd xmm5, xmm2 addsd xmm0, [rax + rbx*8 + 48] addsd xmm1, [rax + rbx*8 + 56] addsd xmm2, [rax + rbx*8 + 64] addsd xmm3, [rax + rbx*8 + 72] addsd xmm4, [rax + rbx*8 + 80] addsd xmm5, [rax + rbx*8 + 88] shufpd xmm0, xmm0, 0 shufpd xmm1, xmm1, 0 shufpd xmm2, xmm2, 0 shufpd xmm3, xmm3, 0 shufpd xmm4, xmm4, 0 shufpd xmm5, xmm5, 0 movapd [rsp + nb304_ixH2], xmm0 movapd [rsp + nb304_iyH2], xmm1 movapd [rsp + nb304_izH2], xmm2 movapd [rsp + nb304_ixM], xmm3 movapd [rsp + nb304_iyM], xmm4 movapd [rsp + nb304_izM], xmm5 ;# clear vctot and i forces xorpd xmm4, xmm4 movapd [rsp + nb304_vctot], xmm4 movapd [rsp + nb304_fixM], xmm4 movapd [rsp + nb304_fiyM], xmm4 movapd [rsp + nb304_fizM], xmm4 movapd [rsp + nb304_fixH1], xmm4 movapd [rsp + nb304_fiyH1], xmm4 movapd [rsp + nb304_fizH1], xmm4 movapd [rsp + nb304_fixH2], 
xmm4 movapd [rsp + nb304_fiyH2], xmm4 movapd [rsp + nb304_fizH2], xmm4 mov rax, [rsp + nb304_jindex] mov ecx, [rax + rsi*4] ;# jindex[n] mov edx, [rax + rsi*4 + 4] ;# jindex[n+1] sub edx, ecx ;# number of innerloop atoms mov rsi, [rbp + nb304_pos] mov rdi, [rbp + nb304_faction] mov rax, [rsp + nb304_jjnr] shl ecx, 2 add rax, rcx mov [rsp + nb304_innerjjnr], rax ;# pointer to jjnr[nj0] mov ecx, edx sub edx, 2 add ecx, [rsp + nb304_ninner] mov [rsp + nb304_ninner], ecx add edx, 0 mov [rsp + nb304_innerk], edx ;# number of innerloop atoms jge .nb304_unroll_loop jmp .nb304_checksingle.nb304_unroll_loop: ;# twice unrolled innerloop here mov rdx, [rsp + nb304_innerjjnr] ;# pointer to jjnr[k] mov eax, [rdx] mov ebx, [rdx + 4] add qword ptr [rsp + nb304_innerjjnr], 8 ;# advance pointer (unrolled 2) mov rsi, [rbp + nb304_pos] ;# base of pos[] lea rax, [rax + rax*2] ;# replace jnr with j3 lea rbx, [rbx + rbx*2] ;# move j H1 coordinates to local temp variables movlpd xmm0, [rsi + rax*8 + 24] movlpd xmm1, [rsi + rax*8 + 32] movlpd xmm2, [rsi + rax*8 + 40] movhpd xmm0, [rsi + rbx*8 + 24] movhpd xmm1, [rsi + rbx*8 + 32] movhpd xmm2, [rsi + rbx*8 + 40] ;# xmm0 = H1x ;# xmm1 = H1y ;# xmm2 = H1z movapd xmm3, xmm0 movapd xmm4, xmm1 movapd xmm5, xmm2 movapd xmm6, xmm0 movapd xmm7, xmm1 movapd xmm8, xmm2 subpd xmm0, [rsp + nb304_ixH1] subpd xmm1, [rsp + nb304_iyH1] subpd xmm2, [rsp + nb304_izH1] subpd xmm3, [rsp + nb304_ixH2] subpd xmm4, [rsp + nb304_iyH2] subpd xmm5, [rsp + nb304_izH2] subpd xmm6, [rsp + nb304_ixM] subpd xmm7, [rsp + nb304_iyM] subpd xmm8, [rsp + nb304_izM] movapd [rsp + nb304_dxH1H1], xmm0 movapd [rsp + nb304_dyH1H1], xmm1 movapd [rsp + nb304_dzH1H1], xmm2 mulpd xmm0, xmm0 mulpd xmm1, xmm1 mulpd xmm2, xmm2 movapd [rsp + nb304_dxH2H1], xmm3 movapd [rsp + nb304_dyH2H1], xmm4 movapd [rsp + nb304_dzH2H1], xmm5 mulpd xmm3, xmm3 mulpd xmm4, xmm4 mulpd xmm5, xmm5 movapd [rsp + nb304_dxMH1], xmm6 movapd [rsp + nb304_dyMH1], xmm7 movapd [rsp + nb304_dzMH1], xmm8 mulpd xmm6, 
xmm6 mulpd xmm7, xmm7 mulpd xmm8, xmm8 addpd xmm0, xmm1 addpd xmm0, xmm2 addpd xmm3, xmm4 addpd xmm3, xmm5 addpd xmm6, xmm7 addpd xmm6, xmm8 ;# start doing invsqrt for jH1 atoms cvtpd2ps xmm1, xmm0 cvtpd2ps xmm4, xmm3 cvtpd2ps xmm7, xmm6 rsqrtps xmm1, xmm1 rsqrtps xmm4, xmm4 rsqrtps xmm7, xmm7 cvtps2pd xmm1, xmm1 cvtps2pd xmm4, xmm4 cvtps2pd xmm7, xmm7 movapd xmm2, xmm1 movapd xmm5, xmm4 movapd xmm8, xmm7 mulpd xmm1, xmm1 ;# lu*lu mulpd xmm4, xmm4 ;# lu*lu mulpd xmm7, xmm7 ;# lu*lu movapd xmm9, [rsp + nb304_three] movapd xmm10, xmm9 movapd xmm11, xmm9 mulpd xmm1, xmm0 ;# rsq*lu*lu mulpd xmm4, xmm3 ;# rsq*lu*lu mulpd xmm7, xmm6 ;# rsq*lu*lu subpd xmm9, xmm1 subpd xmm10, xmm4 subpd xmm11, xmm7 ;# 3-rsq*lu*lu mulpd xmm9, xmm2 mulpd xmm10, xmm5 mulpd xmm11, xmm8 ;# lu*(3-rsq*lu*lu) movapd xmm15, [rsp + nb304_half] mulpd xmm9, xmm15 ;# first iteration for rinvH1H1
;# NOTE(review): excerpt truncated here (page 1/5); the remainder of
;# nb_kernel304_x86_64_sse2 is in the full 2,410-line source file.