;# nb_kernel231_x86_64_sse2.intel_syntax.s
;# From "the most famous and fastest molecular simulation software" · assembly source · 2,281 lines total · page 1/5
;# (viewer-extraction header; not part of the original source file)
;#;# $Id: nb_kernel231_x86_64_sse2.intel_syntax.s,v 1.1.2.2 2006/09/22 08:40:34 lindahl Exp $;#;# Gromacs 4.0 Copyright (c) 1991-2003 ;# David van der Spoel, Erik Lindahl;#;# This program is free software; you can redistribute it and/or;# modify it under the terms of the GNU General Public License;# as published by the Free Software Foundation; either version 2;# of the License, or (at your option) any later version.;#;# To help us fund GROMACS development, we humbly ask that you cite;# the research papers on the package. Check out http://www.gromacs.org;# ;# And Hey:;# Gnomes, ROck Monsters And Chili Sauce;#;# These files require GNU binutils 2.10 or later, since we;# use intel syntax for portability, or a recent version ;# of NASM that understands Extended 3DNow and SSE2 instructions.;# (NASM is normally only used with MS Visual C++).;# Since NASM and gnu as disagree on some definitions and use ;# completely different preprocessing options I have to introduce a;# trick: NASM uses ';' for comments, while gnu as uses '#' on x86.;# Gnu as treats ';' as a line break, i.e. ignores it. This is the;# reason why all comments need both symbols...;# The source is written for GNU as, with intel syntax. When you use;# NASM we redefine a couple of things. 
The false if-statement around ;# the following code is seen by GNU as, but NASM doesn't see it, so ;# the code inside is read by NASM but not gcc.; .if 0 # block below only read by NASM%define .section section%define .long dd%define .align align%define .globl global;# NASM only wants 'dword', not 'dword ptr'.%define ptr.equiv .equiv 2 %1 equ %2%endmacro; .endif # End of NASM-specific block; .intel_syntax noprefix # Line only read by gnu as.globl nb_kernel231_x86_64_sse2.globl _nb_kernel231_x86_64_sse2nb_kernel231_x86_64_sse2: _nb_kernel231_x86_64_sse2: ;# Room for return address and rbp (16 bytes).equiv nb231_fshift, 16.equiv nb231_gid, 24.equiv nb231_pos, 32.equiv nb231_faction, 40.equiv nb231_charge, 48.equiv nb231_p_facel, 56.equiv nb231_argkrf, 64.equiv nb231_argcrf, 72.equiv nb231_Vc, 80.equiv nb231_type, 88.equiv nb231_p_ntype, 96.equiv nb231_vdwparam, 104.equiv nb231_Vvdw, 112.equiv nb231_p_tabscale, 120.equiv nb231_VFtab, 128.equiv nb231_invsqrta, 136.equiv nb231_dvda, 144.equiv nb231_p_gbtabscale, 152.equiv nb231_GBtab, 160.equiv nb231_p_nthreads, 168.equiv nb231_count, 176.equiv nb231_mtx, 184.equiv nb231_outeriter, 192.equiv nb231_inneriter, 200.equiv nb231_work, 208 ;# stack offsets for local variables ;# bottom of stack is cache-aligned for sse2 use .equiv nb231_ixO, 0.equiv nb231_iyO, 16.equiv nb231_izO, 32.equiv nb231_ixH1, 48.equiv nb231_iyH1, 64.equiv nb231_izH1, 80.equiv nb231_ixH2, 96.equiv nb231_iyH2, 112.equiv nb231_izH2, 128.equiv nb231_iqO, 144.equiv nb231_iqH, 160.equiv nb231_dxO, 176.equiv nb231_dyO, 192.equiv nb231_dzO, 208.equiv nb231_dxH1, 224.equiv nb231_dyH1, 240.equiv nb231_dzH1, 256.equiv nb231_dxH2, 272.equiv nb231_dyH2, 288.equiv nb231_dzH2, 304.equiv nb231_qqO, 320.equiv nb231_qqH, 336.equiv nb231_c6, 352.equiv nb231_c12, 368.equiv nb231_tsc, 384.equiv nb231_fstmp, 400.equiv nb231_vctot, 416.equiv nb231_Vvdwtot, 432.equiv nb231_fixO, 448.equiv nb231_fiyO, 464.equiv nb231_fizO, 480.equiv nb231_fixH1, 496.equiv nb231_fiyH1, 
512.equiv nb231_fizH1, 528.equiv nb231_fixH2, 544.equiv nb231_fiyH2, 560.equiv nb231_fizH2, 576.equiv nb231_fjx, 592.equiv nb231_fjy, 608.equiv nb231_fjz, 624.equiv nb231_half, 640.equiv nb231_three, 656.equiv nb231_two, 672.equiv nb231_krf, 688.equiv nb231_crf, 704.equiv nb231_rsqO, 720.equiv nb231_rsqH1, 736.equiv nb231_rsqH2, 752.equiv nb231_rinvO, 768.equiv nb231_rinvH1, 784.equiv nb231_rinvH2, 800.equiv nb231_facel, 816.equiv nb231_iinr, 824.equiv nb231_jindex, 832.equiv nb231_jjnr, 840.equiv nb231_shift, 848.equiv nb231_shiftvec, 856.equiv nb231_innerjjnr, 864.equiv nb231_nri, 872.equiv nb231_is3, 876.equiv nb231_ii3, 880.equiv nb231_ntia, 884.equiv nb231_innerk, 888.equiv nb231_n, 892.equiv nb231_nn1, 896.equiv nb231_nouter, 900.equiv nb231_ninner, 904 push rbp mov rbp, rsp push rbx emms push r12 push r13 push r14 push r15 sub rsp, 920 ; # local variable stack space (n*16+8) ;# zero 32-bit iteration counters mov eax, 0 mov [rsp + nb231_nouter], eax mov [rsp + nb231_ninner], eax mov edi, [rdi] mov [rsp + nb231_nri], edi mov [rsp + nb231_iinr], rsi mov [rsp + nb231_jindex], rdx mov [rsp + nb231_jjnr], rcx mov [rsp + nb231_shift], r8 mov [rsp + nb231_shiftvec], r9 mov rsi, [rbp + nb231_p_facel] movsd xmm0, [rsi] movsd [rsp + nb231_facel], xmm0 mov rax, [rbp + nb231_p_tabscale] movsd xmm3, [rax] shufpd xmm3, xmm3, 0 movapd [rsp + nb231_tsc], xmm3 mov rsi, [rbp + nb231_argkrf] mov rdi, [rbp + nb231_argcrf] movsd xmm1, [rsi] movsd xmm2, [rdi] shufpd xmm1, xmm1, 0 shufpd xmm2, xmm2, 0 movapd [rsp + nb231_krf], xmm1 movapd [rsp + nb231_crf], xmm2 ;# create constant floating-point factors on stack mov eax, 0x00000000 ;# lower half of double half IEEE (hex) mov ebx, 0x3fe00000 mov [rsp + nb231_half], eax mov [rsp + nb231_half+4], ebx movsd xmm1, [rsp + nb231_half] shufpd xmm1, xmm1, 0 ;# splat to all elements movapd xmm3, xmm1 addpd xmm3, xmm3 ;# one movapd xmm2, xmm3 addpd xmm2, xmm2 ;# two addpd xmm3, xmm2 ;# three movapd [rsp + nb231_half], xmm1 movapd [rsp + 
nb231_two], xmm2 movapd [rsp + nb231_three], xmm3 ;# assume we have at least one i particle - start directly mov rcx, [rsp + nb231_iinr] ;# rcx = pointer into iinr[] mov ebx, [rcx] ;# ebx =ii mov rdx, [rbp + nb231_charge] movsd xmm3, [rdx + rbx*8] movsd xmm4, [rdx + rbx*8 + 8] movsd xmm5, [rsp + nb231_facel] mulsd xmm3, xmm5 mulsd xmm4, xmm5 shufpd xmm3, xmm3, 0 shufpd xmm4, xmm4, 0 movapd [rsp + nb231_iqO], xmm3 movapd [rsp + nb231_iqH], xmm4 mov rdx, [rbp + nb231_type] mov ecx, [rdx + rbx*4] shl ecx, 1 mov rdi, [rbp + nb231_p_ntype] imul ecx, [rdi] ;# rcx = ntia = 2*ntype*type[ii0] mov [rsp + nb231_ntia], ecx .nb231_threadloop: mov rsi, [rbp + nb231_count] ;# pointer to sync counter mov eax, [rsi].nb231_spinlock: mov ebx, eax ;# ebx=*count=nn0 add ebx, 1 ;# ebx=nn1=nn0+10 lock cmpxchg [esi], ebx ;# write nn1 to *counter, ;# if it hasnt changed. ;# or reread *counter to eax. pause ;# -> better p4 performance jnz .nb231_spinlock ;# if(nn1>nri) nn1=nri mov ecx, [rsp + nb231_nri] mov edx, ecx sub ecx, ebx cmovle ebx, edx ;# if(nn1>nri) nn1=nri ;# Cleared the spinlock if we got here. ;# eax contains nn0, ebx contains nn1. 
mov [rsp + nb231_n], eax mov [rsp + nb231_nn1], ebx sub ebx, eax ;# calc number of outer lists mov esi, eax ;# copy n to esi jg .nb231_outerstart jmp .nb231_end.nb231_outerstart: ;# ebx contains number of outer iterations add ebx, [rsp + nb231_nouter] mov [rsp + nb231_nouter], ebx.nb231_outer: mov rax, [rsp + nb231_shift] ;# eax = pointer into shift[] mov ebx, [rax+rsi*4] ;# ebx=shift[n] lea rbx, [rbx + rbx*2] ;# rbx=3*is mov [rsp + nb231_is3],ebx ;# store is3 mov rax, [rsp + nb231_shiftvec] ;# eax = base of shiftvec[] movsd xmm0, [rax + rbx*8] movsd xmm1, [rax + rbx*8 + 8] movsd xmm2, [rax + rbx*8 + 16] mov rcx, [rsp + nb231_iinr] ;# ecx = pointer into iinr[] mov ebx, [rcx+rsi*4] ;# ebx =ii movapd xmm3, xmm0 movapd xmm4, xmm1 movapd xmm5, xmm2 lea rbx, [rbx + rbx*2] ;# rbx = 3*ii=ii3 mov rax, [rbp + nb231_pos] ;# eax = base of pos[] mov [rsp + nb231_ii3], ebx addsd xmm3, [rax + rbx*8] addsd xmm4, [rax + rbx*8 + 8] addsd xmm5, [rax + rbx*8 + 16] shufpd xmm3, xmm3, 0 shufpd xmm4, xmm4, 0 shufpd xmm5, xmm5, 0 movapd [rsp + nb231_ixO], xmm3 movapd [rsp + nb231_iyO], xmm4 movapd [rsp + nb231_izO], xmm5 movsd xmm3, xmm0 movsd xmm4, xmm1 movsd xmm5, xmm2 addsd xmm0, [rax + rbx*8 + 24] addsd xmm1, [rax + rbx*8 + 32] addsd xmm2, [rax + rbx*8 + 40] addsd xmm3, [rax + rbx*8 + 48] addsd xmm4, [rax + rbx*8 + 56] addsd xmm5, [rax + rbx*8 + 64] shufpd xmm0, xmm0, 0 shufpd xmm1, xmm1, 0 shufpd xmm2, xmm2, 0 shufpd xmm3, xmm3, 0 shufpd xmm4, xmm4, 0 shufpd xmm5, xmm5, 0 movapd [rsp + nb231_ixH1], xmm0 movapd [rsp + nb231_iyH1], xmm1 movapd [rsp + nb231_izH1], xmm2 movapd [rsp + nb231_ixH2], xmm3 movapd [rsp + nb231_iyH2], xmm4 movapd [rsp + nb231_izH2], xmm5 ;# clear vctot and i forces xorpd xmm4, xmm4 movapd [rsp + nb231_vctot], xmm4 movapd [rsp + nb231_Vvdwtot], xmm4 movapd [rsp + nb231_fixO], xmm4 movapd [rsp + nb231_fiyO], xmm4 movapd [rsp + nb231_fizO], xmm4 movapd [rsp + nb231_fixH1], xmm4 movapd [rsp + nb231_fiyH1], xmm4 movapd [rsp + nb231_fizH1], xmm4 movapd [rsp + 
nb231_fixH2], xmm4 movapd [rsp + nb231_fiyH2], xmm4 movapd [rsp + nb231_fizH2], xmm4 mov rax, [rsp + nb231_jindex] mov ecx, [rax+rsi*4] ;# jindex[n] mov edx, [rax + rsi*4 + 4] ;# jindex[n+1] sub edx, ecx ;# number of innerloop atoms mov rsi, [rbp + nb231_pos] mov rdi, [rbp + nb231_faction] mov rax, [rsp + nb231_jjnr] shl ecx, 2 add rax, rcx mov [rsp + nb231_innerjjnr], rax ;# pointer to jjnr[nj0] mov ecx, edx sub edx, 2 add ecx, [rsp + nb231_ninner] mov [rsp + nb231_ninner], ecx add edx, 0 mov [rsp + nb231_innerk], edx ;# number of innerloop atoms jge .nb231_unroll_loop jmp .nb231_checksingle.nb231_unroll_loop: ;# twice unrolled innerloop here mov rdx, [rsp + nb231_innerjjnr] ;# pointer to jjnr[k] mov eax, [rdx] mov ebx, [rdx + 4] add qword ptr [rsp + nb231_innerjjnr], 8 ;# advance pointer (unrolled 2) mov rsi, [rbp + nb231_charge] ;# base of charge[] movlpd xmm3, [rsi + rax*8] movhpd xmm3, [rsi + rbx*8] movapd xmm4, xmm3 mulpd xmm3, [rsp + nb231_iqO] mulpd xmm4, [rsp + nb231_iqH] movapd [rsp + nb231_qqO], xmm3 movapd [rsp + nb231_qqH], xmm4 mov rsi, [rbp + nb231_type] mov r8d, [rsi + rax*4] mov r9d, [rsi + rbx*4] mov rsi, [rbp + nb231_vdwparam] shl r8d, 1 shl r9d, 1 mov edi, [rsp + nb231_ntia] add r8d, edi add r9d, edi movlpd xmm6, [rsi + r8*8] ;# c6a movlpd xmm7, [rsi + r9*8] ;# c6b movhpd xmm6, [rsi + r8*8 + 8] ;# c6a c12a movhpd xmm7, [rsi + r9*8 + 8] ;# c6b c12b movapd xmm4, xmm6 unpcklpd xmm4, xmm7 unpckhpd xmm6, xmm7 movapd [rsp + nb231_c6], xmm4 movapd [rsp + nb231_c12], xmm6 mov rsi, [rbp + nb231_pos] ;# base of pos[] lea rax, [rax + rax*2] ;# replace jnr with j3 lea rbx, [rbx + rbx*2] ;# move j coordinates to local temp variables movlpd xmm0, [rsi + rax*8] movlpd xmm1, [rsi + rax*8 + 8] movlpd xmm2, [rsi + rax*8 + 16] movhpd xmm0, [rsi + rbx*8] movhpd xmm1, [rsi + rbx*8 + 8] movhpd xmm2, [rsi + rbx*8 + 16] ;# xmm0 = jx ;# xmm1 = jy ;# xmm2 = jz movapd xmm3, xmm0 movapd xmm4, xmm1 movapd xmm5, xmm2 movapd xmm6, xmm0 movapd xmm7, xmm1 movapd xmm8, xmm2 
subpd xmm0, [rsp + nb231_ixO] subpd xmm1, [rsp + nb231_iyO] subpd xmm2, [rsp + nb231_izO] subpd xmm3, [rsp + nb231_ixH1] subpd xmm4, [rsp + nb231_iyH1] subpd xmm5, [rsp + nb231_izH1] subpd xmm6, [rsp + nb231_ixH2] subpd xmm7, [rsp + nb231_iyH2] subpd xmm8, [rsp + nb231_izH2] movapd [rsp + nb231_dxO], xmm0 movapd [rsp + nb231_dyO], xmm1 movapd [rsp + nb231_dzO], xmm2 mulpd xmm0, xmm0 mulpd xmm1, xmm1 mulpd xmm2, xmm2 movapd [rsp + nb231_dxH1], xmm3 movapd [rsp + nb231_dyH1], xmm4 movapd [rsp + nb231_dzH1], xmm5 mulpd xmm3, xmm3 mulpd xmm4, xmm4 mulpd xmm5, xmm5 movapd [rsp + nb231_dxH2], xmm6 movapd [rsp + nb231_dyH2], xmm7 movapd [rsp + nb231_dzH2], xmm8 mulpd xmm6, xmm6 mulpd xmm7, xmm7 mulpd xmm8, xmm8 addpd xmm0, xmm1 addpd xmm0, xmm2 addpd xmm3, xmm4 addpd xmm3, xmm5 addpd xmm6, xmm7 addpd xmm6, xmm8 ;# start doing invsqrt for j atoms cvtpd2ps xmm1, xmm0 cvtpd2ps xmm4, xmm3 cvtpd2ps xmm7, xmm6 rsqrtps xmm1, xmm1 rsqrtps xmm4, xmm4
;# (viewer-extraction footer; not part of the original source file)
;# Keyboard shortcuts: Copy code Ctrl+C · Search code Ctrl+F · Fullscreen F11
;# Increase font size Ctrl+= · Decrease font size Ctrl+- · Show shortcuts ?