integer.cpp
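
// The brace below opens the body of the 4-word comb multiply; its
// signature lies above this excerpt (by its shape -- six stored digits
// plus MulLastDiagonal(4) -- it is the 4x4 counterpart of Multiply8
// below). The Mul* macros expand to Pentium assembly: MulAccumulate(i,j)
// adds the double-word product X[i]*Y[j] into a running accumulator,
// MulStoreDigit(k) stores the accumulator's low word to Z[k], and
// MulShiftCarry shifts the accumulator down one word. Each group between
// MulShiftCarry markers is therefore one diagonal i + j = k of the
// schoolbook product.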
{
    MulPrologue
    // now: [esp] = Z, esi = X, ecx = Y
    MulStartup
    MulAccumulate(0,0)
    MulStoreDigit(0)
    MulShiftCarry
    MulAccumulate(1,0)
    MulAccumulate(0,1)
    MulStoreDigit(1)
    MulShiftCarry
    MulAccumulate(2,0)
    MulAccumulate(1,1)
    MulAccumulate(0,2)
    MulStoreDigit(2)
    MulShiftCarry
    MulAccumulate(3,0)
    MulAccumulate(2,1)
    MulAccumulate(1,2)
    MulAccumulate(0,3)
    MulStoreDigit(3)
    MulShiftCarry
    MulAccumulate(3,1)
    MulAccumulate(2,2)
    MulAccumulate(1,3)
    MulStoreDigit(4)
    MulShiftCarry
    MulAccumulate(3,2)
    MulAccumulate(2,3)
    MulStoreDigit(5)
    MulShiftCarry
    MulLastDiagonal(4)
    MulEpilogue
}
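
// Multiply8: the same comb pattern for 8-word operands. Diagonals
// k = 0..13 are accumulated and stored explicitly; MulLastDiagonal(8)
// handles the final diagonal and the top carry (Z[14], Z[15]).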
TAOCRYPT_NAKED void PentiumOptimized::Multiply8(word* Z, const word* X,
                                                const word* Y)
{
    MulPrologue
    // now: [esp] = Z, esi = X, ecx = Y
    MulStartup
    MulAccumulate(0,0)
    MulStoreDigit(0)
    MulShiftCarry
    MulAccumulate(1,0)
    MulAccumulate(0,1)
    MulStoreDigit(1)
    MulShiftCarry
    MulAccumulate(2,0)
    MulAccumulate(1,1)
    MulAccumulate(0,2)
    MulStoreDigit(2)
    MulShiftCarry
    MulAccumulate(3,0)
    MulAccumulate(2,1)
    MulAccumulate(1,2)
    MulAccumulate(0,3)
    MulStoreDigit(3)
    MulShiftCarry
    MulAccumulate(4,0)
    MulAccumulate(3,1)
    MulAccumulate(2,2)
    MulAccumulate(1,3)
    MulAccumulate(0,4)
    MulStoreDigit(4)
    MulShiftCarry
    MulAccumulate(5,0)
    MulAccumulate(4,1)
    MulAccumulate(3,2)
    MulAccumulate(2,3)
    MulAccumulate(1,4)
    MulAccumulate(0,5)
    MulStoreDigit(5)
    MulShiftCarry
    MulAccumulate(6,0)
    MulAccumulate(5,1)
    MulAccumulate(4,2)
    MulAccumulate(3,3)
    MulAccumulate(2,4)
    MulAccumulate(1,5)
    MulAccumulate(0,6)
    MulStoreDigit(6)
    MulShiftCarry
    MulAccumulate(7,0)
    MulAccumulate(6,1)
    MulAccumulate(5,2)
    MulAccumulate(4,3)
    MulAccumulate(3,4)
    MulAccumulate(2,5)
    MulAccumulate(1,6)
    MulAccumulate(0,7)
    MulStoreDigit(7)
    MulShiftCarry
    MulAccumulate(7,1)
    MulAccumulate(6,2)
    MulAccumulate(5,3)
    MulAccumulate(4,4)
    MulAccumulate(3,5)
    MulAccumulate(2,6)
    MulAccumulate(1,7)
    MulStoreDigit(8)
    MulShiftCarry
    MulAccumulate(7,2)
    MulAccumulate(6,3)
    MulAccumulate(5,4)
    MulAccumulate(4,5)
    MulAccumulate(3,6)
    MulAccumulate(2,7)
    MulStoreDigit(9)
    MulShiftCarry
    MulAccumulate(7,3)
    MulAccumulate(6,4)
    MulAccumulate(5,5)
    MulAccumulate(4,6)
    MulAccumulate(3,7)
    MulStoreDigit(10)
    MulShiftCarry
    MulAccumulate(7,4)
    MulAccumulate(6,5)
    MulAccumulate(5,6)
    MulAccumulate(4,7)
    MulStoreDigit(11)
    MulShiftCarry
    MulAccumulate(7,5)
    MulAccumulate(6,6)
    MulAccumulate(5,7)
    MulStoreDigit(12)
    MulShiftCarry
    MulAccumulate(7,6)
    MulAccumulate(6,7)
    MulStoreDigit(13)
    MulShiftCarry
    MulLastDiagonal(8)
    MulEpilogue
}
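
// Multiply8Bottom computes only the low 8 words of the 16-word product
// (all that is needed when the high half is discarded, e.g. in
// Montgomery-style reduction). The first seven diagonals are full
// accumulations; the eighth uses MulAccumulateBottom, which by its name
// needs only the low word of each product, and no carry is propagated
// past Z[7].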
TAOCRYPT_NAKED void PentiumOptimized::Multiply8Bottom(word* Z, const word* X,
                                                      const word* Y)
{
    MulPrologue
    // now: [esp] = Z, esi = X, ecx = Y
    MulStartup
    MulAccumulate(0,0)
    MulStoreDigit(0)
    MulShiftCarry
    MulAccumulate(1,0)
    MulAccumulate(0,1)
    MulStoreDigit(1)
    MulShiftCarry
    MulAccumulate(2,0)
    MulAccumulate(1,1)
    MulAccumulate(0,2)
    MulStoreDigit(2)
    MulShiftCarry
    MulAccumulate(3,0)
    MulAccumulate(2,1)
    MulAccumulate(1,2)
    MulAccumulate(0,3)
    MulStoreDigit(3)
    MulShiftCarry
    MulAccumulate(4,0)
    MulAccumulate(3,1)
    MulAccumulate(2,2)
    MulAccumulate(1,3)
    MulAccumulate(0,4)
    MulStoreDigit(4)
    MulShiftCarry
    MulAccumulate(5,0)
    MulAccumulate(4,1)
    MulAccumulate(3,2)
    MulAccumulate(2,3)
    MulAccumulate(1,4)
    MulAccumulate(0,5)
    MulStoreDigit(5)
    MulShiftCarry
    MulAccumulate(6,0)
    MulAccumulate(5,1)
    MulAccumulate(4,2)
    MulAccumulate(3,3)
    MulAccumulate(2,4)
    MulAccumulate(1,5)
    MulAccumulate(0,6)
    MulStoreDigit(6)
    MulShiftCarry
    MulAccumulateBottom(7,0)
    MulAccumulateBottom(6,1)
    MulAccumulateBottom(5,2)
    MulAccumulateBottom(4,3)
    MulAccumulateBottom(3,4)
    MulAccumulateBottom(2,5)
    MulAccumulateBottom(1,6)
    MulAccumulateBottom(0,7)
    MulStoreDigit(7)
    MulEpilogue
}
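
/* For reference, the comb pattern above written out in portable C++.
   This is an illustrative sketch, not part of the original source:
   "word" is the library's 32-bit limb type and the helper name is
   hypothetical.

static void CombMultiplyRef(word* Z, const word* X, const word* Y,
                            unsigned n)   // Z[0..2n-1] = X * Y
{
    typedef unsigned long long dword;     // 64-bit accumulator
    dword acc = 0;                        // running column sum
    for (unsigned k = 0; k < 2*n - 1; k++) {
        // Diagonal k: every product X[i]*Y[j] with i + j == k.
        unsigned lo = k < n ? 0 : k - n + 1;
        unsigned hi = k < n ? k : n - 1;
        dword carry = 0;                  // overflow above 64 bits
        for (unsigned i = lo; i <= hi; i++) {
            dword p = (dword)X[i] * Y[k - i];    // MulAccumulate(i, k-i)
            acc += p;
            if (acc < p) carry++;         // detect 64-bit wraparound
        }
        Z[k] = (word)acc;                 // MulStoreDigit(k)
        acc = (acc >> 32) | (carry << 32);       // MulShiftCarry
    }
    Z[2*n - 1] = (word)acc;               // top word (MulLastDiagonal)
}
*/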
#undef AS1
#undef AS2
#else // not x86 - no processor specific code at this layer
typedef Portable LowLevel;
#endif
#ifdef SSE2_INTRINSICS_AVAILABLE
#ifdef __GNUC__
#define TAOCRYPT_FASTCALL
#else
#define TAOCRYPT_FASTCALL __fastcall
#endif
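
// P4_Mul multiplies two 4-word (128-bit) operands entirely in SSE2.
// _mm_mul_epu32 multiplies the even 32-bit lanes (0 and 2) of its inputs
// into two 64-bit products, so a handful of shuffles suffices to form
// all sixteen cross products a_i*b_j. Products sharing a diagonal i + j
// are split into zero-extended 32-bit halves (unpacked against zero) so
// they can be added without overflow; the seven diagonal groups land in
// C[0..6] for the callers below to carry-propagate.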
static void TAOCRYPT_FASTCALL P4_Mul(__m128i *C, const __m128i *A,
                                     const __m128i *B)
{
    __m128i a3210 = _mm_load_si128(A);
    __m128i b3210 = _mm_load_si128(B);
    __m128i sum;
    __m128i z = _mm_setzero_si128();
    __m128i a2b2_a0b0 = _mm_mul_epu32(a3210, b3210);
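    // pmuludq multiplies the even lanes: a2b2_a0b0 = { a0*b0, a2*b2 }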
    C[0] = a2b2_a0b0;
    __m128i a3120 = _mm_shuffle_epi32(a3210, _MM_SHUFFLE(3, 1, 2, 0));
    __m128i b3021 = _mm_shuffle_epi32(b3210, _MM_SHUFFLE(3, 0, 2, 1));
    __m128i a1b0_a0b1 = _mm_mul_epu32(a3120, b3021);
    __m128i a1b0 = _mm_unpackhi_epi32(a1b0_a0b1, z);
    __m128i a0b1 = _mm_unpacklo_epi32(a1b0_a0b1, z);
    C[1] = _mm_add_epi64(a1b0, a0b1);
    __m128i a31 = _mm_srli_epi64(a3210, 32);
    __m128i b31 = _mm_srli_epi64(b3210, 32);
    __m128i a3b3_a1b1 = _mm_mul_epu32(a31, b31);
    C[6] = a3b3_a1b1;
    __m128i a1b1 = _mm_unpacklo_epi32(a3b3_a1b1, z);
    __m128i b3012 = _mm_shuffle_epi32(b3210, _MM_SHUFFLE(3, 0, 1, 2));
    __m128i a2b0_a0b2 = _mm_mul_epu32(a3210, b3012);
    __m128i a0b2 = _mm_unpacklo_epi32(a2b0_a0b2, z);
    __m128i a2b0 = _mm_unpackhi_epi32(a2b0_a0b2, z);
    sum = _mm_add_epi64(a1b1, a0b2);
    C[2] = _mm_add_epi64(sum, a2b0);
    __m128i a2301 = _mm_shuffle_epi32(a3210, _MM_SHUFFLE(2, 3, 0, 1));
    __m128i b2103 = _mm_shuffle_epi32(b3210, _MM_SHUFFLE(2, 1, 0, 3));
    __m128i a3b0_a1b2 = _mm_mul_epu32(a2301, b3012);
    __m128i a2b1_a0b3 = _mm_mul_epu32(a3210, b2103);
    __m128i a3b0 = _mm_unpackhi_epi32(a3b0_a1b2, z);
    __m128i a1b2 = _mm_unpacklo_epi32(a3b0_a1b2, z);
    __m128i a2b1 = _mm_unpackhi_epi32(a2b1_a0b3, z);
    __m128i a0b3 = _mm_unpacklo_epi32(a2b1_a0b3, z);
    __m128i sum1 = _mm_add_epi64(a3b0, a1b2);
    sum = _mm_add_epi64(a2b1, a0b3);
    C[3] = _mm_add_epi64(sum, sum1);
    __m128i a3b1_a1b3 = _mm_mul_epu32(a2301, b2103);
    __m128i a2b2 = _mm_unpackhi_epi32(a2b2_a0b0, z);
    __m128i a3b1 = _mm_unpackhi_epi32(a3b1_a1b3, z);
    __m128i a1b3 = _mm_unpacklo_epi32(a3b1_a1b3, z);
    sum = _mm_add_epi64(a2b2, a3b1);
    C[4] = _mm_add_epi64(sum, a1b3);
    __m128i a1302 = _mm_shuffle_epi32(a3210, _MM_SHUFFLE(1, 3, 0, 2));
    __m128i b1203 = _mm_shuffle_epi32(b3210, _MM_SHUFFLE(1, 2, 0, 3));
    __m128i a3b2_a2b3 = _mm_mul_epu32(a1302, b1203);
    __m128i a3b2 = _mm_unpackhi_epi32(a3b2_a2b3, z);
    __m128i a2b3 = _mm_unpacklo_epi32(a3b2_a2b3, z);
    C[5] = _mm_add_epi64(a3b2, a2b3);
}
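
// Multiply4 carry-propagates P4_Mul's diagonal sums using MMX 64-bit
// arithmetic: w[] views temp[] as 32-bit words, mw[] as 64-bit halves.
// For each output word, the carry left over from the previous column is
// added to the current diagonal's half-sums, the low 32 bits are stored,
// and _mm_srli_si64 extracts the next carry. _mm_empty() clears the MMX
// state so the x87 FPU is usable again afterwards.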
void P4Optimized::Multiply4(word *C, const word *A, const word *B)
{
    __m128i temp[7];
    const word *w = (word *)temp;
    const __m64 *mw = (__m64 *)w;
    P4_Mul(temp, (__m128i *)A, (__m128i *)B);
    C[0] = w[0];
    __m64 s1, s2;
    __m64 w1 = _mm_cvtsi32_si64(w[1]);
    __m64 w4 = mw[2];
    __m64 w6 = mw[3];
    __m64 w8 = mw[4];
    __m64 w10 = mw[5];
    __m64 w12 = mw[6];
    __m64 w14 = mw[7];
    __m64 w16 = mw[8];
    __m64 w18 = mw[9];
    __m64 w20 = mw[10];
    __m64 w22 = mw[11];
    __m64 w26 = _mm_cvtsi32_si64(w[26]);
    s1 = _mm_add_si64(w1, w4);
    C[1] = _mm_cvtsi64_si32(s1);
    s1 = _mm_srli_si64(s1, 32);
    s2 = _mm_add_si64(w6, w8);
    s1 = _mm_add_si64(s1, s2);
    C[2] = _mm_cvtsi64_si32(s1);
    s1 = _mm_srli_si64(s1, 32);
    s2 = _mm_add_si64(w10, w12);
    s1 = _mm_add_si64(s1, s2);
    C[3] = _mm_cvtsi64_si32(s1);
    s1 = _mm_srli_si64(s1, 32);
    s2 = _mm_add_si64(w14, w16);
    s1 = _mm_add_si64(s1, s2);
    C[4] = _mm_cvtsi64_si32(s1);
    s1 = _mm_srli_si64(s1, 32);
    s2 = _mm_add_si64(w18, w20);
    s1 = _mm_add_si64(s1, s2);
    C[5] = _mm_cvtsi64_si32(s1);
    s1 = _mm_srli_si64(s1, 32);
    s2 = _mm_add_si64(w22, w26);
    s1 = _mm_add_si64(s1, s2);
    C[6] = _mm_cvtsi64_si32(s1);
    s1 = _mm_srli_si64(s1, 32);
    C[7] = _mm_cvtsi64_si32(s1) + w[27];
    _mm_empty();
}
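
// Multiply8 builds the 8x8 product from four 4x4 block products:
//   temp[0..6]   = A_lo * B_lo   (aliased as w/mw)
//   temp[7..13]  = A_hi * B_lo   (x/mx)
//   temp[14..20] = A_lo * B_hi   (y/my)
//   temp[21..27] = A_hi * B_hi   (z/mz)
// The x and y blocks enter the column sums four output words up and the
// z block eight words up, exactly as when multiplying two 2-digit
// numbers in base 2^128; the carry ripple is the same as in Multiply4.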
void P4Optimized::Multiply8(word *C, const word *A, const word *B)
{
    __m128i temp[28];
    const word *w = (word *)temp;
    const __m64 *mw = (__m64 *)w;
    const word *x = (word *)temp+7*4;
    const __m64 *mx = (__m64 *)x;
    const word *y = (word *)temp+7*4*2;
    const __m64 *my = (__m64 *)y;
    const word *z = (word *)temp+7*4*3;
    const __m64 *mz = (__m64 *)z;
    P4_Mul(temp, (__m128i *)A, (__m128i *)B);
    P4_Mul(temp+7, (__m128i *)A+1, (__m128i *)B);
    P4_Mul(temp+14, (__m128i *)A, (__m128i *)B+1);
    P4_Mul(temp+21, (__m128i *)A+1, (__m128i *)B+1);
    C[0] = w[0];
    __m64 s1, s2, s3, s4;
    __m64 w1 = _mm_cvtsi32_si64(w[1]);
    __m64 w4 = mw[2];
    __m64 w6 = mw[3];
    __m64 w8 = mw[4];
    __m64 w10 = mw[5];
    __m64 w12 = mw[6];
    __m64 w14 = mw[7];
    __m64 w16 = mw[8];
    __m64 w18 = mw[9];
    __m64 w20 = mw[10];
    __m64 w22 = mw[11];
    __m64 w26 = _mm_cvtsi32_si64(w[26]);
    __m64 w27 = _mm_cvtsi32_si64(w[27]);
    __m64 x0 = _mm_cvtsi32_si64(x[0]);
    __m64 x1 = _mm_cvtsi32_si64(x[1]);
    __m64 x4 = mx[2];
    __m64 x6 = mx[3];
    __m64 x8 = mx[4];
    __m64 x10 = mx[5];
    __m64 x12 = mx[6];
    __m64 x14 = mx[7];
    __m64 x16 = mx[8];
    __m64 x18 = mx[9];
    __m64 x20 = mx[10];
    __m64 x22 = mx[11];
    __m64 x26 = _mm_cvtsi32_si64(x[26]);
    __m64 x27 = _mm_cvtsi32_si64(x[27]);
    __m64 y0 = _mm_cvtsi32_si64(y[0]);
    __m64 y1 = _mm_cvtsi32_si64(y[1]);
    __m64 y4 = my[2];
    __m64 y6 = my[3];
    __m64 y8 = my[4];
    __m64 y10 = my[5];
    __m64 y12 = my[6];
    __m64 y14 = my[7];
    __m64 y16 = my[8];
    __m64 y18 = my[9];
    __m64 y20 = my[10];
    __m64 y22 = my[11];
    __m64 y26 = _mm_cvtsi32_si64(y[26]);
    __m64 y27 = _mm_cvtsi32_si64(y[27]);
    __m64 z0 = _mm_cvtsi32_si64(z[0]);
    __m64 z1 = _mm_cvtsi32_si64(z[1]);
    __m64 z4 = mz[2];
    __m64 z6 = mz[3];
    __m64 z8 = mz[4];
    __m64 z10 = mz[5];
    __m64 z12 = mz[6];
    __m64 z14 = mz[7];
    __m64 z16 = mz[8];
    __m64 z18 = mz[9];
    __m64 z20 = mz[10];
    __m64 z22 = mz[11];
    __m64 z26 = _mm_cvtsi32_si64(z[26]);
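    // Column-by-column ripple: each C[k] sums the previous carry with the
    // half-sums of diagonal k from every block overlapping output word k.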
    s1 = _mm_add_si64(w1, w4);
    C[1] = _mm_cvtsi64_si32(s1);
    s1 = _mm_srli_si64(s1, 32);
    s2 = _mm_add_si64(w6, w8);
    s1 = _mm_add_si64(s1, s2);
    C[2] = _mm_cvtsi64_si32(s1);
    s1 = _mm_srli_si64(s1, 32);
    s2 = _mm_add_si64(w10, w12);
    s1 = _mm_add_si64(s1, s2);
    C[3] = _mm_cvtsi64_si32(s1);
    s1 = _mm_srli_si64(s1, 32);
    s3 = _mm_add_si64(x0, y0);
    s2 = _mm_add_si64(w14, w16);
    s1 = _mm_add_si64(s1, s3);
    s1 = _mm_add_si64(s1, s2);
    C[4] = _mm_cvtsi64_si32(s1);
    s1 = _mm_srli_si64(s1, 32);
    s3 = _mm_add_si64(x1, y1);
    s4 = _mm_add_si64(x4, y4);
    s1 = _mm_add_si64(s1, w18);
    s3 = _mm_add_si64(s3, s4);
    s1 = _mm_add_si64(s1, w20);
    s1 = _mm_add_si64(s1, s3);
    C[5] = _mm_cvtsi64_si32(s1);
    s1 = _mm_srli_si64(s1, 32);
    s3 = _mm_add_si64(x6, y6);
    s4 = _mm_add_si64(x8, y8);
    s1 = _mm_add_si64(s1, w22);
    s3 = _mm_add_si64(s3, s4);
    s1 = _mm_add_si64(s1, w26);
    s1 = _mm_add_si64(s1, s3);
    C[6] = _mm_cvtsi64_si32(s1);
    s1 = _mm_srli_si64(s1, 32);
    s3 = _mm_add_si64(x10, y10);
    s4 = _mm_add_si64(x12, y12);
    s1 = _mm_add_si64(s1, w27);
    s3 = _mm_add_si64(s3, s4);
    s1 = _mm_add_si64(s1, s3);
    C[7] = _mm_cvtsi64_si32(s1);
    s1 = _mm_srli_si64(s1, 32);
    s3 = _mm_add_si64(x14, y14);
    s4 = _mm_add_si64(x16, y16);
    s1 = _mm_add_si64(s1, z0);
    s3 = _mm_add_si64(s3, s4);
    s1 = _mm_add_si64(s1, s3);
    C[8] = _mm_cvtsi64_si32(s1);
    s1 = _mm_srli_si64(s1, 32);
    s3 = _mm_add_si64(x18, y18);
    s4 = _mm_add_si64(x20, y20);
    s1 = _mm_add_si64(s1, z1);
    s3 = _mm_add_si64(s3, s4);