📄 sha.cpp
字号:
// sha.cpp - modified by Wei Dai from Steve Reid's public domain sha1.c
// Steve Reid implemented SHA-1. Wei Dai implemented SHA-2.
// Both are in the public domain.

// use "cl /EP /P /DCRYPTOPP_GENERATE_X64_MASM sha.cpp" to generate MASM code

#include "pch.h"

#ifndef CRYPTOPP_IMPORTS
#ifndef CRYPTOPP_GENERATE_X64_MASM

#include "sha.h"
#include "misc.h"
#include "cpu.h"

NAMESPACE_BEGIN(CryptoPP)

// start of Steve Reid's code

// blk0(i): load message word i directly into the schedule array W.
// blk1(i): SHA-1 message-schedule expansion, computed in place over a
// rolling 16-word window (W[i&15]) instead of a full 80-word array.
#define blk0(i) (W[i] = data[i])
#define blk1(i) (W[i&15] = rotlFixed(W[(i+13)&15]^W[(i+8)&15]^W[(i+2)&15]^W[i&15],1))

// Load the five 32-bit SHA-1 initial chaining values into state[0..4].
void SHA1::InitState(HashWordType *state)
{
	state[0] = 0x67452301L;
	state[1] = 0xEFCDAB89L;
	state[2] = 0x98BADCFEL;
	state[3] = 0x10325476L;
	state[4] = 0xC3D2E1F0L;
}

// SHA-1 round boolean functions:
// f1 = Ch (choose), f2 = parity (XOR), f3 = Maj (majority), f4 = parity.
#define f1(x,y,z) (z^(x&(y^z)))
#define f2(x,y,z) (x^y^z)
#define f3(x,y,z) ((x&y)|(z&(x|y)))
#define f4(x,y,z) (x^y^z)

/* (R0+R1), R2, R3, R4 are the different operations used in SHA1 */
// Each macro performs one round: z += f(w,x,y) + W_t + K + rotl(v,5), then
// rotates w by 30.  R0 uses blk0 (direct load, rounds 0-15); R1-R4 use blk1
// (schedule expansion).  The per-group additive constants are the standard
// SHA-1 round constants.
#define R0(v,w,x,y,z,i) z+=f1(w,x,y)+blk0(i)+0x5A827999+rotlFixed(v,5);w=rotlFixed(w,30);
#define R1(v,w,x,y,z,i) z+=f1(w,x,y)+blk1(i)+0x5A827999+rotlFixed(v,5);w=rotlFixed(w,30);
#define R2(v,w,x,y,z,i) z+=f2(w,x,y)+blk1(i)+0x6ED9EBA1+rotlFixed(v,5);w=rotlFixed(w,30);
#define R3(v,w,x,y,z,i) z+=f3(w,x,y)+blk1(i)+0x8F1BBCDC+rotlFixed(v,5);w=rotlFixed(w,30);
#define R4(v,w,x,y,z,i) z+=f4(w,x,y)+blk1(i)+0xCA62C1D6+rotlFixed(v,5);w=rotlFixed(w,30);

// SHA-1 compression function: mixes one 16-word (64-byte) message block
// `data` into the 5-word chaining state.  The 80 rounds are fully unrolled;
// the round macros rotate the roles of a..e instead of shuffling values.
void SHA1::Transform(word32 *state, const word32 *data)
{
	word32 W[16];
	/* Copy context->state[] to working vars */
	word32 a = state[0];
	word32 b = state[1];
	word32 c = state[2];
	word32 d = state[3];
	word32 e = state[4];
	/* 4 rounds of 20 operations each. Loop unrolled. */
	R0(a,b,c,d,e, 0); R0(e,a,b,c,d, 1); R0(d,e,a,b,c, 2); R0(c,d,e,a,b, 3); R0(b,c,d,e,a, 4);
	R0(a,b,c,d,e, 5); R0(e,a,b,c,d, 6); R0(d,e,a,b,c, 7); R0(c,d,e,a,b, 8); R0(b,c,d,e,a, 9);
	R0(a,b,c,d,e,10); R0(e,a,b,c,d,11); R0(d,e,a,b,c,12); R0(c,d,e,a,b,13); R0(b,c,d,e,a,14);
	R0(a,b,c,d,e,15); R1(e,a,b,c,d,16); R1(d,e,a,b,c,17); R1(c,d,e,a,b,18); R1(b,c,d,e,a,19);
	R2(a,b,c,d,e,20); R2(e,a,b,c,d,21); R2(d,e,a,b,c,22); R2(c,d,e,a,b,23); R2(b,c,d,e,a,24);
	R2(a,b,c,d,e,25); R2(e,a,b,c,d,26); R2(d,e,a,b,c,27); R2(c,d,e,a,b,28); R2(b,c,d,e,a,29);
	R2(a,b,c,d,e,30); R2(e,a,b,c,d,31); R2(d,e,a,b,c,32); R2(c,d,e,a,b,33); R2(b,c,d,e,a,34);
	R2(a,b,c,d,e,35); R2(e,a,b,c,d,36); R2(d,e,a,b,c,37); R2(c,d,e,a,b,38); R2(b,c,d,e,a,39);
	R3(a,b,c,d,e,40); R3(e,a,b,c,d,41); R3(d,e,a,b,c,42); R3(c,d,e,a,b,43); R3(b,c,d,e,a,44);
	R3(a,b,c,d,e,45); R3(e,a,b,c,d,46); R3(d,e,a,b,c,47); R3(c,d,e,a,b,48); R3(b,c,d,e,a,49);
	R3(a,b,c,d,e,50); R3(e,a,b,c,d,51); R3(d,e,a,b,c,52); R3(c,d,e,a,b,53); R3(b,c,d,e,a,54);
	R3(a,b,c,d,e,55); R3(e,a,b,c,d,56); R3(d,e,a,b,c,57); R3(c,d,e,a,b,58); R3(b,c,d,e,a,59);
	R4(a,b,c,d,e,60); R4(e,a,b,c,d,61); R4(d,e,a,b,c,62); R4(c,d,e,a,b,63); R4(b,c,d,e,a,64);
	R4(a,b,c,d,e,65); R4(e,a,b,c,d,66); R4(d,e,a,b,c,67); R4(c,d,e,a,b,68); R4(b,c,d,e,a,69);
	R4(a,b,c,d,e,70); R4(e,a,b,c,d,71); R4(d,e,a,b,c,72); R4(c,d,e,a,b,73); R4(b,c,d,e,a,74);
	R4(a,b,c,d,e,75); R4(e,a,b,c,d,76); R4(d,e,a,b,c,77); R4(c,d,e,a,b,78); R4(b,c,d,e,a,79);
	/* Add the working vars back into context.state[] */
	state[0] += a;
	state[1] += b;
	state[2] += c;
	state[3] += d;
	state[4] += e;
}

// end of Steve Reid's code

// *************************************************************

// Load the eight 32-bit SHA-224 initial chaining values.
void SHA224::InitState(HashWordType *state)
{
	static const word32 s[8] = {0xc1059ed8, 0x367cd507, 0x3070dd17, 0xf70e5939, 0xffc00b31, 0x68581511, 0x64f98fa7, 0xbefa4fa4};
	memcpy(state, s, sizeof(s));
}

// Load the eight 32-bit SHA-256 initial chaining values.
void SHA256::InitState(HashWordType *state)
{
	static const word32 s[8] = {0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19};
	memcpy(state, s, sizeof(s));
}

// SHA-256 round constants K[0..63].  When the SSE2 assembly path is built,
// the table is 16-byte aligned so the assembly below can address it with
// aligned accesses.
#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE
CRYPTOPP_ALIGN_DATA(16) extern const word32 SHA256_K[64] CRYPTOPP_SECTION_ALIGN16 = {
#else
extern const word32 SHA256_K[64] = {
#endif
	0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5,
	0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5,
	0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
	0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174,
	0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc,
	0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
	0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7,
	0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967,
	0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
	0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85,
	0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3,
	0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
	0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5,
	0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3,
	0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
	0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
};

#endif // #ifndef CRYPTOPP_GENERATE_X64_MASM

#if defined(CRYPTOPP_X86_ASM_AVAILABLE) || defined(CRYPTOPP_GENERATE_X64_MASM)

#pragma warning(disable: 4731)	// frame pointer register 'ebp' modified by inline assembly code

// SHA-256 compression over one or more 64-byte blocks, in hand-written
// x86/x64 assembly.  `len` is in bytes: the code computes DATA_END =
// data + len and advances `data` by 64 per iteration until it reaches
// DATA_END.  The same body is compiled three ways: GCC extended inline asm,
// MSVC inline asm, or (under CRYPTOPP_GENERATE_X64_MASM) emitted as a
// standalone MASM procedure.  On x86 the low bit of `len` selects between
// the SSE2 and non-SSE2 state-load/store paths (see the `test edi, 1`
// below) — presumably set by the caller; confirm against
// SHA256::HashMultipleBlocks.
static void CRYPTOPP_FASTCALL X86_SHA256_HashBlocks(word32 *state, const word32 *data, size_t len
#if defined(_MSC_VER) && (_MSC_VER == 1200)
	, ...	// VC60 workaround: prevent VC 6 from inlining this function
#endif
	)
{
#if defined(_MSC_VER) && (_MSC_VER == 1200)
	AS2(mov ecx, [state])
	AS2(mov edx, [data])
#endif

	// Stack frame layout (relative to BASE): 8 state words, a 16-word
	// circular message schedule, then 4 saved pointers/counters.
	#define LOCALS_SIZE	8*4 + 16*4 + 4*WORD_SZ
	// H(i)..A(i): the eight working variables live in the frame; the
	// ASM_MOD rotation renames them each round instead of moving values.
	#define H(i)		[BASE+ASM_MOD(1024+7-(i),8)*4]
	#define G(i)		H(i+1)
	#define F(i)		H(i+2)
	#define E(i)		H(i+3)
	#define D(i)		H(i+4)
	#define C(i)		H(i+5)
	#define B(i)		H(i+6)
	#define A(i)		H(i+7)
	// Wt(i): 16-entry circular message-schedule window, same rotation trick.
	#define Wt(i)		BASE+8*4+ASM_MOD(1024+15-(i),16)*4
	#define Wt_2(i)		Wt((i)-2)
	#define Wt_15(i)	Wt((i)-15)
	#define Wt_7(i)		Wt((i)-7)
	#define K_END		[BASE+8*4+16*4+0*WORD_SZ]
	#define STATE_SAVE	[BASE+8*4+16*4+1*WORD_SZ]
	#define DATA_SAVE	[BASE+8*4+16*4+2*WORD_SZ]
	#define DATA_END	[BASE+8*4+16*4+3*WORD_SZ]
	#define Kt(i)		WORD_REG(si)+(i)*4
#if CRYPTOPP_BOOL_X86
	#define BASE		esp+4
#elif defined(__GNUC__)
	#define BASE		r8
#else
	#define BASE		rsp
#endif

// RA0: first 16 rounds — W_t was pre-loaded by SWAP_COPY, so just add
// K_t + W_t + H.  RA1/RB0 are empty; RB1 handles rounds >= 16.
#define RA0(i, edx, edi)		\
	AS2(	add edx, [Kt(i)]	)\
	AS2(	add edx, [Wt(i)]	)\
	AS2(	add edx, H(i)		)\

#define RA1(i, edx, edi)

#define RB0(i, edx, edi)

// RB1: rounds >= 16 — compute W_t = s1(W_t-2) + W_t-7 + s0(W_t-15) + W_t-16
// on the fly (interleaved with the K_t + H additions for scheduling), store
// it back into the circular window, and add it into the T1 accumulator.
#define RB1(i, edx, edi)	\
	AS2(	mov AS_REG_7d, [Wt_2(i)]	)\
	AS2(	mov edi, [Wt_15(i)])\
	AS2(	mov ebx, AS_REG_7d	)\
	AS2(	shr AS_REG_7d, 10		)\
	AS2(	ror ebx, 17		)\
	AS2(	xor AS_REG_7d, ebx	)\
	AS2(	ror ebx, 2		)\
	AS2(	xor ebx, AS_REG_7d	)/* s1(W_t-2) */\
	AS2(	add ebx, [Wt_7(i)])\
	AS2(	mov AS_REG_7d, edi	)\
	AS2(	shr AS_REG_7d, 3		)\
	AS2(	ror edi, 7		)\
	AS2(	add ebx, [Wt(i)])/* s1(W_t-2) + W_t-7 + W_t-16 */\
	AS2(	xor AS_REG_7d, edi	)\
	AS2(	add edx, [Kt(i)])\
	AS2(	ror edi, 11		)\
	AS2(	add edx, H(i)	)\
	AS2(	xor AS_REG_7d, edi	)/* s0(W_t-15) */\
	AS2(	add AS_REG_7d, ebx	)/* W_t = s1(W_t-2) + W_t-7 + s0(W_t-15) + W_t-16 */\
	AS2(	mov [Wt(i)], AS_REG_7d)\
	AS2(	add edx, AS_REG_7d	)\

// One SHA-256 round.  r selects RA0/RB0 (rounds 0-15) or RA1/RB1 (16+).
// Register roles alternate between even and odd rounds (see the call sites).
#define ROUND(i, r, eax, ecx, edi, edx)\
	/* in: edi = E	*/\
	/* unused: eax, ecx, temp: ebx, AS_REG_7d, out: edx = T1 */\
	AS2(	mov edx, F(i)	)\
	AS2(	xor edx, G(i)	)\
	AS2(	and edx, edi	)\
	AS2(	xor edx, G(i)	)/* Ch(E,F,G) = (G^(E&(F^G))) */\
	AS2(	mov AS_REG_7d, edi	)\
	AS2(	ror edi, 6	)\
	AS2(	ror AS_REG_7d, 25	)\
	RA##r(i, edx, edi		)/* H + Wt + Kt + Ch(E,F,G) */\
	AS2(	xor AS_REG_7d, edi	)\
	AS2(	ror edi, 5	)\
	AS2(	xor AS_REG_7d, edi	)/* S1(E) */\
	AS2(	add edx, AS_REG_7d	)/* T1 = S1(E) + Ch(E,F,G) + H + Wt + Kt */\
	RB##r(i, edx, edi		)/* rounds >= 16: schedule W_t and fold it into T1 */\
	/* in: ecx = A, eax = B^C, edx = T1 */\
	/* unused: edx, temp: ebx, AS_REG_7d, out: eax = A, ecx = B^C, edx = E */\
	AS2(	mov ebx, ecx	)\
	AS2(	xor ecx, B(i)	)/* A^B */\
	AS2(	and eax, ecx	)\
	AS2(	xor eax, B(i)	)/* Maj(A,B,C) = B^((A^B)&(B^C)) */\
	AS2(	mov AS_REG_7d, ebx	)\
	AS2(	ror ebx, 2	)\
	AS2(	add eax, edx	)/* T1 + Maj(A,B,C) */\
	AS2(	add edx, D(i)	)\
	AS2(	mov D(i), edx	)\
	AS2(	ror AS_REG_7d, 22	)\
	AS2(	xor AS_REG_7d, ebx	)\
	AS2(	ror ebx, 11	)\
	AS2(	xor AS_REG_7d, ebx	)\
	AS2(	add eax, AS_REG_7d	)/* T1 + S0(A) + Maj(A,B,C) */\
	AS2(	mov H(i), eax	)\

// Byte-swap (bswap) message word i from the input block into the schedule
// window; the index expression interleaves words on x64 (CRYPTOPP_BOOL_X64).
#define SWAP_COPY(i)		\
	AS2(	mov		WORD_REG(bx), [WORD_REG(dx)+i*WORD_SZ])\
	AS1(	bswap	WORD_REG(bx))\
	AS2(	mov		[Wt(i*(1+CRYPTOPP_BOOL_X64)+CRYPTOPP_BOOL_X64)], WORD_REG(bx))

#if defined(__GNUC__)
	#if CRYPTOPP_BOOL_X64
		// x64 GCC: locals live in a separately allocated aligned block,
		// addressed through r8 (see BASE above and the lea into %%r8).
		FixedSizeAlignedSecBlock<byte, LOCALS_SIZE> workspace;
	#endif
	__asm__ __volatile__
	(
	#if CRYPTOPP_BOOL_X64
		"lea %4, %%r8;"
	#endif
	".intel_syntax noprefix;"
#elif defined(CRYPTOPP_GENERATE_X64_MASM)
		ALIGN   8
	X86_SHA256_HashBlocks	PROC FRAME
		rex_push_reg rsi
		push_reg rdi
		push_reg rbx
		push_reg rbp
		alloc_stack(LOCALS_SIZE+8)
		.endprolog
		mov rdi, r8
		// decorated C++ name of CryptoPP::SHA256_K; offset +48*4 matches
		// the GCC path's "S" (SHA256_K+48) input operand
		lea rsi, [?SHA256_K@CryptoPP@@3QBIB + 48*4]
#endif

#if CRYPTOPP_BOOL_X86
	#ifndef __GNUC__
		AS2(	mov		edi, [len])
		AS2(	lea		WORD_REG(si), [SHA256_K+48*4])
	#endif
	#if !defined(_MSC_VER) || (_MSC_VER < 1400)
		AS_PUSH_IF86(bx)
	#endif

	AS_PUSH_IF86(bp)
	AS2(	mov		ebx, esp)
	AS2(	and		esp, -16)	// align the local frame to 16 bytes for movdqa
	AS2(	sub		WORD_REG(sp), LOCALS_SIZE)
	AS_PUSH_IF86(bx)
#endif
	AS2(	mov		STATE_SAVE, WORD_REG(cx))
	AS2(	mov		DATA_SAVE, WORD_REG(dx))
	AS2(	add		WORD_REG(di), WORD_REG(dx))
	AS2(	mov		DATA_END, WORD_REG(di))
	AS2(	mov		K_END, WORD_REG(si))

#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE
#if CRYPTOPP_BOOL_X86
	// low bit of len selects the non-SSE2 path
	AS2(	test	edi, 1)
	ASJ(	jnz,	2, f)
#endif
	// SSE2: load the 8-word state with two aligned 16-byte moves
	AS2(	movdqa	xmm0, XMMWORD_PTR [WORD_REG(cx)+0*16])
	AS2(	movdqa	xmm1, XMMWORD_PTR [WORD_REG(cx)+1*16])
#endif

#if CRYPTOPP_BOOL_X86
#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE
	ASJ(	jmp,	0, f)
#endif
	ASL(2)	// non-SSE2: copy the 8 state words with rep movsd
	AS2(	mov		esi, ecx)
	AS2(	lea		edi, A(0))
	AS2(	mov		ecx, 8)
	AS1(	rep movsd)
	AS2(	mov		esi, K_END)
	ASJ(	jmp,	3, f)
#endif

#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE
	ASL(0)	// SSE2 per-block entry: spill state into the frame
	AS2(	movdqa	E(0), xmm1)
	AS2(	movdqa	A(0), xmm0)
#endif
#if CRYPTOPP_BOOL_X86
	ASL(3)
#endif
	AS2(	sub		WORD_REG(si), 48*4)	// rewind K pointer to SHA256_K[0]
	// byte-swap the 16 message words into the schedule window
	// (x64 copies two words per SWAP_COPY, hence only 8 invocations)
	SWAP_COPY(0)	SWAP_COPY(1)	SWAP_COPY(2)	SWAP_COPY(3)
	SWAP_COPY(4)	SWAP_COPY(5)	SWAP_COPY(6)	SWAP_COPY(7)
#if CRYPTOPP_BOOL_X86
	SWAP_COPY(8)	SWAP_COPY(9)	SWAP_COPY(10)	SWAP_COPY(11)
	SWAP_COPY(12)	SWAP_COPY(13)	SWAP_COPY(14)	SWAP_COPY(15)
#endif
	AS2(	mov		edi, E(0))	// E
	AS2(	mov		eax, B(0))	// B
	AS2(	xor		eax, C(0))	// B^C
	AS2(	mov		ecx, A(0))	// A

	// rounds 0-15 (r=0: W_t already loaded)
	ROUND(0, 0, eax, ecx, edi, edx)
	ROUND(1, 0, ecx, eax, edx, edi)
	ROUND(2, 0, eax, ecx, edi, edx)
	ROUND(3, 0, ecx, eax, edx, edi)
	ROUND(4, 0, eax, ecx, edi, edx)
	ROUND(5, 0, ecx, eax, edx, edi)
	ROUND(6, 0, eax, ecx, edi, edx)
	ROUND(7, 0, ecx, eax, edx, edi)
	ROUND(8, 0, eax, ecx, edi, edx)
	ROUND(9, 0, ecx, eax, edx, edi)
	ROUND(10, 0, eax, ecx, edi, edx)
	ROUND(11, 0, ecx, eax, edx, edi)
	ROUND(12, 0, eax, ecx, edi, edx)
	ROUND(13, 0, ecx, eax, edx, edi)
	ROUND(14, 0, eax, ecx, edi, edx)
	ROUND(15, 0, ecx, eax, edx, edi)
	ASL(1)	// rounds 16-63: loop 16 rounds at a time, advancing the K pointer
	AS2(add WORD_REG(si), 4*16)
	ROUND(0, 1, eax, ecx, edi, edx)
	ROUND(1, 1, ecx, eax, edx, edi)
	ROUND(2, 1, eax, ecx, edi, edx)
	ROUND(3, 1, ecx, eax, edx, edi)
	ROUND(4, 1, eax, ecx, edi, edx)
	ROUND(5, 1, ecx, eax, edx, edi)
	ROUND(6, 1, eax, ecx, edi, edx)
	ROUND(7, 1, ecx, eax, edx, edi)
	ROUND(8, 1, eax, ecx, edi, edx)
	ROUND(9, 1, ecx, eax, edx, edi)
	ROUND(10, 1, eax, ecx, edi, edx)
	ROUND(11, 1, ecx, eax, edx, edi)
	ROUND(12, 1, eax, ecx, edi, edx)
	ROUND(13, 1, ecx, eax, edx, edi)
	ROUND(14, 1, eax, ecx, edi, edx)
	ROUND(15, 1, ecx, eax, edx, edi)
	AS2(	cmp		WORD_REG(si), K_END)
	ASJ(	jne,	1, b)

	// advance to the next 64-byte block
	AS2(	mov		WORD_REG(dx), DATA_SAVE)
	AS2(	add		WORD_REG(dx), 64)
	AS2(	mov		AS_REG_7, STATE_SAVE)
	AS2(	mov		DATA_SAVE, WORD_REG(dx))

#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE
#if CRYPTOPP_BOOL_X86
	AS2(	test	DWORD PTR DATA_END, 1)
	ASJ(	jnz,	4, f)
#endif
	// SSE2: add the working variables back into state with paddd
	AS2(	movdqa	xmm1, XMMWORD_PTR [AS_REG_7+1*16])
	AS2(	movdqa	xmm0, XMMWORD_PTR [AS_REG_7+0*16])
	AS2(	paddd	xmm1, E(0))
	AS2(	paddd	xmm0, A(0))
	AS2(	movdqa	[AS_REG_7+1*16], xmm1)
	AS2(	movdqa	[AS_REG_7+0*16], xmm0)
	AS2(	cmp		WORD_REG(dx), DATA_END)
	ASJ(	jl,		0, b)
#endif

#if CRYPTOPP_BOOL_X86
#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE
	ASJ(	jmp,	5, f)
	ASL(4)	// non-SSE2: add the working variables back word by word
#endif
	AS2(	add		[AS_REG_7+0*4], ecx)	// A
	AS2(	add		[AS_REG_7+4*4], edi)	// E
	AS2(	mov		eax, B(0))
	AS2(	mov		ebx, C(0))
	AS2(	mov		ecx, D(0))
	AS2(	add		[AS_REG_7+1*4], eax)
	AS2(	add		[AS_REG_7+2*4], ebx)
	AS2(	add		[AS_REG_7+3*4], ecx)
	AS2(	mov		eax, F(0))
	AS2(	mov		ebx, G(0))
	AS2(	mov		ecx, H(0))
	AS2(	add		[AS_REG_7+5*4], eax)
	AS2(	add		[AS_REG_7+6*4], ebx)
	AS2(	add		[AS_REG_7+7*4], ecx)
	AS2(	mov		ecx, AS_REG_7d)
	AS2(	cmp		WORD_REG(dx), DATA_END)
	ASJ(	jl,		2, b)
#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE
	ASL(5)
#endif
#endif

	// epilogue: restore stack and callee-saved registers
	AS_POP_IF86(sp)
	AS_POP_IF86(bp)
	#if !defined(_MSC_VER) || (_MSC_VER < 1400)
		AS_POP_IF86(bx)
	#endif

#ifdef CRYPTOPP_GENERATE_X64_MASM
	add		rsp, LOCALS_SIZE+8
	pop		rbp
	pop		rbx
	pop		rdi
	pop		rsi
	ret
	X86_SHA256_HashBlocks ENDP
#endif

#ifdef __GNUC__
	".att_syntax prefix;"
	:
	: "c" (state), "d" (data), "S" (SHA256_K+48), "D" (len)
	#if CRYPTOPP_BOOL_X64
		, "m" (workspace[0])
	#endif
	: "memory", "cc", "%eax"
	#if CRYPTOPP_BOOL_X64
		, "%rbx", "%r8"
	#endif
	);
#endif
}

#endif	// #if defined(CRYPTOPP_X86_ASM_AVAILABLE) || defined(CRYPTOPP_GENERATE_X64_MASM)

#ifndef CRYPTOPP_GENERATE_X64_MASM

#ifdef CRYPTOPP_X64_MASM_AVAILABLE
// When the MASM-generated object file supplies the routine, declare it here.
extern "C" {
void CRYPTOPP_FASTCALL X86_SHA256_HashBlocks(word32 *state, const word32 *data, size_t len);
}
#endif

#if defined(CRYPTOPP_X86_ASM_AVAILABLE) || defined(CRYPTOPP_X64_MASM_AVAILABLE)

// NOTE(review): definition continues beyond this chunk of the file.
size_t SHA256::HashMultipleBlocks(const word32 *input, size_t length)
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -