📄 ripemd.cpp
Subround(I, c2, d2, e2, a2, b2, buffer_[10], 11, k6);
Subround(I, b2, c2, d2, e2, a2, buffer_[14], 7, k6);
Subround(I, a2, b2, c2, d2, e2, buffer_[15], 7, k6);
Subround(I, e2, a2, b2, c2, d2, buffer_[ 8], 12, k6);
Subround(I, d2, e2, a2, b2, c2, buffer_[12], 7, k6);
Subround(I, c2, d2, e2, a2, b2, buffer_[ 4], 6, k6);
Subround(I, b2, c2, d2, e2, a2, buffer_[ 9], 15, k6);
Subround(I, a2, b2, c2, d2, e2, buffer_[ 1], 13, k6);
Subround(I, e2, a2, b2, c2, d2, buffer_[ 2], 11, k6);
Subround(H, d2, e2, a2, b2, c2, buffer_[15], 9, k7);
Subround(H, c2, d2, e2, a2, b2, buffer_[ 5], 7, k7);
Subround(H, b2, c2, d2, e2, a2, buffer_[ 1], 15, k7);
Subround(H, a2, b2, c2, d2, e2, buffer_[ 3], 11, k7);
Subround(H, e2, a2, b2, c2, d2, buffer_[ 7], 8, k7);
Subround(H, d2, e2, a2, b2, c2, buffer_[14], 6, k7);
Subround(H, c2, d2, e2, a2, b2, buffer_[ 6], 6, k7);
Subround(H, b2, c2, d2, e2, a2, buffer_[ 9], 14, k7);
Subround(H, a2, b2, c2, d2, e2, buffer_[11], 12, k7);
Subround(H, e2, a2, b2, c2, d2, buffer_[ 8], 13, k7);
Subround(H, d2, e2, a2, b2, c2, buffer_[12], 5, k7);
Subround(H, c2, d2, e2, a2, b2, buffer_[ 2], 14, k7);
Subround(H, b2, c2, d2, e2, a2, buffer_[10], 13, k7);
Subround(H, a2, b2, c2, d2, e2, buffer_[ 0], 13, k7);
Subround(H, e2, a2, b2, c2, d2, buffer_[ 4], 7, k7);
Subround(H, d2, e2, a2, b2, c2, buffer_[13], 5, k7);
Subround(G, c2, d2, e2, a2, b2, buffer_[ 8], 15, k8);
Subround(G, b2, c2, d2, e2, a2, buffer_[ 6], 5, k8);
Subround(G, a2, b2, c2, d2, e2, buffer_[ 4], 8, k8);
Subround(G, e2, a2, b2, c2, d2, buffer_[ 1], 11, k8);
Subround(G, d2, e2, a2, b2, c2, buffer_[ 3], 14, k8);
Subround(G, c2, d2, e2, a2, b2, buffer_[11], 14, k8);
Subround(G, b2, c2, d2, e2, a2, buffer_[15], 6, k8);
Subround(G, a2, b2, c2, d2, e2, buffer_[ 0], 14, k8);
Subround(G, e2, a2, b2, c2, d2, buffer_[ 5], 6, k8);
Subround(G, d2, e2, a2, b2, c2, buffer_[12], 9, k8);
Subround(G, c2, d2, e2, a2, b2, buffer_[ 2], 12, k8);
Subround(G, b2, c2, d2, e2, a2, buffer_[13], 9, k8);
Subround(G, a2, b2, c2, d2, e2, buffer_[ 9], 12, k8);
Subround(G, e2, a2, b2, c2, d2, buffer_[ 7], 5, k8);
Subround(G, d2, e2, a2, b2, c2, buffer_[10], 15, k8);
Subround(G, c2, d2, e2, a2, b2, buffer_[14], 8, k8);
Subround(F, b2, c2, d2, e2, a2, buffer_[12], 8, k9);
Subround(F, a2, b2, c2, d2, e2, buffer_[15], 5, k9);
Subround(F, e2, a2, b2, c2, d2, buffer_[10], 12, k9);
Subround(F, d2, e2, a2, b2, c2, buffer_[ 4], 9, k9);
Subround(F, c2, d2, e2, a2, b2, buffer_[ 1], 12, k9);
Subround(F, b2, c2, d2, e2, a2, buffer_[ 5], 5, k9);
Subround(F, a2, b2, c2, d2, e2, buffer_[ 8], 14, k9);
Subround(F, e2, a2, b2, c2, d2, buffer_[ 7], 6, k9);
Subround(F, d2, e2, a2, b2, c2, buffer_[ 6], 8, k9);
Subround(F, c2, d2, e2, a2, b2, buffer_[ 2], 13, k9);
Subround(F, b2, c2, d2, e2, a2, buffer_[13], 6, k9);
Subround(F, a2, b2, c2, d2, e2, buffer_[14], 5, k9);
Subround(F, e2, a2, b2, c2, d2, buffer_[ 0], 15, k9);
Subround(F, d2, e2, a2, b2, c2, buffer_[ 3], 13, k9);
Subround(F, c2, d2, e2, a2, b2, buffer_[ 9], 11, k9);
Subround(F, b2, c2, d2, e2, a2, buffer_[11], 11, k9);
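// Final chaining step: the new digest_[0] is computed into c1 first, so the
// old digest_[0] stays available for the digest_[4] update; each digest word
// absorbs the previous value of the next digest word plus one left-line
// (a1..e1) and one right-line (a2..e2) result, per the RIPEMD-160 spec.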
c1 = digest_[1] + c1 + d2;
digest_[1] = digest_[2] + d1 + e2;
digest_[2] = digest_[3] + e1 + a2;
digest_[3] = digest_[4] + a1 + b2;
digest_[4] = digest_[0] + b1 + c2;
digest_[0] = c1;
}
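
// A minimal portable sketch (not part of the original file) of what each
// Subround above computes and of the five boolean functions the asm macros
// below re-implement.  Every *_ref name is hypothetical; the real F..J,
// Subround, rotlFixed and word32 come from this file and its headers.

typedef unsigned int word32_ref;                    // assumed 32-bit word

static inline word32_ref rotl_ref(word32_ref x, unsigned s)
{
    return (x << s) | (x >> (32 - s));              // left rotate, 0 < s < 32
}

static inline word32_ref F_ref(word32_ref x, word32_ref y, word32_ref z)
{ return x ^ y ^ z; }                               // rounds 1 and 5'
static inline word32_ref G_ref(word32_ref x, word32_ref y, word32_ref z)
{ return z ^ (x & (y ^ z)); }                       // rounds 2 and 4'
static inline word32_ref H_ref(word32_ref x, word32_ref y, word32_ref z)
{ return z ^ (x | ~y); }                            // rounds 3 and 3'
static inline word32_ref I_ref(word32_ref x, word32_ref y, word32_ref z)
{ return y ^ (z & (x ^ y)); }                       // rounds 4 and 2'
static inline word32_ref J_ref(word32_ref x, word32_ref y, word32_ref z)
{ return x ^ (y | ~z); }                            // rounds 5 and 1'

// one subround:  a += f(b,c,d) + x + k;  a = rotl(a,s) + e;  c = rotl(c,10)
// e.g. Subround_ref(I_ref, c2, d2, e2, a2, b2, buffer_[10], 11, k6) would
// mirror the first Subround call shown above
template <typename BoolF>
static inline void Subround_ref(BoolF f, word32_ref& a, word32_ref b,
                                word32_ref& c, word32_ref d, word32_ref e,
                                word32_ref x, unsigned s, word32_ref k)
{
    a += f(b, c, d) + x + k;
    a  = rotl_ref(a, s) + e;
    c  = rotl_ref(c, 10);
}
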
#ifdef DO_RIPEMD_ASM
/*
// F(x ^ y ^ z)
// place in esi
#define ASMF(x, y, z) \
AS2( mov esi, x ) \
AS2( xor esi, y ) \
AS2( xor esi, z )
// G(z ^ (x & (y^z)))
// place in esi
#define ASMG(x, y, z) \
AS2( mov esi, z ) \
AS2( xor esi, y ) \
AS2( and esi, x ) \
AS2( xor esi, z )
// H(z ^ (x | ~y))
// place in esi
#define ASMH(x, y, z) \
AS2( mov esi, y ) \
AS1( not esi ) \
AS2( or esi, x ) \
AS2( xor esi, z )
// I(y ^ (z & (x^y)))
// place in esi
#define ASMI(x, y, z) \
AS2( mov esi, y ) \
AS2( xor esi, x ) \
AS2( and esi, z ) \
AS2( xor esi, y )
// J(x ^ (y | ~z)))
// place in esi
#define ASMJ(x, y, z) \
AS2( mov esi, z ) \
AS1( not esi ) \
AS2( or esi, y ) \
AS2( xor esi, x )
// for 160 and 320
// #define ASMSubround(f, a, b, c, d, e, i, s, k)
// a += f(b, c, d) + data[i] + k;
// a = rotlFixed((word32)a, s) + e;
// c = rotlFixed((word32)c, 10U)
#define ASMSubround(f, a, b, c, d, e, index, s, k) \
// a += f(b, c, d) + data[i] + k \
AS2( mov esp, [edi + index * 4] ) \
f(b, c, d) \
AS2( add esi, k ) \
AS2( add esi, esp ) \
AS2( add a, esi ) \
// a = rotlFixed((word32)a, s) + e \
AS2( rol a, s ) \
AS2( rol c, 10 ) \
// c = rotlFixed((word32)c, 10U) \
AS2( add a, e )
*/
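// The live macros below differ from the commented reference above in ways
// visible in their bodies: the message word is added to a straight from
// memory (add a, [edi + index*4]) instead of through a scratch register,
// the round constant k is folded in with a single lea, the rol c, 10 is
// interleaved with the other updates, and each macro ends by reloading esi
// with the operand the next subround's boolean function starts from.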
// combine F into subround w/ setup
// esi already has c, setup for next round when done
// the message word edi[index] is added to a directly from memory
#define ASMSubroundF(a, b, c, d, e, index, s) \
/* a += (b ^ c ^ d) + data[i] + k   (k is 0 for the F rounds, so no k argument) */ \
AS2( xor esi, b ) \
AS2( add a, [edi + index * 4] ) \
AS2( xor esi, d ) \
AS2( add a, esi ) \
/* a = rotlFixed((word32)a, s) + e */ \
AS2( mov esi, b ) \
AS2( rol a, s ) \
/* c = rotlFixed((word32)c, 10U) */ \
AS2( rol c, 10 ) \
AS2( add a, e )
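// Hypothetical usage (register assignment assumed for illustration only):
//   ASMSubroundF(eax, ebx, ecx, edx, ebp, 0, 11)
// performs one F subround on the message word at [edi + 0*4] with a left
// rotate of 11, i.e. the same work as a portable Subround(F, ...) call with
// shift 11 and k = 0.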
// combine G into subround w/ setup
// esi already has c, setup for next round when done
// the message word edi[index] is added to a directly from memory
#define ASMSubroundG(a, b, c, d, e, index, s, k) \
/* a += (d ^ (b & (c^d))) + data[i] + k */ \
AS2( xor esi, d ) \
AS2( and esi, b ) \
AS2( add a, [edi + index * 4] ) \
AS2( xor esi, d ) \
AS2( lea a, [esi + a + k] ) \
/* a = rotlFixed((word32)a, s) + e */ \
AS2( mov esi, b ) \
AS2( rol a, s ) \
/* c = rotlFixed((word32)c, 10U) */ \
AS2( rol c, 10 ) \
AS2( add a, e )
// combine H into subround w/ setup
// esi already has c, setup for next round when done
// the message word edi[index] is added to a directly from memory
#define ASMSubroundH(a, b, c, d, e, index, s, k) \
/* a += (d ^ (b | ~c)) + data[i] + k */ \
AS1( not esi ) \
AS2( or esi, b ) \
AS2( add a, [edi + index * 4] ) \
AS2( xor esi, d ) \
AS2( lea a, [esi + a + k] ) \
/* a = rotlFixed((word32)a, s) + e */ \
AS2( mov esi, b ) \
AS2( rol a, s ) \
/* c = rotlFixed((word32)c, 10U) */ \
AS2( rol c, 10 ) \
AS2( add a, e )
// combine I into subround w/ setup
// esi already has c, setup for next round when done
// the message word edi[index] is added to a directly from memory
#define ASMSubroundI(a, b, c, d, e, index, s, k) \
/* a += (c ^ (d & (b^c))) + data[i] + k */ \
AS2( xor esi, b ) \
AS2( and esi, d ) \
AS2( add a, [edi + index * 4] ) \
AS2( xor esi, c ) \
AS2( lea a, [esi + a + k] ) \
/* a = rotlFixed((word32)a, s) + e */ \
AS2( mov esi, b ) \
AS2( rol a, s ) \
/* c = rotlFixed((word32)c, 10U) */ \
AS2( rol c, 10 ) \
AS2( add a, e )
// combine J into subround w/ setup
// esi already has d, setup for next round when done
// the message word edi[index] is added to a directly from memory
#define ASMSubroundJ(a, b, c, d, e, index, s, k) \
/* a += (b ^ (c | ~d))) + data[i] + k */ \
AS1( not esi ) \
AS2( or esi, c ) \
/* c = rotlFixed((word32)c, 10U) */ \
AS2( add a, [edi + index * 4] ) \
AS2( xor esi, b ) \
AS2( rol c, 10 ) \
AS2( lea a, [esi + a + k] ) \
/* a = rotlFixed((word32)a, s) + e */ \
AS2( rol a, s ) \
AS2( mov esi, c ) \
AS2( add a, e )
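// Note on the esi handoff: between consecutive subrounds the five working
// registers rotate one position, (a, b, c, d, e) -> (e, a, b, c, d), so the
// trailing mov esi, ... in each macro leaves esi holding exactly the word
// the next macro's boolean function starts from, saving one load per
// subround.  A hypothetical back-to-back pair (registers assumed), mirroring
// the first two H/k7 subrounds of the parallel line above:
//   ASMSubroundH(eax, ebx, ecx, edx, ebp, 15, 9, k7)
//   ASMSubroundH(ebp, eax, ebx, ecx, edx,  5, 7, k7)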
#ifdef _MSC_VER
__declspec(naked)
#endif
void RIPEMD160::AsmTransform(const byte* data, word32 times)
{
#ifdef __GNUC__
#define AS1(x) asm(#x);
#define AS2(x, y) asm(#x ", " #y);
#define PROLOG() \
asm(".intel_syntax noprefix"); \
AS2( movd mm3, edi ) \
AS2( movd mm4, ebx ) \
AS2( movd mm5, esi ) \
AS2( movd mm6, ebp ) \
AS2( mov ecx, DWORD PTR [ebp + 8] ) \
AS2( mov edi, DWORD PTR [ebp + 12] ) \
AS2( mov edx, DWORD PTR [ebp + 16] )
#define EPILOG() \
AS2( movd ebp, mm6 ) \
AS2( movd esi, mm5 ) \
AS2( movd ebx, mm4 ) \
AS2( mov esp, ebp ) \
AS2( movd edi, mm3 ) \
AS1( emms ) \
asm(".att_syntax");
#else
#define AS1(x) __asm x
#define AS2(x, y) __asm x, y
#define PROLOG() \
AS1( push ebp ) \
AS2( mov ebp, esp ) \
AS2( movd mm3, edi ) \
AS2( movd mm4, ebx ) \
AS2( movd mm5, esi ) \
AS2( movd mm6, ebp ) \
AS2( mov edi, DWORD PTR [ebp + 8] ) \
AS2( mov edx, DWORD PTR [ebp + 12] )
#define EPILOG() \
AS2( movd ebp, mm6 ) \
AS2( movd esi, mm5 ) \
AS2( movd ebx, mm4 ) \
AS2( movd edi, mm3 ) \
AS2( mov esp, ebp ) \
AS1( pop ebp ) \