/* sha.cpp
 *
 * Copyright (C) 2003 Sawtooth Consulting Ltd.
 *
 * This file is part of yaSSL.
 *
 * yaSSL is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * yaSSL is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
 */
/* based on Wei Dai's sha.cpp from CryptoPP */

#include "runtime.hpp"
#include <string.h>
#include "algorithm.hpp"    // mySTL::swap
#include "sha.hpp"

#if defined(TAOCRYPT_X86ASM_AVAILABLE) && defined(TAO_ASM)
    #define DO_SHA_ASM
#endif

namespace TaoCrypt {
#define blk0(i) (W[i] = buffer_[i])
#define blk1(i) (W[i&15] = \
                 rotlFixed(W[(i+13)&15]^W[(i+8)&15]^W[(i+2)&15]^W[i&15],1))
#define f1(x,y,z) (z^(x &(y^z)))
#define f2(x,y,z) (x^y^z)
#define f3(x,y,z) ((x&y)|(z&(x|y)))
#define f4(x,y,z) (x^y^z)
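
// f1 is SHA-1's "choose" function, f3 is "majority", and f2/f4 are the
// parity (3-way XOR) used in the remaining two round groups
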
// (R0+R1), R2, R3, R4 are the different operations used in SHA1
#define R0(v,w,x,y,z,i) z += f1(w,x,y) + blk0(i) + 0x5A827999 + \
                        rotlFixed(v,5); w = rotlFixed(w,30);
#define R1(v,w,x,y,z,i) z += f1(w,x,y) + blk1(i) + 0x5A827999 + \
                        rotlFixed(v,5); w = rotlFixed(w,30);
#define R2(v,w,x,y,z,i) z += f2(w,x,y) + blk1(i) + 0x6ED9EBA1 + \
                        rotlFixed(v,5); w = rotlFixed(w,30);
#define R3(v,w,x,y,z,i) z += f3(w,x,y) + blk1(i) + 0x8F1BBCDC + \
                        rotlFixed(v,5); w = rotlFixed(w,30);
#define R4(v,w,x,y,z,i) z += f4(w,x,y) + blk1(i) + 0xCA62C1D6 + \
                        rotlFixed(v,5); w = rotlFixed(w,30);
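
/* For reference, a single round such as R0(a,b,c,d,e,0) expands to
 *
 *     e += f1(b,c,d) + blk0(0) + 0x5A827999 + rotlFixed(a,5);
 *     b  = rotlFixed(b,30);
 *
 * Rotating the argument order between invocations (a,b,c,d,e -> e,a,b,c,d
 * -> ...) takes the place of the usual end-of-round shuffle of the working
 * variables.
 */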
void SHA::Init()
{
    // standard SHA-1 initial hash values (FIPS 180-1)
    digest_[0] = 0x67452301L;
    digest_[1] = 0xEFCDAB89L;
    digest_[2] = 0x98BADCFEL;
    digest_[3] = 0x10325476L;
    digest_[4] = 0xC3D2E1F0L;

    buffLen_ = 0;
    loLen_   = 0;
    hiLen_   = 0;
}
SHA::SHA(const SHA& that) : HASHwithTransform(DIGEST_SIZE / sizeof(word32),
                                              BLOCK_SIZE)
{
    buffLen_ = that.buffLen_;
    loLen_   = that.loLen_;
    hiLen_   = that.hiLen_;

    memcpy(digest_, that.digest_, DIGEST_SIZE);
    memcpy(buffer_, that.buffer_, BLOCK_SIZE);
}
// copy-and-swap assignment
SHA& SHA::operator= (const SHA& that)
{
    SHA tmp(that);
    Swap(tmp);
    return *this;
}
void SHA::Swap(SHA& other)
{
    mySTL::swap(loLen_,   other.loLen_);
    mySTL::swap(hiLen_,   other.hiLen_);
    mySTL::swap(buffLen_, other.buffLen_);

    // digest and buffer contents are copied from other rather than exchanged;
    // that is all the copy-and-swap assignment above requires
    memcpy(digest_, other.digest_, DIGEST_SIZE);
    memcpy(buffer_, other.buffer_, BLOCK_SIZE);
}
// Update digest with data of size len, do in blocks
void SHA::Update(const byte* data, word32 len)
{
    byte* local = (byte*)buffer_;

    // remove buffered data if possible
    if (buffLen_) {
        word32 add = min(len, BLOCK_SIZE - buffLen_);
        memcpy(&local[buffLen_], data, add);

        buffLen_ += add;
        data     += add;
        len      -= add;

        if (buffLen_ == BLOCK_SIZE) {
            ByteReverseIf(local, local, BLOCK_SIZE, BigEndianOrder);
            Transform();
            AddLength(BLOCK_SIZE);
            buffLen_ = 0;
        }
    }

    // do block size transforms or all at once for asm
    if (buffLen_ == 0) {
#ifndef DO_SHA_ASM
        while (len >= BLOCK_SIZE) {
            memcpy(&local[0], data, BLOCK_SIZE);

            data += BLOCK_SIZE;
            len  -= BLOCK_SIZE;

            ByteReverseIf(local, local, BLOCK_SIZE, BigEndianOrder);
            Transform();
            AddLength(BLOCK_SIZE);
        }
#else
        word32 times = len / BLOCK_SIZE;
        if (times) {
            AsmTransform(data, times);
            const word32 add = BLOCK_SIZE * times;
            AddLength(add);

            len  -= add;
            data += add;
        }
#endif
    }

    // cache any data left
    if (len) {
        memcpy(&local[buffLen_], data, len);
        buffLen_ += len;
    }
}
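
/* Usage sketch (illustrative only, assuming the usual TaoCrypt hash
 * interface where Final(byte*) is supplied by the HASHwithTransform base):
 *
 *     TaoCrypt::SHA  sha;
 *     TaoCrypt::byte hash[TaoCrypt::SHA::DIGEST_SIZE];   // 20 bytes
 *
 *     sha.Update(part1, part1Sz);   // chunks of any size may be streamed in
 *     sha.Update(part2, part2Sz);
 *     sha.Final(hash);              // pad, run final Transform, write digest
 */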
void SHA::Transform()
{
    word32 W[BLOCK_SIZE / sizeof(word32)];

    // Copy context->state[] to working vars
    word32 a = digest_[0];
    word32 b = digest_[1];
    word32 c = digest_[2];
    word32 d = digest_[3];
    word32 e = digest_[4];

    // 4 rounds of 20 operations each. Loop unrolled.
    R0(a,b,c,d,e, 0); R0(e,a,b,c,d, 1); R0(d,e,a,b,c, 2); R0(c,d,e,a,b, 3);
    R0(b,c,d,e,a, 4); R0(a,b,c,d,e, 5); R0(e,a,b,c,d, 6); R0(d,e,a,b,c, 7);
    R0(c,d,e,a,b, 8); R0(b,c,d,e,a, 9); R0(a,b,c,d,e,10); R0(e,a,b,c,d,11);
    R0(d,e,a,b,c,12); R0(c,d,e,a,b,13); R0(b,c,d,e,a,14); R0(a,b,c,d,e,15);

    R1(e,a,b,c,d,16); R1(d,e,a,b,c,17); R1(c,d,e,a,b,18); R1(b,c,d,e,a,19);

    R2(a,b,c,d,e,20); R2(e,a,b,c,d,21); R2(d,e,a,b,c,22); R2(c,d,e,a,b,23);
    R2(b,c,d,e,a,24); R2(a,b,c,d,e,25); R2(e,a,b,c,d,26); R2(d,e,a,b,c,27);
    R2(c,d,e,a,b,28); R2(b,c,d,e,a,29); R2(a,b,c,d,e,30); R2(e,a,b,c,d,31);
    R2(d,e,a,b,c,32); R2(c,d,e,a,b,33); R2(b,c,d,e,a,34); R2(a,b,c,d,e,35);
    R2(e,a,b,c,d,36); R2(d,e,a,b,c,37); R2(c,d,e,a,b,38); R2(b,c,d,e,a,39);

    R3(a,b,c,d,e,40); R3(e,a,b,c,d,41); R3(d,e,a,b,c,42); R3(c,d,e,a,b,43);
    R3(b,c,d,e,a,44); R3(a,b,c,d,e,45); R3(e,a,b,c,d,46); R3(d,e,a,b,c,47);
    R3(c,d,e,a,b,48); R3(b,c,d,e,a,49); R3(a,b,c,d,e,50); R3(e,a,b,c,d,51);
    R3(d,e,a,b,c,52); R3(c,d,e,a,b,53); R3(b,c,d,e,a,54); R3(a,b,c,d,e,55);
    R3(e,a,b,c,d,56); R3(d,e,a,b,c,57); R3(c,d,e,a,b,58); R3(b,c,d,e,a,59);

    R4(a,b,c,d,e,60); R4(e,a,b,c,d,61); R4(d,e,a,b,c,62); R4(c,d,e,a,b,63);
    R4(b,c,d,e,a,64); R4(a,b,c,d,e,65); R4(e,a,b,c,d,66); R4(d,e,a,b,c,67);
    R4(c,d,e,a,b,68); R4(b,c,d,e,a,69); R4(a,b,c,d,e,70); R4(e,a,b,c,d,71);
    R4(d,e,a,b,c,72); R4(c,d,e,a,b,73); R4(b,c,d,e,a,74); R4(a,b,c,d,e,75);
    R4(e,a,b,c,d,76); R4(d,e,a,b,c,77); R4(c,d,e,a,b,78); R4(b,c,d,e,a,79);

    // Add the working vars back into digest state[]
    digest_[0] += a;
    digest_[1] += b;
    digest_[2] += c;
    digest_[3] += d;
    digest_[4] += e;

    // Wipe variables
    a = b = c = d = e = 0;
    memset(W, 0, sizeof(W));
}
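
/* Round layout: rounds 0-15 read the (byte-reversed) message block directly
 * through blk0; from round 16 on, blk1 expands the schedule in place inside
 * the 16-word circular buffer W.  f1/0x5A827999 covers rounds 0-19,
 * f2/0x6ED9EBA1 rounds 20-39, f3/0x8F1BBCDC rounds 40-59, and
 * f4/0xCA62C1D6 rounds 60-79.
 */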
#ifdef DO_SHA_ASM

// f1(x,y,z) (z^(x &(y^z)))
// place in esi
#define ASMf1(x,y,z) \
    AS2( mov esi, y ) \
    AS2( xor esi, z ) \
    AS2( and esi, x ) \
    AS2( xor esi, z )
// R0(v,w,x,y,z,i) =
//     z += f1(w,x,y) + W[i] + 0x5A827999 + rotlFixed(v,5);
//     w  = rotlFixed(w,30);
// use esi for f
// use edi as tmp

#define ASMR0(v,w,x,y,z,i) \
    AS2( mov esi, x ) \
    AS2( mov edi, [esp + i * 4] ) \
    AS2( xor esi, y ) \
    AS2( and esi, w ) \
    AS2( lea z, [edi + z + 0x5A827999] ) \
    AS2( mov edi, v ) \
    AS2( xor esi, y ) \
    AS2( rol edi, 5 ) \
    AS2( add z, esi ) \
    AS2( rol w, 30 ) \
    AS2( add z, edi )
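
// note: the lea above folds W[i] (in edi), the old z, and the round constant
// into z in a single instruction; the f result (esi) and rotlFixed(v,5) are
// then added with separate adds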
/* Some macro stuff, but older gas ( < 2.16 ) can't process &, so do by hand;
   % won't work on gas at all
#define xstr(s) str(s)
#define str(s) #s
#define WOFF1(a) ( a & 15)
#define WOFF2(a) ((a + 2) & 15)
#define WOFF3(a) ((a + 8) & 15)
#define WOFF4(a) ((a + 13) & 15)
#ifdef __GNUC__
#define WGET1(i) asm("mov esp, [edi - "xstr(WOFF1(i))" * 4] ");
#define WGET2(i) asm("xor esp, [edi - "xstr(WOFF2(i))" * 4] ");
#define WGET3(i) asm("xor esp, [edi - "xstr(WOFF3(i))" * 4] ");
#define WGET4(i) asm("xor esp, [edi - "xstr(WOFF4(i))" * 4] ");
#define WPUT1(i) asm("mov [edi - "xstr(WOFF1(i))" * 4], esp ");
#else
#define WGET1(i) AS2( mov esp, [edi - WOFF1(i) * 4] )
#define WGET2(i) AS2( xor esp, [edi - WOFF2(i) * 4] )
#define WGET3(i) AS2( xor esp, [edi - WOFF3(i) * 4] )
#define WGET4(i) AS2( xor esp, [edi - WOFF4(i) * 4] )
#define WPUT1(i) AS2( mov [edi - WOFF1(i) * 4], esp )
#endif
*/
// ASMR1 = ASMR0 but use esp for W calcs

#define ASMR1(v,w,x,y,z,i,W1,W2,W3,W4) \
    AS2( mov edi, [esp + W1 * 4] ) \
    AS2( mov esi, x ) \
    AS2( xor edi, [esp + W2 * 4] ) \
    AS2( xor esi, y ) \
    AS2( xor edi, [esp + W3 * 4] ) \
    AS2( and esi, w ) \
    AS2( xor edi, [esp + W4 * 4] ) \
    AS2( rol edi, 1 ) \
    AS2( xor esi, y ) \
    AS2( mov [esp + W1 * 4], edi ) \
    AS2( lea z, [edi + z + 0x5A827999] ) \
    AS2( mov edi, v ) \
    AS2( rol edi, 5 ) \
    AS2( add z, esi ) \
    AS2( rol w, 30 ) \
    AS2( add z, edi )
// ASMR2 = ASMR1 but f is xor, xor instead

#define ASMR2(v,w,x,y,z,i,W1,W2,W3,W4) \
    AS2( mov edi, [esp + W1 * 4] ) \
    AS2( mov esi, x ) \
    AS2( xor edi, [esp + W2 * 4] ) \
    AS2( xor esi, y ) \
    AS2( xor edi, [esp + W3 * 4] ) \
    AS2( xor esi, w ) \
    AS2( xor edi, [esp + W4 * 4] ) \
    AS2( rol edi, 1 ) \
    AS2( add z, esi ) \
    AS2( mov [esp + W1 * 4], edi ) \
    AS2( lea z, [edi + z + 0x6ED9EBA1] ) \
    AS2( mov edi, v ) \
    AS2( rol edi, 5 ) \
    AS2( rol w, 30 ) \
    AS2( add z, edi )
// ASMR3 = ASMR2 but f is (x&y)|(z&(x|y))