diff --git a/GNUmakefile b/GNUmakefile
index f74e64ae..178dfab9 100755
--- a/GNUmakefile
+++ b/GNUmakefile
@@ -416,6 +416,9 @@ endif # No ASM
 
 # Undefined Behavior Sanitizer (UBsan) testing. Issue 'make ubsan'.
 ifeq ($(findstring ubsan,$(MAKECMDGOALS)),ubsan)
+CXXFLAGS := $(CXXFLAGS:-g%=-g3)
+CXXFLAGS := $(CXXFLAGS:-O%=-O1)
+CXXFLAGS := $(CXXFLAGS:-xO%=-xO1)
 ifeq ($(findstring -fsanitize=undefined,$(CXXFLAGS)),)
 CXXFLAGS += -fsanitize=undefined
 endif # CXXFLAGS
@@ -426,6 +429,9 @@ endif # UBsan
 
 # Address Sanitizer (Asan) testing. Issue 'make asan'.
 ifeq ($(findstring asan,$(MAKECMDGOALS)),asan)
+CXXFLAGS := $(CXXFLAGS:-g%=-g3)
+CXXFLAGS := $(CXXFLAGS:-O%=-O1)
+CXXFLAGS := $(CXXFLAGS:-xO%=-xO1)
 ifeq ($(findstring -fsanitize=address,$(CXXFLAGS)),)
 CXXFLAGS += -fsanitize=address
 endif # CXXFLAGS
diff --git a/TestScripts/cryptest.sh b/TestScripts/cryptest.sh
index 602abd86..af492f3a 100755
--- a/TestScripts/cryptest.sh
+++ b/TestScripts/cryptest.sh
@@ -250,8 +250,9 @@ if [[ ("$SUNCC_510_OR_ABOVE" -ne "0") ]]; then
     HAVE_OFAST=0
 fi
 
+# GCC compile farm is mounted RO
 if [[ (-z "$TMPDIR") ]]; then
-    if [[ (-d "/tmp") ]]; then
+    if [[ (-d "/tmp") ]] && touch "/tmp/ok-to-delete" &>/dev/null; then
         TMPDIR=/tmp
     elif [[ (-d "/temp") ]]; then
         TMPDIR=/temp
@@ -263,6 +264,9 @@ if [[ (-z "$TMPDIR") ]]; then
     fi
 fi
 
+# Make temp if it does not exist
+mkdir -p "$TMPDIR" &>/dev/null
+
 # Sun Studio does not allow '-x c++'. Copy it here...
 rm -f adhoc.cpp > /dev/null 2>&1
 cp adhoc.cpp.proto adhoc.cpp
@@ -1382,6 +1386,44 @@ if [[ ("$HAVE_DISASS" -ne "0" && ("$IS_X86" -ne "0" || "$IS_X64" -ne "0")) ]]; then
         fi
     fi
 
+    ############################################
+    # X86 CRC32 code generation
+
+    "$CXX" -DCRYPTOPP_ADHOC_MAIN -msse4.2 adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1
+    if [[ "$?" -eq "0" ]]; then
+        X86_CRC32=1
+    fi
+
+    if [[ ("$X86_CRC32" -ne "0") ]]; then
+        echo
+        echo "************************************" | tee -a "$TEST_RESULTS"
+        echo "Testing: X86 CRC32 code generation" | tee -a "$TEST_RESULTS"
+        echo
+
+        OBJFILE=crc.o; rm -f "$OBJFILE" 2>/dev/null
+        CXX="$CXX" CXXFLAGS="$RELEASE_CXXFLAGS -DDISABLE_NATIVE_ARCH=1 -msse -msse2" "$MAKE" "${MAKEARGS[@]}" $OBJFILE 2>&1 | tee -a "$TEST_RESULTS"
+
+        COUNT=0
+        FAILED=0
+        DISASS_TEXT=$("$DISASS" "${DISASSARGS[@]}" "$OBJFILE" 2>/dev/null)
+
+        COUNT=$(echo -n "$DISASS_TEXT" | "$GREP" -i -c crc32l)
+        if [[ ("$COUNT" -eq "0") ]]; then
+            FAILED=1
+            echo "ERROR: failed to generate crc32l instruction" | tee -a "$TEST_RESULTS"
+        fi
+
+        COUNT=$(echo -n "$DISASS_TEXT" | "$GREP" -i -c crc32b)
+        if [[ ("$COUNT" -eq "0") ]]; then
+            FAILED=1
+            echo "ERROR: failed to generate crc32b instruction" | tee -a "$TEST_RESULTS"
+        fi
+
+        if [[ ("$FAILED" -eq "0") ]]; then
+            echo "Verified crc32l and crc32b machine instructions" | tee -a "$TEST_RESULTS"
+        fi
+    fi
+
     ############################################
     # X86 SHA code generation
 
diff --git a/cryptest.sh b/cryptest.sh
index 602abd86..af492f3a 100755
--- a/cryptest.sh
+++ b/cryptest.sh
@@ -250,8 +250,9 @@ if [[ ("$SUNCC_510_OR_ABOVE" -ne "0") ]]; then
     HAVE_OFAST=0
 fi
 
+# GCC compile farm is mounted RO
 if [[ (-z "$TMPDIR") ]]; then
-    if [[ (-d "/tmp") ]]; then
+    if [[ (-d "/tmp") ]] && touch "/tmp/ok-to-delete" &>/dev/null; then
         TMPDIR=/tmp
     elif [[ (-d "/temp") ]]; then
         TMPDIR=/temp
@@ -263,6 +264,9 @@ if [[ (-z "$TMPDIR") ]]; then
     fi
 fi
 
+# Make temp if it does not exist
+mkdir -p "$TMPDIR" &>/dev/null
+
 # Sun Studio does not allow '-x c++'. Copy it here...
rm -f adhoc.cpp > /dev/null 2>&1 cp adhoc.cpp.proto adhoc.cpp @@ -1382,6 +1386,44 @@ if [[ ("$HAVE_DISASS" -ne "0" && ("$IS_X86" -ne "0" || "$IS_X64" -ne "0")) ]]; t fi fi + ############################################ + # X86 CRC32 code generation + + "$CXX" -DCRYPTOPP_ADHOC_MAIN -msse4.2 adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 + if [[ "$?" -eq "0" ]]; then + X86_CRC32=1 + fi + + if [[ ("$X86_CRC32" -ne "0") ]]; then + echo + echo "************************************" | tee -a "$TEST_RESULTS" + echo "Testing: X86 CRC32 code generation" | tee -a "$TEST_RESULTS" + echo + + OBJFILE=crc.o; rm -f "$OBJFILE" 2>/dev/null + CXX="$CXX" CXXFLAGS="$RELEASE_CXXFLAGS -DDISABLE_NATIVE_ARCH=1 -msse -msse2" "$MAKE" "${MAKEARGS[@]}" $OBJFILE 2>&1 | tee -a "$TEST_RESULTS" + + COUNT=0 + FAILED=0 + DISASS_TEXT=$("$DISASS" "${DISASSARGS[@]}" "$OBJFILE" 2>/dev/null) + + COUNT=$(echo -n "$DISASS_TEXT" | "$GREP" -i -c crc32l) + if [[ ("$COUNT" -eq "0") ]]; then + FAILED=1 + echo "ERROR: failed to generate crc32l instruction" | tee -a "$TEST_RESULTS" + fi + + COUNT=$(echo -n "$DISASS_TEXT" | "$GREP" -i -c crc32b) + if [[ ("$COUNT" -eq "0") ]]; then + FAILED=1 + echo "ERROR: failed to generate crc32b instruction" | tee -a "$TEST_RESULTS" + fi + + if [[ ("$FAILED" -eq "0") ]]; then + echo "Verified crc32l and crc32b machine instructions" | tee -a "$TEST_RESULTS" + fi + fi + ############################################ # X86 SHA code generation diff --git a/rijndael.cpp b/rijndael.cpp index e04d323c..fb767275 100644 --- a/rijndael.cpp +++ b/rijndael.cpp @@ -92,6 +92,10 @@ NAMESPACE_BEGIN(CryptoPP) # define MAYBE_CONST const #endif +// Clang __m128i casts +#define M128I_CAST(x) ((__m128i *)(void *)(x)) +#define CONST_M128I_CAST(x) ((const __m128i *)(const void *)(x)) + #if defined(CRYPTOPP_ALLOW_UNALIGNED_DATA_ACCESS) || defined(CRYPTOPP_ALLOW_RIJNDAEL_UNALIGNED_DATA_ACCESS) # if (CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE || defined(CRYPTOPP_X64_MASM_AVAILABLE)) && !defined(CRYPTOPP_DISABLE_RIJNDAEL_ASM) namespace rdtable {CRYPTOPP_ALIGN_DATA(16) word64 Te[256+2];} diff --git a/seal.cpp b/seal.cpp index 85e9b784..fef2656c 100644 --- a/seal.cpp +++ b/seal.cpp @@ -38,12 +38,8 @@ word32 SEAL_Gamma::Apply(word32 i) word32 shaIndex = i/5; if (shaIndex != lastIndex) { -#if CRYPTOPP_SHANI_AVAILABLE - D[0] = ConditionalByteReverse(HasSHA() ? BIG_ENDIAN_ORDER : LITTLE_ENDIAN_ORDER, shaIndex); -#else - D[0] = shaIndex; -#endif memcpy(Z, H, 20); + D[0] = shaIndex; SHA1::Transform(Z, D); lastIndex = shaIndex; } diff --git a/sha.cpp b/sha.cpp index 7142b03a..4f126c0f 100644 --- a/sha.cpp +++ b/sha.cpp @@ -6,6 +6,24 @@ // code from Johannes Schneiders, Skip Hovsmith and Barry O'Rourke. // All code is in the public domain. +// In August 2017 Walton reworked the internals to align all the implementations. +// Formerly all hashes were software based, IterHashBase handled endian conversions, +// and IterHashBase dispatched a single to block SHA{N}::Transform. SHA{N}::Transform +// then performed the single block hashing. It was repeated for multiple blocks. +// +// The rework added SHA{N}::HashMultipleBlocks (class) and SHA{N}_HashMultipleBlocks +// (free standing). There are also hardware accelerated variations. Callers enter +// SHA{N}::HashMultipleBlocks (class), and the function calls SHA{N}_HashMultipleBlocks +// (free standing) or SHA{N}_HashBlock (free standing) as a fallback. 
+// +// An added wrinkle is hardware is little endian, C++ is big endian, and callers use big endian, +// so SHA{N}_HashMultipleBlock accepts a ByteOrder for the incoming data arrangement. Hardware +// based SHA{N}_HashMultipleBlock can often perform the endian swap much easier by setting +// an EPI mask. Endian swap incurs no penalty on Intel SHA, and 4-instruction penaly on ARM SHA. +// Under C++ the full software based swap penalty is incurred due to use of ReverseBytes(). +// +// The rework also removed the hacked-in pointers to implementations. + // use "cl /EP /P /DCRYPTOPP_GENERATE_X64_MASM sha.cpp" to generate MASM code #include "pch.h" @@ -30,16 +48,21 @@ # undef CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE #endif -NAMESPACE_BEGIN(CryptoPP) +// Clang __m128i casts +#define M128_CAST(x) ((__m128i *)(void *)(x)) +#define CONST_M128_CAST(x) ((const __m128i *)(const void *)(x)) -// Function pointer for specific SHA1 or SHA256 Transform function -typedef void (*pfnSHATransform)(word32 *state, const word32 *data); -typedef void (CRYPTOPP_FASTCALL *pfnSHAHashBlocks)(word32 *state, const word32 *data, size_t length); +// C++ makes const internal linkage +#define EXPORT_TABLE extern + +NAMESPACE_BEGIN(CryptoPP) //////////////////////////////// // start of Steve Reid's code // //////////////////////////////// +ANONYMOUS_NAMESPACE_BEGIN + #define blk0(i) (W[i] = data[i]) #define blk1(i) (W[i&15] = rotlFixed(W[(i+13)&15]^W[(i+8)&15]^W[(i+2)&15]^W[i&15],1)) @@ -55,8 +78,11 @@ typedef void (CRYPTOPP_FASTCALL *pfnSHAHashBlocks)(word32 *state, const word32 * #define R3(v,w,x,y,z,i) z+=f3(w,x,y)+blk1(i)+0x8F1BBCDC+rotlFixed(v,5);w=rotlFixed(w,30); #define R4(v,w,x,y,z,i) z+=f4(w,x,y)+blk1(i)+0xCA62C1D6+rotlFixed(v,5);w=rotlFixed(w,30); -static void SHA1_Transform_CXX(word32 *state, const word32 *data) +void SHA1_CXX_HashBlock(word32 *state, const word32 *data) { + CRYPTOPP_ASSERT(state); + CRYPTOPP_ASSERT(data); + word32 W[16]; /* Copy context->state[] to working vars */ word32 a = state[0]; @@ -93,33 +119,429 @@ static void SHA1_Transform_CXX(word32 *state, const word32 *data) state[4] += e; } +ANONYMOUS_NAMESPACE_END + ////////////////////////////// // end of Steve Reid's code // ////////////////////////////// -#if CRYPTOPP_SHANI_AVAILABLE -extern void SHA1_Transform_SHANI(word32 *state, const word32 *data); -extern void CRYPTOPP_FASTCALL SHA256_HashBlocks_SHANI(word32 *state, const word32 *data, size_t length); -#elif CRYPTOPP_ARM_SHA_AVAILABLE -extern void SHA1_Transform_ARMV8(word32 *state, const word32 *data); -extern void CRYPTOPP_FASTCALL SHA256_HashBlocks_ARMV8(word32 *state, const word32 *data, size_t length); -#endif +/////////////////////////////////// +// start of Walton/Gulley's code // +/////////////////////////////////// -static pfnSHATransform InitializeSHA1Transform() +#if CRYPTOPP_BOOL_SSE_SHA_INTRINSICS_AVAILABLE + +ANONYMOUS_NAMESPACE_BEGIN + +// Based on http://software.intel.com/en-us/articles/intel-sha-extensions and code by Sean Gulley. 
+void SHA1_SHANI_HashMultipleBlocks(word32 *state, const word32 *data, size_t length, ByteOrder order) { -#if CRYPTOPP_SHANI_AVAILABLE - if (HasSHA()) - return &SHA1_Transform_SHANI; - else -#endif -#if CRYPTOPP_ARM_SHA_AVAILABLE - if (HasSHA1()) - return &SHA1_Transform_ARMV8; - else -#endif - return &SHA1_Transform_CXX; + CRYPTOPP_ASSERT(state); + CRYPTOPP_ASSERT(data); + CRYPTOPP_ASSERT(length >= SHA1::BLOCKSIZE); + + __m128i ABCD, ABCD_SAVE, E0, E0_SAVE, E1; + __m128i MASK, MSG0, MSG1, MSG2, MSG3; + + // Load initial values + ABCD = _mm_loadu_si128(CONST_M128_CAST(state)); + E0 = _mm_set_epi32(state[4], 0, 0, 0); + ABCD = _mm_shuffle_epi32(ABCD, 0x1B); + + // IA-32 SHA is little endian, SHA::Transform is big endian, + // and SHA::HashMultipleBlocks can be either. ByteOrder + // allows us to avoid extra endian reversals. It saves 1.0 cpb. + MASK = order == BIG_ENDIAN_ORDER ? // Data arrangement + _mm_set_epi8(0,1,2,3, 4,5,6,7, 8,9,10,11, 12,13,14,15) : + _mm_set_epi8(3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12) ; + + while (length >= SHA1::BLOCKSIZE) + { + // Save current hash + ABCD_SAVE = ABCD; + E0_SAVE = E0; + + // Rounds 0-3 + MSG0 = _mm_loadu_si128(CONST_M128_CAST(data+0)); + MSG0 = _mm_shuffle_epi8(MSG0, MASK); + E0 = _mm_add_epi32(E0, MSG0); + E1 = ABCD; + ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 0); + + // Rounds 4-7 + MSG1 = _mm_loadu_si128(CONST_M128_CAST(data+4)); + MSG1 = _mm_shuffle_epi8(MSG1, MASK); + E1 = _mm_sha1nexte_epu32(E1, MSG1); + E0 = ABCD; + ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 0); + MSG0 = _mm_sha1msg1_epu32(MSG0, MSG1); + + // Rounds 8-11 + MSG2 = _mm_loadu_si128(CONST_M128_CAST(data+8)); + MSG2 = _mm_shuffle_epi8(MSG2, MASK); + E0 = _mm_sha1nexte_epu32(E0, MSG2); + E1 = ABCD; + ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 0); + MSG1 = _mm_sha1msg1_epu32(MSG1, MSG2); + MSG0 = _mm_xor_si128(MSG0, MSG2); + + // Rounds 12-15 + MSG3 = _mm_loadu_si128(CONST_M128_CAST(data+12)); + MSG3 = _mm_shuffle_epi8(MSG3, MASK); + E1 = _mm_sha1nexte_epu32(E1, MSG3); + E0 = ABCD; + MSG0 = _mm_sha1msg2_epu32(MSG0, MSG3); + ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 0); + MSG2 = _mm_sha1msg1_epu32(MSG2, MSG3); + MSG1 = _mm_xor_si128(MSG1, MSG3); + + // Rounds 16-19 + E0 = _mm_sha1nexte_epu32(E0, MSG0); + E1 = ABCD; + MSG1 = _mm_sha1msg2_epu32(MSG1, MSG0); + ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 0); + MSG3 = _mm_sha1msg1_epu32(MSG3, MSG0); + MSG2 = _mm_xor_si128(MSG2, MSG0); + + // Rounds 20-23 + E1 = _mm_sha1nexte_epu32(E1, MSG1); + E0 = ABCD; + MSG2 = _mm_sha1msg2_epu32(MSG2, MSG1); + ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 1); + MSG0 = _mm_sha1msg1_epu32(MSG0, MSG1); + MSG3 = _mm_xor_si128(MSG3, MSG1); + + // Rounds 24-27 + E0 = _mm_sha1nexte_epu32(E0, MSG2); + E1 = ABCD; + MSG3 = _mm_sha1msg2_epu32(MSG3, MSG2); + ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 1); + MSG1 = _mm_sha1msg1_epu32(MSG1, MSG2); + MSG0 = _mm_xor_si128(MSG0, MSG2); + + // Rounds 28-31 + E1 = _mm_sha1nexte_epu32(E1, MSG3); + E0 = ABCD; + MSG0 = _mm_sha1msg2_epu32(MSG0, MSG3); + ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 1); + MSG2 = _mm_sha1msg1_epu32(MSG2, MSG3); + MSG1 = _mm_xor_si128(MSG1, MSG3); + + // Rounds 32-35 + E0 = _mm_sha1nexte_epu32(E0, MSG0); + E1 = ABCD; + MSG1 = _mm_sha1msg2_epu32(MSG1, MSG0); + ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 1); + MSG3 = _mm_sha1msg1_epu32(MSG3, MSG0); + MSG2 = _mm_xor_si128(MSG2, MSG0); + + // Rounds 36-39 + E1 = _mm_sha1nexte_epu32(E1, MSG1); + E0 = ABCD; + MSG2 = _mm_sha1msg2_epu32(MSG2, MSG1); + ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 1); + MSG0 = _mm_sha1msg1_epu32(MSG0, MSG1); + MSG3 = 
_mm_xor_si128(MSG3, MSG1); + + // Rounds 40-43 + E0 = _mm_sha1nexte_epu32(E0, MSG2); + E1 = ABCD; + MSG3 = _mm_sha1msg2_epu32(MSG3, MSG2); + ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 2); + MSG1 = _mm_sha1msg1_epu32(MSG1, MSG2); + MSG0 = _mm_xor_si128(MSG0, MSG2); + + // Rounds 44-47 + E1 = _mm_sha1nexte_epu32(E1, MSG3); + E0 = ABCD; + MSG0 = _mm_sha1msg2_epu32(MSG0, MSG3); + ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 2); + MSG2 = _mm_sha1msg1_epu32(MSG2, MSG3); + MSG1 = _mm_xor_si128(MSG1, MSG3); + + // Rounds 48-51 + E0 = _mm_sha1nexte_epu32(E0, MSG0); + E1 = ABCD; + MSG1 = _mm_sha1msg2_epu32(MSG1, MSG0); + ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 2); + MSG3 = _mm_sha1msg1_epu32(MSG3, MSG0); + MSG2 = _mm_xor_si128(MSG2, MSG0); + + // Rounds 52-55 + E1 = _mm_sha1nexte_epu32(E1, MSG1); + E0 = ABCD; + MSG2 = _mm_sha1msg2_epu32(MSG2, MSG1); + ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 2); + MSG0 = _mm_sha1msg1_epu32(MSG0, MSG1); + MSG3 = _mm_xor_si128(MSG3, MSG1); + + // Rounds 56-59 + E0 = _mm_sha1nexte_epu32(E0, MSG2); + E1 = ABCD; + MSG3 = _mm_sha1msg2_epu32(MSG3, MSG2); + ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 2); + MSG1 = _mm_sha1msg1_epu32(MSG1, MSG2); + MSG0 = _mm_xor_si128(MSG0, MSG2); + + // Rounds 60-63 + E1 = _mm_sha1nexte_epu32(E1, MSG3); + E0 = ABCD; + MSG0 = _mm_sha1msg2_epu32(MSG0, MSG3); + ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 3); + MSG2 = _mm_sha1msg1_epu32(MSG2, MSG3); + MSG1 = _mm_xor_si128(MSG1, MSG3); + + // Rounds 64-67 + E0 = _mm_sha1nexte_epu32(E0, MSG0); + E1 = ABCD; + MSG1 = _mm_sha1msg2_epu32(MSG1, MSG0); + ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 3); + MSG3 = _mm_sha1msg1_epu32(MSG3, MSG0); + MSG2 = _mm_xor_si128(MSG2, MSG0); + + // Rounds 68-71 + E1 = _mm_sha1nexte_epu32(E1, MSG1); + E0 = ABCD; + MSG2 = _mm_sha1msg2_epu32(MSG2, MSG1); + ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 3); + MSG3 = _mm_xor_si128(MSG3, MSG1); + + // Rounds 72-75 + E0 = _mm_sha1nexte_epu32(E0, MSG2); + E1 = ABCD; + MSG3 = _mm_sha1msg2_epu32(MSG3, MSG2); + ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 3); + + // Rounds 76-79 + E1 = _mm_sha1nexte_epu32(E1, MSG3); + E0 = ABCD; + ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 3); + + // Add values back to state + E0 = _mm_sha1nexte_epu32(E0, E0_SAVE); + ABCD = _mm_add_epi32(ABCD, ABCD_SAVE); + + data += SHA1::BLOCKSIZE/sizeof(word32); + length -= SHA1::BLOCKSIZE; + } + + // Save state + ABCD = _mm_shuffle_epi32(ABCD, 0x1B); + _mm_storeu_si128(M128_CAST(state), ABCD); + state[4] = _mm_extract_epi32(E0, 3); } +ANONYMOUS_NAMESPACE_END + +#endif // CRYPTOPP_BOOL_SSE_SHA_INTRINSICS_AVAILABLE + +///////////////////////////////// +// end of Walton/Gulley's code // +///////////////////////////////// + +////////////////////////////////////////////////////////////// +// start of Walton/Schneiders/O'Rourke/Skip Hovsmith's code // +////////////////////////////////////////////////////////////// + +#if CRYPTOPP_BOOL_ARM_CRYPTO_INTRINSICS_AVAILABLE + +ANONYMOUS_NAMESPACE_BEGIN + +void SHA1_ARM_SHA_HashMultipleBlocks(word32 *state, const word32 *data, size_t length, ByteOrder order) +{ + CRYPTOPP_ASSERT(state); + CRYPTOPP_ASSERT(data); + CRYPTOPP_ASSERT(length >= SHA1::BLOCKSIZE); + + uint32x4_t C0, C1, C2, C3; + uint32x4_t ABCD, ABCD_SAVED; + uint32x4_t MSG0, MSG1, MSG2, MSG3; + uint32x4_t TMP0, TMP1; + uint32_t E0, E0_SAVED, E1; + + // Load initial values + C0 = vdupq_n_u32(0x5A827999); + C1 = vdupq_n_u32(0x6ED9EBA1); + C2 = vdupq_n_u32(0x8F1BBCDC); + C3 = vdupq_n_u32(0xCA62C1D6); + + ABCD = vld1q_u32(&state[0]); + E0 = state[4]; + + while (length >= SHA1::BLOCKSIZE) + { + // Save current hash + ABCD_SAVED = 
ABCD; + E0_SAVED = E0; + + MSG0 = vld1q_u32(data + 0); + MSG1 = vld1q_u32(data + 4); + MSG2 = vld1q_u32(data + 8); + MSG3 = vld1q_u32(data + 12); + + if (order == BIG_ENDIAN_ORDER) // Data arrangement + { + MSG0 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG0))); + MSG1 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG1))); + MSG2 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG2))); + MSG3 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG3))); + } + + TMP0 = vaddq_u32(MSG0, C0); + TMP1 = vaddq_u32(MSG1, C0); + + // Rounds 0-3 + E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0)); + ABCD = vsha1cq_u32(ABCD, E0, TMP0); + TMP0 = vaddq_u32(MSG2, C0); + MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2); + + // Rounds 4-7 + E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0)); + ABCD = vsha1cq_u32(ABCD, E1, TMP1); + TMP1 = vaddq_u32(MSG3, C0); + MSG0 = vsha1su1q_u32(MSG0, MSG3); + MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3); + + // Rounds 8-11 + E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0)); + ABCD = vsha1cq_u32(ABCD, E0, TMP0); + TMP0 = vaddq_u32(MSG0, C0); + MSG1 = vsha1su1q_u32(MSG1, MSG0); + MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0); + + // Rounds 12-15 + E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0)); + ABCD = vsha1cq_u32(ABCD, E1, TMP1); + TMP1 = vaddq_u32(MSG1, C1); + MSG2 = vsha1su1q_u32(MSG2, MSG1); + MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1); + + // Rounds 16-19 + E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0)); + ABCD = vsha1cq_u32(ABCD, E0, TMP0); + TMP0 = vaddq_u32(MSG2, C1); + MSG3 = vsha1su1q_u32(MSG3, MSG2); + MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2); + + // Rounds 20-23 + E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0)); + ABCD = vsha1pq_u32(ABCD, E1, TMP1); + TMP1 = vaddq_u32(MSG3, C1); + MSG0 = vsha1su1q_u32(MSG0, MSG3); + MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3); + + // Rounds 24-27 + E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0)); + ABCD = vsha1pq_u32(ABCD, E0, TMP0); + TMP0 = vaddq_u32(MSG0, C1); + MSG1 = vsha1su1q_u32(MSG1, MSG0); + MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0); + + // Rounds 28-31 + E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0)); + ABCD = vsha1pq_u32(ABCD, E1, TMP1); + TMP1 = vaddq_u32(MSG1, C1); + MSG2 = vsha1su1q_u32(MSG2, MSG1); + MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1); + + // Rounds 32-35 + E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0)); + ABCD = vsha1pq_u32(ABCD, E0, TMP0); + TMP0 = vaddq_u32(MSG2, C2); + MSG3 = vsha1su1q_u32(MSG3, MSG2); + MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2); + + // Rounds 36-39 + E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0)); + ABCD = vsha1pq_u32(ABCD, E1, TMP1); + TMP1 = vaddq_u32(MSG3, C2); + MSG0 = vsha1su1q_u32(MSG0, MSG3); + MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3); + + // Rounds 40-43 + E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0)); + ABCD = vsha1mq_u32(ABCD, E0, TMP0); + TMP0 = vaddq_u32(MSG0, C2); + MSG1 = vsha1su1q_u32(MSG1, MSG0); + MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0); + + // Rounds 44-47 + E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0)); + ABCD = vsha1mq_u32(ABCD, E1, TMP1); + TMP1 = vaddq_u32(MSG1, C2); + MSG2 = vsha1su1q_u32(MSG2, MSG1); + MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1); + + // Rounds 48-51 + E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0)); + ABCD = vsha1mq_u32(ABCD, E0, TMP0); + TMP0 = vaddq_u32(MSG2, C2); + MSG3 = vsha1su1q_u32(MSG3, MSG2); + MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2); + + // Rounds 52-55 + E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0)); + ABCD = vsha1mq_u32(ABCD, E1, TMP1); + TMP1 = vaddq_u32(MSG3, C3); + MSG0 = vsha1su1q_u32(MSG0, MSG3); + MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3); + + // Rounds 56-59 + E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0)); + ABCD = vsha1mq_u32(ABCD, E0, 
TMP0); + TMP0 = vaddq_u32(MSG0, C3); + MSG1 = vsha1su1q_u32(MSG1, MSG0); + MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0); + + // Rounds 60-63 + E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0)); + ABCD = vsha1pq_u32(ABCD, E1, TMP1); + TMP1 = vaddq_u32(MSG1, C3); + MSG2 = vsha1su1q_u32(MSG2, MSG1); + MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1); + + // Rounds 64-67 + E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0)); + ABCD = vsha1pq_u32(ABCD, E0, TMP0); + TMP0 = vaddq_u32(MSG2, C3); + MSG3 = vsha1su1q_u32(MSG3, MSG2); + MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2); + + // Rounds 68-71 + E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0)); + ABCD = vsha1pq_u32(ABCD, E1, TMP1); + TMP1 = vaddq_u32(MSG3, C3); + MSG0 = vsha1su1q_u32(MSG0, MSG3); + + // Rounds 72-75 + E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0)); + ABCD = vsha1pq_u32(ABCD, E0, TMP0); + + // Rounds 76-79 + E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0)); + ABCD = vsha1pq_u32(ABCD, E1, TMP1); + + E0 += E0_SAVED; + ABCD = vaddq_u32(ABCD_SAVED, ABCD); + + data += SHA1::BLOCKSIZE/sizeof(word32); + length -= SHA1::BLOCKSIZE; + } + + // Save state + vst1q_u32(&state[0], ABCD); + state[4] = E0; +} + +ANONYMOUS_NAMESPACE_END + +#endif // CRYPTOPP_BOOL_ARM_CRYPTO_INTRINSICS_AVAILABLE + +/////////////////////////////////////////////////////// +// end of Walton/Schneiders/O'Rourke/Hovsmith's code // +/////////////////////////////////////////////////////// + void SHA1::InitState(HashWordType *state) { state[0] = 0x67452301; @@ -131,53 +553,73 @@ void SHA1::InitState(HashWordType *state) void SHA1::Transform(word32 *state, const word32 *data) { - const pfnSHATransform s_pfn = InitializeSHA1Transform(); - s_pfn(state, data); + CRYPTOPP_ASSERT(state); + CRYPTOPP_ASSERT(data); + +#if CRYPTOPP_BOOL_SSE_SHA_INTRINSICS_AVAILABLE + if (HasSHA()) + { + SHA1_SHANI_HashMultipleBlocks(state, data, SHA1::BLOCKSIZE, LITTLE_ENDIAN_ORDER); + return; + } +#endif +#if CRYPTOPP_BOOL_ARM_CRYPTO_INTRINSICS_AVAILABLE + if (HasSHA1()) + { + SHA1_ARM_SHA_HashMultipleBlocks(state, data, SHA1::BLOCKSIZE, LITTLE_ENDIAN_ORDER); + return; + } +#endif + + SHA1_CXX_HashBlock(state, data); } -#if CRYPTOPP_SHANI_AVAILABLE size_t SHA1::HashMultipleBlocks(const word32 *input, size_t length) { - const bool noReverse = HasSHA() || NativeByteOrderIs(this->GetByteOrder()); - const unsigned int blockSize = this->BlockSize(); - word32* dataBuf = this->DataBuf(); + CRYPTOPP_ASSERT(input); + CRYPTOPP_ASSERT(length >= SHA1::BLOCKSIZE); + +#if CRYPTOPP_BOOL_SSE_SHA_INTRINSICS_AVAILABLE + if (HasSHA()) + { + SHA1_SHANI_HashMultipleBlocks(m_state, input, length, BIG_ENDIAN_ORDER); + return length & (SHA1::BLOCKSIZE - 1); + } +#endif +#if CRYPTOPP_BOOL_ARM_CRYPTO_INTRINSICS_AVAILABLE + if (HasSHA1()) + { + SHA1_ARM_SHA_HashMultipleBlocks(m_state, input, length, BIG_ENDIAN_ORDER); + return length & (SHA1::BLOCKSIZE - 1); + } +#endif + + const bool noReverse = NativeByteOrderIs(this->GetByteOrder()); + word32 *dataBuf = this->DataBuf(); do { if (noReverse) - this->HashEndianCorrectedBlock(input); + { + SHA1_CXX_HashBlock(m_state, input); + } else { - ByteReverse(dataBuf, input, this->BlockSize()); - this->HashEndianCorrectedBlock(dataBuf); + ByteReverse(dataBuf, input, SHA1::BLOCKSIZE); + SHA1_CXX_HashBlock(m_state, dataBuf); } - input += blockSize/sizeof(word32); - length -= blockSize; + input += SHA1::BLOCKSIZE/sizeof(word32); + length -= SHA1::BLOCKSIZE; } - while (length >= blockSize); + while (length >= SHA1::BLOCKSIZE); return length; } -#endif // ************************************************************* -void 
SHA224::InitState(HashWordType *state) -{ - static const word32 s[8] = {0xc1059ed8, 0x367cd507, 0x3070dd17, 0xf70e5939, 0xffc00b31, 0x68581511, 0x64f98fa7, 0xbefa4fa4}; - memcpy(state, s, sizeof(s)); -} +CRYPTOPP_ALIGN_DATA(16) EXPORT_TABLE +const word32 SHA256_K[64] CRYPTOPP_SECTION_ALIGN16 = { -void SHA256::InitState(HashWordType *state) -{ - static const word32 s[8] = {0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19}; - memcpy(state, s, sizeof(s)); -} - -#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE || CRYPTOPP_ARM_SHA_AVAILABLE -CRYPTOPP_ALIGN_DATA(16) extern const word32 SHA256_K[64] CRYPTOPP_SECTION_ALIGN16 = { -#else -extern const word32 SHA256_K[64] = { -#endif 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, @@ -196,11 +638,81 @@ extern const word32 SHA256_K[64] = { 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2 }; -#endif // #ifndef CRYPTOPP_GENERATE_X64_MASM +ANONYMOUS_NAMESPACE_BEGIN + +#define blk2(i) (W[i&15]+=s1(W[(i-2)&15])+W[(i-7)&15]+s0(W[(i-15)&15])) + +#define Ch(x,y,z) (z^(x&(y^z))) +#define Maj(x,y,z) (y^((x^y)&(y^z))) + +#define a(i) T[(0-i)&7] +#define b(i) T[(1-i)&7] +#define c(i) T[(2-i)&7] +#define d(i) T[(3-i)&7] +#define e(i) T[(4-i)&7] +#define f(i) T[(5-i)&7] +#define g(i) T[(6-i)&7] +#define h(i) T[(7-i)&7] + +#define R(i) h(i)+=S1(e(i))+Ch(e(i),f(i),g(i))+SHA256_K[i+j]+(j?blk2(i):blk0(i));\ + d(i)+=h(i);h(i)+=S0(a(i))+Maj(a(i),b(i),c(i)) + +// for SHA256 +#define S0(x) (rotrFixed(x,2)^rotrFixed(x,13)^rotrFixed(x,22)) +#define S1(x) (rotrFixed(x,6)^rotrFixed(x,11)^rotrFixed(x,25)) +#define s0(x) (rotrFixed(x,7)^rotrFixed(x,18)^(x>>3)) +#define s1(x) (rotrFixed(x,17)^rotrFixed(x,19)^(x>>10)) + +void SHA256_CXX_HashBlock(word32 *state, const word32 *data) +{ + word32 W[16], T[8]; + /* Copy context->state[] to working vars */ + memcpy(T, state, sizeof(T)); + /* 64 operations, partially loop unrolled */ + for (unsigned int j=0; j<64; j+=16) + { + R( 0); R( 1); R( 2); R( 3); + R( 4); R( 5); R( 6); R( 7); + R( 8); R( 9); R(10); R(11); + R(12); R(13); R(14); R(15); + } + /* Add the working vars back into context.state[] */ + state[0] += a(0); + state[1] += b(0); + state[2] += c(0); + state[3] += d(0); + state[4] += e(0); + state[5] += f(0); + state[6] += g(0); + state[7] += h(0); +} + +#undef S0 +#undef S1 +#undef s0 +#undef s1 +#undef R + +ANONYMOUS_NAMESPACE_END + +void SHA224::InitState(HashWordType *state) +{ + static const word32 s[8] = {0xc1059ed8, 0x367cd507, 0x3070dd17, 0xf70e5939, 0xffc00b31, 0x68581511, 0x64f98fa7, 0xbefa4fa4}; + memcpy(state, s, sizeof(s)); +} + +void SHA256::InitState(HashWordType *state) +{ + static const word32 s[8] = {0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19}; + memcpy(state, s, sizeof(s)); +} +#endif // Not CRYPTOPP_GENERATE_X64_MASM #if (defined(CRYPTOPP_X86_ASM_AVAILABLE) || defined(CRYPTOPP_X32_ASM_AVAILABLE) || defined(CRYPTOPP_GENERATE_X64_MASM)) -static void CRYPTOPP_FASTCALL SHA256_HashBlocks_SSE2(word32 *state, const word32 *data, size_t len) +ANONYMOUS_NAMESPACE_BEGIN + +void CRYPTOPP_FASTCALL SHA256_SSE_HashMultipleBlocks(word32 *state, const word32 *data, size_t len) { #define LOCALS_SIZE 8*4 + 16*4 + 4*WORD_SZ #define H(i) [BASE+ASM_MOD(1024+7-(i),8)*4] @@ -322,7 +834,7 @@ static void CRYPTOPP_FASTCALL SHA256_HashBlocks_SSE2(word32 *state, const word32 INTEL_NOPREFIX #elif defined(CRYPTOPP_GENERATE_X64_MASM) ALIGN 8 - 
SHA256_HashBlocks_SSE2 PROC FRAME + SHA256_SSE_HashMultipleBlocks PROC FRAME rex_push_reg rsi push_reg rdi push_reg rbx @@ -501,7 +1013,7 @@ INTEL_NOPREFIX pop rdi pop rsi ret - SHA256_HashBlocks_SSE2 ENDP + SHA256_SSE_HashMultipleBlocks ENDP #endif #ifdef __GNUC__ @@ -519,230 +1031,541 @@ INTEL_NOPREFIX #endif } -#endif // (defined(CRYPTOPP_X86_ASM_AVAILABLE) || defined(CRYPTOPP_GENERATE_X64_MASM)) +ANONYMOUS_NAMESPACE_END + +#endif // CRYPTOPP_X86_ASM_AVAILABLE or CRYPTOPP_GENERATE_X64_MASM #ifndef CRYPTOPP_GENERATE_X64_MASM #ifdef CRYPTOPP_X64_MASM_AVAILABLE -extern "C" { -void CRYPTOPP_FASTCALL SHA256_HashBlocks_SSE2(word32 *state, const word32 *data, size_t len); +EXPORT_TABLE "C" { +void CRYPTOPP_FASTCALL SHA256_SSE_HashMultipleBlocks(word32 *state, const word32 *data, size_t len); } #endif -#if (defined(CRYPTOPP_X86_ASM_AVAILABLE) || defined(CRYPTOPP_X32_ASM_AVAILABLE) || defined(CRYPTOPP_X64_MASM_AVAILABLE)) && !defined(CRYPTOPP_DISABLE_SHA_ASM) - -static pfnSHAHashBlocks InitializeSHA256HashBlocks() -{ -#if CRYPTOPP_SHANI_AVAILABLE - if (HasSHA()) - return &SHA256_HashBlocks_SHANI; - else -#endif -#if CRYPTOPP_ARM_SHA_AVAILABLE - if (HasSHA2()) - return &SHA256_HashBlocks_ARMV8; - else -#endif - - return &SHA256_HashBlocks_SSE2; -} - -size_t SHA256::HashMultipleBlocks(const word32 *input, size_t length) -{ - const pfnSHAHashBlocks s_pfn = InitializeSHA256HashBlocks(); - s_pfn(m_state, input, (length&(size_t(0)-BLOCKSIZE)) - !HasSSE2()); - return length % BLOCKSIZE; -} - -size_t SHA224::HashMultipleBlocks(const word32 *input, size_t length) -{ - const pfnSHAHashBlocks s_pfn = InitializeSHA256HashBlocks(); - s_pfn(m_state, input, (length&(size_t(0)-BLOCKSIZE)) - !HasSSE2()); - return length % BLOCKSIZE; -} -#endif - -#define blk2(i) (W[i&15]+=s1(W[(i-2)&15])+W[(i-7)&15]+s0(W[(i-15)&15])) - -#define Ch(x,y,z) (z^(x&(y^z))) -#define Maj(x,y,z) (y^((x^y)&(y^z))) - -#define a(i) T[(0-i)&7] -#define b(i) T[(1-i)&7] -#define c(i) T[(2-i)&7] -#define d(i) T[(3-i)&7] -#define e(i) T[(4-i)&7] -#define f(i) T[(5-i)&7] -#define g(i) T[(6-i)&7] -#define h(i) T[(7-i)&7] - -#define R(i) h(i)+=S1(e(i))+Ch(e(i),f(i),g(i))+SHA256_K[i+j]+(j?blk2(i):blk0(i));\ - d(i)+=h(i);h(i)+=S0(a(i))+Maj(a(i),b(i),c(i)) - -// for SHA256 -#define S0(x) (rotrFixed(x,2)^rotrFixed(x,13)^rotrFixed(x,22)) -#define S1(x) (rotrFixed(x,6)^rotrFixed(x,11)^rotrFixed(x,25)) -#define s0(x) (rotrFixed(x,7)^rotrFixed(x,18)^(x>>3)) -#define s1(x) (rotrFixed(x,17)^rotrFixed(x,19)^(x>>10)) - -#if defined(__OPTIMIZE_SIZE__) -// Smaller but slower -void SHA256_Transform_CXX(word32 *state, const word32 *data) -{ - word32 W[32], T[20]; - unsigned int i = 0, j = 0; - word32 *t = T+8; - - memcpy(t, state, 8*4); - word32 e = t[4], a = t[0]; - - do - { - word32 w = data[j]; - W[j] = w; - w += SHA256_K[j]; - w += t[7]; - w += S1(e); - w += Ch(e, t[5], t[6]); - e = t[3] + w; - t[3] = t[3+8] = e; - w += S0(t[0]); - a = w + Maj(a, t[1], t[2]); - t[-1] = t[7] = a; - --t; - ++j; - if (j%8 == 0) - t += 8; - } while (j<16); - - do - { - i = j&0xf; - word32 w = s1(W[i+16-2]) + s0(W[i+16-15]) + W[i] + W[i+16-7]; - W[i+16] = W[i] = w; - w += SHA256_K[j]; - w += t[7]; - w += S1(e); - w += Ch(e, t[5], t[6]); - e = t[3] + w; - t[3] = t[3+8] = e; - w += S0(t[0]); - a = w + Maj(a, t[1], t[2]); - t[-1] = t[7] = a; - - w = s1(W[(i+1)+16-2]) + s0(W[(i+1)+16-15]) + W[(i+1)] + W[(i+1)+16-7]; - W[(i+1)+16] = W[(i+1)] = w; - w += SHA256_K[j+1]; - w += (t-1)[7]; - w += S1(e); - w += Ch(e, (t-1)[5], (t-1)[6]); - e = (t-1)[3] + w; - (t-1)[3] = (t-1)[3+8] = e; - w 
+= S0((t-1)[0]); - a = w + Maj(a, (t-1)[1], (t-1)[2]); - (t-1)[-1] = (t-1)[7] = a; - - t-=2; - j+=2; - if (j%8 == 0) - t += 8; - } while (j<64); - - state[0] += a; - state[1] += t[1]; - state[2] += t[2]; - state[3] += t[3]; - state[4] += e; - state[5] += t[5]; - state[6] += t[6]; - state[7] += t[7]; -} -#else -// Bigger but faster -void SHA256_Transform_CXX(word32 *state, const word32 *data) -{ - word32 W[16], T[8]; - /* Copy context->state[] to working vars */ - memcpy(T, state, sizeof(T)); - /* 64 operations, partially loop unrolled */ - for (unsigned int j=0; j<64; j+=16) - { - R( 0); R( 1); R( 2); R( 3); - R( 4); R( 5); R( 6); R( 7); - R( 8); R( 9); R(10); R(11); - R(12); R(13); R(14); R(15); - } - /* Add the working vars back into context.state[] */ - state[0] += a(0); - state[1] += b(0); - state[2] += c(0); - state[3] += d(0); - state[4] += e(0); - state[5] += f(0); - state[6] += g(0); - state[7] += h(0); -} -#endif // __OPTIMIZE_SIZE__ - -#undef S0 -#undef S1 -#undef s0 -#undef s1 -#undef R - -#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE -static void SHA256_Transform_SSE2(word32 *state, const word32 *data) -{ - // this byte reverse is a waste of time, but this function is only called by MDC - word32 W[16]; - ByteReverse(W, data, SHA256::BLOCKSIZE); - SHA256_HashBlocks_SSE2(state, W, SHA256::BLOCKSIZE - !HasSSE2()); -} -#endif // CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE - -#if CRYPTOPP_SHANI_AVAILABLE -static void SHA256_Transform_SHANI(word32 *state, const word32 *data) -{ - return SHA256_HashBlocks_SHANI(state, data, SHA256::BLOCKSIZE); -} -#endif // CRYPTOPP_SHANI_AVAILABLE - -#if CRYPTOPP_ARM_SHA_AVAILABLE -static void SHA256_Transform_ARMV8(word32 *state, const word32 *data) -{ - return SHA256_HashBlocks_ARMV8(state, data, SHA256::BLOCKSIZE); -} -#endif // CRYPTOPP_ARM_SHA_AVAILABLE - /////////////////////////////////// // start of Walton/Gulley's code // /////////////////////////////////// -static pfnSHATransform InitializeSHA256Transform() -{ -#if CRYPTOPP_SHANI_AVAILABLE - if (HasSHA()) - return &SHA256_Transform_SHANI; - else -#endif -#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE - if (HasSSE2()) - return &SHA256_Transform_SSE2; - else -#endif -#if CRYPTOPP_ARM_SHA_AVAILABLE - if (HasSHA2()) - return &SHA256_Transform_ARMV8; - else -#endif +#if CRYPTOPP_BOOL_SSE_SHA_INTRINSICS_AVAILABLE - return &SHA256_Transform_CXX; +ANONYMOUS_NAMESPACE_BEGIN + +// Based on http://software.intel.com/en-us/articles/intel-sha-extensions and code by Sean Gulley. +void SHA256_SHANI_HashMultipleBlocks(word32 *state, const word32 *data, size_t length, ByteOrder order) +{ + CRYPTOPP_ASSERT(state); + CRYPTOPP_ASSERT(data); + CRYPTOPP_ASSERT(length >= SHA256::BLOCKSIZE); + + __m128i STATE0, STATE1; + __m128i MSG, TMP, MASK; + __m128i TMSG0, TMSG1, TMSG2, TMSG3; + __m128i ABEF_SAVE, CDGH_SAVE; + + // Load initial values + TMP = _mm_loadu_si128(M128_CAST(&state[0])); + STATE1 = _mm_loadu_si128(M128_CAST(&state[4])); + + // IA-32 SHA is little endian, SHA::Transform is big endian, + // and SHA::HashMultipleBlocks can be either. ByteOrder + // allows us to avoid extra endian reversals. It saves 1.0 cpb. + MASK = order == BIG_ENDIAN_ORDER ? 
// Data arrangement + _mm_set_epi8(12,13,14,15, 8,9,10,11, 4,5,6,7, 0,1,2,3) : + _mm_set_epi8(15,14,13,12, 11,10,9,8, 7,6,5,4, 3,2,1,0) ; + + TMP = _mm_shuffle_epi32(TMP, 0xB1); // CDAB + STATE1 = _mm_shuffle_epi32(STATE1, 0x1B); // EFGH + STATE0 = _mm_alignr_epi8(TMP, STATE1, 8); // ABEF + STATE1 = _mm_blend_epi16(STATE1, TMP, 0xF0); // CDGH + + while (length >= SHA256::BLOCKSIZE) + { + // Save current hash + ABEF_SAVE = STATE0; + CDGH_SAVE = STATE1; + + // Rounds 0-3 + MSG = _mm_loadu_si128(CONST_M128_CAST(data+0)); + TMSG0 = _mm_shuffle_epi8(MSG, MASK); + MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(W64LIT(0xE9B5DBA5B5C0FBCF), W64LIT(0x71374491428A2F98))); + STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); + MSG = _mm_shuffle_epi32(MSG, 0x0E); + STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); + + // Rounds 4-7 + TMSG1 = _mm_loadu_si128(CONST_M128_CAST(data+4)); + TMSG1 = _mm_shuffle_epi8(TMSG1, MASK); + MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(W64LIT(0xAB1C5ED5923F82A4), W64LIT(0x59F111F13956C25B))); + STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); + MSG = _mm_shuffle_epi32(MSG, 0x0E); + STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); + TMSG0 = _mm_sha256msg1_epu32(TMSG0, TMSG1); + + // Rounds 8-11 + TMSG2 = _mm_loadu_si128(CONST_M128_CAST(data+8)); + TMSG2 = _mm_shuffle_epi8(TMSG2, MASK); + MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(W64LIT(0x550C7DC3243185BE), W64LIT(0x12835B01D807AA98))); + STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); + MSG = _mm_shuffle_epi32(MSG, 0x0E); + STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); + TMSG1 = _mm_sha256msg1_epu32(TMSG1, TMSG2); + + // Rounds 12-15 + TMSG3 = _mm_loadu_si128(CONST_M128_CAST(data+12)); + TMSG3 = _mm_shuffle_epi8(TMSG3, MASK); + MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(W64LIT(0xC19BF1749BDC06A7), W64LIT(0x80DEB1FE72BE5D74))); + STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); + TMP = _mm_alignr_epi8(TMSG3, TMSG2, 4); + TMSG0 = _mm_add_epi32(TMSG0, TMP); + TMSG0 = _mm_sha256msg2_epu32(TMSG0, TMSG3); + MSG = _mm_shuffle_epi32(MSG, 0x0E); + STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); + TMSG2 = _mm_sha256msg1_epu32(TMSG2, TMSG3); + + // Rounds 16-19 + MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(W64LIT(0x240CA1CC0FC19DC6), W64LIT(0xEFBE4786E49B69C1))); + STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); + TMP = _mm_alignr_epi8(TMSG0, TMSG3, 4); + TMSG1 = _mm_add_epi32(TMSG1, TMP); + TMSG1 = _mm_sha256msg2_epu32(TMSG1, TMSG0); + MSG = _mm_shuffle_epi32(MSG, 0x0E); + STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); + TMSG3 = _mm_sha256msg1_epu32(TMSG3, TMSG0); + + // Rounds 20-23 + MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(W64LIT(0x76F988DA5CB0A9DC), W64LIT(0x4A7484AA2DE92C6F))); + STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); + TMP = _mm_alignr_epi8(TMSG1, TMSG0, 4); + TMSG2 = _mm_add_epi32(TMSG2, TMP); + TMSG2 = _mm_sha256msg2_epu32(TMSG2, TMSG1); + MSG = _mm_shuffle_epi32(MSG, 0x0E); + STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); + TMSG0 = _mm_sha256msg1_epu32(TMSG0, TMSG1); + + // Rounds 24-27 + MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(W64LIT(0xBF597FC7B00327C8), W64LIT(0xA831C66D983E5152))); + STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); + TMP = _mm_alignr_epi8(TMSG2, TMSG1, 4); + TMSG3 = _mm_add_epi32(TMSG3, TMP); + TMSG3 = _mm_sha256msg2_epu32(TMSG3, TMSG2); + MSG = _mm_shuffle_epi32(MSG, 0x0E); + STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); + TMSG1 = _mm_sha256msg1_epu32(TMSG1, TMSG2); + + // Rounds 28-31 + MSG = _mm_add_epi32(TMSG3, 
_mm_set_epi64x(W64LIT(0x1429296706CA6351), W64LIT(0xD5A79147C6E00BF3))); + STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); + TMP = _mm_alignr_epi8(TMSG3, TMSG2, 4); + TMSG0 = _mm_add_epi32(TMSG0, TMP); + TMSG0 = _mm_sha256msg2_epu32(TMSG0, TMSG3); + MSG = _mm_shuffle_epi32(MSG, 0x0E); + STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); + TMSG2 = _mm_sha256msg1_epu32(TMSG2, TMSG3); + + // Rounds 32-35 + MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(W64LIT(0x53380D134D2C6DFC), W64LIT(0x2E1B213827B70A85))); + STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); + TMP = _mm_alignr_epi8(TMSG0, TMSG3, 4); + TMSG1 = _mm_add_epi32(TMSG1, TMP); + TMSG1 = _mm_sha256msg2_epu32(TMSG1, TMSG0); + MSG = _mm_shuffle_epi32(MSG, 0x0E); + STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); + TMSG3 = _mm_sha256msg1_epu32(TMSG3, TMSG0); + + // Rounds 36-39 + MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(W64LIT(0x92722C8581C2C92E), W64LIT(0x766A0ABB650A7354))); + STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); + TMP = _mm_alignr_epi8(TMSG1, TMSG0, 4); + TMSG2 = _mm_add_epi32(TMSG2, TMP); + TMSG2 = _mm_sha256msg2_epu32(TMSG2, TMSG1); + MSG = _mm_shuffle_epi32(MSG, 0x0E); + STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); + TMSG0 = _mm_sha256msg1_epu32(TMSG0, TMSG1); + + // Rounds 40-43 + MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(W64LIT(0xC76C51A3C24B8B70), W64LIT(0xA81A664BA2BFE8A1))); + STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); + TMP = _mm_alignr_epi8(TMSG2, TMSG1, 4); + TMSG3 = _mm_add_epi32(TMSG3, TMP); + TMSG3 = _mm_sha256msg2_epu32(TMSG3, TMSG2); + MSG = _mm_shuffle_epi32(MSG, 0x0E); + STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); + TMSG1 = _mm_sha256msg1_epu32(TMSG1, TMSG2); + + // Rounds 44-47 + MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(W64LIT(0x106AA070F40E3585), W64LIT(0xD6990624D192E819))); + STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); + TMP = _mm_alignr_epi8(TMSG3, TMSG2, 4); + TMSG0 = _mm_add_epi32(TMSG0, TMP); + TMSG0 = _mm_sha256msg2_epu32(TMSG0, TMSG3); + MSG = _mm_shuffle_epi32(MSG, 0x0E); + STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); + TMSG2 = _mm_sha256msg1_epu32(TMSG2, TMSG3); + + // Rounds 48-51 + MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(W64LIT(0x34B0BCB52748774C), W64LIT(0x1E376C0819A4C116))); + STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); + TMP = _mm_alignr_epi8(TMSG0, TMSG3, 4); + TMSG1 = _mm_add_epi32(TMSG1, TMP); + TMSG1 = _mm_sha256msg2_epu32(TMSG1, TMSG0); + MSG = _mm_shuffle_epi32(MSG, 0x0E); + STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); + TMSG3 = _mm_sha256msg1_epu32(TMSG3, TMSG0); + + // Rounds 52-55 + MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(W64LIT(0x682E6FF35B9CCA4F), W64LIT(0x4ED8AA4A391C0CB3))); + STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); + TMP = _mm_alignr_epi8(TMSG1, TMSG0, 4); + TMSG2 = _mm_add_epi32(TMSG2, TMP); + TMSG2 = _mm_sha256msg2_epu32(TMSG2, TMSG1); + MSG = _mm_shuffle_epi32(MSG, 0x0E); + STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); + + // Rounds 56-59 + MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(W64LIT(0x8CC7020884C87814), W64LIT(0x78A5636F748F82EE))); + STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG); + TMP = _mm_alignr_epi8(TMSG2, TMSG1, 4); + TMSG3 = _mm_add_epi32(TMSG3, TMP); + TMSG3 = _mm_sha256msg2_epu32(TMSG3, TMSG2); + MSG = _mm_shuffle_epi32(MSG, 0x0E); + STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); + + // Rounds 60-63 + MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(W64LIT(0xC67178F2BEF9A3F7), W64LIT(0xA4506CEB90BEFFFA))); + STATE1 = _mm_sha256rnds2_epu32(STATE1, 
STATE0, MSG); + MSG = _mm_shuffle_epi32(MSG, 0x0E); + STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG); + + // Add values back to state + STATE0 = _mm_add_epi32(STATE0, ABEF_SAVE); + STATE1 = _mm_add_epi32(STATE1, CDGH_SAVE); + + data += SHA256::BLOCKSIZE/sizeof(word32); + length -= SHA256::BLOCKSIZE; + } + + TMP = _mm_shuffle_epi32(STATE0, 0x1B); // FEBA + STATE1 = _mm_shuffle_epi32(STATE1, 0xB1); // DCHG + STATE0 = _mm_blend_epi16(TMP, STATE1, 0xF0); // DCBA + STATE1 = _mm_alignr_epi8(STATE1, TMP, 8); // ABEF + + // Save state + _mm_storeu_si128(M128_CAST(&state[0]), STATE0); + _mm_storeu_si128(M128_CAST(&state[4]), STATE1); } +ANONYMOUS_NAMESPACE_END + +#endif // CRYPTOPP_BOOL_SSE_SHA_INTRINSICS_AVAILABLE + +///////////////////////////////// +// end of Walton/Gulley's code // +///////////////////////////////// + +///////////////////////////////////////////////////////// +// start of Walton/Schneiders/O'Rourke/Hovsmith's code // +///////////////////////////////////////////////////////// + +#if CRYPTOPP_BOOL_ARM_CRYPTO_INTRINSICS_AVAILABLE + +ANONYMOUS_NAMESPACE_BEGIN + +void SHA256_ARM_SHA_HashMultipleBlocks(word32 *state, const word32 *data, size_t length, ByteOrder order) +{ + CRYPTOPP_ASSERT(state); + CRYPTOPP_ASSERT(data); + CRYPTOPP_ASSERT(length >= SHA256::BLOCKSIZE); + + uint32x4_t STATE0, STATE1, ABEF_SAVE, CDGH_SAVE; + uint32x4_t MSG0, MSG1, MSG2, MSG3; + uint32x4_t TMP0, TMP1, TMP2; + + // Load initial values + STATE0 = vld1q_u32(&state[0]); + STATE1 = vld1q_u32(&state[4]); + + while (length >= SHA256::BLOCKSIZE) + { + // Save current hash + ABEF_SAVE = STATE0; + CDGH_SAVE = STATE1; + + // Load message + MSG0 = vld1q_u32(data + 0); + MSG1 = vld1q_u32(data + 4); + MSG2 = vld1q_u32(data + 8); + MSG3 = vld1q_u32(data + 12); + + if (order == BIG_ENDIAN_ORDER) // Data arrangement + { + MSG0 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG0))); + MSG1 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG1))); + MSG2 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG2))); + MSG3 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG3))); + } + + TMP0 = vaddq_u32(MSG0, vld1q_u32(&SHA256_K[0x00])); + + // Rounds 0-3 + MSG0 = vsha256su0q_u32(MSG0, MSG1); + TMP2 = STATE0; + TMP1 = vaddq_u32(MSG1, vld1q_u32(&SHA256_K[0x04])); + STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); + STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); + MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);; + + // Rounds 4-7 + MSG1 = vsha256su0q_u32(MSG1, MSG2); + TMP2 = STATE0; + TMP0 = vaddq_u32(MSG2, vld1q_u32(&SHA256_K[0x08])); + STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1); + STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1); + MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);; + + // Rounds 8-11 + MSG2 = vsha256su0q_u32(MSG2, MSG3); + TMP2 = STATE0; + TMP1 = vaddq_u32(MSG3, vld1q_u32(&SHA256_K[0x0c])); + STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); + STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); + MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);; + + // Rounds 12-15 + MSG3 = vsha256su0q_u32(MSG3, MSG0); + TMP2 = STATE0; + TMP0 = vaddq_u32(MSG0, vld1q_u32(&SHA256_K[0x10])); + STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1); + STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1); + MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);; + + // Rounds 16-19 + MSG0 = vsha256su0q_u32(MSG0, MSG1); + TMP2 = STATE0; + TMP1 = vaddq_u32(MSG1, vld1q_u32(&SHA256_K[0x14])); + STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); + STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); + MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);; + + // Rounds 20-23 + MSG1 = 
vsha256su0q_u32(MSG1, MSG2); + TMP2 = STATE0; + TMP0 = vaddq_u32(MSG2, vld1q_u32(&SHA256_K[0x18])); + STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1); + STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1); + MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);; + + // Rounds 24-27 + MSG2 = vsha256su0q_u32(MSG2, MSG3); + TMP2 = STATE0; + TMP1 = vaddq_u32(MSG3, vld1q_u32(&SHA256_K[0x1c])); + STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); + STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); + MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);; + + // Rounds 28-31 + MSG3 = vsha256su0q_u32(MSG3, MSG0); + TMP2 = STATE0; + TMP0 = vaddq_u32(MSG0, vld1q_u32(&SHA256_K[0x20])); + STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1); + STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1); + MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);; + + // Rounds 32-35 + MSG0 = vsha256su0q_u32(MSG0, MSG1); + TMP2 = STATE0; + TMP1 = vaddq_u32(MSG1, vld1q_u32(&SHA256_K[0x24])); + STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); + STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); + MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);; + + // Rounds 36-39 + MSG1 = vsha256su0q_u32(MSG1, MSG2); + TMP2 = STATE0; + TMP0 = vaddq_u32(MSG2, vld1q_u32(&SHA256_K[0x28])); + STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1); + STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1); + MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);; + + // Rounds 40-43 + MSG2 = vsha256su0q_u32(MSG2, MSG3); + TMP2 = STATE0; + TMP1 = vaddq_u32(MSG3, vld1q_u32(&SHA256_K[0x2c])); + STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); + STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0); + MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);; + + // Rounds 44-47 + MSG3 = vsha256su0q_u32(MSG3, MSG0); + TMP2 = STATE0; + TMP0 = vaddq_u32(MSG0, vld1q_u32(&SHA256_K[0x30])); + STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1); + STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1); + MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);; + + // Rounds 48-51 + TMP2 = STATE0; + TMP1 = vaddq_u32(MSG1, vld1q_u32(&SHA256_K[0x34])); + STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); + STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);; + + // Rounds 52-55 + TMP2 = STATE0; + TMP0 = vaddq_u32(MSG2, vld1q_u32(&SHA256_K[0x38])); + STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1); + STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);; + + // Rounds 56-59 + TMP2 = STATE0; + TMP1 = vaddq_u32(MSG3, vld1q_u32(&SHA256_K[0x3c])); + STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0); + STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);; + + // Rounds 60-63 + TMP2 = STATE0; + STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1); + STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);; + + // Add back to state + STATE0 = vaddq_u32(STATE0, ABEF_SAVE); + STATE1 = vaddq_u32(STATE1, CDGH_SAVE); + + data += SHA256::BLOCKSIZE/sizeof(word32); + length -= SHA256::BLOCKSIZE; + } + + // Save state + vst1q_u32(&state[0], STATE0); + vst1q_u32(&state[4], STATE1); +} + +ANONYMOUS_NAMESPACE_END + +#endif // CRYPTOPP_BOOL_ARM_CRYPTO_INTRINSICS_AVAILABLE + +/////////////////////////////////////////////////////// +// end of Walton/Schneiders/O'Rourke/Hovsmith's code // +/////////////////////////////////////////////////////// + void SHA256::Transform(word32 *state, const word32 *data) { - const pfnSHATransform s_pfn = InitializeSHA256Transform(); - s_pfn(state, data); + CRYPTOPP_ASSERT(state); + CRYPTOPP_ASSERT(data); + +#if CRYPTOPP_BOOL_SSE_SHA_INTRINSICS_AVAILABLE + if (HasSHA()) + { + SHA256_SHANI_HashMultipleBlocks(state, data, SHA256::BLOCKSIZE, LITTLE_ENDIAN_ORDER); + return; + } +#endif +#if CRYPTOPP_BOOL_ARM_CRYPTO_INTRINSICS_AVAILABLE + if (HasSHA2()) + { + 
SHA256_ARM_SHA_HashMultipleBlocks(state, data, SHA256::BLOCKSIZE, LITTLE_ENDIAN_ORDER); + return; + } +#endif + + SHA256_CXX_HashBlock(state, data); +} + +size_t SHA256::HashMultipleBlocks(const word32 *input, size_t length) +{ + CRYPTOPP_ASSERT(input); + CRYPTOPP_ASSERT(length >= SHA256::BLOCKSIZE); + +#if CRYPTOPP_BOOL_SSE_SHA_INTRINSICS_AVAILABLE + if (HasSHA()) + { + SHA256_SHANI_HashMultipleBlocks(m_state, input, length, BIG_ENDIAN_ORDER); + return length & (SHA256::BLOCKSIZE - 1); + } +#endif +#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE + if (HasSSE2()) + { + const size_t res = length & (SHA256::BLOCKSIZE - 1); + SHA256_SSE_HashMultipleBlocks(m_state, input, length-res); + return res; + } +#endif +#if CRYPTOPP_ARM_SHA_AVAILABLE + if (HasSHA2()) + { + SHA256_ARM_SHA_HashMultipleBlocks(m_state, input, length, BIG_ENDIAN_ORDER); + return length & (SHA256::BLOCKSIZE - 1); + } +#endif + + const bool noReverse = NativeByteOrderIs(this->GetByteOrder()); + word32 *dataBuf = this->DataBuf(); + do + { + if (noReverse) + { + SHA256_CXX_HashBlock(m_state, input); + } + else + { + ByteReverse(dataBuf, input, SHA256::BLOCKSIZE); + SHA256_CXX_HashBlock(m_state, dataBuf); + } + + input += SHA256::BLOCKSIZE/sizeof(word32); + length -= SHA256::BLOCKSIZE; + } + while (length >= SHA256::BLOCKSIZE); + return length; +} + +size_t SHA224::HashMultipleBlocks(const word32 *input, size_t length) +{ + CRYPTOPP_ASSERT(input); + CRYPTOPP_ASSERT(length >= SHA256::BLOCKSIZE); + +#if CRYPTOPP_BOOL_SSE_SHA_INTRINSICS_AVAILABLE + if (HasSHA()) + { + SHA256_SHANI_HashMultipleBlocks(m_state, input, length, BIG_ENDIAN_ORDER); + return length & (SHA256::BLOCKSIZE - 1); + } +#endif +#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE + if (HasSSE2()) + { + const size_t res = length & (SHA256::BLOCKSIZE - 1); + SHA256_SSE_HashMultipleBlocks(m_state, input, length-res); + return res; + } +#endif +#if CRYPTOPP_BOOL_ARM_CRYPTO_INTRINSICS_AVAILABLE + if (HasSHA2()) + { + SHA256_ARM_SHA_HashMultipleBlocks(m_state, input, length, BIG_ENDIAN_ORDER); + return length & (SHA256::BLOCKSIZE - 1); + } +#endif + + const bool noReverse = NativeByteOrderIs(this->GetByteOrder()); + word32 *dataBuf = this->DataBuf(); + do + { + if (noReverse) + { + SHA256_CXX_HashBlock(m_state, input); + } + else + { + ByteReverse(dataBuf, input, SHA256::BLOCKSIZE); + SHA256_CXX_HashBlock(m_state, dataBuf); + } + + input += SHA256::BLOCKSIZE/sizeof(word32); + length -= SHA256::BLOCKSIZE; + } + while (length >= SHA256::BLOCKSIZE); + return length; } // ************************************************************* @@ -767,11 +1590,8 @@ void SHA512::InitState(HashWordType *state) memcpy(state, s, sizeof(s)); } -#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE && (CRYPTOPP_BOOL_X86 || CRYPTOPP_BOOL_X32) -CRYPTOPP_ALIGN_DATA(16) static const word64 SHA512_K[80] CRYPTOPP_SECTION_ALIGN16 = { -#else -CRYPTOPP_ALIGN_DATA(16) static const word64 SHA512_K[80] CRYPTOPP_SECTION_ALIGN16 = { -#endif +CRYPTOPP_ALIGN_DATA(16) +static const word64 SHA512_K[80] CRYPTOPP_SECTION_ALIGN16 = { W64LIT(0x428a2f98d728ae22), W64LIT(0x7137449123ef65cd), W64LIT(0xb5c0fbcfec4d3b2f), W64LIT(0xe9b5dba58189dbbc), W64LIT(0x3956c25bf348b538), W64LIT(0x59f111f1b605d019), @@ -815,8 +1635,10 @@ CRYPTOPP_ALIGN_DATA(16) static const word64 SHA512_K[80] CRYPTOPP_SECTION_ALIGN1 }; #if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE && (CRYPTOPP_BOOL_X86 || CRYPTOPP_BOOL_X32) -// put assembly version in separate function, otherwise MSVC 2005 SP1 doesn't generate correct code for the non-assembly version -CRYPTOPP_NAKED static void 
CRYPTOPP_FASTCALL SHA512_SSE2_Transform(word64 *state, const word64 *data) + +ANONYMOUS_NAMESPACE_BEGIN + +CRYPTOPP_NAKED void CRYPTOPP_FASTCALL SHA512_SSE2_Transform(word64 *state, const word64 *data) { #ifdef __GNUC__ __asm__ __volatile__ @@ -1009,28 +1831,25 @@ CRYPTOPP_NAKED static void CRYPTOPP_FASTCALL SHA512_SSE2_Transform(word64 *state AS1( ret) #endif } -#endif // #if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE -void SHA512::Transform(word64 *state, const word64 *data) -{ - CRYPTOPP_ASSERT(IsAlignedOn(state, GetAlignmentOf())); - CRYPTOPP_ASSERT(IsAlignedOn(data, GetAlignmentOf())); +ANONYMOUS_NAMESPACE_END -#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE && (CRYPTOPP_BOOL_X86 || CRYPTOPP_BOOL_X32) - if (HasSSE2()) - { - SHA512_SSE2_Transform(state, data); - return; - } -#endif +#endif // CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE + +ANONYMOUS_NAMESPACE_BEGIN #define S0(x) (rotrFixed(x,28)^rotrFixed(x,34)^rotrFixed(x,39)) #define S1(x) (rotrFixed(x,14)^rotrFixed(x,18)^rotrFixed(x,41)) #define s0(x) (rotrFixed(x,1)^rotrFixed(x,8)^(x>>7)) #define s1(x) (rotrFixed(x,19)^rotrFixed(x,61)^(x>>6)) -#define R(i) h(i)+=S1(e(i))+Ch(e(i),f(i),g(i))+SHA512_K[i+j]+(j?blk2(i):blk0(i));\ - d(i)+=h(i);h(i)+=S0(a(i))+Maj(a(i),b(i),c(i)) +#define R(i) h(i)+=S1(e(i))+Ch(e(i),f(i),g(i))+SHA512_K[i+j]+\ + (j?blk2(i):blk0(i));d(i)+=h(i);h(i)+=S0(a(i))+Maj(a(i),b(i),c(i)) + +void SHA512_CXX_HashBlock(word64 *state, const word64 *data) +{ + CRYPTOPP_ASSERT(state); + CRYPTOPP_ASSERT(data); word64 W[16]; word64 T[8]; @@ -1055,7 +1874,25 @@ void SHA512::Transform(word64 *state, const word64 *data) state[7] += h(0); } +ANONYMOUS_NAMESPACE_END + +void SHA512::Transform(word64 *state, const word64 *data) +{ + CRYPTOPP_ASSERT(state); + CRYPTOPP_ASSERT(data); + +#if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE && (CRYPTOPP_BOOL_X86 || CRYPTOPP_BOOL_X32) + if (HasSSE2()) + { + SHA512_SSE2_Transform(state, data); + return; + } +#endif + + SHA512_CXX_HashBlock(state, data); +} + NAMESPACE_END -#endif // #ifndef CRYPTOPP_GENERATE_X64_MASM -#endif // #ifndef CRYPTOPP_IMPORTS +#endif // Not CRYPTOPP_GENERATE_X64_MASM +#endif // Not CRYPTOPP_IMPORTS diff --git a/sha.h b/sha.h index ced23254..0b2db7f3 100644 --- a/sha.h +++ b/sha.h @@ -2,7 +2,8 @@ //! \file sha.h //! \brief Classes for SHA-1 and SHA-2 family of message digests -//! \since SHA1 since Crypto++ 1.0, SHA2 since Crypto++ 4.0, Intel SHA extensions since Crypto++ 6.0 +//! \since SHA1 since Crypto++ 1.0, SHA2 since Crypto++ 4.0, +//! ARM SHA since Crypto++ 6.0, Intel SHA since Crypto++ 6.0 #ifndef CRYPTOPP_SHA_H #define CRYPTOPP_SHA_H @@ -38,21 +39,20 @@ public: //! \param digest the state of the hash //! \param data the data to be digested //! \details Transform operates the hash on data. When the call is invoked - //! digest holds initial state. Upon return digest holds the hash or - //! updated state. + //! digest holds initial state. Upon return digest holds the hash + //! or updated state. //! \details Hashes which derive from IteratedHashWithStaticTransform provide static //! member functions InitState and Transform. External classes, like SEAL and MDC, //! can initialize state with a user provided key and operate the hash on the data //! with the user supplied state. //! \note On Intel platforms the state array and data must be 16-byte aligned for SSE2. - static void CRYPTOPP_API Transform(word32 *digest, const word32 *data); + static void CRYPTOPP_API Transform(HashWordType *digest, const HashWordType *data); //! \brief The algorithm name //! 
\returns C-style string "SHA-1" CRYPTOPP_STATIC_CONSTEXPR const char* CRYPTOPP_API StaticAlgorithmName() {return "SHA-1";} -#if CRYPTOPP_SHANI_AVAILABLE - size_t HashMultipleBlocks(const word32 *input, size_t length); -#endif +protected: + size_t HashMultipleBlocks(const HashWordType *input, size_t length); }; //! \class SHA256 @@ -75,21 +75,20 @@ public: //! \param digest the state of the hash //! \param data the data to be digested //! \details Transform operates the hash on data. When the call is invoked - //! digest holds initial state. Upon return digest holds the hash or - //! updated state. + //! digest holds initial state. Upon return digest holds the hash + //! or updated state. //! \details Hashes which derive from IteratedHashWithStaticTransform provide static //! member functions InitState and Transform. External classes, like SEAL and MDC, //! can initialize state with a user provided key and operate the hash on the data //! with the user supplied state. //! \note On Intel platforms the state array and data must be 16-byte aligned for SSE2. - static void CRYPTOPP_API Transform(word32 *digest, const word32 *data); + static void CRYPTOPP_API Transform(HashWordType *digest, const HashWordType *data); //! \brief The algorithm name //! \returns C-style string "SHA-256" CRYPTOPP_STATIC_CONSTEXPR const char* CRYPTOPP_API StaticAlgorithmName() {return "SHA-256";} -#if (defined(CRYPTOPP_X86_ASM_AVAILABLE) || defined(CRYPTOPP_X32_ASM_AVAILABLE) || defined(CRYPTOPP_X64_MASM_AVAILABLE)) && !defined(CRYPTOPP_DISABLE_SHA_ASM) - size_t HashMultipleBlocks(const word32 *input, size_t length); -#endif +protected: + size_t HashMultipleBlocks(const HashWordType *input, size_t length); }; //! \class SHA224 @@ -112,21 +111,20 @@ public: //! \param digest the state of the hash //! \param data the data to be digested //! \details Transform operates the hash on data. When the call is invoked - //! digest holds initial state. Upon return digest holds the hash or - //! updated state. + //! digest holds initial state. Upon return digest holds the hash + //! or updated state. //! \details Hashes which derive from IteratedHashWithStaticTransform provide static //! member functions InitState and Transform. External classes, like SEAL and MDC, //! can initialize state with a user provided key and operate the hash on the data //! with the user supplied state. //! \note On Intel platforms the state array and data must be 16-byte aligned for SSE2. - static void CRYPTOPP_API Transform(word32 *digest, const word32 *data) {SHA256::Transform(digest, data);} + static void CRYPTOPP_API Transform(HashWordType *digest, const HashWordType *data) {SHA256::Transform(digest, data);} //! \brief The algorithm name //! \returns C-style string "SHA-224" CRYPTOPP_STATIC_CONSTEXPR const char* CRYPTOPP_API StaticAlgorithmName() {return "SHA-224";} -#if (defined(CRYPTOPP_X86_ASM_AVAILABLE) || defined(CRYPTOPP_X32_ASM_AVAILABLE) || defined(CRYPTOPP_X64_MASM_AVAILABLE)) && !defined(CRYPTOPP_DISABLE_SHA_ASM) - size_t HashMultipleBlocks(const word32 *input, size_t length); -#endif +protected: + size_t HashMultipleBlocks(const HashWordType *input, size_t length); }; //! \class SHA512 @@ -149,14 +147,14 @@ public: //! \param digest the state of the hash //! \param data the data to be digested //! \details Transform operates the hash on data. When the call is invoked - //! digest holds initial state. Upon return digest holds the hash or - //! updated state. + //! digest holds initial state. Upon return digest holds the hash + //! 
or updated state. //! \details Hashes which derive from IteratedHashWithStaticTransform provide static //! member functions InitState and Transform. External classes, like SEAL and MDC, //! can initialize state with a user provided key and operate the hash on the data //! with the user supplied state. //! \note On Intel platforms the state array and data must be 16-byte aligned for SSE2. - static void CRYPTOPP_API Transform(word64 *digest, const word64 *data); + static void CRYPTOPP_API Transform(HashWordType *digest, const HashWordType *data); //! \brief The algorithm name //! \returns C-style string "SHA-512" CRYPTOPP_STATIC_CONSTEXPR const char* CRYPTOPP_API StaticAlgorithmName() {return "SHA-512";} @@ -182,14 +180,14 @@ public: //! \param digest the state of the hash //! \param data the data to be digested //! \details Transform operates the hash on data. When the call is invoked - //! digest holds initial state. Upon return digest holds the hash or - //! updated state. + //! digest holds initial state. Upon return digest holds the hash + //! or updated state. //! \details Hashes which derive from IteratedHashWithStaticTransform provide static //! member functions InitState and Transform. External classes, like SEAL and MDC, //! can initialize state with a user provided key and operate the hash on the data //! with the user supplied state. //! \note On Intel platforms the state array and data must be 16-byte aligned for SSE2. - static void CRYPTOPP_API Transform(word64 *digest, const word64 *data) {SHA512::Transform(digest, data);} + static void CRYPTOPP_API Transform(HashWordType *digest, const HashWordType *data) {SHA512::Transform(digest, data);} //! \brief The algorithm name //! \returns C-style string "SHA-384" CRYPTOPP_STATIC_CONSTEXPR const char* CRYPTOPP_API StaticAlgorithmName() {return "SHA-384";} diff --git a/x64dll.asm b/x64dll.asm index 3e4373d4..97c9aba3 100644 --- a/x64dll.asm +++ b/x64dll.asm @@ -676,7 +676,7 @@ ret GCM_AuthenticateBlocks_64K ENDP ALIGN 8 -SHA256_HashBlocks_SSE2 PROC FRAME +SHA256_SSE_HashMultipleBlocks PROC FRAME rex_push_reg rsi push_reg rdi push_reg rbx @@ -1962,7 +1962,7 @@ pop rbx pop rdi pop rsi ret -SHA256_HashBlocks_SSE2 ENDP +SHA256_SSE_HashMultipleBlocks ENDP _TEXT ENDS END
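
The rework above changes only the internal dispatch (SHA{N}::HashMultipleBlocks and the free-standing block functions); the public HashTransformation interface is untouched. A minimal sketch of ordinary caller code, not part of the patch itself, which behaves identically before and after the change (it assumes a Crypto++ build with sha.h, filters.h and hex.h on the include path):

    #include "sha.h"
    #include "filters.h"
    #include "hex.h"
    #include <iostream>
    #include <string>

    int main()
    {
        using namespace CryptoPP;

        std::string message = "abc", digest;
        SHA256 hash;

        // StringSource pumps the message through HashFilter, then hex-encodes
        // the binary digest into 'digest'. The SHA-NI/ARMv8/SSE2 paths are
        // selected internally by HashMultipleBlocks when the CPU supports them.
        StringSource ss(message, true,
            new HashFilter(hash,
                new HexEncoder(
                    new StringSink(digest))));

        // Expected: BA7816BF8F01CFEA414140DE5DAE2223B00361A396177A9CB410FF61F20015AD
        std::cout << "SHA-256(abc): " << digest << std::endl;
        return 0;
    }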
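The sha.h comments note that external classes such as SEAL and MDC drive the static Transform with caller-supplied state, which is why seal.cpp above can drop its SHA-NI special case and simply call SHA1::Transform. A hedged sketch of that pattern using SHA-256 (illustrative values only; it assumes the public InitState/Transform signatures shown in sha.h):

    #include "sha.h"

    int main()
    {
        using namespace CryptoPP;

        // One fully padded 64-byte block for the message "abc", expressed as
        // the 16 big-endian message words that Transform expects from callers.
        word32 data[16] = {0};
        data[0]  = 0x61626380;   // 'a' 'b' 'c' followed by the 0x80 padding byte
        data[15] = 24;           // message length in bits (FIPS 180 padding)

        // Caller-owned state; SEAL and MDC seed this with key material instead.
        word32 state[8];
        SHA256::InitState(state);        // standard SHA-256 IV
        SHA256::Transform(state, data);  // one compression-function application

        // state[0] should now be 0xBA7816BF, the first word of SHA-256("abc").
        // SEAL/MDC keep iterating Transform over further caller-prepared blocks.
        return 0;
    }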