From 8338d90c1002bfedc4150bec403a94522f4b665d Mon Sep 17 00:00:00 2001 From: Jeffrey Walton Date: Sat, 29 Jul 2017 23:40:56 -0400 Subject: [PATCH] Add ARIA, BLAKE2 and SHA support for ARMv7, ARMv8 and Intel --- Filelist.txt | 5 +- GNUmakefile | 66 +- aria-simd.cpp | 75 ++ aria.cpp | 62 +- blake2-simd.cpp | 2162 +++++++++++++++++++++++++++++++++++++++++++ blake2.cpp | 2275 ++-------------------------------------------- config.h | 8 +- cpu.cpp | 100 +- cryptest.nmake | 4 +- cryptest.sh | 216 ++--- cryptlib.vcxproj | 2 + gcm.cpp | 10 +- neon.cpp | 102 +++ rdrand-masm.cmd | 0 sha-simd.cpp | 2 +- 15 files changed, 2596 insertions(+), 2493 deletions(-) mode change 100755 => 100644 GNUmakefile create mode 100644 aria-simd.cpp create mode 100644 blake2-simd.cpp create mode 100644 neon.cpp mode change 100755 => 100644 rdrand-masm.cmd diff --git a/Filelist.txt b/Filelist.txt index 85f58cf0..8eb578ed 100644 --- a/Filelist.txt +++ b/Filelist.txt @@ -11,6 +11,7 @@ algparam.h arc4.cpp arc4.h aria.cpp +aria-simd.cpp aria.h argnames.h asn.cpp @@ -29,6 +30,7 @@ bench1.cpp bench2.cpp bfinit.cpp blake2.cpp +blake2-simd.cpp blake2.h blowfish.cpp blowfish.h @@ -178,6 +180,7 @@ mqv.cpp mqv.h nbtheory.cpp nbtheory.h +neon.cpp network.cpp network.h nr.h @@ -248,8 +251,8 @@ seed.h serpent.cpp serpent.h serpentp.h -sha-simd.cpp sha.cpp +sha-simd.cpp sha.h sha3.cpp sha3.h diff --git a/GNUmakefile b/GNUmakefile old mode 100755 new mode 100644 index 984e0beb..ae781361 --- a/GNUmakefile +++ b/GNUmakefile @@ -2,7 +2,7 @@ ##### System Attributes and Programs ##### ########################################################### -TEMPDIR ?= /tmp +TMPDIR ?= /tmp AR ?= ar ARFLAGS ?= -cr # ar needs the dash on OpenBSD @@ -24,6 +24,7 @@ IS_PPC := $(shell uname -m | $(EGREP) -i -c "ppc|power") IS_ARM32 := $(shell uname -m | $(EGREP) -v "arm64" | $(EGREP) -i -c "arm") IS_ARM64 := $(shell uname -m | $(EGREP) -i -c "aarch64") IS_ARMV8 ?= $(shell uname -m | $(EGREP) -i -c 'aarch32|aarch64') +IS_NEON ?= $(shell uname -m | $(EGREP) -i -c 'armv7|armv8|aarch32|aarch64') IS_SPARC := $(shell uname -m | $(EGREP) -i -c "sparc") IS_SPARC64 := $(shell uname -m | $(EGREP) -i -c "sparc64") @@ -194,14 +195,16 @@ endif # -DCRYPTOPP_DISABLE_SSSE3 endif # -DCRYPTOPP_DISABLE_ASM endif # CXXFLAGS -HAS_CRC := $(shell $(CXX) $(CXXFLAGS) -msse4.2 -o $(TEMPDIR)/t.o -c crc-simd.cpp; echo $$?) -ifeq ($(HAS_CRC),0) -CRC_FLAG := -msse4.2 +ifeq ($(findstring -DCRYPTOPP_DISABLE_SSE4,$(CXXFLAGS)),) +SSE42_FLAG = $(shell echo | $(CXX) $(CXXFLAGS) -msse4.2 -dM -E - | grep -i -c -q __SSE4_2__ && echo "-msse4.2") +ifeq ($(findstring -DCRYPTOPP_DISABLE_AESNI,$(CXXFLAGS)),) +AES_FLAG = $(shell echo | $(CXX) $(CXXFLAGS) -maes -dM -E - | grep -i -c -q __AES__ && echo "-maes") +ifeq ($(findstring -DCRYPTOPP_DISABLE_SHA,$(CXXFLAGS)),) +SHA_FLAG = $(shell echo | $(CXX) $(CXXFLAGS) -msse4.2 -msha -dM -E - | grep -i -c -q __SHA__ && echo "-msse4.2 -msha") +BLAKE2_FLAG = $(SSE42_FLAG) +CRC_FLAG = $(SSE42_FLAG) +endif endif - -HAS_SHA := $(shell $(CXX) $(CXXFLAGS) -msse4.2 -msha -o $(TEMPDIR)/t.o -c sha.cpp; echo $$?) 
-ifeq ($(HAS_SHA),0)
-SHA_FLAG := -msse4.2 -msha
 endif

# BEGIN_NATIVE_ARCH
@@ -293,15 +296,23 @@ CXXFLAGS += -pipe
 endif
 endif
 
+ifeq ($(IS_NEON),1)
+  NEON_FLAG = $(shell echo | $(CXX) $(CXXFLAGS) -mfpu=neon -dM -E - | grep -i -c -q __ARM_NEON && echo "-mfpu=neon")
+  GCM_FLAG = $(NEON_FLAG)
+  ARIA_FLAG = $(NEON_FLAG)
+  BLAKE2_FLAG = $(NEON_FLAG)
+endif
+
 ifeq ($(IS_ARMV8),1)
-  HAS_CRC := $(shell $(CXX) $(CXXFLAGS) -march=armv8-a+crc -o $(TEMPDIR)/t.o -c crc-simd.cpp; echo $$?)
-  ifeq ($(HAS_CRC),0)
-    CRC_FLAG := -march=armv8-a+crc
-  endif
-  HAS_SHA := $(shell $(CXX) $(CXXFLAGS) -march=armv8-a+crc -o $(TEMPDIR)/t.o -c sha.cpp; echo $$?)
-  ifeq ($(HAS_SHA),0)
-    SHA_FLAG := -march=armv8-a+crypto
-  endif
+  ARMV8A_FLAG = $(shell echo | $(CXX) $(CXXFLAGS) -march=armv8-a -dM -E - | grep -i -c -q __ARM_NEON && echo "-march=armv8-a")
+  CRC_FLAG = $(shell echo | $(CXX) $(CXXFLAGS) -march=armv8-a+crc -dM -E - | grep -i -c -q __ARM_FEATURE_CRC32 && echo "-march=armv8-a+crc")
+  AES_FLAG = $(shell echo | $(CXX) $(CXXFLAGS) -march=armv8-a+crypto -dM -E - | grep -i -c -q __ARM_FEATURE_CRYPTO && echo "-march=armv8-a+crypto")
+  SHA_FLAG = $(shell echo | $(CXX) $(CXXFLAGS) -march=armv8-a+crypto -dM -E - | grep -i -c -q __ARM_FEATURE_CRYPTO && echo "-march=armv8-a+crypto")
+  GCM_FLAG = $(ARMV8A_FLAG)
+  ARIA_FLAG = $(ARMV8A_FLAG)
+  BLAKE2_FLAG = $(ARMV8A_FLAG)
+  NEON_FLAG = $(ARMV8A_FLAG)
 endif
 
 endif	# IS_X86
@@ -545,12 +556,13 @@ endif
 endif # Nasm
 
 # List test.cpp first to tame C++ static initialization problems.
-TESTSRCS := adhoc.cpp test.cpp bench1.cpp bench2.cpp validat0.cpp validat1.cpp validat2.cpp validat3.cpp datatest.cpp regtest1.cpp regtest2.cpp regtest3.cpp fipsalgt.cpp dlltest.cpp
+TESTSRCS := adhoc.cpp test.cpp bench1.cpp bench2.cpp validat0.cpp validat1.cpp validat2.cpp validat3.cpp datatest.cpp regtest1.cpp regtest2.cpp regtest3.cpp dlltest.cpp fipsalgt.cpp
 TESTOBJS := $(TESTSRCS:.cpp=.o)
 LIBOBJS := $(filter-out $(TESTOBJS),$(OBJS))
 
 # List cryptlib.cpp first, then cpu.cpp, then integer.cpp to tame C++ static initialization problems.
-DLLSRCS := cryptlib.cpp cpu.cpp integer.cpp shacal2.cpp md5.cpp shark.cpp zinflate.cpp gf2n.cpp salsa.cpp xtr.cpp oaep.cpp poly1305.cpp polynomi.cpp rc2.cpp default.cpp wait.cpp wake.cpp twofish.cpp iterhash.cpp adler32.cpp elgamal.cpp marss.cpp blowfish.cpp ecp.cpp filters.cpp strciphr.cpp camellia.cpp ida.cpp zlib.cpp des.cpp crc.cpp crc-simd.cpp algparam.cpp dessp.cpp tea.cpp eax.cpp network.cpp emsa2.cpp pkcspad.cpp squaretb.cpp idea.cpp authenc.cpp kalyna.cpp threefish.cpp hmac.cpp zdeflate.cpp xtrcrypt.cpp queue.cpp mars.cpp rc5.cpp blake2.cpp hrtimer.cpp eprecomp.cpp hex.cpp dsa.cpp sha.cpp fips140.cpp gzip.cpp seal.cpp files.cpp base32.cpp vmac.cpp tigertab.cpp sharkbox.cpp safer.cpp randpool.cpp esign.cpp arc4.cpp osrng.cpp skipjack.cpp seed.cpp sha3.cpp sosemanuk.cpp bfinit.cpp rabin.cpp 3way.cpp rw.cpp rdrand.cpp rsa.cpp rdtables.cpp gost.cpp socketft.cpp tftables.cpp nbtheory.cpp panama.cpp modes.cpp rijndael.cpp casts.cpp chacha.cpp gfpcrypt.cpp poly1305.cpp dll.cpp ec2n.cpp blumshub.cpp algebra.cpp basecode.cpp base64.cpp cbcmac.cpp rc6.cpp dh2.cpp gf256.cpp mqueue.cpp misc.cpp pssr.cpp channels.cpp tiger.cpp cast.cpp rng.cpp square.cpp asn.cpp whrlpool.cpp md4.cpp dh.cpp ccm.cpp md2.cpp mqv.cpp gf2_32.cpp ttmac.cpp luc.cpp trdlocal.cpp pubkey.cpp gcm.cpp ripemd.cpp eccrypto.cpp serpent.cpp cmac.cpp +DLLSRCS := cryptlib.cpp cpu.cpp integer.cpp 3way.cpp adler32.cpp algebra.cpp algparam.cpp arc4.cpp aria.cpp asn.cpp authenc.cpp base32.cpp base64.cpp basecode.cpp bfinit.cpp blake2-simd.cpp blake2.cpp blowfish.cpp blumshub.cpp camellia.cpp cast.cpp casts.cpp cbcmac.cpp ccm.cpp chacha.cpp channels.cpp cmac.cpp crc-simd.cpp crc.cpp default.cpp des.cpp dessp.cpp dh.cpp dh2.cpp dll.cpp dsa.cpp eax.cpp ec2n.cpp eccrypto.cpp ecp.cpp elgamal.cpp emsa2.cpp eprecomp.cpp esign.cpp files.cpp filters.cpp fips140.cpp fipstest.cpp gcm.cpp gf256.cpp gf2_32.cpp gf2n.cpp gfpcrypt.cpp gost.cpp gzip.cpp hex.cpp hmac.cpp hrtimer.cpp ida.cpp idea.cpp iterhash.cpp kalyna.cpp kalynatab.cpp keccak.cpp luc.cpp mars.cpp marss.cpp md2.cpp md4.cpp md5.cpp misc.cpp modes.cpp mqueue.cpp mqv.cpp nbtheory.cpp neon.cpp network.cpp oaep.cpp ospstore.cpp osrng.cpp panama.cpp pkcspad.cpp poly1305.cpp polynomi.cpp pssr.cpp pubkey.cpp queue.cpp rabin.cpp randpool.cpp rc2.cpp rc5.cpp rc6.cpp rdrand.cpp rdtables.cpp rijndael.cpp ripemd.cpp rng.cpp rsa.cpp rw.cpp safer.cpp salsa.cpp seal.cpp seed.cpp serpent.cpp sha-simd.cpp sha.cpp sha3.cpp shacal2.cpp shark.cpp sharkbox.cpp skipjack.cpp socketft.cpp sosemanuk.cpp square.cpp squaretb.cpp strciphr.cpp tea.cpp tftables.cpp threefish.cpp tiger.cpp tigertab.cpp trdlocal.cpp ttmac.cpp twofish.cpp vmac.cpp wait.cpp wake.cpp whrlpool.cpp xtr.cpp xtrcrypt.cpp zdeflate.cpp zinflate.cpp zlib.cpp + DLLOBJS := $(DLLSRCS:.cpp=.export.o) # Import lib testing @@ -573,8 +585,8 @@ static: libcryptopp.a shared dynamic: libcryptopp.so$(SOLIB_VERSION_SUFFIX) endif -.PHONY: deps -deps GNUmakefile.deps: +.PHONY: dep deps depend +dep deps depend GNUmakefile.deps: $(CXX) $(strip $(CXXFLAGS)) -MM *.cpp > GNUmakefile.deps # CXXFLAGS are tuned earlier. 
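The compile-flag probes in the hunks above replace the old write-a-temp-object test: they preprocess an empty translation unit (`echo | $(CXX) ... -dM -E -`) and grep the predefined-macro dump, so a flag is adopted only when it actually turns on the corresponding feature macro. The sketch below is editorial, not part of the patch; it is a translation unit that reports the same macros at compile time, so building it with each candidate flag shows what the makefile probes key on.

    // feature-probe.cpp - editorial sketch, not part of this patch.
    // Build with a candidate flag (e.g. g++ -msse4.2 feature-probe.cpp, or
    // g++ -march=armv8-a+crypto feature-probe.cpp) and run it to see which
    // feature macros the flag defines; the GNUmakefile greps for the same
    // macros in the preprocessor's -dM -E output.
    #include <cstdio>

    int main()
    {
    #if defined(__SSE4_2__)
        std::printf("__SSE4_2__ -> SSE42_FLAG/CRC_FLAG/BLAKE2_FLAG (-msse4.2)\n");
    #endif
    #if defined(__AES__)
        std::printf("__AES__ -> AES_FLAG (-maes)\n");
    #endif
    #if defined(__SHA__)
        std::printf("__SHA__ -> SHA_FLAG (-msse4.2 -msha)\n");
    #endif
    #if defined(__ARM_NEON)
        std::printf("__ARM_NEON -> NEON_FLAG (-mfpu=neon or -march=armv8-a)\n");
    #endif
    #if defined(__ARM_FEATURE_CRC32)
        std::printf("__ARM_FEATURE_CRC32 -> CRC_FLAG (-march=armv8-a+crc)\n");
    #endif
    #if defined(__ARM_FEATURE_CRYPTO)
        std::printf("__ARM_FEATURE_CRYPTO -> AES_FLAG/SHA_FLAG (-march=armv8-a+crypto)\n");
    #endif
        return 0;
    }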
@@ -841,11 +853,23 @@ rdrand-%.o:
 	./rdrand-nasm.sh
 endif
 
+# NEON or ARMv8a available
+aria-simd.o : aria-simd.cpp
+	$(CXX) $(strip $(CXXFLAGS) $(ARIA_FLAG) -c) $<
+
+# NEON or ARMv8a available
+neon.o : neon.cpp
+	$(CXX) $(strip $(CXXFLAGS) $(NEON_FLAG) -c) $<
+
+# SSE4.2, NEON or ARMv8a available
+blake2-simd.o : blake2-simd.cpp
+	$(CXX) $(strip $(CXXFLAGS) $(BLAKE2_FLAG) -c) $<
+
 # SSE4.2 or ARMv8a available
 crc-simd.o : crc-simd.cpp
 	$(CXX) $(strip $(CXXFLAGS) $(CRC_FLAG) -c) $<
 
-# SSE4.2/SHANI or ARMv8a available
+# SSE4.2/SHA-NI or ARMv8a available
 sha-simd.o : sha-simd.cpp
 	$(CXX) $(strip $(CXXFLAGS) $(SHA_FLAG) -c) $<
 
diff --git a/aria-simd.cpp b/aria-simd.cpp
new file mode 100644
index 00000000..fe50f650
--- /dev/null
+++ b/aria-simd.cpp
@@ -0,0 +1,75 @@
+// aria-simd.cpp - written and placed in the public domain by
+//                 Jeffrey Walton, Uri Blumenthal and Marcel Raad.
+//
+// This source file uses intrinsics to gain access to ARMv7a and
+// ARMv8a NEON instructions. A separate source file is needed
+// because additional CXXFLAGS are required to enable the
+// appropriate instruction sets in some build configurations.
+
+#include "pch.h"
+#include "config.h"
+#include "misc.h"
+
+#if (CRYPTOPP_ARM_NEON_AVAILABLE) && defined(__GNUC__)
+# include <arm_neon.h>
+#endif
+
+NAMESPACE_BEGIN(CryptoPP)
+
+#if (CRYPTOPP_ARM_NEON_AVAILABLE)
+template <unsigned int N>
+inline void ARIA_GSRK_NEON(const uint32x4_t X, const uint32x4_t Y, byte RK[16])
+{
+	static const unsigned int Q1 = (4-(N/32)) % 4;
+	static const unsigned int Q2 = (3-(N/32)) % 4;
+	static const unsigned int R = N % 32;
+
+	vst1q_u32(reinterpret_cast<uint32_t*>(RK),
+		veorq_u32(X, veorq_u32(
+			vshrq_n_u32(vextq_u32(Y, Y, Q1), R),
+			vshlq_n_u32(vextq_u32(Y, Y, Q2), 32-R))));
+}
+
+void ARIA_UncheckedSetKey_Schedule_NEON(byte* rk, word32* ws, unsigned int keylen)
+{
+	const uint32x4_t w0 = vld1q_u32((const uint32_t*)(ws+ 0));
+	const uint32x4_t w1 = vld1q_u32((const uint32_t*)(ws+ 8));
+	const uint32x4_t w2 = vld1q_u32((const uint32_t*)(ws+12));
+	const uint32x4_t w3 = vld1q_u32((const uint32_t*)(ws+16));
+
+	ARIA_GSRK_NEON<19>(w0, w1, rk +   0);
+	ARIA_GSRK_NEON<19>(w1, w2, rk +  16);
+	ARIA_GSRK_NEON<19>(w2, w3, rk +  32);
+	ARIA_GSRK_NEON<19>(w3, w0, rk +  48);
+	ARIA_GSRK_NEON<31>(w0, w1, rk +  64);
+	ARIA_GSRK_NEON<31>(w1, w2, rk +  80);
+	ARIA_GSRK_NEON<31>(w2, w3, rk +  96);
+	ARIA_GSRK_NEON<31>(w3, w0, rk + 112);
+	ARIA_GSRK_NEON<67>(w0, w1, rk + 128);
+	ARIA_GSRK_NEON<67>(w1, w2, rk + 144);
+	ARIA_GSRK_NEON<67>(w2, w3, rk + 160);
+	ARIA_GSRK_NEON<67>(w3, w0, rk + 176);
+	ARIA_GSRK_NEON<97>(w0, w1, rk + 192);
+
+	if (keylen > 16)
+	{
+		ARIA_GSRK_NEON<97>(w1, w2, rk + 208);
+		ARIA_GSRK_NEON<97>(w2, w3, rk + 224);
+
+		if (keylen > 24)
+		{
+			ARIA_GSRK_NEON< 97>(w3, w0, rk + 240);
+			ARIA_GSRK_NEON<109>(w0, w1, rk + 256);
+		}
+	}
+}
+
+void ARIA_ProcessAndXorBlock_Xor_NEON(const byte* xorBlock, byte* outBlock)
+{
+	vst1q_u32(reinterpret_cast<uint32_t*>(outBlock), veorq_u32(
+		vld1q_u32(reinterpret_cast<const uint32_t*>(outBlock)),
+		vld1q_u32(reinterpret_cast<const uint32_t*>(xorBlock))));
+}
+#endif
+
+NAMESPACE_END
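ARIA_GSRK_NEON<N> above computes RK = X XOR (Y >>> N), a 128-bit rotation of Y: vextq_u32 handles the whole-word part of the rotation (Q1/Q2) and the vshrq/vshlq pair handles the residual R = N % 32 bits. A scalar sketch of the same computation, editorial and not part of the patch (the hypothetical ARIA_GSRK_ref mirrors the scalar ARIA_GSRK kept in aria.cpp below):

    // Editorial sketch: scalar reference for ARIA_GSRK_NEON<N>.
    // RK = X ^ (Y >>> N) over a 128-bit value stored as four 32-bit words.
    #include <cstdint>

    template <unsigned int N>
    void ARIA_GSRK_ref(const uint32_t X[4], const uint32_t Y[4], uint32_t RK[4])
    {
        const unsigned int Q = 4 - (N / 32);  // whole words to rotate by
        const unsigned int R = N % 32;        // residual bit shift; non-zero
                                              // for the N used here (19..109)
        for (unsigned int j = 0; j < 4; ++j)
            RK[j] = X[j]
                  ^ (Y[(Q + j    ) % 4] >> R)
                  ^ (Y[(Q + j + 3) % 4] << (32 - R));
    }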
diff --git a/aria.cpp b/aria.cpp
index bfa9ab07..6f9e2a15 100644
--- a/aria.cpp
+++ b/aria.cpp
@@ -15,7 +15,7 @@
 # define CRYPTOPP_ENABLE_ARIA_SSSE3_INTRINSICS 1
 #endif
 
-#if CRYPTOPP_BOOL_NEON_INTRINSICS_AVAILABLE
+#if CRYPTOPP_ARM_NEON_AVAILABLE
 # define CRYPTOPP_ENABLE_ARIA_NEON_INTRINSICS 1
 #endif
 
@@ -222,6 +222,11 @@ inline byte ARIA_BRF(const word32 x, const int y) {
 #define ARIA_FO {SBL1_M(t[0],t[1],t[2],t[3]) ARIA_MM(t[0],t[1],t[2],t[3]) ARIA_P(t[0],t[1],t[2],t[3]) ARIA_MM(t[0],t[1],t[2],t[3])}
 #define ARIA_FE {SBL2_M(t[0],t[1],t[2],t[3]) ARIA_MM(t[0],t[1],t[2],t[3]) ARIA_P(t[2],t[3],t[0],t[1]) ARIA_MM(t[0],t[1],t[2],t[3])}
 
+#if (CRYPTOPP_ARM_NEON_AVAILABLE)
+extern void ARIA_UncheckedSetKey_Schedule_NEON(byte* rk, word32* ws, unsigned int keylen);
+extern void ARIA_ProcessAndXorBlock_Xor_NEON(const byte* xorBlock, byte* outBlock);
+#endif
+
 // n-bit right shift of Y XORed to X
 template <unsigned int N>
 inline void ARIA_GSRK(const word32 X[4], const word32 Y[4], byte RK[16])
@@ -235,21 +240,6 @@ inline void ARIA_GSRK(const word32 X[4], const word32 Y[4], byte RK[16])
 	reinterpret_cast<word32*>(RK)[3] = (X[3]) ^ ((Y[(Q+3)%4])>>R) ^ ((Y[(Q+2)%4])<<(32-R));
 }
 
-#if CRYPTOPP_ENABLE_ARIA_NEON_INTRINSICS
-template <unsigned int N>
-inline void ARIA_GSRK_NEON(const uint32x4_t X, const uint32x4_t Y, byte RK[16])
-{
-	static const unsigned int Q1 = (4-(N/32)) % 4;
-	static const unsigned int Q2 = (3-(N/32)) % 4;
-	static const unsigned int R = N % 32;
-
-	vst1q_u32(reinterpret_cast<uint32_t*>(RK),
-		veorq_u32(X, veorq_u32(
-			vshrq_n_u32(vextq_u32(Y, Y, Q1), R),
-			vshlq_n_u32(vextq_u32(Y, Y, Q2), 32-R))));
-}
-#endif
-
 void ARIA::Base::UncheckedSetKey(const byte *key, unsigned int keylen, const NameValuePairs &params)
 {
 	CRYPTOPP_UNUSED(params);
@@ -385,36 +375,7 @@ void ARIA::Base::UncheckedSetKey(const byte *key, unsigned int keylen, const NameValuePairs &params)
 #if CRYPTOPP_ENABLE_ARIA_NEON_INTRINSICS
 	if (HasNEON())
 	{
-		const uint32x4_t w0 = vld1q_u32((const uint32_t*)(m_w.data()+0));
-		const uint32x4_t w1 = vld1q_u32((const uint32_t*)(m_w.data()+8));
-		const uint32x4_t w2 = vld1q_u32((const uint32_t*)(m_w.data()+12));
-		const uint32x4_t w3 = vld1q_u32((const uint32_t*)(m_w.data()+16));
-
-		ARIA_GSRK_NEON<19>(w0, w1, rk +   0);
-		ARIA_GSRK_NEON<19>(w1, w2, rk +  16);
-		ARIA_GSRK_NEON<19>(w2, w3, rk +  32);
-		ARIA_GSRK_NEON<19>(w3, w0, rk +  48);
-		ARIA_GSRK_NEON<31>(w0, w1, rk +  64);
-		ARIA_GSRK_NEON<31>(w1, w2, rk +  80);
-		ARIA_GSRK_NEON<31>(w2, w3, rk +  96);
-		ARIA_GSRK_NEON<31>(w3, w0, rk + 112);
-		ARIA_GSRK_NEON<67>(w0, w1, rk + 128);
-		ARIA_GSRK_NEON<67>(w1, w2, rk + 144);
-		ARIA_GSRK_NEON<67>(w2, w3, rk + 160);
-		ARIA_GSRK_NEON<67>(w3, w0, rk + 176);
-		ARIA_GSRK_NEON<97>(w0, w1, rk + 192);
-
-		if (keylen > 16)
-		{
-			ARIA_GSRK_NEON<97>(w1, w2, rk + 208);
-			ARIA_GSRK_NEON<97>(w2, w3, rk + 224);
-
-			if (keylen > 24)
-			{
-				ARIA_GSRK_NEON< 97>(w3, w0, rk + 240);
-				ARIA_GSRK_NEON<109>(w0, w1, rk + 256);
-			}
-		}
+		ARIA_UncheckedSetKey_Schedule_NEON(rk, m_w, keylen);
 	}
 	else
 #endif // CRYPTOPP_ENABLE_ARIA_NEON_INTRINSICS
@@ -621,18 +582,13 @@ void ARIA::Base::ProcessAndXorBlock(const byte *inBlock, const byte *xorBlock, byte *outBlock) const
 	if (HasNEON())
 	{
 		if (xorBlock != NULLPTR)
-		{
-			vst1q_u32(reinterpret_cast<uint32_t*>(outBlock),
-				veorq_u32(
-					vld1q_u32((const uint32_t*)outBlock),
-					vld1q_u32((const uint32_t*)xorBlock)));
-		}
+			ARIA_ProcessAndXorBlock_Xor_NEON(xorBlock, outBlock);
 	}
 	else
 #endif // CRYPTOPP_ENABLE_ARIA_NEON_INTRINSICS
 	{
 		if (xorBlock != NULLPTR)
-			for (unsigned int n=0; n<16; ++n)
+			for (unsigned int n=0; n<16; ++n)
diff --git a/blake2-simd.cpp b/blake2-simd.cpp
new file mode 100644
--- /dev/null
+++ b/blake2-simd.cpp
@@ -0,0 +1,2162 @@
+// blake2-simd.cpp - written and placed in the public domain by
+//                   Jeffrey Walton, Uri Blumenthal and Marcel Raad.
+//
+// This source file uses intrinsics to gain access to SSE4.2 and
+// ARMv8a NEON instructions. A separate source file is needed
+// because additional CXXFLAGS are required to enable the
+// appropriate instruction sets in some build configurations.
+
+#include "pch.h"
+#include "config.h"
+#include "misc.h"
+#include "blake2.h"
+
+#if (CRYPTOPP_SSE42_AVAILABLE)
+# include <smmintrin.h>
+#endif
+
+NAMESPACE_BEGIN(CryptoPP)
+
+ANONYMOUS_NAMESPACE_BEGIN
+
+CRYPTOPP_ALIGN_DATA(16)
+const word32 BLAKE2S_IV[8] = {
+	0x6A09E667UL, 0xBB67AE85UL, 0x3C6EF372UL, 0xA54FF53AUL,
+	0x510E527FUL, 0x9B05688CUL, 0x1F83D9ABUL, 0x5BE0CD19UL
+};
+
+CRYPTOPP_ALIGN_DATA(16)
+const word64 BLAKE2B_IV[8] = {
+	W64LIT(0x6a09e667f3bcc908), W64LIT(0xbb67ae8584caa73b),
+	W64LIT(0x3c6ef372fe94f82b), W64LIT(0xa54ff53a5f1d36f1),
+	W64LIT(0x510e527fade682d1), W64LIT(0x9b05688c2b3e6c1f),
+	W64LIT(0x1f83d9abfb41bd6b), W64LIT(0x5be0cd19137e2179)
+};
+
+ANONYMOUS_NAMESPACE_END
+
+#if 
CRYPTOPP_SSE42_AVAILABLE +void BLAKE2_Compress32_SSE4(const byte* input, BLAKE2_State& state) +{ + __m128i row1, row2, row3, row4; + __m128i buf1, buf2, buf3, buf4; + + __m128i t0, t1, t2; + __m128i ff0, ff1; + + const __m128i r8 = _mm_set_epi8(12, 15, 14, 13, 8, 11, 10, 9, 4, 7, 6, 5, 0, 3, 2, 1); + const __m128i r16 = _mm_set_epi8(13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2); + + const __m128i m0 = _mm_loadu_si128((const __m128i*)(const void*)(input + 00)); + const __m128i m1 = _mm_loadu_si128((const __m128i*)(const void*)(input + 16)); + const __m128i m2 = _mm_loadu_si128((const __m128i*)(const void*)(input + 32)); + const __m128i m3 = _mm_loadu_si128((const __m128i*)(const void*)(input + 48)); + + row1 = ff0 = _mm_loadu_si128((const __m128i*)(const void*)(&state.h[0])); + row2 = ff1 = _mm_loadu_si128((const __m128i*)(const void*)(&state.h[4])); + row3 = _mm_setr_epi32(BLAKE2S_IV[0], BLAKE2S_IV[1], BLAKE2S_IV[2], BLAKE2S_IV[3]); + row4 = _mm_xor_si128(_mm_setr_epi32(BLAKE2S_IV[4], BLAKE2S_IV[5], BLAKE2S_IV[6], BLAKE2S_IV[7]), _mm_loadu_si128((const __m128i*)(const void*)(&state.t[0]))); + buf1 = _mm_castps_si128((_mm_shuffle_ps(_mm_castsi128_ps((m0)), _mm_castsi128_ps((m1)), _MM_SHUFFLE(2,0,2,0)))); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf1), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r16); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); + + buf2 = _mm_castps_si128((_mm_shuffle_ps(_mm_castsi128_ps((m0)), _mm_castsi128_ps((m1)), _MM_SHUFFLE(3,1,3,1)))); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf2), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r8); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); + + row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(2,1,0,3)); + row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); + row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(0,3,2,1)); + + buf3 = _mm_castps_si128((_mm_shuffle_ps(_mm_castsi128_ps((m2)), _mm_castsi128_ps((m3)), _MM_SHUFFLE(2,0,2,0)))); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf3), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r16); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); + + buf4 = _mm_castps_si128((_mm_shuffle_ps(_mm_castsi128_ps((m2)), _mm_castsi128_ps((m3)), _MM_SHUFFLE(3,1,3,1)))); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf4), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r8); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); + + row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(0,3,2,1)); + row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); + row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(2,1,0,3)); + + t0 = _mm_blend_epi16(m1, m2, 0x0C); + t1 = _mm_slli_si128(m3, 4); + t2 = _mm_blend_epi16(t0, t1, 0xF0); + buf1 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,1,0,3)); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf1), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r16); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); + + t0 = _mm_shuffle_epi32(m2,_MM_SHUFFLE(0,0,2,0)); + t1 = 
_mm_blend_epi16(m1,m3,0xC0); + t2 = _mm_blend_epi16(t0, t1, 0xF0); + buf2 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,3,0,1)); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf2), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r8); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); + + row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(2,1,0,3)); + row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); + row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(0,3,2,1)); + + t0 = _mm_slli_si128(m1, 4); + t1 = _mm_blend_epi16(m2, t0, 0x30); + t2 = _mm_blend_epi16(m0, t1, 0xF0); + buf3 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,3,0,1)); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf3), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r16); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); + + t0 = _mm_unpackhi_epi32(m0,m1); + t1 = _mm_slli_si128(m3, 4); + t2 = _mm_blend_epi16(t0, t1, 0x0C); + buf4 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,3,0,1)); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf4), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r8); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); + + row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(0,3,2,1)); + row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); + row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(2,1,0,3)); + + t0 = _mm_unpackhi_epi32(m2,m3); + t1 = _mm_blend_epi16(m3,m1,0x0C); + t2 = _mm_blend_epi16(t0, t1, 0x0F); + buf1 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(3,1,0,2)); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf1), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r16); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); + + t0 = _mm_unpacklo_epi32(m2,m0); + t1 = _mm_blend_epi16(t0, m0, 0xF0); + t2 = _mm_slli_si128(m3, 8); + buf2 = _mm_blend_epi16(t1, t2, 0xC0); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf2), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r8); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); + + row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(2,1,0,3)); + row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); + row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(0,3,2,1)); + + t0 = _mm_blend_epi16(m0, m2, 0x3C); + t1 = _mm_srli_si128(m1, 12); + t2 = _mm_blend_epi16(t0,t1,0x03); + buf3 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(1,0,3,2)); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf3), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r16); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); + + t0 = _mm_slli_si128(m3, 4); + t1 = _mm_blend_epi16(m0, m1, 0x33); + t2 = _mm_blend_epi16(t1, t0, 0xC0); + buf4 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(0,1,2,3)); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf4), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r8); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); + + row4 = 
_mm_shuffle_epi32(row4, _MM_SHUFFLE(0,3,2,1)); + row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); + row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(2,1,0,3)); + + t0 = _mm_unpackhi_epi32(m0,m1); + t1 = _mm_unpackhi_epi32(t0, m2); + t2 = _mm_blend_epi16(t1, m3, 0x0C); + buf1 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(3,1,0,2)); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf1), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r16); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); + + t0 = _mm_slli_si128(m2, 8); + t1 = _mm_blend_epi16(m3,m0,0x0C); + t2 = _mm_blend_epi16(t1, t0, 0xC0); + buf2 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,0,1,3)); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf2), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r8); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); + + row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(2,1,0,3)); + row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); + row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(0,3,2,1)); + + t0 = _mm_blend_epi16(m0,m1,0x0F); + t1 = _mm_blend_epi16(t0, m3, 0xC0); + buf3 = _mm_shuffle_epi32(t1, _MM_SHUFFLE(3,0,1,2)); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf3), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r16); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); + + t0 = _mm_unpacklo_epi32(m0,m2); + t1 = _mm_unpackhi_epi32(m1,m2); + buf4 = _mm_unpacklo_epi64(t1,t0); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf4), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r8); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); + + row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(0,3,2,1)); + row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); + row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(2,1,0,3)); + + t0 = _mm_unpacklo_epi64(m1,m2); + t1 = _mm_unpackhi_epi64(m0,m2); + t2 = _mm_blend_epi16(t0,t1,0x33); + buf1 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,0,1,3)); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf1), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r16); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); + + t0 = _mm_unpackhi_epi64(m1,m3); + t1 = _mm_unpacklo_epi64(m0,m1); + buf2 = _mm_blend_epi16(t0,t1,0x33); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf2), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r8); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); + + row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(2,1,0,3)); + row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); + row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(0,3,2,1)); + + t0 = _mm_unpackhi_epi64(m3,m1); + t1 = _mm_unpackhi_epi64(m2,m0); + buf3 = _mm_blend_epi16(t1,t0,0x33); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf3), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r16); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); + + 
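+	// Each stanza above is one half-application of the BLAKE2s G function
+	// to all four columns (or diagonals) at once: row1 += message + row2;
+	// row4 = rotr(row4 ^ row1, 16 or 8) via the r16/r8 pshufb masks;
+	// row3 += row4; row2 = rotr(row2 ^ row3, 12 or 7) via the paired
+	// srli/slli-and-xor. The _mm_shuffle_epi32 triples between stanzas
+	// rotate rows 2-4 to switch between the column and diagonal steps.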
t0 = _mm_blend_epi16(m0,m2,0x03); + t1 = _mm_slli_si128(t0, 8); + t2 = _mm_blend_epi16(t1,m3,0x0F); + buf4 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(1,2,0,3)); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf4), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r8); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); + + row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(0,3,2,1)); + row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); + row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(2,1,0,3)); + + t0 = _mm_unpackhi_epi32(m0,m1); + t1 = _mm_unpacklo_epi32(m0,m2); + buf1 = _mm_unpacklo_epi64(t0,t1); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf1), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r16); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); + + t0 = _mm_srli_si128(m2, 4); + t1 = _mm_blend_epi16(m0,m3,0x03); + buf2 = _mm_blend_epi16(t1,t0,0x3C); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf2), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r8); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); + + row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(2,1,0,3)); + row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); + row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(0,3,2,1)); + + t0 = _mm_blend_epi16(m1,m0,0x0C); + t1 = _mm_srli_si128(m3, 4); + t2 = _mm_blend_epi16(t0,t1,0x30); + buf3 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(1,2,3,0)); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf3), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r16); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); + + t0 = _mm_unpacklo_epi64(m1,m2); + t1= _mm_shuffle_epi32(m3, _MM_SHUFFLE(0,2,0,1)); + buf4 = _mm_blend_epi16(t0,t1,0x33); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf4), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r8); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); + + row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(0,3,2,1)); + row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); + row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(2,1,0,3)); + + t0 = _mm_slli_si128(m1, 12); + t1 = _mm_blend_epi16(m0,m3,0x33); + buf1 = _mm_blend_epi16(t1,t0,0xC0); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf1), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r16); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); + + t0 = _mm_blend_epi16(m3,m2,0x30); + t1 = _mm_srli_si128(m1, 4); + t2 = _mm_blend_epi16(t0,t1,0x03); + buf2 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,1,3,0)); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf2), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r8); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); + + row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(2,1,0,3)); + row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); + row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(0,3,2,1)); + + t0 = 
_mm_unpacklo_epi64(m0,m2); + t1 = _mm_srli_si128(m1, 4); + buf3 = _mm_shuffle_epi32(_mm_blend_epi16(t0,t1,0x0C), _MM_SHUFFLE(2,3,1,0)); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf3), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r16); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); + + t0 = _mm_unpackhi_epi32(m1,m2); + t1 = _mm_unpackhi_epi64(m0,t0); + buf4 = _mm_shuffle_epi32(t1, _MM_SHUFFLE(3,0,1,2)); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf4), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r8); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); + + row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(0,3,2,1)); + row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); + row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(2,1,0,3)); + + t0 = _mm_unpackhi_epi32(m0,m1); + t1 = _mm_blend_epi16(t0,m3,0x0F); + buf1 = _mm_shuffle_epi32(t1,_MM_SHUFFLE(2,0,3,1)); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf1), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r16); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); + + t0 = _mm_blend_epi16(m2,m3,0x30); + t1 = _mm_srli_si128(m0,4); + t2 = _mm_blend_epi16(t0,t1,0x03); + buf2 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(1,0,2,3)); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf2), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r8); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); + + row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(2,1,0,3)); + row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); + row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(0,3,2,1)); + + t0 = _mm_unpackhi_epi64(m0,m3); + t1 = _mm_unpacklo_epi64(m1,m2); + t2 = _mm_blend_epi16(t0,t1,0x3C); + buf3 = _mm_shuffle_epi32(t2,_MM_SHUFFLE(0,2,3,1)); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf3), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r16); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); + + t0 = _mm_unpacklo_epi32(m0,m1); + t1 = _mm_unpackhi_epi32(m1,m2); + buf4 = _mm_unpacklo_epi64(t0,t1); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf4), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r8); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); + + row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(0,3,2,1)); + row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); + row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(2,1,0,3)); + + t0 = _mm_unpackhi_epi32(m1,m3); + t1 = _mm_unpacklo_epi64(t0,m0); + t2 = _mm_blend_epi16(t1,m2,0xC0); + buf1 = _mm_shufflehi_epi16(t2,_MM_SHUFFLE(1,0,3,2)); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf1), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r16); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); + + t0 = _mm_unpackhi_epi32(m0,m3); + t1 = _mm_blend_epi16(m2,t0,0xF0); + buf2 = _mm_shuffle_epi32(t1,_MM_SHUFFLE(0,2,1,3)); + + 
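+	// The t0/t1/t2 blend/shift/unpack sequences rebuild the four message
+	// vectors (buf1..buf4) for each round: BLAKE2s consumes the sixteen
+	// 32-bit message words in a round-specific SIGMA order, precomputed
+	// here as register shuffles instead of per-word loads.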
row1 = _mm_add_epi32(_mm_add_epi32(row1, buf2), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r8); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); + + row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(2,1,0,3)); + row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); + row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(0,3,2,1)); + + t0 = _mm_blend_epi16(m2,m0,0x0C); + t1 = _mm_slli_si128(t0,4); + buf3 = _mm_blend_epi16(t1,m3,0x0F); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf3), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r16); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); + + t0 = _mm_blend_epi16(m1,m0,0x30); + buf4 = _mm_shuffle_epi32(t0,_MM_SHUFFLE(1,0,3,2)); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf4), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r8); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); + + row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(0,3,2,1)); + row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); + row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(2,1,0,3)); + + t0 = _mm_blend_epi16(m0,m2,0x03); + t1 = _mm_blend_epi16(m1,m2,0x30); + t2 = _mm_blend_epi16(t1,t0,0x0F); + buf1 = _mm_shuffle_epi32(t2,_MM_SHUFFLE(1,3,0,2)); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf1), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r16); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); + + t0 = _mm_slli_si128(m0,4); + t1 = _mm_blend_epi16(m1,t0,0xC0); + buf2 = _mm_shuffle_epi32(t1,_MM_SHUFFLE(1,2,0,3)); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf2), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r8); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); + + row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(2,1,0,3)); + row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); + row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(0,3,2,1)); + + t0 = _mm_unpackhi_epi32(m0,m3); + t1 = _mm_unpacklo_epi32(m2,m3); + t2 = _mm_unpackhi_epi64(t0,t1); + buf3 = _mm_shuffle_epi32(t2,_MM_SHUFFLE(3,0,2,1)); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf3), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r16); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); + + t0 = _mm_blend_epi16(m3,m2,0xC0); + t1 = _mm_unpacklo_epi32(m0,m3); + t2 = _mm_blend_epi16(t0,t1,0x0F); + buf4 = _mm_shuffle_epi32(t2,_MM_SHUFFLE(0,1,2,3)); + + row1 = _mm_add_epi32(_mm_add_epi32(row1, buf4), row2); + row4 = _mm_xor_si128(row4, row1); + row4 = _mm_shuffle_epi8(row4,r8); + row3 = _mm_add_epi32(row3, row4); + row2 = _mm_xor_si128(row2, row3); + row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); + + row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(0,3,2,1)); + row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); + row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(2,1,0,3)); + + _mm_storeu_si128((__m128i *)(void*)(&state.h[0]), _mm_xor_si128(ff0, _mm_xor_si128(row1, row3))); + _mm_storeu_si128((__m128i 
*)(void*)(&state.h[4]), _mm_xor_si128(ff1, _mm_xor_si128(row2, row4))); +} + +void BLAKE2_Compress64_SSE4(const byte* input, BLAKE2_State& state) +{ + __m128i row1l, row1h; + __m128i row2l, row2h; + __m128i row3l, row3h; + __m128i row4l, row4h; + __m128i b0, b1, t0, t1; + + const __m128i r16 = _mm_setr_epi8(2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15, 8, 9); + const __m128i r24 = _mm_setr_epi8(3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14, 15, 8, 9, 10); + + const __m128i m0 = _mm_loadu_si128((const __m128i*)(const void*)(input + 00)); + const __m128i m1 = _mm_loadu_si128((const __m128i*)(const void*)(input + 16)); + const __m128i m2 = _mm_loadu_si128((const __m128i*)(const void*)(input + 32)); + const __m128i m3 = _mm_loadu_si128((const __m128i*)(const void*)(input + 48)); + const __m128i m4 = _mm_loadu_si128((const __m128i*)(const void*)(input + 64)); + const __m128i m5 = _mm_loadu_si128((const __m128i*)(const void*)(input + 80)); + const __m128i m6 = _mm_loadu_si128((const __m128i*)(const void*)(input + 96)); + const __m128i m7 = _mm_loadu_si128((const __m128i*)(const void*)(input + 112)); + + row1l = _mm_loadu_si128((const __m128i*)(const void*)(&state.h[0])); + row1h = _mm_loadu_si128((const __m128i*)(const void*)(&state.h[2])); + row2l = _mm_loadu_si128((const __m128i*)(const void*)(&state.h[4])); + row2h = _mm_loadu_si128((const __m128i*)(const void*)(&state.h[6])); + row3l = _mm_loadu_si128((const __m128i*)(const void*)(&BLAKE2B_IV[0])); + row3h = _mm_loadu_si128((const __m128i*)(const void*)(&BLAKE2B_IV[2])); + row4l = _mm_xor_si128(_mm_loadu_si128((const __m128i*)(const void*)(&BLAKE2B_IV[4])), _mm_loadu_si128((const __m128i*)(const void*)(&state.t[0]))); + row4h = _mm_xor_si128(_mm_loadu_si128((const __m128i*)(const void*)(&BLAKE2B_IV[6])), _mm_loadu_si128((const __m128i*)(const void*)(&state.f[0]))); + + b0 = _mm_unpacklo_epi64(m0, m1); + b1 = _mm_unpacklo_epi64(m2, m3); + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); + row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_shuffle_epi8(row2l, r24); + row2h = _mm_shuffle_epi8(row2h, r24); + + b0 = _mm_unpackhi_epi64(m0, m1); + b1 = _mm_unpackhi_epi64(m2, m3); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi8(row4l, r16); + row4h = _mm_shuffle_epi8(row4h, r16); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); + row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); + + t0 = _mm_alignr_epi8(row2h, row2l, 8); + t1 = _mm_alignr_epi8(row2l, row2h, 8); + row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; + t0 = _mm_alignr_epi8(row4h, row4l, 8); + t1 = _mm_alignr_epi8(row4l, row4h, 8); + row4l = t1, row4h = t0; + + b0 = _mm_unpacklo_epi64(m4, m5); + b1 = _mm_unpacklo_epi64(m6, m7); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = 
_mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); + row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_shuffle_epi8(row2l, r24); + row2h = _mm_shuffle_epi8(row2h, r24); + + b0 = _mm_unpackhi_epi64(m4, m5); + b1 = _mm_unpackhi_epi64(m6, m7); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi8(row4l, r16); + row4h = _mm_shuffle_epi8(row4h, r16); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); + row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); + + t0 = _mm_alignr_epi8(row2l, row2h, 8); + t1 = _mm_alignr_epi8(row2h, row2l, 8); + row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; + t0 = _mm_alignr_epi8(row4l, row4h, 8); + t1 = _mm_alignr_epi8(row4h, row4l, 8); + row4l = t1, row4h = t0; + + b0 = _mm_unpacklo_epi64(m7, m2); + b1 = _mm_unpackhi_epi64(m4, m6); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); + row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_shuffle_epi8(row2l, r24); + row2h = _mm_shuffle_epi8(row2h, r24); + + b0 = _mm_unpacklo_epi64(m5, m4); + b1 = _mm_alignr_epi8(m3, m7, 8); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi8(row4l, r16); + row4h = _mm_shuffle_epi8(row4h, r16); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); + row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); + + t0 = _mm_alignr_epi8(row2h, row2l, 8); + t1 = _mm_alignr_epi8(row2l, row2h, 8); + row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; + t0 = _mm_alignr_epi8(row4h, row4l, 8); + t1 = _mm_alignr_epi8(row4l, row4h, 8); + row4l = t1, row4h = t0; + + b0 = _mm_shuffle_epi32(m0, _MM_SHUFFLE(1,0,3,2)); + b1 = _mm_unpackhi_epi64(m5, m2); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); + row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_shuffle_epi8(row2l, r24); + row2h = 
_mm_shuffle_epi8(row2h, r24); + + b0 = _mm_unpacklo_epi64(m6, m1); + b1 = _mm_unpackhi_epi64(m3, m1); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi8(row4l, r16); + row4h = _mm_shuffle_epi8(row4h, r16); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); + row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); + + t0 = _mm_alignr_epi8(row2l, row2h, 8); + t1 = _mm_alignr_epi8(row2h, row2l, 8); + row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; + t0 = _mm_alignr_epi8(row4l, row4h, 8); + t1 = _mm_alignr_epi8(row4h, row4l, 8); + row4l = t1, row4h = t0; + + b0 = _mm_alignr_epi8(m6, m5, 8); + b1 = _mm_unpackhi_epi64(m2, m7); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); + row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_shuffle_epi8(row2l, r24); + row2h = _mm_shuffle_epi8(row2h, r24); + + b0 = _mm_unpacklo_epi64(m4, m0); + b1 = _mm_blend_epi16(m1, m6, 0xF0); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi8(row4l, r16); + row4h = _mm_shuffle_epi8(row4h, r16); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); + row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); + + t0 = _mm_alignr_epi8(row2h, row2l, 8); + t1 = _mm_alignr_epi8(row2l, row2h, 8); + row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; + t0 = _mm_alignr_epi8(row4h, row4l, 8); + t1 = _mm_alignr_epi8(row4l, row4h, 8); + row4l = t1, row4h = t0; + + b0 = _mm_blend_epi16(m5, m1, 0xF0); + b1 = _mm_unpackhi_epi64(m3, m4); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); + row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_shuffle_epi8(row2l, r24); + row2h = _mm_shuffle_epi8(row2h, r24); + + b0 = _mm_unpacklo_epi64(m7, m3); + b1 = _mm_alignr_epi8(m2, m0, 8); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi8(row4l, r16); + row4h = _mm_shuffle_epi8(row4h, r16); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + 
row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); + row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); + + t0 = _mm_alignr_epi8(row2l, row2h, 8); + t1 = _mm_alignr_epi8(row2h, row2l, 8); + row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; + t0 = _mm_alignr_epi8(row4l, row4h, 8); + t1 = _mm_alignr_epi8(row4h, row4l, 8); + row4l = t1, row4h = t0; + + b0 = _mm_unpackhi_epi64(m3, m1); + b1 = _mm_unpackhi_epi64(m6, m5); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); + row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_shuffle_epi8(row2l, r24); + row2h = _mm_shuffle_epi8(row2h, r24); + + b0 = _mm_unpackhi_epi64(m4, m0); + b1 = _mm_unpacklo_epi64(m6, m7); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi8(row4l, r16); + row4h = _mm_shuffle_epi8(row4h, r16); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); + row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); + + t0 = _mm_alignr_epi8(row2h, row2l, 8); + t1 = _mm_alignr_epi8(row2l, row2h, 8); + row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; + t0 = _mm_alignr_epi8(row4h, row4l, 8); + t1 = _mm_alignr_epi8(row4l, row4h, 8); + row4l = t1, row4h = t0; + + b0 = _mm_blend_epi16(m1, m2, 0xF0); + b1 = _mm_blend_epi16(m2, m7, 0xF0); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); + row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_shuffle_epi8(row2l, r24); + row2h = _mm_shuffle_epi8(row2h, r24); + + b0 = _mm_unpacklo_epi64(m3, m5); + b1 = _mm_unpacklo_epi64(m0, m4); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi8(row4l, r16); + row4h = _mm_shuffle_epi8(row4h, r16); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); + row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); + + t0 = _mm_alignr_epi8(row2l, row2h, 8); + t1 = _mm_alignr_epi8(row2h, row2l, 8); + row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; + t0 = _mm_alignr_epi8(row4l, row4h, 8); + t1 = _mm_alignr_epi8(row4h, 
row4l, 8); + row4l = t1, row4h = t0; + + b0 = _mm_unpackhi_epi64(m4, m2); + b1 = _mm_unpacklo_epi64(m1, m5); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); + row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_shuffle_epi8(row2l, r24); + row2h = _mm_shuffle_epi8(row2h, r24); + + b0 = _mm_blend_epi16(m0, m3, 0xF0); + b1 = _mm_blend_epi16(m2, m7, 0xF0); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi8(row4l, r16); + row4h = _mm_shuffle_epi8(row4h, r16); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); + row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); + + t0 = _mm_alignr_epi8(row2h, row2l, 8); + t1 = _mm_alignr_epi8(row2l, row2h, 8); + row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; + t0 = _mm_alignr_epi8(row4h, row4l, 8); + t1 = _mm_alignr_epi8(row4l, row4h, 8); + row4l = t1, row4h = t0; + + b0 = _mm_blend_epi16(m7, m5, 0xF0); + b1 = _mm_blend_epi16(m3, m1, 0xF0); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); + row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_shuffle_epi8(row2l, r24); + row2h = _mm_shuffle_epi8(row2h, r24); + + b0 = _mm_alignr_epi8(m6, m0, 8); + b1 = _mm_blend_epi16(m4, m6, 0xF0); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi8(row4l, r16); + row4h = _mm_shuffle_epi8(row4h, r16); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); + row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); + + t0 = _mm_alignr_epi8(row2l, row2h, 8); + t1 = _mm_alignr_epi8(row2h, row2l, 8); + row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; + t0 = _mm_alignr_epi8(row4l, row4h, 8); + t1 = _mm_alignr_epi8(row4h, row4l, 8); + row4l = t1, row4h = t0; + + b0 = _mm_unpacklo_epi64(m1, m3); + b1 = _mm_unpacklo_epi64(m0, m4); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); + row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); + row3l = _mm_add_epi64(row3l, 
row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_shuffle_epi8(row2l, r24); + row2h = _mm_shuffle_epi8(row2h, r24); + + b0 = _mm_unpacklo_epi64(m6, m5); + b1 = _mm_unpackhi_epi64(m5, m1); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi8(row4l, r16); + row4h = _mm_shuffle_epi8(row4h, r16); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); + row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); + + t0 = _mm_alignr_epi8(row2h, row2l, 8); + t1 = _mm_alignr_epi8(row2l, row2h, 8); + row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; + t0 = _mm_alignr_epi8(row4h, row4l, 8); + t1 = _mm_alignr_epi8(row4l, row4h, 8); + row4l = t1, row4h = t0; + + b0 = _mm_blend_epi16(m2, m3, 0xF0); + b1 = _mm_unpackhi_epi64(m7, m0); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); + row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_shuffle_epi8(row2l, r24); + row2h = _mm_shuffle_epi8(row2h, r24); + + b0 = _mm_unpackhi_epi64(m6, m2); + b1 = _mm_blend_epi16(m7, m4, 0xF0); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi8(row4l, r16); + row4h = _mm_shuffle_epi8(row4h, r16); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); + row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); + + t0 = _mm_alignr_epi8(row2l, row2h, 8); + t1 = _mm_alignr_epi8(row2h, row2l, 8); + row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; + t0 = _mm_alignr_epi8(row4l, row4h, 8); + t1 = _mm_alignr_epi8(row4h, row4l, 8); + row4l = t1, row4h = t0; + + b0 = _mm_blend_epi16(m6, m0, 0xF0); + b1 = _mm_unpacklo_epi64(m7, m2); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); + row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_shuffle_epi8(row2l, r24); + row2h = _mm_shuffle_epi8(row2h, r24); + + b0 = _mm_unpackhi_epi64(m2, m7); + b1 = _mm_alignr_epi8(m5, m6, 8); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = 
_mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi8(row4l, r16); + row4h = _mm_shuffle_epi8(row4h, r16); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); + row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); + + t0 = _mm_alignr_epi8(row2h, row2l, 8); + t1 = _mm_alignr_epi8(row2l, row2h, 8); + row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; + t0 = _mm_alignr_epi8(row4h, row4l, 8); + t1 = _mm_alignr_epi8(row4l, row4h, 8); + row4l = t1, row4h = t0; + + b0 = _mm_unpacklo_epi64(m0, m3); + b1 = _mm_shuffle_epi32(m4, _MM_SHUFFLE(1,0,3,2)); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); + row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_shuffle_epi8(row2l, r24); + row2h = _mm_shuffle_epi8(row2h, r24); + + b0 = _mm_unpackhi_epi64(m3, m1); + b1 = _mm_blend_epi16(m1, m5, 0xF0); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi8(row4l, r16); + row4h = _mm_shuffle_epi8(row4h, r16); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); + row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); + + t0 = _mm_alignr_epi8(row2l, row2h, 8); + t1 = _mm_alignr_epi8(row2h, row2l, 8); + row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; + t0 = _mm_alignr_epi8(row4l, row4h, 8); + t1 = _mm_alignr_epi8(row4h, row4l, 8); + row4l = t1, row4h = t0; + + b0 = _mm_unpackhi_epi64(m6, m3); + b1 = _mm_blend_epi16(m6, m1, 0xF0); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); + row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_shuffle_epi8(row2l, r24); + row2h = _mm_shuffle_epi8(row2h, r24); + + b0 = _mm_alignr_epi8(m7, m5, 8); + b1 = _mm_unpackhi_epi64(m0, m4); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi8(row4l, r16); + row4h = _mm_shuffle_epi8(row4h, r16); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); + row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); + + t0 = 
_mm_alignr_epi8(row2h, row2l, 8); + t1 = _mm_alignr_epi8(row2l, row2h, 8); + row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; + t0 = _mm_alignr_epi8(row4h, row4l, 8); + t1 = _mm_alignr_epi8(row4l, row4h, 8); + row4l = t1, row4h = t0; + + b0 = _mm_unpackhi_epi64(m2, m7); + b1 = _mm_unpacklo_epi64(m4, m1); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); + row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_shuffle_epi8(row2l, r24); + row2h = _mm_shuffle_epi8(row2h, r24); + + b0 = _mm_unpacklo_epi64(m0, m2); + b1 = _mm_unpacklo_epi64(m3, m5); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi8(row4l, r16); + row4h = _mm_shuffle_epi8(row4h, r16); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); + row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); + + t0 = _mm_alignr_epi8(row2l, row2h, 8); + t1 = _mm_alignr_epi8(row2h, row2l, 8); + row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; + t0 = _mm_alignr_epi8(row4l, row4h, 8); + t1 = _mm_alignr_epi8(row4h, row4l, 8); + row4l = t1, row4h = t0; + + b0 = _mm_unpacklo_epi64(m3, m7); + b1 = _mm_alignr_epi8(m0, m5, 8); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); + row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_shuffle_epi8(row2l, r24); + row2h = _mm_shuffle_epi8(row2h, r24); + + b0 = _mm_unpackhi_epi64(m7, m4); + b1 = _mm_alignr_epi8(m4, m1, 8); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi8(row4l, r16); + row4h = _mm_shuffle_epi8(row4h, r16); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); + row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); + + t0 = _mm_alignr_epi8(row2h, row2l, 8); + t1 = _mm_alignr_epi8(row2l, row2h, 8); + row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; + t0 = _mm_alignr_epi8(row4h, row4l, 8); + t1 = _mm_alignr_epi8(row4l, row4h, 8); + row4l = t1, row4h = t0; + + b0 = m6; + b1 = _mm_alignr_epi8(m5, m0, 8); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = 
_mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); + row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_shuffle_epi8(row2l, r24); + row2h = _mm_shuffle_epi8(row2h, r24); + + b0 = _mm_blend_epi16(m1, m3, 0xF0); + b1 = m2; + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi8(row4l, r16); + row4h = _mm_shuffle_epi8(row4h, r16); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); + row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); + + t0 = _mm_alignr_epi8(row2l, row2h, 8); + t1 = _mm_alignr_epi8(row2h, row2l, 8); + row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; + t0 = _mm_alignr_epi8(row4l, row4h, 8); + t1 = _mm_alignr_epi8(row4h, row4l, 8); + row4l = t1, row4h = t0; + + b0 = _mm_unpacklo_epi64(m5, m4); + b1 = _mm_unpackhi_epi64(m3, m0); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); + row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_shuffle_epi8(row2l, r24); + row2h = _mm_shuffle_epi8(row2h, r24); + + b0 = _mm_unpacklo_epi64(m1, m2); + b1 = _mm_blend_epi16(m3, m2, 0xF0); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi8(row4l, r16); + row4h = _mm_shuffle_epi8(row4h, r16); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); + row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); + + t0 = _mm_alignr_epi8(row2h, row2l, 8); + t1 = _mm_alignr_epi8(row2l, row2h, 8); + row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; + t0 = _mm_alignr_epi8(row4h, row4l, 8); + t1 = _mm_alignr_epi8(row4l, row4h, 8); + row4l = t1, row4h = t0; + + b0 = _mm_unpackhi_epi64(m7, m4); + b1 = _mm_unpackhi_epi64(m1, m6); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); + row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_shuffle_epi8(row2l, r24); + row2h = _mm_shuffle_epi8(row2h, r24); + + b0 = _mm_alignr_epi8(m7, m5, 8); + b1 = _mm_unpacklo_epi64(m6, m0); + + row1l = 
_mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi8(row4l, r16); + row4h = _mm_shuffle_epi8(row4h, r16); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); + row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); + + t0 = _mm_alignr_epi8(row2l, row2h, 8); + t1 = _mm_alignr_epi8(row2h, row2l, 8); + row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; + t0 = _mm_alignr_epi8(row4l, row4h, 8); + t1 = _mm_alignr_epi8(row4h, row4l, 8); + row4l = t1, row4h = t0; + + b0 = _mm_unpacklo_epi64(m0, m1); + b1 = _mm_unpacklo_epi64(m2, m3); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); + row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_shuffle_epi8(row2l, r24); + row2h = _mm_shuffle_epi8(row2h, r24); + + b0 = _mm_unpackhi_epi64(m0, m1); + b1 = _mm_unpackhi_epi64(m2, m3); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi8(row4l, r16); + row4h = _mm_shuffle_epi8(row4h, r16); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); + row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); + + t0 = _mm_alignr_epi8(row2h, row2l, 8); + t1 = _mm_alignr_epi8(row2l, row2h, 8); + row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; + t0 = _mm_alignr_epi8(row4h, row4l, 8); + t1 = _mm_alignr_epi8(row4l, row4h, 8); + row4l = t1, row4h = t0; + + b0 = _mm_unpacklo_epi64(m4, m5); + b1 = _mm_unpacklo_epi64(m6, m7); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); + row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_shuffle_epi8(row2l, r24); + row2h = _mm_shuffle_epi8(row2h, r24); + + b0 = _mm_unpackhi_epi64(m4, m5); + b1 = _mm_unpackhi_epi64(m6, m7); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi8(row4l, r16); + row4h = _mm_shuffle_epi8(row4h, r16); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 
63), _mm_add_epi64(row2l, row2l)); + row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); + + t0 = _mm_alignr_epi8(row2l, row2h, 8); + t1 = _mm_alignr_epi8(row2h, row2l, 8); + row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; + t0 = _mm_alignr_epi8(row4l, row4h, 8); + t1 = _mm_alignr_epi8(row4h, row4l, 8); + row4l = t1, row4h = t0; + + b0 = _mm_unpacklo_epi64(m7, m2); + b1 = _mm_unpackhi_epi64(m4, m6); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); + row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_shuffle_epi8(row2l, r24); + row2h = _mm_shuffle_epi8(row2h, r24); + + b0 = _mm_unpacklo_epi64(m5, m4); + b1 = _mm_alignr_epi8(m3, m7, 8); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi8(row4l, r16); + row4h = _mm_shuffle_epi8(row4h, r16); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); + row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); + + t0 = _mm_alignr_epi8(row2h, row2l, 8); + t1 = _mm_alignr_epi8(row2l, row2h, 8); + row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; + t0 = _mm_alignr_epi8(row4h, row4l, 8); + t1 = _mm_alignr_epi8(row4l, row4h, 8); + row4l = t1, row4h = t0; + + b0 = _mm_shuffle_epi32(m0, _MM_SHUFFLE(1,0,3,2)); + b1 = _mm_unpackhi_epi64(m5, m2); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); + row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_shuffle_epi8(row2l, r24); + row2h = _mm_shuffle_epi8(row2h, r24); + + b0 = _mm_unpacklo_epi64(m6, m1); + b1 = _mm_unpackhi_epi64(m3, m1); + + row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); + row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); + row4l = _mm_xor_si128(row4l, row1l); + row4h = _mm_xor_si128(row4h, row1h); + row4l = _mm_shuffle_epi8(row4l, r16); + row4h = _mm_shuffle_epi8(row4h, r16); + row3l = _mm_add_epi64(row3l, row4l); + row3h = _mm_add_epi64(row3h, row4h); + row2l = _mm_xor_si128(row2l, row3l); + row2h = _mm_xor_si128(row2h, row3h); + row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); + row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); + + t0 = _mm_alignr_epi8(row2l, row2h, 8); + t1 = _mm_alignr_epi8(row2h, row2l, 8); + row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; + t0 = _mm_alignr_epi8(row4l, row4h, 8); + t1 = _mm_alignr_epi8(row4h, row4l, 8); + row4l = t1, row4h = t0; + + row1l = _mm_xor_si128(row3l, row1l); + row1h = _mm_xor_si128(row3h, 
row1h); + _mm_storeu_si128((__m128i *)(void*)(&state.h[0]), _mm_xor_si128(_mm_loadu_si128((const __m128i*)(const void*)(&state.h[0])), row1l)); + _mm_storeu_si128((__m128i *)(void*)(&state.h[2]), _mm_xor_si128(_mm_loadu_si128((const __m128i*)(const void*)(&state.h[2])), row1h)); + + row2l = _mm_xor_si128(row4l, row2l); + row2h = _mm_xor_si128(row4h, row2h); + _mm_storeu_si128((__m128i *)(void*)(&state.h[4]), _mm_xor_si128(_mm_loadu_si128((const __m128i*)(const void*)(&state.h[4])), row2l)); + _mm_storeu_si128((__m128i *)(void*)(&state.h[6]), _mm_xor_si128(_mm_loadu_si128((const __m128i*)(const void*)(&state.h[6])), row2h)); +} +#endif // CRYPTOPP_SSE42_AVAILABLE + +// Disable NEON for Cortex-A53 and A57. Also see http://github.com/weidai11/cryptopp/issues/367 +#if CRYPTOPP_BOOL_ARM32 && CRYPTOPP_ARM_NEON_AVAILABLE +void BLAKE2_Compress32_NEON(const byte* input, BLAKE2_State& state) +{ + #define BLAKE2S_LOAD_MSG_0_1(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vzip_u32(vget_low_u32(m0), vget_high_u32(m0)).val[0]; \ + t1 = vzip_u32(vget_low_u32(m1), vget_high_u32(m1)).val[0]; \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_0_2(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vzip_u32(vget_low_u32(m0), vget_high_u32(m0)).val[1]; \ + t1 = vzip_u32(vget_low_u32(m1), vget_high_u32(m1)).val[1]; \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_0_3(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vzip_u32(vget_low_u32(m2), vget_high_u32(m2)).val[0]; \ + t1 = vzip_u32(vget_low_u32(m3), vget_high_u32(m3)).val[0]; \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_0_4(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vzip_u32(vget_low_u32(m2), vget_high_u32(m2)).val[1]; \ + t1 = vzip_u32(vget_low_u32(m3), vget_high_u32(m3)).val[1]; \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_1_1(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vzip_u32(vget_high_u32(m3), vget_low_u32(m1)).val[0]; \ + t1 = vzip_u32(vget_low_u32(m2), vget_low_u32(m3)).val[1]; \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_1_2(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vzip_u32(vget_high_u32(m2), vget_low_u32(m2)).val[0]; \ + t1 = vext_u32(vget_high_u32(m3), vget_high_u32(m1), 1); \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_1_3(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vext_u32(vget_low_u32(m0), vget_low_u32(m0), 1); \ + t1 = vzip_u32(vget_high_u32(m2), vget_low_u32(m1)).val[1]; \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_1_4(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vzip_u32(vget_low_u32(m3), vget_high_u32(m0)).val[0]; \ + t1 = vzip_u32(vget_high_u32(m1), vget_high_u32(m0)).val[1]; \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_2_1(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vext_u32(vget_high_u32(m2), vget_low_u32(m3), 1); \ + t1 = vzip_u32(vget_low_u32(m1), vget_high_u32(m3)).val[1]; \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_2_2(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vzip_u32(vget_low_u32(m2), vget_low_u32(m0)).val[0]; \ + t1 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_high_u32(m0), vget_low_u32(m3)); \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_2_3(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_high_u32(m2), vget_high_u32(m0)); \ + t1 = vzip_u32(vget_high_u32(m1), vget_low_u32(m2)).val[1]; \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_2_4(buf) \ + 
do { uint32x2_t t0, t1; \ + t0 = vzip_u32(vget_high_u32(m3), vget_high_u32(m1)).val[0]; \ + t1 = vext_u32(vget_low_u32(m0), vget_low_u32(m1), 1); \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_3_1(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vzip_u32(vget_high_u32(m1), vget_high_u32(m0)).val[1]; \ + t1 = vzip_u32(vget_low_u32(m3), vget_high_u32(m2)).val[1]; \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_3_2(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vzip_u32(vget_low_u32(m2), vget_low_u32(m0)).val[1]; \ + t1 = vzip_u32(vget_low_u32(m3), vget_high_u32(m3)).val[0]; \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_3_3(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_high_u32(m0), vget_low_u32(m1)); \ + t1 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_low_u32(m1), vget_high_u32(m3)); \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_3_4(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vzip_u32(vget_high_u32(m1), vget_high_u32(m2)).val[0]; \ + t1 = vzip_u32(vget_low_u32(m0), vget_low_u32(m2)).val[0]; \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_4_1(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vzip_u32(vget_low_u32(m2), vget_low_u32(m1)).val[1]; \ + t1 = vzip_u32((vget_high_u32(m0)), vget_high_u32(m2)).val[0]; \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_4_2(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_low_u32(m0), vget_high_u32(m1)); \ + t1 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_low_u32(m1), vget_high_u32(m3)); \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_4_3(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_high_u32(m3), vget_high_u32(m2)); \ + t1 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_high_u32(m1), vget_high_u32(m0)); \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_4_4(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vext_u32(vget_low_u32(m0), vget_low_u32(m3), 1); \ + t1 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_low_u32(m2), vget_low_u32(m3)); \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_5_1(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vzip_u32((vget_high_u32(m0)), vget_high_u32(m1)).val[0]; \ + t1 = vzip_u32(vget_low_u32(m0), vget_low_u32(m2)).val[0]; \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_5_2(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vzip_u32(vget_low_u32(m3), vget_high_u32(m2)).val[0]; \ + t1 = vzip_u32(vget_high_u32(m2), vget_high_u32(m0)).val[1]; \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_5_3(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_low_u32(m1), vget_high_u32(m1)); \ + t1 = vzip_u32(vget_high_u32(m3), vget_low_u32(m0)).val[1]; \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_5_4(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vzip_u32(vget_low_u32(m3), vget_low_u32(m1)).val[1]; \ + t1 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_high_u32(m3), vget_low_u32(m2)); \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_6_1(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_low_u32(m3), vget_low_u32(m0)); \ + t1 = vzip_u32(vget_high_u32(m3), vget_low_u32(m1)).val[0]; \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_6_2(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vzip_u32(vget_low_u32(m1), vget_high_u32(m3)).val[1]; \ 
+ t1 = vext_u32(vget_low_u32(m3), vget_high_u32(m2), 1); \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_6_3(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vzip_u32(vget_low_u32(m0), vget_high_u32(m1)).val[0]; \ + t1 = vext_u32(vget_low_u32(m2), vget_low_u32(m2), 1); \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_6_4(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vzip_u32(vget_high_u32(m1), vget_high_u32(m0)).val[1]; \ + t1 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_high_u32(m0), vget_high_u32(m2)); \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_7_1(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vzip_u32(vget_low_u32(m3), vget_high_u32(m1)).val[1]; \ + t1 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_low_u32(m3), vget_high_u32(m0)); \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_7_2(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vext_u32(vget_high_u32(m2), vget_high_u32(m3), 1); \ + t1 = vzip_u32(vget_low_u32(m0), vget_low_u32(m2)).val[1]; \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_7_3(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vzip_u32(vget_low_u32(m1), vget_high_u32(m3)).val[1]; \ + t1 = vzip_u32(vget_low_u32(m2), vget_high_u32(m0)).val[0]; \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_7_4(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vzip_u32(vget_low_u32(m0), vget_low_u32(m1)).val[0]; \ + t1 = vzip_u32(vget_high_u32(m1), vget_high_u32(m2)).val[0]; \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_8_1(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vzip_u32(vget_high_u32(m1), vget_high_u32(m3)).val[0]; \ + t1 = vext_u32(vget_high_u32(m2), vget_low_u32(m0), 1); \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_8_2(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vzip_u32(vget_high_u32(m3), vget_low_u32(m2)).val[1]; \ + t1 = vext_u32(vget_high_u32(m0), vget_low_u32(m2), 1); \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_8_3(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_low_u32(m3), vget_low_u32(m3)); \ + t1 = vext_u32(vget_low_u32(m0), vget_high_u32(m2), 1); \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_8_4(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_high_u32(m0), vget_high_u32(m1)); \ + t1 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_low_u32(m1), vget_low_u32(m1)); \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_9_1(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vzip_u32(vget_high_u32(m2), vget_low_u32(m2)).val[0]; \ + t1 = vzip_u32(vget_high_u32(m1), vget_low_u32(m0)).val[1]; \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_9_2(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vzip_u32((vget_high_u32(m0)), vget_low_u32(m1)).val[0]; \ + t1 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_high_u32(m1), vget_low_u32(m1)); \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_9_3(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vzip_u32(vget_high_u32(m3), vget_low_u32(m2)).val[1]; \ + t1 = vzip_u32((vget_high_u32(m0)), vget_low_u32(m3)).val[1]; \ + buf = vcombine_u32(t0, t1); } while(0) + + #define BLAKE2S_LOAD_MSG_9_4(buf) \ + do { uint32x2_t t0, t1; \ + t0 = vext_u32(vget_high_u32(m2), vget_high_u32(m3), 1); \ + t1 = vzip_u32(vget_low_u32(m3), vget_low_u32(m0)).val[0]; \ + buf = vcombine_u32(t0, t1); } while(0) + + #define vrorq_n_u32_16(x) 
vreinterpretq_u32_u16(vrev32q_u16(vreinterpretq_u16_u32(x))) + + #define vrorq_n_u32_8(x) vsriq_n_u32(vshlq_n_u32((x), 24), (x), 8) + + #define vrorq_n_u32(x, c) vsriq_n_u32(vshlq_n_u32((x), 32-(c)), (x), (c)) + + #define BLAKE2S_G1(row1,row2,row3,row4,buf) \ + do { \ + row1 = vaddq_u32(vaddq_u32(row1, buf), row2); row4 = veorq_u32(row4, row1); \ + row4 = vrorq_n_u32_16(row4); row3 = vaddq_u32(row3, row4); \ + row2 = veorq_u32(row2, row3); row2 = vrorq_n_u32(row2, 12); \ + } while(0) + + #define BLAKE2S_G2(row1,row2,row3,row4,buf) \ + do { \ + row1 = vaddq_u32(vaddq_u32(row1, buf), row2); row4 = veorq_u32(row4, row1); \ + row4 = vrorq_n_u32_8(row4); row3 = vaddq_u32(row3, row4); \ + row2 = veorq_u32(row2, row3); row2 = vrorq_n_u32(row2, 7); \ + } while(0) + + #define BLAKE2S_DIAGONALIZE(row1,row2,row3,row4) \ + do { \ + row4 = vextq_u32(row4, row4, 3); row3 = vextq_u32(row3, row3, 2); row2 = vextq_u32(row2, row2, 1); \ + } while(0) + + #define BLAKE2S_UNDIAGONALIZE(row1,row2,row3,row4) \ + do { \ + row4 = vextq_u32(row4, row4, 1); \ + row3 = vextq_u32(row3, row3, 2); \ + row2 = vextq_u32(row2, row2, 3); \ + } while(0) + + #define BLAKE2S_ROUND(r) \ + do { \ + uint32x4_t buf1, buf2, buf3, buf4; \ + BLAKE2S_LOAD_MSG_ ##r ##_1(buf1); \ + BLAKE2S_G1(row1,row2,row3,row4,buf1); \ + BLAKE2S_LOAD_MSG_ ##r ##_2(buf2); \ + BLAKE2S_G2(row1,row2,row3,row4,buf2); \ + BLAKE2S_DIAGONALIZE(row1,row2,row3,row4); \ + BLAKE2S_LOAD_MSG_ ##r ##_3(buf3); \ + BLAKE2S_G1(row1,row2,row3,row4,buf3); \ + BLAKE2S_LOAD_MSG_ ##r ##_4(buf4); \ + BLAKE2S_G2(row1,row2,row3,row4,buf4); \ + BLAKE2S_UNDIAGONALIZE(row1,row2,row3,row4); \ + } while(0) + + CRYPTOPP_ASSERT(IsAlignedOn(&state.h[0],GetAlignmentOf())); + CRYPTOPP_ASSERT(IsAlignedOn(&state.t[0],GetAlignmentOf())); + CRYPTOPP_ASSERT(IsAlignedOn(&state.f[0],GetAlignmentOf())); + + const uint32x4_t m0 = vreinterpretq_u32_u8(vld1q_u8((input + 00))); + const uint32x4_t m1 = vreinterpretq_u32_u8(vld1q_u8((input + 16))); + const uint32x4_t m2 = vreinterpretq_u32_u8(vld1q_u8((input + 32))); + const uint32x4_t m3 = vreinterpretq_u32_u8(vld1q_u8((input + 48))); + + uint32x4_t row1, row2, row3, row4; + + const uint32x4_t f0 = row1 = vld1q_u32(&state.h[0]); + const uint32x4_t f1 = row2 = vld1q_u32(&state.h[4]); + row3 = vld1q_u32(&BLAKE2S_IV[0]); + row4 = veorq_u32(vld1q_u32(&BLAKE2S_IV[4]), vld1q_u32(&state.t[0])); + + BLAKE2S_ROUND(0); + BLAKE2S_ROUND(1); + BLAKE2S_ROUND(2); + BLAKE2S_ROUND(3); + BLAKE2S_ROUND(4); + BLAKE2S_ROUND(5); + BLAKE2S_ROUND(6); + BLAKE2S_ROUND(7); + BLAKE2S_ROUND(8); + BLAKE2S_ROUND(9); + + vst1q_u32(&state.h[0], veorq_u32(f0, veorq_u32(row1, row3))); + vst1q_u32(&state.h[4], veorq_u32(f1, veorq_u32(row2, row4))); +} + +void BLAKE2_Compress64_NEON(const byte* input, BLAKE2_State& state) +{ + #define BLAKE2B_LOAD_MSG_0_1(b0, b1) \ + do { b0 = vcombine_u64(vget_low_u64(m0), vget_low_u64(m1)); b1 = vcombine_u64(vget_low_u64(m2), vget_low_u64(m3)); } while(0) + + #define BLAKE2B_LOAD_MSG_0_2(b0, b1) \ + do { b0 = vcombine_u64(vget_high_u64(m0), vget_high_u64(m1)); b1 = vcombine_u64(vget_high_u64(m2), vget_high_u64(m3)); } while(0) + + #define BLAKE2B_LOAD_MSG_0_3(b0, b1) \ + do { b0 = vcombine_u64(vget_low_u64(m4), vget_low_u64(m5)); b1 = vcombine_u64(vget_low_u64(m6), vget_low_u64(m7)); } while(0) + + #define BLAKE2B_LOAD_MSG_0_4(b0, b1) \ + do { b0 = vcombine_u64(vget_high_u64(m4), vget_high_u64(m5)); b1 = vcombine_u64(vget_high_u64(m6), vget_high_u64(m7)); } while(0) + + #define BLAKE2B_LOAD_MSG_1_1(b0, b1) \ + do { b0 = vcombine_u64(vget_low_u64(m7), 
vget_low_u64(m2)); b1 = vcombine_u64(vget_high_u64(m4), vget_high_u64(m6)); } while(0) + + #define BLAKE2B_LOAD_MSG_1_2(b0, b1) \ + do { b0 = vcombine_u64(vget_low_u64(m5), vget_low_u64(m4)); b1 = vextq_u64(m7, m3, 1); } while(0) + + #define BLAKE2B_LOAD_MSG_1_3(b0, b1) \ + do { b0 = vextq_u64(m0, m0, 1); b1 = vcombine_u64(vget_high_u64(m5), vget_high_u64(m2)); } while(0) + + #define BLAKE2B_LOAD_MSG_1_4(b0, b1) \ + do { b0 = vcombine_u64(vget_low_u64(m6), vget_low_u64(m1)); b1 = vcombine_u64(vget_high_u64(m3), vget_high_u64(m1)); } while(0) + + #define BLAKE2B_LOAD_MSG_2_1(b0, b1) \ + do { b0 = vextq_u64(m5, m6, 1); b1 = vcombine_u64(vget_high_u64(m2), vget_high_u64(m7)); } while(0) + + #define BLAKE2B_LOAD_MSG_2_2(b0, b1) \ + do { b0 = vcombine_u64(vget_low_u64(m4), vget_low_u64(m0)); b1 = vcombine_u64(vget_low_u64(m1), vget_high_u64(m6)); } while(0) + + #define BLAKE2B_LOAD_MSG_2_3(b0, b1) \ + do { b0 = vcombine_u64(vget_low_u64(m5), vget_high_u64(m1)); b1 = vcombine_u64(vget_high_u64(m3), vget_high_u64(m4)); } while(0) + + #define BLAKE2B_LOAD_MSG_2_4(b0, b1) \ + do { b0 = vcombine_u64(vget_low_u64(m7), vget_low_u64(m3)); b1 = vextq_u64(m0, m2, 1); } while(0) + + #define BLAKE2B_LOAD_MSG_3_1(b0, b1) \ + do { b0 = vcombine_u64(vget_high_u64(m3), vget_high_u64(m1)); b1 = vcombine_u64(vget_high_u64(m6), vget_high_u64(m5)); } while(0) + + #define BLAKE2B_LOAD_MSG_3_2(b0, b1) \ + do { b0 = vcombine_u64(vget_high_u64(m4), vget_high_u64(m0)); b1 = vcombine_u64(vget_low_u64(m6), vget_low_u64(m7)); } while(0) + + #define BLAKE2B_LOAD_MSG_3_3(b0, b1) \ + do { b0 = vcombine_u64(vget_low_u64(m1), vget_high_u64(m2)); b1 = vcombine_u64(vget_low_u64(m2), vget_high_u64(m7)); } while(0) + + #define BLAKE2B_LOAD_MSG_3_4(b0, b1) \ + do { b0 = vcombine_u64(vget_low_u64(m3), vget_low_u64(m5)); b1 = vcombine_u64(vget_low_u64(m0), vget_low_u64(m4)); } while(0) + + #define BLAKE2B_LOAD_MSG_4_1(b0, b1) \ + do { b0 = vcombine_u64(vget_high_u64(m4), vget_high_u64(m2)); b1 = vcombine_u64(vget_low_u64(m1), vget_low_u64(m5)); } while(0) + + #define BLAKE2B_LOAD_MSG_4_2(b0, b1) \ + do { b0 = vcombine_u64(vget_low_u64(m0), vget_high_u64(m3)); b1 = vcombine_u64(vget_low_u64(m2), vget_high_u64(m7)); } while(0) + + #define BLAKE2B_LOAD_MSG_4_3(b0, b1) \ + do { b0 = vcombine_u64(vget_low_u64(m7), vget_high_u64(m5)); b1 = vcombine_u64(vget_low_u64(m3), vget_high_u64(m1)); } while(0) + + #define BLAKE2B_LOAD_MSG_4_4(b0, b1) \ + do { b0 = vextq_u64(m0, m6, 1); b1 = vcombine_u64(vget_low_u64(m4), vget_high_u64(m6)); } while(0) + + #define BLAKE2B_LOAD_MSG_5_1(b0, b1) \ + do { b0 = vcombine_u64(vget_low_u64(m1), vget_low_u64(m3)); b1 = vcombine_u64(vget_low_u64(m0), vget_low_u64(m4)); } while(0) + + #define BLAKE2B_LOAD_MSG_5_2(b0, b1) \ + do { b0 = vcombine_u64(vget_low_u64(m6), vget_low_u64(m5)); b1 = vcombine_u64(vget_high_u64(m5), vget_high_u64(m1)); } while(0) + + #define BLAKE2B_LOAD_MSG_5_3(b0, b1) \ + do { b0 = vcombine_u64(vget_low_u64(m2), vget_high_u64(m3)); b1 = vcombine_u64(vget_high_u64(m7), vget_high_u64(m0)); } while(0) + + #define BLAKE2B_LOAD_MSG_5_4(b0, b1) \ + do { b0 = vcombine_u64(vget_high_u64(m6), vget_high_u64(m2)); b1 = vcombine_u64(vget_low_u64(m7), vget_high_u64(m4)); } while(0) + + #define BLAKE2B_LOAD_MSG_6_1(b0, b1) \ + do { b0 = vcombine_u64(vget_low_u64(m6), vget_high_u64(m0)); b1 = vcombine_u64(vget_low_u64(m7), vget_low_u64(m2)); } while(0) + + #define BLAKE2B_LOAD_MSG_6_2(b0, b1) \ + do { b0 = vcombine_u64(vget_high_u64(m2), vget_high_u64(m7)); b1 = vextq_u64(m6, m5, 1); } while(0) + + 
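+ // Each BLAKE2B_LOAD_MSG_<r>_<i> macro hard-codes row <r> of the BLAKE2b sigma
+ // permutation: it gathers the four message words injected at step <i> of round
+ // <r> into the vector pair (b0, b1) with vget_low_u64/vget_high_u64, vcombine_u64
+ // and vextq_u64, so no run-time lookup into BLAKE2B_SIGMA is needed.
+ 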
#define BLAKE2B_LOAD_MSG_6_3(b0, b1) \ + do { b0 = vcombine_u64(vget_low_u64(m0), vget_low_u64(m3)); b1 = vextq_u64(m4, m4, 1); } while(0) + + #define BLAKE2B_LOAD_MSG_6_4(b0, b1) \ + do { b0 = vcombine_u64(vget_high_u64(m3), vget_high_u64(m1)); b1 = vcombine_u64(vget_low_u64(m1), vget_high_u64(m5)); } while(0) + + #define BLAKE2B_LOAD_MSG_7_1(b0, b1) \ + do { b0 = vcombine_u64(vget_high_u64(m6), vget_high_u64(m3)); b1 = vcombine_u64(vget_low_u64(m6), vget_high_u64(m1)); } while(0) + + #define BLAKE2B_LOAD_MSG_7_2(b0, b1) \ + do { b0 = vextq_u64(m5, m7, 1); b1 = vcombine_u64(vget_high_u64(m0), vget_high_u64(m4)); } while(0) + + #define BLAKE2B_LOAD_MSG_7_3(b0, b1) \ + do { b0 = vcombine_u64(vget_high_u64(m2), vget_high_u64(m7)); b1 = vcombine_u64(vget_low_u64(m4), vget_low_u64(m1)); } while(0) + + #define BLAKE2B_LOAD_MSG_7_4(b0, b1) \ + do { b0 = vcombine_u64(vget_low_u64(m0), vget_low_u64(m2)); b1 = vcombine_u64(vget_low_u64(m3), vget_low_u64(m5)); } while(0) + + #define BLAKE2B_LOAD_MSG_8_1(b0, b1) \ + do { b0 = vcombine_u64(vget_low_u64(m3), vget_low_u64(m7)); b1 = vextq_u64(m5, m0, 1); } while(0) + + #define BLAKE2B_LOAD_MSG_8_2(b0, b1) \ + do { b0 = vcombine_u64(vget_high_u64(m7), vget_high_u64(m4)); b1 = vextq_u64(m1, m4, 1); } while(0) + + #define BLAKE2B_LOAD_MSG_8_3(b0, b1) \ + do { b0 = m6; b1 = vextq_u64(m0, m5, 1); } while(0) + + #define BLAKE2B_LOAD_MSG_8_4(b0, b1) \ + do { b0 = vcombine_u64(vget_low_u64(m1), vget_high_u64(m3)); b1 = m2; } while(0) + + #define BLAKE2B_LOAD_MSG_9_1(b0, b1) \ + do { b0 = vcombine_u64(vget_low_u64(m5), vget_low_u64(m4)); b1 = vcombine_u64(vget_high_u64(m3), vget_high_u64(m0)); } while(0) + + #define BLAKE2B_LOAD_MSG_9_2(b0, b1) \ + do { b0 = vcombine_u64(vget_low_u64(m1), vget_low_u64(m2)); b1 = vcombine_u64(vget_low_u64(m3), vget_high_u64(m2)); } while(0) + + #define BLAKE2B_LOAD_MSG_9_3(b0, b1) \ + do { b0 = vcombine_u64(vget_high_u64(m7), vget_high_u64(m4)); b1 = vcombine_u64(vget_high_u64(m1), vget_high_u64(m6)); } while(0) + + #define BLAKE2B_LOAD_MSG_9_4(b0, b1) \ + do { b0 = vextq_u64(m5, m7, 1); b1 = vcombine_u64(vget_low_u64(m6), vget_low_u64(m0)); } while(0) + + #define BLAKE2B_LOAD_MSG_10_1(b0, b1) \ + do { b0 = vcombine_u64(vget_low_u64(m0), vget_low_u64(m1)); b1 = vcombine_u64(vget_low_u64(m2), vget_low_u64(m3)); } while(0) + + #define BLAKE2B_LOAD_MSG_10_2(b0, b1) \ + do { b0 = vcombine_u64(vget_high_u64(m0), vget_high_u64(m1)); b1 = vcombine_u64(vget_high_u64(m2), vget_high_u64(m3)); } while(0) + + #define BLAKE2B_LOAD_MSG_10_3(b0, b1) \ + do { b0 = vcombine_u64(vget_low_u64(m4), vget_low_u64(m5)); b1 = vcombine_u64(vget_low_u64(m6), vget_low_u64(m7)); } while(0) + + #define BLAKE2B_LOAD_MSG_10_4(b0, b1) \ + do { b0 = vcombine_u64(vget_high_u64(m4), vget_high_u64(m5)); b1 = vcombine_u64(vget_high_u64(m6), vget_high_u64(m7)); } while(0) + + #define BLAKE2B_LOAD_MSG_11_1(b0, b1) \ + do { b0 = vcombine_u64(vget_low_u64(m7), vget_low_u64(m2)); b1 = vcombine_u64(vget_high_u64(m4), vget_high_u64(m6)); } while(0) + + #define BLAKE2B_LOAD_MSG_11_2(b0, b1) \ + do { b0 = vcombine_u64(vget_low_u64(m5), vget_low_u64(m4)); b1 = vextq_u64(m7, m3, 1); } while(0) + + #define BLAKE2B_LOAD_MSG_11_3(b0, b1) \ + do { b0 = vextq_u64(m0, m0, 1); b1 = vcombine_u64(vget_high_u64(m5), vget_high_u64(m2)); } while(0) + + #define BLAKE2B_LOAD_MSG_11_4(b0, b1) \ + do { b0 = vcombine_u64(vget_low_u64(m6), vget_low_u64(m1)); b1 = vcombine_u64(vget_high_u64(m3), vget_high_u64(m1)); } while(0) + + #define vrorq_n_u64_32(x) 
vreinterpretq_u64_u32(vrev64q_u32(vreinterpretq_u32_u64((x)))) + + #define vrorq_n_u64_24(x) vcombine_u64(\ + vreinterpret_u64_u8(vext_u8(vreinterpret_u8_u64(vget_low_u64(x)), vreinterpret_u8_u64(vget_low_u64(x)), 3)), \ + vreinterpret_u64_u8(vext_u8(vreinterpret_u8_u64(vget_high_u64(x)), vreinterpret_u8_u64(vget_high_u64(x)), 3))) + + #define vrorq_n_u64_16(x) vcombine_u64(\ + vreinterpret_u64_u8(vext_u8(vreinterpret_u8_u64(vget_low_u64(x)), vreinterpret_u8_u64(vget_low_u64(x)), 2)), \ + vreinterpret_u64_u8(vext_u8(vreinterpret_u8_u64(vget_high_u64(x)), vreinterpret_u8_u64(vget_high_u64(x)), 2))) + + #define vrorq_n_u64_63(x) veorq_u64(vaddq_u64(x, x), vshrq_n_u64(x, 63)) + + #define BLAKE2B_G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1) \ + do { \ + row1l = vaddq_u64(vaddq_u64(row1l, b0), row2l); \ + row1h = vaddq_u64(vaddq_u64(row1h, b1), row2h); \ + row4l = veorq_u64(row4l, row1l); row4h = veorq_u64(row4h, row1h); \ + row4l = vrorq_n_u64_32(row4l); row4h = vrorq_n_u64_32(row4h); \ + row3l = vaddq_u64(row3l, row4l); row3h = vaddq_u64(row3h, row4h); \ + row2l = veorq_u64(row2l, row3l); row2h = veorq_u64(row2h, row3h); \ + row2l = vrorq_n_u64_24(row2l); row2h = vrorq_n_u64_24(row2h); \ + } while(0) + + #define BLAKE2B_G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1) \ + do { \ + row1l = vaddq_u64(vaddq_u64(row1l, b0), row2l); \ + row1h = vaddq_u64(vaddq_u64(row1h, b1), row2h); \ + row4l = veorq_u64(row4l, row1l); row4h = veorq_u64(row4h, row1h); \ + row4l = vrorq_n_u64_16(row4l); row4h = vrorq_n_u64_16(row4h); \ + row3l = vaddq_u64(row3l, row4l); row3h = vaddq_u64(row3h, row4h); \ + row2l = veorq_u64(row2l, row3l); row2h = veorq_u64(row2h, row3h); \ + row2l = vrorq_n_u64_63(row2l); row2h = vrorq_n_u64_63(row2h); \ + } while(0) + + #define BLAKE2B_DIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h) \ + do { \ + uint64x2_t t0 = vextq_u64(row2l, row2h, 1); \ + uint64x2_t t1 = vextq_u64(row2h, row2l, 1); \ + row2l = t0; row2h = t1; t0 = row3l; row3l = row3h; row3h = t0; \ + t0 = vextq_u64(row4h, row4l, 1); t1 = vextq_u64(row4l, row4h, 1); \ + row4l = t0; row4h = t1; \ + } while(0) + + #define BLAKE2B_UNDIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h) \ + do { \ + uint64x2_t t0 = vextq_u64(row2h, row2l, 1); \ + uint64x2_t t1 = vextq_u64(row2l, row2h, 1); \ + row2l = t0; row2h = t1; t0 = row3l; row3l = row3h; row3h = t0; \ + t0 = vextq_u64(row4l, row4h, 1); t1 = vextq_u64(row4h, row4l, 1); \ + row4l = t0; row4h = t1; \ + } while(0) + + #define BLAKE2B_ROUND(r) \ + do { \ + uint64x2_t b0, b1; \ + BLAKE2B_LOAD_MSG_ ##r ##_1(b0, b1); \ + BLAKE2B_G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ + BLAKE2B_LOAD_MSG_ ##r ##_2(b0, b1); \ + BLAKE2B_G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ + BLAKE2B_DIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); \ + BLAKE2B_LOAD_MSG_ ##r ##_3(b0, b1); \ + BLAKE2B_G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ + BLAKE2B_LOAD_MSG_ ##r ##_4(b0, b1); \ + BLAKE2B_G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ + BLAKE2B_UNDIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); \ + } while(0) + + CRYPTOPP_ASSERT(IsAlignedOn(&state.h[0],GetAlignmentOf())); + CRYPTOPP_ASSERT(IsAlignedOn(&state.t[0],GetAlignmentOf())); + CRYPTOPP_ASSERT(IsAlignedOn(&state.f[0],GetAlignmentOf())); + + const uint64x2_t m0 = vreinterpretq_u64_u8(vld1q_u8(input + 00)); + const uint64x2_t m1 = vreinterpretq_u64_u8(vld1q_u8(input + 16)); + const uint64x2_t m2 = 
vreinterpretq_u64_u8(vld1q_u8(input + 32)); + const uint64x2_t m3 = vreinterpretq_u64_u8(vld1q_u8(input + 48)); + const uint64x2_t m4 = vreinterpretq_u64_u8(vld1q_u8(input + 64)); + const uint64x2_t m5 = vreinterpretq_u64_u8(vld1q_u8(input + 80)); + const uint64x2_t m6 = vreinterpretq_u64_u8(vld1q_u8(input + 96)); + const uint64x2_t m7 = vreinterpretq_u64_u8(vld1q_u8(input + 112)); + + uint64x2_t row1l, row1h, row2l, row2h; + uint64x2_t row3l, row3h, row4l, row4h; + + const uint64x2_t h0 = row1l = vld1q_u64(&state.h[0]); + const uint64x2_t h1 = row1h = vld1q_u64(&state.h[2]); + const uint64x2_t h2 = row2l = vld1q_u64(&state.h[4]); + const uint64x2_t h3 = row2h = vld1q_u64(&state.h[6]); + + row3l = vld1q_u64(&BLAKE2B_IV[0]); + row3h = vld1q_u64(&BLAKE2B_IV[2]); + row4l = veorq_u64(vld1q_u64(&BLAKE2B_IV[4]), vld1q_u64(&state.t[0])); + row4h = veorq_u64(vld1q_u64(&BLAKE2B_IV[6]), vld1q_u64(&state.f[0])); + + BLAKE2B_ROUND(0); + BLAKE2B_ROUND(1); + BLAKE2B_ROUND(2); + BLAKE2B_ROUND(3); + BLAKE2B_ROUND(4); + BLAKE2B_ROUND(5); + BLAKE2B_ROUND(6); + BLAKE2B_ROUND(7); + BLAKE2B_ROUND(8); + BLAKE2B_ROUND(9); + BLAKE2B_ROUND(10); + BLAKE2B_ROUND(11); + + vst1q_u64(&state.h[0], veorq_u64(h0, veorq_u64(row1l, row3l))); + vst1q_u64(&state.h[2], veorq_u64(h1, veorq_u64(row1h, row3h))); + vst1q_u64(&state.h[4], veorq_u64(h2, veorq_u64(row2l, row4l))); + vst1q_u64(&state.h[6], veorq_u64(h3, veorq_u64(row2h, row4h))); +} +#endif // CRYPTOPP_BOOL_ARM32 && CRYPTOPP_ARM_NEON_AVAILABLE + +NAMESPACE_END \ No newline at end of file diff --git a/blake2.cpp b/blake2.cpp index a5279c31..10c74d3c 100644 --- a/blake2.cpp +++ b/blake2.cpp @@ -14,7 +14,7 @@ NAMESPACE_BEGIN(CryptoPP) // Uncomment for benchmarking C++ against SSE2 or NEON // #undef CRYPTOPP_SSE42_AVAILABLE -// #undef CRYPTOPP_BOOL_NEON_INTRINSICS_AVAILABLE +// #undef CRYPTOPP_ARM_NEON_AVAILABLE // Apple Clang 6.0/Clang 3.5 does not have SSSE3 intrinsics // http://llvm.org/bugs/show_bug.cgi?id=20213 @@ -36,84 +36,46 @@ inline __m128i MM_SET_EPI64X(const word64 a, const word64 b) #endif // C/C++ implementation -static void BLAKE2_CXX_Compress32(const byte* input, BLAKE2_State& state); -static void BLAKE2_CXX_Compress64(const byte* input, BLAKE2_State& state); +static void BLAKE2_Compress32_CXX(const byte* input, BLAKE2_State& state); +static void BLAKE2_Compress64_CXX(const byte* input, BLAKE2_State& state); // Also see http://github.com/weidai11/cryptopp/issues/247 for SunCC 5.12 #if CRYPTOPP_BOOL_SSE2_INTRINSICS_AVAILABLE -static void BLAKE2_SSE2_Compress32(const byte* input, BLAKE2_State& state); +static void BLAKE2_Compress32_SSE2(const byte* input, BLAKE2_State& state); # if (__SUNPRO_CC != 0x5120) -static void BLAKE2_SSE2_Compress64(const byte* input, BLAKE2_State& state); +static void BLAKE2_Compress64_SSE2(const byte* input, BLAKE2_State& state); # endif #endif #if CRYPTOPP_SSE42_AVAILABLE -static void BLAKE2_SSE4_Compress32(const byte* input, BLAKE2_State& state); -static void BLAKE2_SSE4_Compress64(const byte* input, BLAKE2_State& state); +extern void BLAKE2_Compress32_SSE4(const byte* input, BLAKE2_State& state); +extern void BLAKE2_Compress64_SSE4(const byte* input, BLAKE2_State& state); #endif // Disable NEON for Cortex-A53 and A57. 
Also see http://github.com/weidai11/cryptopp/issues/367 -#if CRYPTOPP_BOOL_ARM32 && CRYPTOPP_BOOL_NEON_INTRINSICS_AVAILABLE -static void BLAKE2_NEON_Compress32(const byte* input, BLAKE2_State& state); -static void BLAKE2_NEON_Compress64(const byte* input, BLAKE2_State& state); +#if CRYPTOPP_BOOL_ARM32 && CRYPTOPP_ARM_NEON_AVAILABLE +extern void BLAKE2_Compress32_NEON(const byte* input, BLAKE2_State& state); +extern void BLAKE2_Compress64_NEON(const byte* input, BLAKE2_State& state); #endif -#ifndef CRYPTOPP_DOXYGEN_PROCESSING - -// IV and Sigma are a better fit as part of BLAKE2_Base, but that places -// the constants out of reach for the NEON, SSE2 and SSE4 implementations. -template -struct CRYPTOPP_NO_VTABLE BLAKE2_IV {}; - -//! \brief BLAKE2s initialization vector specialization -template<> -struct CRYPTOPP_NO_VTABLE BLAKE2_IV -{ - CRYPTOPP_CONSTANT(IVSIZE = 8) - // Always align for NEON and SSE - CRYPTOPP_ALIGN_DATA(16) static const word32 iv[8]; -}; +ANONYMOUS_NAMESPACE_BEGIN CRYPTOPP_ALIGN_DATA(16) -const word32 BLAKE2_IV::iv[8] = { +const word32 BLAKE2S_IV[8] = { 0x6A09E667UL, 0xBB67AE85UL, 0x3C6EF372UL, 0xA54FF53AUL, 0x510E527FUL, 0x9B05688CUL, 0x1F83D9ABUL, 0x5BE0CD19UL }; -#define BLAKE2S_IV(n) BLAKE2_IV::iv[n] - -template<> -struct CRYPTOPP_NO_VTABLE BLAKE2_IV -{ - CRYPTOPP_CONSTANT(IVSIZE = 8) - // Always align for NEON and SSE - CRYPTOPP_ALIGN_DATA(16) static const word64 iv[8]; -}; - CRYPTOPP_ALIGN_DATA(16) -const word64 BLAKE2_IV::iv[8] = { +const word64 BLAKE2B_IV[8] = { W64LIT(0x6a09e667f3bcc908), W64LIT(0xbb67ae8584caa73b), W64LIT(0x3c6ef372fe94f82b), W64LIT(0xa54ff53a5f1d36f1), W64LIT(0x510e527fade682d1), W64LIT(0x9b05688c2b3e6c1f), W64LIT(0x1f83d9abfb41bd6b), W64LIT(0x5be0cd19137e2179) }; -#define BLAKE2B_IV(n) BLAKE2_IV::iv[n] - -// IV and Sigma are a better fit as part of BLAKE2_Base, but that places -// the constants out of reach for the NEON, SSE2 and SSE4 implementations. -template -struct CRYPTOPP_NO_VTABLE BLAKE2_Sigma {}; - -template<> -struct CRYPTOPP_NO_VTABLE BLAKE2_Sigma -{ - // Always align for NEON and SSE - CRYPTOPP_ALIGN_DATA(16) static const byte sigma[10][16]; -}; - CRYPTOPP_ALIGN_DATA(16) -const byte BLAKE2_Sigma::sigma[10][16] = { +const byte BLAKE2S_SIGMA[10][16] = { { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }, { 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 }, @@ -126,16 +88,8 @@ const byte BLAKE2_Sigma::sigma[10][16] = { { 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13 , 0 }, }; -//! 
\brief BLAKE2b sigma table specialization -template<> -struct CRYPTOPP_NO_VTABLE BLAKE2_Sigma -{ - // Always align for NEON and SSE - CRYPTOPP_ALIGN_DATA(16) static const byte sigma[12][16]; -}; - CRYPTOPP_ALIGN_DATA(16) -const byte BLAKE2_Sigma::sigma[12][16] = { +const byte BLAKE2B_SIGMA[12][16] = { { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }, { 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 }, @@ -157,45 +111,45 @@ pfnCompress64 InitializeCompress64Fn() { #if CRYPTOPP_SSE42_AVAILABLE if (HasSSE4()) - return &BLAKE2_SSE4_Compress64; + return &BLAKE2_Compress64_SSE4; else #endif #if CRYPTOPP_BOOL_SSE2_INTRINSICS_AVAILABLE # if (__SUNPRO_CC != 0x5120) if (HasSSE2()) - return &BLAKE2_SSE2_Compress64; + return &BLAKE2_Compress64_SSE2; else # endif #endif -#if CRYPTOPP_BOOL_ARM32 && CRYPTOPP_BOOL_NEON_INTRINSICS_AVAILABLE +#if CRYPTOPP_BOOL_ARM32 && CRYPTOPP_ARM_NEON_AVAILABLE if (HasNEON()) - return &BLAKE2_NEON_Compress64; + return &BLAKE2_Compress64_NEON; else #endif - return &BLAKE2_CXX_Compress64; + return &BLAKE2_Compress64_CXX; } pfnCompress32 InitializeCompress32Fn() { #if CRYPTOPP_SSE42_AVAILABLE if (HasSSE4()) - return &BLAKE2_SSE4_Compress32; + return &BLAKE2_Compress32_SSE4; else #endif #if CRYPTOPP_BOOL_SSE2_INTRINSICS_AVAILABLE if (HasSSE2()) - return &BLAKE2_SSE2_Compress32; + return &BLAKE2_Compress32_SSE2; else #endif -#if CRYPTOPP_BOOL_ARM32 && CRYPTOPP_BOOL_NEON_INTRINSICS_AVAILABLE +#if CRYPTOPP_BOOL_ARM32 && CRYPTOPP_ARM_NEON_AVAILABLE if (HasNEON()) - return &BLAKE2_NEON_Compress32; + return &BLAKE2_Compress32_NEON; else #endif - return &BLAKE2_CXX_Compress32; + return &BLAKE2_Compress32_CXX; } -#endif // CRYPTOPP_DOXYGEN_PROCESSING +ANONYMOUS_NAMESPACE_END BLAKE2_ParameterBlock::BLAKE2_ParameterBlock(size_t digestLen, size_t keyLen, const byte* saltStr, size_t saltLen, @@ -398,9 +352,9 @@ void BLAKE2_Base::Restart(const BLAKE2_ParameterBlock& bloc state.t[1] = counter[1]; } + const W* IV = T_64bit ? reinterpret_cast(BLAKE2B_IV) : reinterpret_cast(BLAKE2S_IV); PutBlock put(m_block.data(), &state.h[0]); - put(BLAKE2_IV::iv[0])(BLAKE2_IV::iv[1])(BLAKE2_IV::iv[2])(BLAKE2_IV::iv[3]); - put(BLAKE2_IV::iv[4])(BLAKE2_IV::iv[5])(BLAKE2_IV::iv[6])(BLAKE2_IV::iv[7]); + put(IV[0])(IV[1])(IV[2])(IV[3])(IV[4])(IV[5])(IV[6])(IV[7]); // When BLAKE2 is keyed, the input stream is simply {key||message}. Key it // during Restart to avoid FirstPut and friends. Key size == 0 means no key. 
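Reviewer note on the rotations used throughout these kernels (an aside, not part of the patch): BLAKE2b rotates by 32, 24, 16 and 63 bits. The first three are whole-byte moves, so the SSE4 path performs them with _mm_shuffle_epi8 through the r16/r24 masks and the NEON path with vrev64q_u32/vext_u8, while the rotate by 63 is computed as a rotate left by 1. A minimal scalar sketch of the identities involved, assuming nothing beyond <cstdint>:

#include <cstdint>

// rotr(x, 63) == rotl(x, 1) == (x << 1) | (x >> 63). The vector code computes
// x << 1 as x + x (_mm_add_epi64 / vaddq_u64) and merges the halves with XOR,
// which equals OR here because the two operands share no set bits.
inline uint64_t rotr63_via_add(uint64_t x) { return (x + x) ^ (x >> 63); }

// Generic rotate-right for 0 < c < 64, matching the rotrVariable calls in the
// C++ reference path below; rotations by 32/24/16 hit the byte-permutation
// fast paths in the SIMD kernels instead of this shift pair.
inline uint64_t rotr64(uint64_t x, unsigned c) { return (x >> c) | (x << (64 - c)); }
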
@@ -491,18 +445,18 @@ void BLAKE2_Base::Compress(const byte *input) s_pfn(input, *m_state.data()); } -void BLAKE2_CXX_Compress64(const byte* input, BLAKE2_State& state) +void BLAKE2_Compress64_CXX(const byte* input, BLAKE2_State& state) { #undef BLAKE2_G #undef BLAKE2_ROUND #define BLAKE2_G(r,i,a,b,c,d) \ do { \ - a = a + b + m[BLAKE2_Sigma::sigma[r][2*i+0]]; \ + a = a + b + m[BLAKE2B_SIGMA[r][2*i+0]]; \ d = rotrVariable(d ^ a, 32); \ c = c + d; \ b = rotrVariable(b ^ c, 24); \ - a = a + b + m[BLAKE2_Sigma::sigma[r][2*i+1]]; \ + a = a + b + m[BLAKE2B_SIGMA[r][2*i+1]]; \ d = rotrVariable(d ^ a, 16); \ c = c + d; \ b = rotrVariable(b ^ c, 63); \ @@ -528,14 +482,14 @@ void BLAKE2_CXX_Compress64(const byte* input, BLAKE2_State& state) GetBlock get2(&state.h[0]); get2(v[0])(v[1])(v[2])(v[3])(v[4])(v[5])(v[6])(v[7]); - v[ 8] = BLAKE2B_IV(0); - v[ 9] = BLAKE2B_IV(1); - v[10] = BLAKE2B_IV(2); - v[11] = BLAKE2B_IV(3); - v[12] = state.t[0] ^ BLAKE2B_IV(4); - v[13] = state.t[1] ^ BLAKE2B_IV(5); - v[14] = state.f[0] ^ BLAKE2B_IV(6); - v[15] = state.f[1] ^ BLAKE2B_IV(7); + v[ 8] = BLAKE2B_IV[0]; + v[ 9] = BLAKE2B_IV[1]; + v[10] = BLAKE2B_IV[2]; + v[11] = BLAKE2B_IV[3]; + v[12] = state.t[0] ^ BLAKE2B_IV[4]; + v[13] = state.t[1] ^ BLAKE2B_IV[5]; + v[14] = state.f[0] ^ BLAKE2B_IV[6]; + v[15] = state.f[1] ^ BLAKE2B_IV[7]; BLAKE2_ROUND(0); BLAKE2_ROUND(1); @@ -554,18 +508,18 @@ void BLAKE2_CXX_Compress64(const byte* input, BLAKE2_State& state) state.h[i] = state.h[i] ^ ConditionalByteReverse(LittleEndian::ToEnum(), v[i] ^ v[i + 8]); } -void BLAKE2_CXX_Compress32(const byte* input, BLAKE2_State& state) +void BLAKE2_Compress32_CXX(const byte* input, BLAKE2_State& state) { #undef BLAKE2_G #undef BLAKE2_ROUND #define BLAKE2_G(r,i,a,b,c,d) \ do { \ - a = a + b + m[BLAKE2_Sigma::sigma[r][2*i+0]]; \ + a = a + b + m[BLAKE2S_SIGMA[r][2*i+0]]; \ d = rotrVariable(d ^ a, 16); \ c = c + d; \ b = rotrVariable(b ^ c, 12); \ - a = a + b + m[BLAKE2_Sigma::sigma[r][2*i+1]]; \ + a = a + b + m[BLAKE2S_SIGMA[r][2*i+1]]; \ d = rotrVariable(d ^ a, 8); \ c = c + d; \ b = rotrVariable(b ^ c, 7); \ @@ -591,14 +545,14 @@ void BLAKE2_CXX_Compress32(const byte* input, BLAKE2_State& state GetBlock get2(&state.h[0]); get2(v[0])(v[1])(v[2])(v[3])(v[4])(v[5])(v[6])(v[7]); - v[ 8] = BLAKE2S_IV(0); - v[ 9] = BLAKE2S_IV(1); - v[10] = BLAKE2S_IV(2); - v[11] = BLAKE2S_IV(3); - v[12] = state.t[0] ^ BLAKE2S_IV(4); - v[13] = state.t[1] ^ BLAKE2S_IV(5); - v[14] = state.f[0] ^ BLAKE2S_IV(6); - v[15] = state.f[1] ^ BLAKE2S_IV(7); + v[ 8] = BLAKE2S_IV[0]; + v[ 9] = BLAKE2S_IV[1]; + v[10] = BLAKE2S_IV[2]; + v[11] = BLAKE2S_IV[3]; + v[12] = state.t[0] ^ BLAKE2S_IV[4]; + v[13] = state.t[1] ^ BLAKE2S_IV[5]; + v[14] = state.f[0] ^ BLAKE2S_IV[6]; + v[15] = state.f[1] ^ BLAKE2S_IV[7]; BLAKE2_ROUND(0); BLAKE2_ROUND(1); @@ -616,7 +570,7 @@ void BLAKE2_CXX_Compress32(const byte* input, BLAKE2_State& state } #if CRYPTOPP_BOOL_SSE2_INTRINSICS_AVAILABLE -static void BLAKE2_SSE2_Compress32(const byte* input, BLAKE2_State& state) +static void BLAKE2_Compress32_SSE2(const byte* input, BLAKE2_State& state) { word32 m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12, m13, m14, m15; GetBlock get(input); @@ -628,8 +582,8 @@ static void BLAKE2_SSE2_Compress32(const byte* input, BLAKE2_State& state) +static void BLAKE2_Compress64_SSE2(const byte* input, BLAKE2_State& state) { word64 m0, m1, m2, m3, m4, m5, m6, m7, m8, m9, m10, m11, m12, m13, m14, m15; GetBlock get(input); @@ -1049,10 +1003,10 @@ static void BLAKE2_SSE2_Compress64(const byte* input, BLAKE2_State row1h = 
_mm_loadu_si128((const __m128i*)(const void*)(&state.h[2])); row2l = _mm_loadu_si128((const __m128i*)(const void*)(&state.h[4])); row2h = _mm_loadu_si128((const __m128i*)(const void*)(&state.h[6])); - row3l = _mm_loadu_si128((const __m128i*)(const void*)(&BLAKE2B_IV(0))); - row3h = _mm_loadu_si128((const __m128i*)(const void*)(&BLAKE2B_IV(2))); - row4l = _mm_xor_si128(_mm_loadu_si128((const __m128i*)(const void*)(&BLAKE2B_IV(4))), _mm_loadu_si128((const __m128i*)(const void*)(&state.t[0]))); - row4h = _mm_xor_si128(_mm_loadu_si128((const __m128i*)(const void*)(&BLAKE2B_IV(6))), _mm_loadu_si128((const __m128i*)(const void*)(&state.f[0]))); + row3l = _mm_loadu_si128((const __m128i*)(const void*)(&BLAKE2B_IV[0])); + row3h = _mm_loadu_si128((const __m128i*)(const void*)(&BLAKE2B_IV[2])); + row4l = _mm_xor_si128(_mm_loadu_si128((const __m128i*)(const void*)(&BLAKE2B_IV[4])), _mm_loadu_si128((const __m128i*)(const void*)(&state.t[0]))); + row4h = _mm_xor_si128(_mm_loadu_si128((const __m128i*)(const void*)(&BLAKE2B_IV[6])), _mm_loadu_si128((const __m128i*)(const void*)(&state.f[0]))); b0 = MM_SET_EPI64X(m2, m0); b1 = MM_SET_EPI64X(m6, m4); @@ -1929,2119 +1883,6 @@ static void BLAKE2_SSE2_Compress64(const byte* input, BLAKE2_State # endif // (__SUNPRO_CC != 0x5120) #endif // CRYPTOPP_BOOL_SSE2_INTRINSICS_AVAILABLE -#if CRYPTOPP_SSE42_AVAILABLE -static void BLAKE2_SSE4_Compress32(const byte* input, BLAKE2_State& state) -{ - __m128i row1, row2, row3, row4; - __m128i buf1, buf2, buf3, buf4; - - __m128i t0, t1, t2; - __m128i ff0, ff1; - - const __m128i r8 = _mm_set_epi8(12, 15, 14, 13, 8, 11, 10, 9, 4, 7, 6, 5, 0, 3, 2, 1); - const __m128i r16 = _mm_set_epi8(13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2); - - const __m128i m0 = _mm_loadu_si128((const __m128i*)(const void*)(input + 00)); - const __m128i m1 = _mm_loadu_si128((const __m128i*)(const void*)(input + 16)); - const __m128i m2 = _mm_loadu_si128((const __m128i*)(const void*)(input + 32)); - const __m128i m3 = _mm_loadu_si128((const __m128i*)(const void*)(input + 48)); - - row1 = ff0 = _mm_loadu_si128((const __m128i*)(const void*)(&state.h[0])); - row2 = ff1 = _mm_loadu_si128((const __m128i*)(const void*)(&state.h[4])); - row3 = _mm_setr_epi32(BLAKE2S_IV(0), BLAKE2S_IV(1), BLAKE2S_IV(2), BLAKE2S_IV(3)); - row4 = _mm_xor_si128(_mm_setr_epi32(BLAKE2S_IV(4), BLAKE2S_IV(5), BLAKE2S_IV(6), BLAKE2S_IV(7)), _mm_loadu_si128((const __m128i*)(const void*)(&state.t[0]))); - buf1 = _mm_castps_si128((_mm_shuffle_ps(_mm_castsi128_ps((m0)), _mm_castsi128_ps((m1)), _MM_SHUFFLE(2,0,2,0)))); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf1), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - buf2 = _mm_castps_si128((_mm_shuffle_ps(_mm_castsi128_ps((m0)), _mm_castsi128_ps((m1)), _MM_SHUFFLE(3,1,3,1)))); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf2), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(2,1,0,3)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(0,3,2,1)); - - buf3 = _mm_castps_si128((_mm_shuffle_ps(_mm_castsi128_ps((m2)), _mm_castsi128_ps((m3)), _MM_SHUFFLE(2,0,2,0)))); - - row1 = 
_mm_add_epi32(_mm_add_epi32(row1, buf3), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - buf4 = _mm_castps_si128((_mm_shuffle_ps(_mm_castsi128_ps((m2)), _mm_castsi128_ps((m3)), _MM_SHUFFLE(3,1,3,1)))); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf4), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(0,3,2,1)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(2,1,0,3)); - - t0 = _mm_blend_epi16(m1, m2, 0x0C); - t1 = _mm_slli_si128(m3, 4); - t2 = _mm_blend_epi16(t0, t1, 0xF0); - buf1 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,1,0,3)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf1), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_shuffle_epi32(m2,_MM_SHUFFLE(0,0,2,0)); - t1 = _mm_blend_epi16(m1,m3,0xC0); - t2 = _mm_blend_epi16(t0, t1, 0xF0); - buf2 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,3,0,1)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf2), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(2,1,0,3)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(0,3,2,1)); - - t0 = _mm_slli_si128(m1, 4); - t1 = _mm_blend_epi16(m2, t0, 0x30); - t2 = _mm_blend_epi16(m0, t1, 0xF0); - buf3 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,3,0,1)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf3), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_unpackhi_epi32(m0,m1); - t1 = _mm_slli_si128(m3, 4); - t2 = _mm_blend_epi16(t0, t1, 0x0C); - buf4 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,3,0,1)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf4), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(0,3,2,1)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(2,1,0,3)); - - t0 = _mm_unpackhi_epi32(m2,m3); - t1 = _mm_blend_epi16(m3,m1,0x0C); - t2 = _mm_blend_epi16(t0, t1, 0x0F); - buf1 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(3,1,0,2)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf1), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_unpacklo_epi32(m2,m0); - t1 = _mm_blend_epi16(t0, m0, 0xF0); - t2 = _mm_slli_si128(m3, 8); - buf2 = _mm_blend_epi16(t1, t2, 0xC0); - - row1 = 
_mm_add_epi32(_mm_add_epi32(row1, buf2), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(2,1,0,3)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(0,3,2,1)); - - t0 = _mm_blend_epi16(m0, m2, 0x3C); - t1 = _mm_srli_si128(m1, 12); - t2 = _mm_blend_epi16(t0,t1,0x03); - buf3 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(1,0,3,2)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf3), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_slli_si128(m3, 4); - t1 = _mm_blend_epi16(m0, m1, 0x33); - t2 = _mm_blend_epi16(t1, t0, 0xC0); - buf4 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(0,1,2,3)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf4), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(0,3,2,1)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(2,1,0,3)); - - t0 = _mm_unpackhi_epi32(m0,m1); - t1 = _mm_unpackhi_epi32(t0, m2); - t2 = _mm_blend_epi16(t1, m3, 0x0C); - buf1 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(3,1,0,2)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf1), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_slli_si128(m2, 8); - t1 = _mm_blend_epi16(m3,m0,0x0C); - t2 = _mm_blend_epi16(t1, t0, 0xC0); - buf2 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,0,1,3)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf2), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(2,1,0,3)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(0,3,2,1)); - - t0 = _mm_blend_epi16(m0,m1,0x0F); - t1 = _mm_blend_epi16(t0, m3, 0xC0); - buf3 = _mm_shuffle_epi32(t1, _MM_SHUFFLE(3,0,1,2)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf3), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_unpacklo_epi32(m0,m2); - t1 = _mm_unpackhi_epi32(m1,m2); - buf4 = _mm_unpacklo_epi64(t1,t0); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf4), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(0,3,2,1)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(2,1,0,3)); - - t0 = _mm_unpacklo_epi64(m1,m2); - t1 = 
_mm_unpackhi_epi64(m0,m2); - t2 = _mm_blend_epi16(t0,t1,0x33); - buf1 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,0,1,3)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf1), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_unpackhi_epi64(m1,m3); - t1 = _mm_unpacklo_epi64(m0,m1); - buf2 = _mm_blend_epi16(t0,t1,0x33); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf2), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(2,1,0,3)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(0,3,2,1)); - - t0 = _mm_unpackhi_epi64(m3,m1); - t1 = _mm_unpackhi_epi64(m2,m0); - buf3 = _mm_blend_epi16(t1,t0,0x33); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf3), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_blend_epi16(m0,m2,0x03); - t1 = _mm_slli_si128(t0, 8); - t2 = _mm_blend_epi16(t1,m3,0x0F); - buf4 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(1,2,0,3)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf4), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(0,3,2,1)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(2,1,0,3)); - - t0 = _mm_unpackhi_epi32(m0,m1); - t1 = _mm_unpacklo_epi32(m0,m2); - buf1 = _mm_unpacklo_epi64(t0,t1); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf1), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_srli_si128(m2, 4); - t1 = _mm_blend_epi16(m0,m3,0x03); - buf2 = _mm_blend_epi16(t1,t0,0x3C); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf2), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(2,1,0,3)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(0,3,2,1)); - - t0 = _mm_blend_epi16(m1,m0,0x0C); - t1 = _mm_srli_si128(m3, 4); - t2 = _mm_blend_epi16(t0,t1,0x30); - buf3 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(1,2,3,0)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf3), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_unpacklo_epi64(m1,m2); - t1= _mm_shuffle_epi32(m3, _MM_SHUFFLE(0,2,0,1)); - buf4 = _mm_blend_epi16(t0,t1,0x33); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf4), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = 
_mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(0,3,2,1)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(2,1,0,3)); - - t0 = _mm_slli_si128(m1, 12); - t1 = _mm_blend_epi16(m0,m3,0x33); - buf1 = _mm_blend_epi16(t1,t0,0xC0); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf1), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_blend_epi16(m3,m2,0x30); - t1 = _mm_srli_si128(m1, 4); - t2 = _mm_blend_epi16(t0,t1,0x03); - buf2 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,1,3,0)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf2), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(2,1,0,3)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(0,3,2,1)); - - t0 = _mm_unpacklo_epi64(m0,m2); - t1 = _mm_srli_si128(m1, 4); - buf3 = _mm_shuffle_epi32(_mm_blend_epi16(t0,t1,0x0C), _MM_SHUFFLE(2,3,1,0)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf3), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_unpackhi_epi32(m1,m2); - t1 = _mm_unpackhi_epi64(m0,t0); - buf4 = _mm_shuffle_epi32(t1, _MM_SHUFFLE(3,0,1,2)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf4), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(0,3,2,1)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(2,1,0,3)); - - t0 = _mm_unpackhi_epi32(m0,m1); - t1 = _mm_blend_epi16(t0,m3,0x0F); - buf1 = _mm_shuffle_epi32(t1,_MM_SHUFFLE(2,0,3,1)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf1), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_blend_epi16(m2,m3,0x30); - t1 = _mm_srli_si128(m0,4); - t2 = _mm_blend_epi16(t0,t1,0x03); - buf2 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(1,0,2,3)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf2), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(2,1,0,3)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(0,3,2,1)); - - t0 = _mm_unpackhi_epi64(m0,m3); - t1 = _mm_unpacklo_epi64(m1,m2); - t2 = _mm_blend_epi16(t0,t1,0x3C); - buf3 = _mm_shuffle_epi32(t2,_MM_SHUFFLE(0,2,3,1)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf3), row2); 
- row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_unpacklo_epi32(m0,m1); - t1 = _mm_unpackhi_epi32(m1,m2); - buf4 = _mm_unpacklo_epi64(t0,t1); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf4), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(0,3,2,1)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(2,1,0,3)); - - t0 = _mm_unpackhi_epi32(m1,m3); - t1 = _mm_unpacklo_epi64(t0,m0); - t2 = _mm_blend_epi16(t1,m2,0xC0); - buf1 = _mm_shufflehi_epi16(t2,_MM_SHUFFLE(1,0,3,2)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf1), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_unpackhi_epi32(m0,m3); - t1 = _mm_blend_epi16(m2,t0,0xF0); - buf2 = _mm_shuffle_epi32(t1,_MM_SHUFFLE(0,2,1,3)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf2), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(2,1,0,3)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(0,3,2,1)); - - t0 = _mm_blend_epi16(m2,m0,0x0C); - t1 = _mm_slli_si128(t0,4); - buf3 = _mm_blend_epi16(t1,m3,0x0F); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf3), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_blend_epi16(m1,m0,0x30); - buf4 = _mm_shuffle_epi32(t0,_MM_SHUFFLE(1,0,3,2)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf4), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(0,3,2,1)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(2,1,0,3)); - - t0 = _mm_blend_epi16(m0,m2,0x03); - t1 = _mm_blend_epi16(m1,m2,0x30); - t2 = _mm_blend_epi16(t1,t0,0x0F); - buf1 = _mm_shuffle_epi32(t2,_MM_SHUFFLE(1,3,0,2)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf1), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_slli_si128(m0,4); - t1 = _mm_blend_epi16(m1,t0,0xC0); - buf2 = _mm_shuffle_epi32(t1,_MM_SHUFFLE(1,2,0,3)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf2), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 
= _mm_shuffle_epi32(row4, _MM_SHUFFLE(2,1,0,3)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(0,3,2,1)); - - t0 = _mm_unpackhi_epi32(m0,m3); - t1 = _mm_unpacklo_epi32(m2,m3); - t2 = _mm_unpackhi_epi64(t0,t1); - buf3 = _mm_shuffle_epi32(t2,_MM_SHUFFLE(3,0,2,1)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf3), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_blend_epi16(m3,m2,0xC0); - t1 = _mm_unpacklo_epi32(m0,m3); - t2 = _mm_blend_epi16(t0,t1,0x0F); - buf4 = _mm_shuffle_epi32(t2,_MM_SHUFFLE(0,1,2,3)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf4), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(0,3,2,1)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(2,1,0,3)); - - _mm_storeu_si128((__m128i *)(void*)(&state.h[0]), _mm_xor_si128(ff0, _mm_xor_si128(row1, row3))); - _mm_storeu_si128((__m128i *)(void*)(&state.h[4]), _mm_xor_si128(ff1, _mm_xor_si128(row2, row4))); -} - -static void BLAKE2_SSE4_Compress64(const byte* input, BLAKE2_State& state) -{ - __m128i row1l, row1h; - __m128i row2l, row2h; - __m128i row3l, row3h; - __m128i row4l, row4h; - __m128i b0, b1, t0, t1; - - const __m128i r16 = _mm_setr_epi8(2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15, 8, 9); - const __m128i r24 = _mm_setr_epi8(3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14, 15, 8, 9, 10); - - const __m128i m0 = _mm_loadu_si128((const __m128i*)(const void*)(input + 00)); - const __m128i m1 = _mm_loadu_si128((const __m128i*)(const void*)(input + 16)); - const __m128i m2 = _mm_loadu_si128((const __m128i*)(const void*)(input + 32)); - const __m128i m3 = _mm_loadu_si128((const __m128i*)(const void*)(input + 48)); - const __m128i m4 = _mm_loadu_si128((const __m128i*)(const void*)(input + 64)); - const __m128i m5 = _mm_loadu_si128((const __m128i*)(const void*)(input + 80)); - const __m128i m6 = _mm_loadu_si128((const __m128i*)(const void*)(input + 96)); - const __m128i m7 = _mm_loadu_si128((const __m128i*)(const void*)(input + 112)); - - row1l = _mm_loadu_si128((const __m128i*)(const void*)(&state.h[0])); - row1h = _mm_loadu_si128((const __m128i*)(const void*)(&state.h[2])); - row2l = _mm_loadu_si128((const __m128i*)(const void*)(&state.h[4])); - row2h = _mm_loadu_si128((const __m128i*)(const void*)(&state.h[6])); - row3l = _mm_loadu_si128((const __m128i*)(const void*)(&BLAKE2B_IV(0))); - row3h = _mm_loadu_si128((const __m128i*)(const void*)(&BLAKE2B_IV(2))); - row4l = _mm_xor_si128(_mm_loadu_si128((const __m128i*)(const void*)(&BLAKE2B_IV(4))), _mm_loadu_si128((const __m128i*)(const void*)(&state.t[0]))); - row4h = _mm_xor_si128(_mm_loadu_si128((const __m128i*)(const void*)(&BLAKE2B_IV(6))), _mm_loadu_si128((const __m128i*)(const void*)(&state.f[0]))); - - b0 = _mm_unpacklo_epi64(m0, m1); - b1 = _mm_unpacklo_epi64(m2, m3); - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, 
_MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_unpackhi_epi64(m0, m1); - b1 = _mm_unpackhi_epi64(m2, m3); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2h, row2l, 8); - t1 = _mm_alignr_epi8(row2l, row2h, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4h, row4l, 8); - t1 = _mm_alignr_epi8(row4l, row4h, 8); - row4l = t1, row4h = t0; - - b0 = _mm_unpacklo_epi64(m4, m5); - b1 = _mm_unpacklo_epi64(m6, m7); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_unpackhi_epi64(m4, m5); - b1 = _mm_unpackhi_epi64(m6, m7); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2l, row2h, 8); - t1 = _mm_alignr_epi8(row2h, row2l, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4l, row4h, 8); - t1 = _mm_alignr_epi8(row4h, row4l, 8); - row4l = t1, row4h = t0; - - b0 = _mm_unpacklo_epi64(m7, m2); - b1 = _mm_unpackhi_epi64(m4, m6); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_unpacklo_epi64(m5, m4); - b1 = _mm_alignr_epi8(m3, m7, 8); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = 
_mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2h, row2l, 8); - t1 = _mm_alignr_epi8(row2l, row2h, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4h, row4l, 8); - t1 = _mm_alignr_epi8(row4l, row4h, 8); - row4l = t1, row4h = t0; - - b0 = _mm_shuffle_epi32(m0, _MM_SHUFFLE(1,0,3,2)); - b1 = _mm_unpackhi_epi64(m5, m2); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_unpacklo_epi64(m6, m1); - b1 = _mm_unpackhi_epi64(m3, m1); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2l, row2h, 8); - t1 = _mm_alignr_epi8(row2h, row2l, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4l, row4h, 8); - t1 = _mm_alignr_epi8(row4h, row4l, 8); - row4l = t1, row4h = t0; - - b0 = _mm_alignr_epi8(m6, m5, 8); - b1 = _mm_unpackhi_epi64(m2, m7); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_unpacklo_epi64(m4, m0); - b1 = _mm_blend_epi16(m1, m6, 0xF0); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), 
_mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2h, row2l, 8); - t1 = _mm_alignr_epi8(row2l, row2h, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4h, row4l, 8); - t1 = _mm_alignr_epi8(row4l, row4h, 8); - row4l = t1, row4h = t0; - - b0 = _mm_blend_epi16(m5, m1, 0xF0); - b1 = _mm_unpackhi_epi64(m3, m4); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_unpacklo_epi64(m7, m3); - b1 = _mm_alignr_epi8(m2, m0, 8); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2l, row2h, 8); - t1 = _mm_alignr_epi8(row2h, row2l, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4l, row4h, 8); - t1 = _mm_alignr_epi8(row4h, row4l, 8); - row4l = t1, row4h = t0; - - b0 = _mm_unpackhi_epi64(m3, m1); - b1 = _mm_unpackhi_epi64(m6, m5); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_unpackhi_epi64(m4, m0); - b1 = _mm_unpacklo_epi64(m6, m7); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2h, row2l, 8); - t1 = _mm_alignr_epi8(row2l, row2h, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4h, row4l, 8); - t1 = _mm_alignr_epi8(row4l, row4h, 8); - row4l = t1, row4h = t0; - - b0 = _mm_blend_epi16(m1, m2, 0xF0); - b1 = _mm_blend_epi16(m2, m7, 0xF0); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = 
_mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_unpacklo_epi64(m3, m5); - b1 = _mm_unpacklo_epi64(m0, m4); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2l, row2h, 8); - t1 = _mm_alignr_epi8(row2h, row2l, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4l, row4h, 8); - t1 = _mm_alignr_epi8(row4h, row4l, 8); - row4l = t1, row4h = t0; - - b0 = _mm_unpackhi_epi64(m4, m2); - b1 = _mm_unpacklo_epi64(m1, m5); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_blend_epi16(m0, m3, 0xF0); - b1 = _mm_blend_epi16(m2, m7, 0xF0); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2h, row2l, 8); - t1 = _mm_alignr_epi8(row2l, row2h, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4h, row4l, 8); - t1 = _mm_alignr_epi8(row4l, row4h, 8); - row4l = t1, row4h = t0; - - b0 = _mm_blend_epi16(m7, m5, 0xF0); - b1 = _mm_blend_epi16(m3, m1, 0xF0); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = 
_mm_shuffle_epi8(row2h, r24); - - b0 = _mm_alignr_epi8(m6, m0, 8); - b1 = _mm_blend_epi16(m4, m6, 0xF0); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2l, row2h, 8); - t1 = _mm_alignr_epi8(row2h, row2l, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4l, row4h, 8); - t1 = _mm_alignr_epi8(row4h, row4l, 8); - row4l = t1, row4h = t0; - - b0 = _mm_unpacklo_epi64(m1, m3); - b1 = _mm_unpacklo_epi64(m0, m4); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_unpacklo_epi64(m6, m5); - b1 = _mm_unpackhi_epi64(m5, m1); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2h, row2l, 8); - t1 = _mm_alignr_epi8(row2l, row2h, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4h, row4l, 8); - t1 = _mm_alignr_epi8(row4l, row4h, 8); - row4l = t1, row4h = t0; - - b0 = _mm_blend_epi16(m2, m3, 0xF0); - b1 = _mm_unpackhi_epi64(m7, m0); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_unpackhi_epi64(m6, m2); - b1 = _mm_blend_epi16(m7, m4, 0xF0); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - 
row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2l, row2h, 8); - t1 = _mm_alignr_epi8(row2h, row2l, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4l, row4h, 8); - t1 = _mm_alignr_epi8(row4h, row4l, 8); - row4l = t1, row4h = t0; - - b0 = _mm_blend_epi16(m6, m0, 0xF0); - b1 = _mm_unpacklo_epi64(m7, m2); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_unpackhi_epi64(m2, m7); - b1 = _mm_alignr_epi8(m5, m6, 8); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2h, row2l, 8); - t1 = _mm_alignr_epi8(row2l, row2h, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4h, row4l, 8); - t1 = _mm_alignr_epi8(row4l, row4h, 8); - row4l = t1, row4h = t0; - - b0 = _mm_unpacklo_epi64(m0, m3); - b1 = _mm_shuffle_epi32(m4, _MM_SHUFFLE(1,0,3,2)); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_unpackhi_epi64(m3, m1); - b1 = _mm_blend_epi16(m1, m5, 0xF0); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2l, row2h, 8); - t1 = _mm_alignr_epi8(row2h, row2l, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4l, row4h, 8); - t1 = 
_mm_alignr_epi8(row4h, row4l, 8); - row4l = t1, row4h = t0; - - b0 = _mm_unpackhi_epi64(m6, m3); - b1 = _mm_blend_epi16(m6, m1, 0xF0); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_alignr_epi8(m7, m5, 8); - b1 = _mm_unpackhi_epi64(m0, m4); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2h, row2l, 8); - t1 = _mm_alignr_epi8(row2l, row2h, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4h, row4l, 8); - t1 = _mm_alignr_epi8(row4l, row4h, 8); - row4l = t1, row4h = t0; - - b0 = _mm_unpackhi_epi64(m2, m7); - b1 = _mm_unpacklo_epi64(m4, m1); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_unpacklo_epi64(m0, m2); - b1 = _mm_unpacklo_epi64(m3, m5); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2l, row2h, 8); - t1 = _mm_alignr_epi8(row2h, row2l, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4l, row4h, 8); - t1 = _mm_alignr_epi8(row4h, row4l, 8); - row4l = t1, row4h = t0; - - b0 = _mm_unpacklo_epi64(m3, m7); - b1 = _mm_alignr_epi8(m0, m5, 8); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = 
_mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_unpackhi_epi64(m7, m4); - b1 = _mm_alignr_epi8(m4, m1, 8); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2h, row2l, 8); - t1 = _mm_alignr_epi8(row2l, row2h, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4h, row4l, 8); - t1 = _mm_alignr_epi8(row4l, row4h, 8); - row4l = t1, row4h = t0; - - b0 = m6; - b1 = _mm_alignr_epi8(m5, m0, 8); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_blend_epi16(m1, m3, 0xF0); - b1 = m2; - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2l, row2h, 8); - t1 = _mm_alignr_epi8(row2h, row2l, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4l, row4h, 8); - t1 = _mm_alignr_epi8(row4h, row4l, 8); - row4l = t1, row4h = t0; - - b0 = _mm_unpacklo_epi64(m5, m4); - b1 = _mm_unpackhi_epi64(m3, m0); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_unpacklo_epi64(m1, m2); - b1 = _mm_blend_epi16(m3, m2, 0xF0); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = 
_mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2h, row2l, 8); - t1 = _mm_alignr_epi8(row2l, row2h, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4h, row4l, 8); - t1 = _mm_alignr_epi8(row4l, row4h, 8); - row4l = t1, row4h = t0; - - b0 = _mm_unpackhi_epi64(m7, m4); - b1 = _mm_unpackhi_epi64(m1, m6); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_alignr_epi8(m7, m5, 8); - b1 = _mm_unpacklo_epi64(m6, m0); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2l, row2h, 8); - t1 = _mm_alignr_epi8(row2h, row2l, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4l, row4h, 8); - t1 = _mm_alignr_epi8(row4h, row4l, 8); - row4l = t1, row4h = t0; - - b0 = _mm_unpacklo_epi64(m0, m1); - b1 = _mm_unpacklo_epi64(m2, m3); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_unpackhi_epi64(m0, m1); - b1 = _mm_unpackhi_epi64(m2, m3); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2h, row2l, 8); - t1 = _mm_alignr_epi8(row2l, row2h, 
8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4h, row4l, 8); - t1 = _mm_alignr_epi8(row4l, row4h, 8); - row4l = t1, row4h = t0; - - b0 = _mm_unpacklo_epi64(m4, m5); - b1 = _mm_unpacklo_epi64(m6, m7); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_unpackhi_epi64(m4, m5); - b1 = _mm_unpackhi_epi64(m6, m7); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2l, row2h, 8); - t1 = _mm_alignr_epi8(row2h, row2l, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4l, row4h, 8); - t1 = _mm_alignr_epi8(row4h, row4l, 8); - row4l = t1, row4h = t0; - - b0 = _mm_unpacklo_epi64(m7, m2); - b1 = _mm_unpackhi_epi64(m4, m6); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_unpacklo_epi64(m5, m4); - b1 = _mm_alignr_epi8(m3, m7, 8); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2h, row2l, 8); - t1 = _mm_alignr_epi8(row2l, row2h, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4h, row4l, 8); - t1 = _mm_alignr_epi8(row4l, row4h, 8); - row4l = t1, row4h = t0; - - b0 = _mm_shuffle_epi32(m0, _MM_SHUFFLE(1,0,3,2)); - b1 = _mm_unpackhi_epi64(m5, m2); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - 
row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_unpacklo_epi64(m6, m1); - b1 = _mm_unpackhi_epi64(m3, m1); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2l, row2h, 8); - t1 = _mm_alignr_epi8(row2h, row2l, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4l, row4h, 8); - t1 = _mm_alignr_epi8(row4h, row4l, 8); - row4l = t1, row4h = t0; - - row1l = _mm_xor_si128(row3l, row1l); - row1h = _mm_xor_si128(row3h, row1h); - _mm_storeu_si128((__m128i *)(void*)(&state.h[0]), _mm_xor_si128(_mm_loadu_si128((const __m128i*)(const void*)(&state.h[0])), row1l)); - _mm_storeu_si128((__m128i *)(void*)(&state.h[2]), _mm_xor_si128(_mm_loadu_si128((const __m128i*)(const void*)(&state.h[2])), row1h)); - - row2l = _mm_xor_si128(row4l, row2l); - row2h = _mm_xor_si128(row4h, row2h); - _mm_storeu_si128((__m128i *)(void*)(&state.h[4]), _mm_xor_si128(_mm_loadu_si128((const __m128i*)(const void*)(&state.h[4])), row2l)); - _mm_storeu_si128((__m128i *)(void*)(&state.h[6]), _mm_xor_si128(_mm_loadu_si128((const __m128i*)(const void*)(&state.h[6])), row2h)); -} -#endif // CRYPTOPP_SSE42_AVAILABLE - -// Disable NEON for Cortex-A53 and A57. 
Also see http://github.com/weidai11/cryptopp/issues/367 -#if CRYPTOPP_BOOL_ARM32 && CRYPTOPP_BOOL_NEON_INTRINSICS_AVAILABLE -static void BLAKE2_NEON_Compress32(const byte* input, BLAKE2_State& state) -{ - #define BLAKE2S_LOAD_MSG_0_1(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vzip_u32(vget_low_u32(m0), vget_high_u32(m0)).val[0]; \ - t1 = vzip_u32(vget_low_u32(m1), vget_high_u32(m1)).val[0]; \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_0_2(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vzip_u32(vget_low_u32(m0), vget_high_u32(m0)).val[1]; \ - t1 = vzip_u32(vget_low_u32(m1), vget_high_u32(m1)).val[1]; \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_0_3(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vzip_u32(vget_low_u32(m2), vget_high_u32(m2)).val[0]; \ - t1 = vzip_u32(vget_low_u32(m3), vget_high_u32(m3)).val[0]; \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_0_4(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vzip_u32(vget_low_u32(m2), vget_high_u32(m2)).val[1]; \ - t1 = vzip_u32(vget_low_u32(m3), vget_high_u32(m3)).val[1]; \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_1_1(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vzip_u32(vget_high_u32(m3), vget_low_u32(m1)).val[0]; \ - t1 = vzip_u32(vget_low_u32(m2), vget_low_u32(m3)).val[1]; \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_1_2(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vzip_u32(vget_high_u32(m2), vget_low_u32(m2)).val[0]; \ - t1 = vext_u32(vget_high_u32(m3), vget_high_u32(m1), 1); \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_1_3(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vext_u32(vget_low_u32(m0), vget_low_u32(m0), 1); \ - t1 = vzip_u32(vget_high_u32(m2), vget_low_u32(m1)).val[1]; \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_1_4(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vzip_u32(vget_low_u32(m3), vget_high_u32(m0)).val[0]; \ - t1 = vzip_u32(vget_high_u32(m1), vget_high_u32(m0)).val[1]; \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_2_1(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vext_u32(vget_high_u32(m2), vget_low_u32(m3), 1); \ - t1 = vzip_u32(vget_low_u32(m1), vget_high_u32(m3)).val[1]; \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_2_2(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vzip_u32(vget_low_u32(m2), vget_low_u32(m0)).val[0]; \ - t1 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_high_u32(m0), vget_low_u32(m3)); \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_2_3(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_high_u32(m2), vget_high_u32(m0)); \ - t1 = vzip_u32(vget_high_u32(m1), vget_low_u32(m2)).val[1]; \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_2_4(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vzip_u32(vget_high_u32(m3), vget_high_u32(m1)).val[0]; \ - t1 = vext_u32(vget_low_u32(m0), vget_low_u32(m1), 1); \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_3_1(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vzip_u32(vget_high_u32(m1), vget_high_u32(m0)).val[1]; \ - t1 = vzip_u32(vget_low_u32(m3), vget_high_u32(m2)).val[1]; \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_3_2(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vzip_u32(vget_low_u32(m2), vget_low_u32(m0)).val[1]; \ - t1 = vzip_u32(vget_low_u32(m3), vget_high_u32(m3)).val[0]; \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_3_3(buf) \ - 
do { uint32x2_t t0, t1; \ - t0 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_high_u32(m0), vget_low_u32(m1)); \ - t1 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_low_u32(m1), vget_high_u32(m3)); \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_3_4(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vzip_u32(vget_high_u32(m1), vget_high_u32(m2)).val[0]; \ - t1 = vzip_u32(vget_low_u32(m0), vget_low_u32(m2)).val[0]; \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_4_1(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vzip_u32(vget_low_u32(m2), vget_low_u32(m1)).val[1]; \ - t1 = vzip_u32((vget_high_u32(m0)), vget_high_u32(m2)).val[0]; \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_4_2(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_low_u32(m0), vget_high_u32(m1)); \ - t1 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_low_u32(m1), vget_high_u32(m3)); \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_4_3(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_high_u32(m3), vget_high_u32(m2)); \ - t1 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_high_u32(m1), vget_high_u32(m0)); \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_4_4(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vext_u32(vget_low_u32(m0), vget_low_u32(m3), 1); \ - t1 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_low_u32(m2), vget_low_u32(m3)); \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_5_1(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vzip_u32((vget_high_u32(m0)), vget_high_u32(m1)).val[0]; \ - t1 = vzip_u32(vget_low_u32(m0), vget_low_u32(m2)).val[0]; \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_5_2(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vzip_u32(vget_low_u32(m3), vget_high_u32(m2)).val[0]; \ - t1 = vzip_u32(vget_high_u32(m2), vget_high_u32(m0)).val[1]; \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_5_3(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_low_u32(m1), vget_high_u32(m1)); \ - t1 = vzip_u32(vget_high_u32(m3), vget_low_u32(m0)).val[1]; \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_5_4(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vzip_u32(vget_low_u32(m3), vget_low_u32(m1)).val[1]; \ - t1 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_high_u32(m3), vget_low_u32(m2)); \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_6_1(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_low_u32(m3), vget_low_u32(m0)); \ - t1 = vzip_u32(vget_high_u32(m3), vget_low_u32(m1)).val[0]; \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_6_2(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vzip_u32(vget_low_u32(m1), vget_high_u32(m3)).val[1]; \ - t1 = vext_u32(vget_low_u32(m3), vget_high_u32(m2), 1); \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_6_3(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vzip_u32(vget_low_u32(m0), vget_high_u32(m1)).val[0]; \ - t1 = vext_u32(vget_low_u32(m2), vget_low_u32(m2), 1); \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_6_4(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vzip_u32(vget_high_u32(m1), vget_high_u32(m0)).val[1]; \ - t1 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_high_u32(m0), vget_high_u32(m2)); \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_7_1(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vzip_u32(vget_low_u32(m3), 
vget_high_u32(m1)).val[1]; \ - t1 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_low_u32(m3), vget_high_u32(m0)); \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_7_2(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vext_u32(vget_high_u32(m2), vget_high_u32(m3), 1); \ - t1 = vzip_u32(vget_low_u32(m0), vget_low_u32(m2)).val[1]; \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_7_3(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vzip_u32(vget_low_u32(m1), vget_high_u32(m3)).val[1]; \ - t1 = vzip_u32(vget_low_u32(m2), vget_high_u32(m0)).val[0]; \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_7_4(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vzip_u32(vget_low_u32(m0), vget_low_u32(m1)).val[0]; \ - t1 = vzip_u32(vget_high_u32(m1), vget_high_u32(m2)).val[0]; \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_8_1(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vzip_u32(vget_high_u32(m1), vget_high_u32(m3)).val[0]; \ - t1 = vext_u32(vget_high_u32(m2), vget_low_u32(m0), 1); \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_8_2(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vzip_u32(vget_high_u32(m3), vget_low_u32(m2)).val[1]; \ - t1 = vext_u32(vget_high_u32(m0), vget_low_u32(m2), 1); \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_8_3(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_low_u32(m3), vget_low_u32(m3)); \ - t1 = vext_u32(vget_low_u32(m0), vget_high_u32(m2), 1); \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_8_4(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_high_u32(m0), vget_high_u32(m1)); \ - t1 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_low_u32(m1), vget_low_u32(m1)); \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_9_1(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vzip_u32(vget_high_u32(m2), vget_low_u32(m2)).val[0]; \ - t1 = vzip_u32(vget_high_u32(m1), vget_low_u32(m0)).val[1]; \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_9_2(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vzip_u32((vget_high_u32(m0)), vget_low_u32(m1)).val[0]; \ - t1 = vbsl_u32(vcreate_u32(0xFFFFFFFF), vget_high_u32(m1), vget_low_u32(m1)); \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_9_3(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vzip_u32(vget_high_u32(m3), vget_low_u32(m2)).val[1]; \ - t1 = vzip_u32((vget_high_u32(m0)), vget_low_u32(m3)).val[1]; \ - buf = vcombine_u32(t0, t1); } while(0) - - #define BLAKE2S_LOAD_MSG_9_4(buf) \ - do { uint32x2_t t0, t1; \ - t0 = vext_u32(vget_high_u32(m2), vget_high_u32(m3), 1); \ - t1 = vzip_u32(vget_low_u32(m3), vget_low_u32(m0)).val[0]; \ - buf = vcombine_u32(t0, t1); } while(0) - - #define vrorq_n_u32_16(x) vreinterpretq_u32_u16(vrev32q_u16(vreinterpretq_u16_u32(x))) - - #define vrorq_n_u32_8(x) vsriq_n_u32(vshlq_n_u32((x), 24), (x), 8) - - #define vrorq_n_u32(x, c) vsriq_n_u32(vshlq_n_u32((x), 32-(c)), (x), (c)) - - #define BLAKE2S_G1(row1,row2,row3,row4,buf) \ - do { \ - row1 = vaddq_u32(vaddq_u32(row1, buf), row2); row4 = veorq_u32(row4, row1); \ - row4 = vrorq_n_u32_16(row4); row3 = vaddq_u32(row3, row4); \ - row2 = veorq_u32(row2, row3); row2 = vrorq_n_u32(row2, 12); \ - } while(0) - - #define BLAKE2S_G2(row1,row2,row3,row4,buf) \ - do { \ - row1 = vaddq_u32(vaddq_u32(row1, buf), row2); row4 = veorq_u32(row4, row1); \ - row4 = vrorq_n_u32_8(row4); row3 = vaddq_u32(row3, row4); \ - row2 = veorq_u32(row2, row3); row2 = 
vrorq_n_u32(row2, 7); \ - } while(0) - - #define BLAKE2S_DIAGONALIZE(row1,row2,row3,row4) \ - do { \ - row4 = vextq_u32(row4, row4, 3); row3 = vextq_u32(row3, row3, 2); row2 = vextq_u32(row2, row2, 1); \ - } while(0) - - #define BLAKE2S_UNDIAGONALIZE(row1,row2,row3,row4) \ - do { \ - row4 = vextq_u32(row4, row4, 1); \ - row3 = vextq_u32(row3, row3, 2); \ - row2 = vextq_u32(row2, row2, 3); \ - } while(0) - - #define BLAKE2S_ROUND(r) \ - do { \ - uint32x4_t buf1, buf2, buf3, buf4; \ - BLAKE2S_LOAD_MSG_ ##r ##_1(buf1); \ - BLAKE2S_G1(row1,row2,row3,row4,buf1); \ - BLAKE2S_LOAD_MSG_ ##r ##_2(buf2); \ - BLAKE2S_G2(row1,row2,row3,row4,buf2); \ - BLAKE2S_DIAGONALIZE(row1,row2,row3,row4); \ - BLAKE2S_LOAD_MSG_ ##r ##_3(buf3); \ - BLAKE2S_G1(row1,row2,row3,row4,buf3); \ - BLAKE2S_LOAD_MSG_ ##r ##_4(buf4); \ - BLAKE2S_G2(row1,row2,row3,row4,buf4); \ - BLAKE2S_UNDIAGONALIZE(row1,row2,row3,row4); \ - } while(0) - - CRYPTOPP_ASSERT(IsAlignedOn(&state.h[0],GetAlignmentOf())); - CRYPTOPP_ASSERT(IsAlignedOn(&state.t[0],GetAlignmentOf())); - CRYPTOPP_ASSERT(IsAlignedOn(&state.f[0],GetAlignmentOf())); - - const uint32x4_t m0 = vreinterpretq_u32_u8(vld1q_u8((input + 00))); - const uint32x4_t m1 = vreinterpretq_u32_u8(vld1q_u8((input + 16))); - const uint32x4_t m2 = vreinterpretq_u32_u8(vld1q_u8((input + 32))); - const uint32x4_t m3 = vreinterpretq_u32_u8(vld1q_u8((input + 48))); - - uint32x4_t row1, row2, row3, row4; - - const uint32x4_t f0 = row1 = vld1q_u32(&state.h[0]); - const uint32x4_t f1 = row2 = vld1q_u32(&state.h[4]); - row3 = vld1q_u32(&BLAKE2S_IV(0)); - row4 = veorq_u32(vld1q_u32(&BLAKE2S_IV(4)), vld1q_u32(&state.t[0])); - - BLAKE2S_ROUND(0); - BLAKE2S_ROUND(1); - BLAKE2S_ROUND(2); - BLAKE2S_ROUND(3); - BLAKE2S_ROUND(4); - BLAKE2S_ROUND(5); - BLAKE2S_ROUND(6); - BLAKE2S_ROUND(7); - BLAKE2S_ROUND(8); - BLAKE2S_ROUND(9); - - vst1q_u32(&state.h[0], veorq_u32(f0, veorq_u32(row1, row3))); - vst1q_u32(&state.h[4], veorq_u32(f1, veorq_u32(row2, row4))); -} - -static void BLAKE2_NEON_Compress64(const byte* input, BLAKE2_State& state) -{ - #define BLAKE2B_LOAD_MSG_0_1(b0, b1) \ - do { b0 = vcombine_u64(vget_low_u64(m0), vget_low_u64(m1)); b1 = vcombine_u64(vget_low_u64(m2), vget_low_u64(m3)); } while(0) - - #define BLAKE2B_LOAD_MSG_0_2(b0, b1) \ - do { b0 = vcombine_u64(vget_high_u64(m0), vget_high_u64(m1)); b1 = vcombine_u64(vget_high_u64(m2), vget_high_u64(m3)); } while(0) - - #define BLAKE2B_LOAD_MSG_0_3(b0, b1) \ - do { b0 = vcombine_u64(vget_low_u64(m4), vget_low_u64(m5)); b1 = vcombine_u64(vget_low_u64(m6), vget_low_u64(m7)); } while(0) - - #define BLAKE2B_LOAD_MSG_0_4(b0, b1) \ - do { b0 = vcombine_u64(vget_high_u64(m4), vget_high_u64(m5)); b1 = vcombine_u64(vget_high_u64(m6), vget_high_u64(m7)); } while(0) - - #define BLAKE2B_LOAD_MSG_1_1(b0, b1) \ - do { b0 = vcombine_u64(vget_low_u64(m7), vget_low_u64(m2)); b1 = vcombine_u64(vget_high_u64(m4), vget_high_u64(m6)); } while(0) - - #define BLAKE2B_LOAD_MSG_1_2(b0, b1) \ - do { b0 = vcombine_u64(vget_low_u64(m5), vget_low_u64(m4)); b1 = vextq_u64(m7, m3, 1); } while(0) - - #define BLAKE2B_LOAD_MSG_1_3(b0, b1) \ - do { b0 = vextq_u64(m0, m0, 1); b1 = vcombine_u64(vget_high_u64(m5), vget_high_u64(m2)); } while(0) - - #define BLAKE2B_LOAD_MSG_1_4(b0, b1) \ - do { b0 = vcombine_u64(vget_low_u64(m6), vget_low_u64(m1)); b1 = vcombine_u64(vget_high_u64(m3), vget_high_u64(m1)); } while(0) - - #define BLAKE2B_LOAD_MSG_2_1(b0, b1) \ - do { b0 = vextq_u64(m5, m6, 1); b1 = vcombine_u64(vget_high_u64(m2), vget_high_u64(m7)); } while(0) - - #define 
BLAKE2B_LOAD_MSG_2_2(b0, b1) \ - do { b0 = vcombine_u64(vget_low_u64(m4), vget_low_u64(m0)); b1 = vcombine_u64(vget_low_u64(m1), vget_high_u64(m6)); } while(0) - - #define BLAKE2B_LOAD_MSG_2_3(b0, b1) \ - do { b0 = vcombine_u64(vget_low_u64(m5), vget_high_u64(m1)); b1 = vcombine_u64(vget_high_u64(m3), vget_high_u64(m4)); } while(0) - - #define BLAKE2B_LOAD_MSG_2_4(b0, b1) \ - do { b0 = vcombine_u64(vget_low_u64(m7), vget_low_u64(m3)); b1 = vextq_u64(m0, m2, 1); } while(0) - - #define BLAKE2B_LOAD_MSG_3_1(b0, b1) \ - do { b0 = vcombine_u64(vget_high_u64(m3), vget_high_u64(m1)); b1 = vcombine_u64(vget_high_u64(m6), vget_high_u64(m5)); } while(0) - - #define BLAKE2B_LOAD_MSG_3_2(b0, b1) \ - do { b0 = vcombine_u64(vget_high_u64(m4), vget_high_u64(m0)); b1 = vcombine_u64(vget_low_u64(m6), vget_low_u64(m7)); } while(0) - - #define BLAKE2B_LOAD_MSG_3_3(b0, b1) \ - do { b0 = vcombine_u64(vget_low_u64(m1), vget_high_u64(m2)); b1 = vcombine_u64(vget_low_u64(m2), vget_high_u64(m7)); } while(0) - - #define BLAKE2B_LOAD_MSG_3_4(b0, b1) \ - do { b0 = vcombine_u64(vget_low_u64(m3), vget_low_u64(m5)); b1 = vcombine_u64(vget_low_u64(m0), vget_low_u64(m4)); } while(0) - - #define BLAKE2B_LOAD_MSG_4_1(b0, b1) \ - do { b0 = vcombine_u64(vget_high_u64(m4), vget_high_u64(m2)); b1 = vcombine_u64(vget_low_u64(m1), vget_low_u64(m5)); } while(0) - - #define BLAKE2B_LOAD_MSG_4_2(b0, b1) \ - do { b0 = vcombine_u64(vget_low_u64(m0), vget_high_u64(m3)); b1 = vcombine_u64(vget_low_u64(m2), vget_high_u64(m7)); } while(0) - - #define BLAKE2B_LOAD_MSG_4_3(b0, b1) \ - do { b0 = vcombine_u64(vget_low_u64(m7), vget_high_u64(m5)); b1 = vcombine_u64(vget_low_u64(m3), vget_high_u64(m1)); } while(0) - - #define BLAKE2B_LOAD_MSG_4_4(b0, b1) \ - do { b0 = vextq_u64(m0, m6, 1); b1 = vcombine_u64(vget_low_u64(m4), vget_high_u64(m6)); } while(0) - - #define BLAKE2B_LOAD_MSG_5_1(b0, b1) \ - do { b0 = vcombine_u64(vget_low_u64(m1), vget_low_u64(m3)); b1 = vcombine_u64(vget_low_u64(m0), vget_low_u64(m4)); } while(0) - - #define BLAKE2B_LOAD_MSG_5_2(b0, b1) \ - do { b0 = vcombine_u64(vget_low_u64(m6), vget_low_u64(m5)); b1 = vcombine_u64(vget_high_u64(m5), vget_high_u64(m1)); } while(0) - - #define BLAKE2B_LOAD_MSG_5_3(b0, b1) \ - do { b0 = vcombine_u64(vget_low_u64(m2), vget_high_u64(m3)); b1 = vcombine_u64(vget_high_u64(m7), vget_high_u64(m0)); } while(0) - - #define BLAKE2B_LOAD_MSG_5_4(b0, b1) \ - do { b0 = vcombine_u64(vget_high_u64(m6), vget_high_u64(m2)); b1 = vcombine_u64(vget_low_u64(m7), vget_high_u64(m4)); } while(0) - - #define BLAKE2B_LOAD_MSG_6_1(b0, b1) \ - do { b0 = vcombine_u64(vget_low_u64(m6), vget_high_u64(m0)); b1 = vcombine_u64(vget_low_u64(m7), vget_low_u64(m2)); } while(0) - - #define BLAKE2B_LOAD_MSG_6_2(b0, b1) \ - do { b0 = vcombine_u64(vget_high_u64(m2), vget_high_u64(m7)); b1 = vextq_u64(m6, m5, 1); } while(0) - - #define BLAKE2B_LOAD_MSG_6_3(b0, b1) \ - do { b0 = vcombine_u64(vget_low_u64(m0), vget_low_u64(m3)); b1 = vextq_u64(m4, m4, 1); } while(0) - - #define BLAKE2B_LOAD_MSG_6_4(b0, b1) \ - do { b0 = vcombine_u64(vget_high_u64(m3), vget_high_u64(m1)); b1 = vcombine_u64(vget_low_u64(m1), vget_high_u64(m5)); } while(0) - - #define BLAKE2B_LOAD_MSG_7_1(b0, b1) \ - do { b0 = vcombine_u64(vget_high_u64(m6), vget_high_u64(m3)); b1 = vcombine_u64(vget_low_u64(m6), vget_high_u64(m1)); } while(0) - - #define BLAKE2B_LOAD_MSG_7_2(b0, b1) \ - do { b0 = vextq_u64(m5, m7, 1); b1 = vcombine_u64(vget_high_u64(m0), vget_high_u64(m4)); } while(0) - - #define BLAKE2B_LOAD_MSG_7_3(b0, b1) \ - do { b0 = 
vcombine_u64(vget_high_u64(m2), vget_high_u64(m7)); b1 = vcombine_u64(vget_low_u64(m4), vget_low_u64(m1)); } while(0) - - #define BLAKE2B_LOAD_MSG_7_4(b0, b1) \ - do { b0 = vcombine_u64(vget_low_u64(m0), vget_low_u64(m2)); b1 = vcombine_u64(vget_low_u64(m3), vget_low_u64(m5)); } while(0) - - #define BLAKE2B_LOAD_MSG_8_1(b0, b1) \ - do { b0 = vcombine_u64(vget_low_u64(m3), vget_low_u64(m7)); b1 = vextq_u64(m5, m0, 1); } while(0) - - #define BLAKE2B_LOAD_MSG_8_2(b0, b1) \ - do { b0 = vcombine_u64(vget_high_u64(m7), vget_high_u64(m4)); b1 = vextq_u64(m1, m4, 1); } while(0) - - #define BLAKE2B_LOAD_MSG_8_3(b0, b1) \ - do { b0 = m6; b1 = vextq_u64(m0, m5, 1); } while(0) - - #define BLAKE2B_LOAD_MSG_8_4(b0, b1) \ - do { b0 = vcombine_u64(vget_low_u64(m1), vget_high_u64(m3)); b1 = m2; } while(0) - - #define BLAKE2B_LOAD_MSG_9_1(b0, b1) \ - do { b0 = vcombine_u64(vget_low_u64(m5), vget_low_u64(m4)); b1 = vcombine_u64(vget_high_u64(m3), vget_high_u64(m0)); } while(0) - - #define BLAKE2B_LOAD_MSG_9_2(b0, b1) \ - do { b0 = vcombine_u64(vget_low_u64(m1), vget_low_u64(m2)); b1 = vcombine_u64(vget_low_u64(m3), vget_high_u64(m2)); } while(0) - - #define BLAKE2B_LOAD_MSG_9_3(b0, b1) \ - do { b0 = vcombine_u64(vget_high_u64(m7), vget_high_u64(m4)); b1 = vcombine_u64(vget_high_u64(m1), vget_high_u64(m6)); } while(0) - - #define BLAKE2B_LOAD_MSG_9_4(b0, b1) \ - do { b0 = vextq_u64(m5, m7, 1); b1 = vcombine_u64(vget_low_u64(m6), vget_low_u64(m0)); } while(0) - - #define BLAKE2B_LOAD_MSG_10_1(b0, b1) \ - do { b0 = vcombine_u64(vget_low_u64(m0), vget_low_u64(m1)); b1 = vcombine_u64(vget_low_u64(m2), vget_low_u64(m3)); } while(0) - - #define BLAKE2B_LOAD_MSG_10_2(b0, b1) \ - do { b0 = vcombine_u64(vget_high_u64(m0), vget_high_u64(m1)); b1 = vcombine_u64(vget_high_u64(m2), vget_high_u64(m3)); } while(0) - - #define BLAKE2B_LOAD_MSG_10_3(b0, b1) \ - do { b0 = vcombine_u64(vget_low_u64(m4), vget_low_u64(m5)); b1 = vcombine_u64(vget_low_u64(m6), vget_low_u64(m7)); } while(0) - - #define BLAKE2B_LOAD_MSG_10_4(b0, b1) \ - do { b0 = vcombine_u64(vget_high_u64(m4), vget_high_u64(m5)); b1 = vcombine_u64(vget_high_u64(m6), vget_high_u64(m7)); } while(0) - - #define BLAKE2B_LOAD_MSG_11_1(b0, b1) \ - do { b0 = vcombine_u64(vget_low_u64(m7), vget_low_u64(m2)); b1 = vcombine_u64(vget_high_u64(m4), vget_high_u64(m6)); } while(0) - - #define BLAKE2B_LOAD_MSG_11_2(b0, b1) \ - do { b0 = vcombine_u64(vget_low_u64(m5), vget_low_u64(m4)); b1 = vextq_u64(m7, m3, 1); } while(0) - - #define BLAKE2B_LOAD_MSG_11_3(b0, b1) \ - do { b0 = vextq_u64(m0, m0, 1); b1 = vcombine_u64(vget_high_u64(m5), vget_high_u64(m2)); } while(0) - - #define BLAKE2B_LOAD_MSG_11_4(b0, b1) \ - do { b0 = vcombine_u64(vget_low_u64(m6), vget_low_u64(m1)); b1 = vcombine_u64(vget_high_u64(m3), vget_high_u64(m1)); } while(0) - - #define vrorq_n_u64_32(x) vreinterpretq_u64_u32(vrev64q_u32(vreinterpretq_u32_u64((x)))) - - #define vrorq_n_u64_24(x) vcombine_u64(\ - vreinterpret_u64_u8(vext_u8(vreinterpret_u8_u64(vget_low_u64(x)), vreinterpret_u8_u64(vget_low_u64(x)), 3)), \ - vreinterpret_u64_u8(vext_u8(vreinterpret_u8_u64(vget_high_u64(x)), vreinterpret_u8_u64(vget_high_u64(x)), 3))) - - #define vrorq_n_u64_16(x) vcombine_u64(\ - vreinterpret_u64_u8(vext_u8(vreinterpret_u8_u64(vget_low_u64(x)), vreinterpret_u8_u64(vget_low_u64(x)), 2)), \ - vreinterpret_u64_u8(vext_u8(vreinterpret_u8_u64(vget_high_u64(x)), vreinterpret_u8_u64(vget_high_u64(x)), 2))) - - #define vrorq_n_u64_63(x) veorq_u64(vaddq_u64(x, x), vshrq_n_u64(x, 63)) - - #define 
BLAKE2B_G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1) \ - do { \ - row1l = vaddq_u64(vaddq_u64(row1l, b0), row2l); \ - row1h = vaddq_u64(vaddq_u64(row1h, b1), row2h); \ - row4l = veorq_u64(row4l, row1l); row4h = veorq_u64(row4h, row1h); \ - row4l = vrorq_n_u64_32(row4l); row4h = vrorq_n_u64_32(row4h); \ - row3l = vaddq_u64(row3l, row4l); row3h = vaddq_u64(row3h, row4h); \ - row2l = veorq_u64(row2l, row3l); row2h = veorq_u64(row2h, row3h); \ - row2l = vrorq_n_u64_24(row2l); row2h = vrorq_n_u64_24(row2h); \ - } while(0) - - #define BLAKE2B_G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1) \ - do { \ - row1l = vaddq_u64(vaddq_u64(row1l, b0), row2l); \ - row1h = vaddq_u64(vaddq_u64(row1h, b1), row2h); \ - row4l = veorq_u64(row4l, row1l); row4h = veorq_u64(row4h, row1h); \ - row4l = vrorq_n_u64_16(row4l); row4h = vrorq_n_u64_16(row4h); \ - row3l = vaddq_u64(row3l, row4l); row3h = vaddq_u64(row3h, row4h); \ - row2l = veorq_u64(row2l, row3l); row2h = veorq_u64(row2h, row3h); \ - row2l = vrorq_n_u64_63(row2l); row2h = vrorq_n_u64_63(row2h); \ - } while(0) - - #define BLAKE2B_DIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h) \ - do { \ - uint64x2_t t0 = vextq_u64(row2l, row2h, 1); \ - uint64x2_t t1 = vextq_u64(row2h, row2l, 1); \ - row2l = t0; row2h = t1; t0 = row3l; row3l = row3h; row3h = t0; \ - t0 = vextq_u64(row4h, row4l, 1); t1 = vextq_u64(row4l, row4h, 1); \ - row4l = t0; row4h = t1; \ - } while(0) - - #define BLAKE2B_UNDIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h) \ - do { \ - uint64x2_t t0 = vextq_u64(row2h, row2l, 1); \ - uint64x2_t t1 = vextq_u64(row2l, row2h, 1); \ - row2l = t0; row2h = t1; t0 = row3l; row3l = row3h; row3h = t0; \ - t0 = vextq_u64(row4l, row4h, 1); t1 = vextq_u64(row4h, row4l, 1); \ - row4l = t0; row4h = t1; \ - } while(0) - - #define BLAKE2B_ROUND(r) \ - do { \ - uint64x2_t b0, b1; \ - BLAKE2B_LOAD_MSG_ ##r ##_1(b0, b1); \ - BLAKE2B_G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ - BLAKE2B_LOAD_MSG_ ##r ##_2(b0, b1); \ - BLAKE2B_G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ - BLAKE2B_DIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); \ - BLAKE2B_LOAD_MSG_ ##r ##_3(b0, b1); \ - BLAKE2B_G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ - BLAKE2B_LOAD_MSG_ ##r ##_4(b0, b1); \ - BLAKE2B_G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \ - BLAKE2B_UNDIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); \ - } while(0) - - CRYPTOPP_ASSERT(IsAlignedOn(&state.h[0],GetAlignmentOf())); - CRYPTOPP_ASSERT(IsAlignedOn(&state.t[0],GetAlignmentOf())); - CRYPTOPP_ASSERT(IsAlignedOn(&state.f[0],GetAlignmentOf())); - - const uint64x2_t m0 = vreinterpretq_u64_u8(vld1q_u8(input + 00)); - const uint64x2_t m1 = vreinterpretq_u64_u8(vld1q_u8(input + 16)); - const uint64x2_t m2 = vreinterpretq_u64_u8(vld1q_u8(input + 32)); - const uint64x2_t m3 = vreinterpretq_u64_u8(vld1q_u8(input + 48)); - const uint64x2_t m4 = vreinterpretq_u64_u8(vld1q_u8(input + 64)); - const uint64x2_t m5 = vreinterpretq_u64_u8(vld1q_u8(input + 80)); - const uint64x2_t m6 = vreinterpretq_u64_u8(vld1q_u8(input + 96)); - const uint64x2_t m7 = vreinterpretq_u64_u8(vld1q_u8(input + 112)); - - uint64x2_t row1l, row1h, row2l, row2h; - uint64x2_t row3l, row3h, row4l, row4h; - - const uint64x2_t h0 = row1l = vld1q_u64(&state.h[0]); - const uint64x2_t h1 = row1h = vld1q_u64(&state.h[2]); - const uint64x2_t h2 = row2l = vld1q_u64(&state.h[4]); - const uint64x2_t h3 = row2h = vld1q_u64(&state.h[6]); - - row3l = 
vld1q_u64(&BLAKE2B_IV(0));
-	row3h = vld1q_u64(&BLAKE2B_IV(2));
-	row4l = veorq_u64(vld1q_u64(&BLAKE2B_IV(4)), vld1q_u64(&state.t[0]));
-	row4h = veorq_u64(vld1q_u64(&BLAKE2B_IV(6)), vld1q_u64(&state.f[0]));
-
-	BLAKE2B_ROUND(0);
-	BLAKE2B_ROUND(1);
-	BLAKE2B_ROUND(2);
-	BLAKE2B_ROUND(3);
-	BLAKE2B_ROUND(4);
-	BLAKE2B_ROUND(5);
-	BLAKE2B_ROUND(6);
-	BLAKE2B_ROUND(7);
-	BLAKE2B_ROUND(8);
-	BLAKE2B_ROUND(9);
-	BLAKE2B_ROUND(10);
-	BLAKE2B_ROUND(11);
-
-	vst1q_u64(&state.h[0], veorq_u64(h0, veorq_u64(row1l, row3l)));
-	vst1q_u64(&state.h[2], veorq_u64(h1, veorq_u64(row1h, row3h)));
-	vst1q_u64(&state.h[4], veorq_u64(h2, veorq_u64(row2l, row4l)));
-	vst1q_u64(&state.h[6], veorq_u64(h3, veorq_u64(row2h, row4h)));
-}
-#endif // CRYPTOPP_BOOL_ARM32 && CRYPTOPP_BOOL_NEON_INTRINSICS_AVAILABLE
-
 template class BLAKE2_Base<word32, false>;
 template class BLAKE2_Base<word64, true>;
diff --git a/config.h b/config.h
index 100853fc..1fdf92b2 100644
--- a/config.h
+++ b/config.h
@@ -516,9 +516,9 @@ NAMESPACE_END
 #if (CRYPTOPP_BOOL_ARM32 || CRYPTOPP_BOOL_ARM64)
 
 // Requires ARMv7 and ACLE 1.0. Testing shows ARMv7 is really ARMv7a under most toolchains.
-#if !defined(CRYPTOPP_BOOL_NEON_INTRINSICS_AVAILABLE) && !defined(CRYPTOPP_DISABLE_ASM)
-# if defined(__ARM_NEON__) || defined(__ARM_NEON) || defined(_M_ARM)
-#  define CRYPTOPP_BOOL_NEON_INTRINSICS_AVAILABLE 1
+#if !defined(CRYPTOPP_ARM_NEON_AVAILABLE) && !defined(CRYPTOPP_DISABLE_ASM)
+# if defined(__ARM_NEON__) || defined(__ARM_NEON) || (CRYPTOPP_GCC_VERSION >= 40800) || (CRYPTOPP_LLVM_CLANG_VERSION >= 30500)
+#  define CRYPTOPP_ARM_NEON_AVAILABLE 1
 # endif
 #endif
@@ -563,7 +563,7 @@ NAMESPACE_END
 
 // ***************** Miscellaneous ********************
 
-#if CRYPTOPP_BOOL_SSE2_INTRINSICS_AVAILABLE || CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE || CRYPTOPP_BOOL_NEON_INTRINSICS_AVAILABLE || defined(CRYPTOPP_X64_MASM_AVAILABLE)
+#if CRYPTOPP_BOOL_SSE2_INTRINSICS_AVAILABLE || CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE || CRYPTOPP_ARM_NEON_AVAILABLE || defined(CRYPTOPP_X64_MASM_AVAILABLE)
 #define CRYPTOPP_BOOL_ALIGN16 1
 #else
 #define CRYPTOPP_BOOL_ALIGN16 0
diff --git a/cpu.cpp b/cpu.cpp
index a51c1423..11723c39 100644
--- a/cpu.cpp
+++ b/cpu.cpp
@@ -334,15 +334,16 @@ bool CRYPTOPP_SECTION_INIT g_hasNEON = false, CRYPTOPP_SECTION_INIT g_hasPMULL =
 bool CRYPTOPP_SECTION_INIT g_hasAES = false, CRYPTOPP_SECTION_INIT g_hasSHA1 = false, CRYPTOPP_SECTION_INIT g_hasSHA2 = false;
 word32 CRYPTOPP_SECTION_INIT g_cacheLineSize = CRYPTOPP_L1_CACHE_LINE_SIZE;
 
+extern bool CPU_TryNEON_ARM();
+extern bool CPU_TryAES_ARMV8();
+extern bool CPU_TrySHA1_ARMV8();
+extern bool CPU_TrySHA2_ARMV8();
+extern bool CPU_TryCRC32_ARMV8();
+extern bool CPU_TryPMULL_ARMV8();
+
 #ifndef CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY
 extern "C"
 {
-	static jmp_buf s_jmpNoNEON;
-	static void SigIllHandlerNEON(int)
-	{
-		longjmp(s_jmpNoNEON, 1);
-	}
-
 	static jmp_buf s_jmpNoPMULL;
 	static void SigIllHandlerPMULL(int)
 	{
@@ -359,70 +360,20 @@ extern "C"
 
 static bool TryNEON()
 {
-#if (CRYPTOPP_BOOL_NEON_INTRINSICS_AVAILABLE)
-# if defined(CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY)
-	volatile bool result = true;
-	__try
-	{
-		uint32_t v1[4] = {1,1,1,1};
-		uint32x4_t x1 = vld1q_u32(v1);
-		uint64_t v2[2] = {1,1};
-		uint64x2_t x2 = vld1q_u64(v2);
-
-		uint32x4_t x3 = vdupq_n_u32(2);
-		x3 = vsetq_lane_u32(vgetq_lane_u32(x1,0),x3,0);
-		x3 = vsetq_lane_u32(vgetq_lane_u32(x1,3),x3,3);
-		uint64x2_t x4 = vdupq_n_u64(2);
-		x4 = vsetq_lane_u64(vgetq_lane_u64(x2,0),x4,0);
-		x4 = vsetq_lane_u64(vgetq_lane_u64(x2,1),x4,1);
-
-		result = !!(vgetq_lane_u32(x3,0) | vgetq_lane_u64(x4,1));
-	}
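// The __try body above exercises a handful of NEON loads and lane moves; on a
// CPU without NEON the first such instruction raises an illegal-instruction
// exception, which the __except filter below converts into a 'false' return.
// OR-ing two extracted lanes into 'result' keeps the compiler from optimizing
// the probe away entirely.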
-	__except (EXCEPTION_EXECUTE_HANDLER)
-	{
-		return false;
-	}
-	return result;
-# else
-	// longjmp and clobber warnings. Volatile is required.
-	// http://github.com/weidai11/cryptopp/issues/24 and http://stackoverflow.com/q/7721854
-	volatile bool result = true;
-
-	volatile SigHandler oldHandler = signal(SIGILL, SigIllHandlerNEON);
-	if (oldHandler == SIG_ERR)
-		return false;
-
-	volatile sigset_t oldMask;
-	if (sigprocmask(0, NULLPTR, (sigset_t*)&oldMask))
-		return false;
-
-	if (setjmp(s_jmpNoNEON))
-		result = false;
-	else
-	{
-		uint32_t v1[4] = {1,1,1,1};
-		uint32x4_t x1 = vld1q_u32(v1);
-		uint64_t v2[2] = {1,1};
-		uint64x2_t x2 = vld1q_u64(v2);
-
-		uint32x4_t x3 = {0,0,0,0};
-		x3 = vsetq_lane_u32(vgetq_lane_u32(x1,0),x3,0);
-		x3 = vsetq_lane_u32(vgetq_lane_u32(x1,3),x3,3);
-		uint64x2_t x4 = {0,0};
-		x4 = vsetq_lane_u64(vgetq_lane_u64(x2,0),x4,0);
-		x4 = vsetq_lane_u64(vgetq_lane_u64(x2,1),x4,1);
-
-		// Hack... GCC optimizes away the code and returns true
-		result = !!(vgetq_lane_u32(x3,0) | vgetq_lane_u64(x4,1));
-	}
-
-	sigprocmask(SIG_SETMASK, (sigset_t*)&oldMask, NULLPTR);
-	signal(SIGILL, oldHandler);
-	return result;
-# endif
+#if (CRYPTOPP_ARM_NEON_AVAILABLE)
+	return CPU_TryNEON_ARM();
 #else
 	return false;
-#endif // CRYPTOPP_BOOL_NEON_INTRINSICS_AVAILABLE
+#endif
+}
+
+static bool TryCRC32()
+{
+#if (CRYPTOPP_ARMV8A_CRC32_AVAILABLE)
+	return CPU_TryCRC32_ARMV8();
+#else
+	return false;
+#endif
 }
 
 static bool TryPMULL()
@@ -492,19 +443,6 @@
 #endif // CRYPTOPP_ARMV8A_CRYPTO_AVAILABLE
 }
 
-extern bool CPU_TryCRC32_ARMV8();
-extern bool CPU_TrySHA1_ARMV8();
-extern bool CPU_TrySHA2_ARMV8();
-
-static bool TryCRC32()
-{
-#if (CRYPTOPP_ARMV8A_CRC32_AVAILABLE)
-	return CPU_TryCRC32_ARMV8();
-#else
-	return false;
-#endif
-}
-
 static bool TryAES()
 {
 #if (CRYPTOPP_ARMV8A_CRYPTO_AVAILABLE)
diff --git a/cryptest.nmake b/cryptest.nmake
index 05d2c420..c48579ad 100644
--- a/cryptest.nmake
+++ b/cryptest.nmake
@@ -48,9 +48,9 @@
 # If you use 'make sources' from Linux makefile, then add 'winpipes.cpp'. Platform specific
 #   classes, like 'rdrand.cpp', should not be included. Add them under the X86 and X64 rules.
-LIB_SRCS = cryptlib.cpp cpu.cpp integer.cpp shacal2.cpp md5.cpp shark.cpp zinflate.cpp gf2n.cpp salsa.cpp xtr.cpp oaep.cpp rc2.cpp default.cpp wait.cpp wake.cpp twofish.cpp iterhash.cpp adler32.cpp algparam.cpp marss.cpp blowfish.cpp ecp.cpp strciphr.cpp aria.cpp camellia.cpp dh2.cpp ida.cpp zlib.cpp elgamal.cpp crc.cpp crc-simd.cpp dessp.cpp tea.cpp eax.cpp network.cpp sha.cpp emsa2.cpp pkcspad.cpp squaretb.cpp idea.cpp authenc.cpp hmac.cpp xtrcrypt.cpp queue.cpp mars.cpp rc5.cpp md2.cpp hrtimer.cpp vmac.cpp eprecomp.cpp hex.cpp dsa.cpp fips140.cpp gzip.cpp seal.cpp blake2.cpp files.cpp base32.cpp sharkbox.cpp safer.cpp randpool.cpp sosemanuk.cpp arc4.cpp osrng.cpp skipjack.cpp seed.cpp sha3.cpp filters.cpp bfinit.cpp rabin.cpp 3way.cpp rw.cpp rdtables.cpp rsa.cpp tftables.cpp gost.cpp socketft.cpp nbtheory.cpp panama.cpp modes.cpp rijndael.cpp casts.cpp algebra.cpp esign.cpp gfpcrypt.cpp dll.cpp ec2n.cpp poly1305.cpp polynomi.cpp blumshub.cpp des.cpp basecode.cpp zdeflate.cpp base64.cpp rc6.cpp gf256.cpp mqueue.cpp misc.cpp pssr.cpp channels.cpp rng.cpp threefish.cpp tiger.cpp cast.cpp square.cpp asn.cpp chacha.cpp whrlpool.cpp md4.cpp dh.cpp ccm.cpp mqv.cpp tigertab.cpp gf2_32.cpp cbcmac.cpp ttmac.cpp luc.cpp trdlocal.cpp pubkey.cpp gcm.cpp ripemd.cpp kalyna.cpp kalynatab.cpp keccak.cpp eccrypto.cpp serpent.cpp cmac.cpp winpipes.cpp +LIB_SRCS = cryptlib.cpp cpu.cpp integer.cpp shacal2.cpp md5.cpp shark.cpp zinflate.cpp gf2n.cpp salsa.cpp xtr.cpp oaep.cpp rc2.cpp default.cpp wait.cpp wake.cpp twofish.cpp iterhash.cpp adler32.cpp algparam.cpp marss.cpp blowfish.cpp ecp.cpp strciphr.cpp aria.cpp aria-simd.cpp camellia.cpp dh2.cpp ida.cpp zlib.cpp elgamal.cpp crc.cpp crc-simd.cpp dessp.cpp tea.cpp eax.cpp network.cpp sha.cpp sha-simd.cpp emsa2.cpp pkcspad.cpp squaretb.cpp idea.cpp authenc.cpp hmac.cpp xtrcrypt.cpp queue.cpp mars.cpp rc5.cpp md2.cpp hrtimer.cpp vmac.cpp eprecomp.cpp hex.cpp dsa.cpp fips140.cpp gzip.cpp seal.cpp blake2.cpp blake2-simd.cpp files.cpp base32.cpp sharkbox.cpp safer.cpp randpool.cpp sosemanuk.cpp arc4.cpp osrng.cpp skipjack.cpp seed.cpp sha3.cpp filters.cpp bfinit.cpp rabin.cpp 3way.cpp rw.cpp rdtables.cpp rsa.cpp tftables.cpp gost.cpp socketft.cpp nbtheory.cpp panama.cpp modes.cpp rijndael.cpp casts.cpp algebra.cpp esign.cpp gfpcrypt.cpp dll.cpp ec2n.cpp poly1305.cpp polynomi.cpp blumshub.cpp des.cpp basecode.cpp zdeflate.cpp base64.cpp rc6.cpp gf256.cpp mqueue.cpp misc.cpp pssr.cpp channels.cpp rng.cpp threefish.cpp tiger.cpp cast.cpp square.cpp asn.cpp chacha.cpp whrlpool.cpp md4.cpp dh.cpp ccm.cpp mqv.cpp tigertab.cpp gf2_32.cpp cbcmac.cpp ttmac.cpp luc.cpp trdlocal.cpp pubkey.cpp gcm.cpp ripemd.cpp kalyna.cpp kalynatab.cpp keccak.cpp eccrypto.cpp serpent.cpp cmac.cpp winpipes.cpp -LIB_OBJS = cryptlib.obj cpu.obj integer.obj shacal2.obj md5.obj shark.obj zinflate.obj gf2n.obj salsa.obj xtr.obj oaep.obj rc2.obj default.obj wait.obj wake.obj twofish.obj iterhash.obj adler32.obj algparam.obj marss.obj blowfish.obj ecp.obj strciphr.obj aria.obj camellia.obj dh2.obj ida.obj zlib.obj elgamal.obj crc.obj crc-simd.obj dessp.obj tea.obj eax.obj network.obj sha.obj emsa2.obj pkcspad.obj squaretb.obj idea.obj authenc.obj hmac.obj xtrcrypt.obj queue.obj mars.obj rc5.obj md2.obj hrtimer.obj vmac.obj eprecomp.obj hex.obj dsa.obj fips140.obj gzip.obj seal.obj blake2.obj files.obj base32.obj sharkbox.obj safer.obj randpool.obj sosemanuk.obj arc4.obj osrng.obj skipjack.obj seed.obj sha3.obj filters.obj bfinit.obj rabin.obj 3way.obj rw.obj rdtables.obj rsa.obj tftables.obj 
gost.obj socketft.obj nbtheory.obj panama.obj modes.obj rijndael.obj casts.obj algebra.obj esign.obj gfpcrypt.obj dll.obj ec2n.obj poly1305.obj polynomi.obj blumshub.obj des.obj basecode.obj zdeflate.obj base64.obj rc6.obj gf256.obj mqueue.obj misc.obj pssr.obj channels.obj rng.obj threefish.obj tiger.obj cast.obj square.obj asn.obj chacha.obj whrlpool.obj md4.obj dh.obj ccm.obj mqv.obj tigertab.obj gf2_32.obj cbcmac.obj ttmac.obj luc.obj trdlocal.obj pubkey.obj gcm.obj ripemd.obj kalyna.obj kalynatab.obj keccak.obj eccrypto.obj serpent.obj cmac.obj winpipes.obj +LIB_OBJS = cryptlib.obj cpu.obj integer.obj shacal2.obj md5.obj shark.obj zinflate.obj gf2n.obj salsa.obj xtr.obj oaep.obj rc2.obj default.obj wait.obj wake.obj twofish.obj iterhash.obj adler32.obj algparam.obj marss.obj blowfish.obj ecp.obj strciphr.obj aria.obj aria-simd.obj camellia.obj dh2.obj ida.obj zlib.obj elgamal.obj crc.obj crc-simd.obj dessp.obj tea.obj eax.obj network.obj sha.obj sha-simd.obj emsa2.obj pkcspad.obj squaretb.obj idea.obj authenc.obj hmac.obj xtrcrypt.obj queue.obj mars.obj rc5.obj md2.obj hrtimer.obj vmac.obj eprecomp.obj hex.obj dsa.obj fips140.obj gzip.obj seal.obj blake2.obj blake2-simd.obj files.obj base32.obj sharkbox.obj safer.obj randpool.obj sosemanuk.obj arc4.obj osrng.obj skipjack.obj seed.obj sha3.obj filters.obj bfinit.obj rabin.obj 3way.obj rw.obj rdtables.obj rsa.obj tftables.obj gost.obj socketft.obj nbtheory.obj panama.obj modes.obj rijndael.obj casts.obj algebra.obj esign.obj gfpcrypt.obj dll.obj ec2n.obj poly1305.obj polynomi.obj blumshub.obj des.obj basecode.obj zdeflate.obj base64.obj rc6.obj gf256.obj mqueue.obj misc.obj pssr.obj channels.obj rng.obj threefish.obj tiger.obj cast.obj square.obj asn.obj chacha.obj whrlpool.obj md4.obj dh.obj ccm.obj mqv.obj tigertab.obj gf2_32.obj cbcmac.obj ttmac.obj luc.obj trdlocal.obj pubkey.obj gcm.obj ripemd.obj kalyna.obj kalynatab.obj keccak.obj eccrypto.obj serpent.obj cmac.obj winpipes.obj TEST_SRCS = bench1.cpp bench2.cpp test.cpp validat0.cpp validat1.cpp validat2.cpp validat3.cpp datatest.cpp regtest1.cpp regtest2.cpp regtest3.cpp fipsalgt.cpp dlltest.cpp fipstest.cpp diff --git a/cryptest.sh b/cryptest.sh index 21cc2af9..e2aa8714 100755 --- a/cryptest.sh +++ b/cryptest.sh @@ -250,15 +250,15 @@ if [[ ("$SUNCC_510_OR_ABOVE" -ne "0") ]]; then HAVE_OFAST=0 fi -if [[ (-z "$TMP") ]]; then +if [[ (-z "$TMPDIR") ]]; then if [[ (-d "/tmp") ]]; then - TMP=/tmp + TMPDIR=/tmp elif [[ (-d "/temp") ]]; then - TMP=/temp + TMPDIR=/temp elif [[ (-d "$HOME/tmp") ]]; then - TMP="$HOME/tmp" + TMPDIR="$HOME/tmp" else - echo "Please set TMP to a valid directory" + echo "Please set TMPDIR to a valid directory" [[ "$0" = "$BASH_SOURCE" ]] && exit 1 || return 1 fi fi @@ -267,74 +267,74 @@ fi rm -f adhoc.cpp > /dev/null 2>&1 cp adhoc.cpp.proto adhoc.cpp -rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 +rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ (-z "$HAVE_CXX17") ]]; then HAVE_CXX17=0 - rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 - "$CXX" -DCRYPTOPP_ADHOC_MAIN -std=c++17 adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -std=c++17 adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" 
-eq "0" ]]; then HAVE_CXX17=1 fi fi -rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 +rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ (-z "$HAVE_GNU17") ]]; then HAVE_GNU17=0 - "$CXX" -DCRYPTOPP_ADHOC_MAIN -std=gnu++17 adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -std=gnu++17 adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" -eq "0" ]]; then HAVE_GNU17=1 fi fi -rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 +rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ (-z "$HAVE_CXX14") ]]; then HAVE_CXX14=0 - "$CXX" -DCRYPTOPP_ADHOC_MAIN -std=c++14 adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -std=c++14 adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" -eq "0" ]]; then HAVE_CXX14=1 fi fi -rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 +rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ (-z "$HAVE_GNU14") ]]; then HAVE_GNU14=0 - "$CXX" -DCRYPTOPP_ADHOC_MAIN -std=gnu++14 adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -std=gnu++14 adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" -eq "0" ]]; then HAVE_GNU14=1 fi fi -rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 +rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ (-z "$HAVE_CXX11") ]]; then HAVE_CXX11=0 - "$CXX" -DCRYPTOPP_ADHOC_MAIN -std=c++11 adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -std=c++11 adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" -eq "0" ]]; then HAVE_CXX11=1 fi fi -rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 +rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ (-z "$HAVE_GNU11") ]]; then HAVE_GNU11=0 - "$CXX" -DCRYPTOPP_ADHOC_MAIN -std=gnu++11 adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -std=gnu++11 adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" -eq "0" ]]; then HAVE_GNU11=1 fi fi -rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 +rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ (-z "$HAVE_CXX03") ]]; then HAVE_CXX03=0 - "$CXX" -DCRYPTOPP_ADHOC_MAIN -std=c++03 adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -std=c++03 adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" -eq "0" ]]; then HAVE_CXX03=1 fi fi -rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 +rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ (-z "$HAVE_GNU03") ]]; then HAVE_GNU03=0 - "$CXX" -DCRYPTOPP_ADHOC_MAIN -std=gnu++03 adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -std=gnu++03 adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" -eq "0" ]]; then HAVE_GNU03=1 fi @@ -342,13 +342,13 @@ fi # Use a fallback strategy so OPT_O0 can be used with DEBUG_CXXFLAGS OPT_O0= -rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 -"$CXX" -DCRYPTOPP_ADHOC_MAIN -O0 adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 +rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 +"$CXX" -DCRYPTOPP_ADHOC_MAIN -O0 adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ ("$?" -eq "0") ]]; then OPT_O0=-O0 else - rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 - "$CXX" -DCRYPTOPP_ADHOC_MAIN -xO0 adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -xO0 adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ ("$?" 
-eq "0") ]]; then OPT_O0=-xO0 fi @@ -356,13 +356,13 @@ fi # Use a fallback strategy so OPT_O1 can be used with VALGRIND_CXXFLAGS OPT_O1= -rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 -"$CXX" -DCRYPTOPP_ADHOC_MAIN -O1 adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 +rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 +"$CXX" -DCRYPTOPP_ADHOC_MAIN -O1 adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ ("$?" -eq "0") ]]; then OPT_O1=-O1 else - rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 - "$CXX" -DCRYPTOPP_ADHOC_MAIN -xO1 adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -xO1 adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ ("$?" -eq "0") ]]; then OPT_O1=-xO1 fi @@ -370,13 +370,13 @@ fi # Use a fallback strategy so OPT_O2 can be used with RELEASE_CXXFLAGS OPT_O2= -rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 -"$CXX" -DCRYPTOPP_ADHOC_MAIN -O2 adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 +rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 +"$CXX" -DCRYPTOPP_ADHOC_MAIN -O2 adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ ("$?" -eq "0") ]]; then OPT_O2=-O2 else - rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 - "$CXX" -DCRYPTOPP_ADHOC_MAIN -xO2 adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -xO2 adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ ("$?" -eq "0") ]]; then OPT_O2=-xO2 fi @@ -385,14 +385,14 @@ fi if [[ (-z "$HAVE_O3") ]]; then HAVE_O3=0 OPT_O3= - rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 - "$CXX" -DCRYPTOPP_ADHOC_MAIN -O3 adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -O3 adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ ("$?" -eq "0") ]]; then HAVE_O3=1 OPT_O3=-O3 else - rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 - "$CXX" -DCRYPTOPP_ADHOC_MAIN -xO3 adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -xO3 adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ ("$?" -eq "0") ]]; then HAVE_O3=1 OPT_O3=-xO3 @@ -404,14 +404,14 @@ fi if [[ ( (-z "$HAVE_O5") && ("$CLANG_COMPILER" -eq "0") ) ]]; then HAVE_O5=0 OPT_O5= - rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 - "$CXX" -DCRYPTOPP_ADHOC_MAIN -O5 adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -O5 adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ ("$?" -eq "0") ]]; then HAVE_O5=1 OPT_O5=-O5 else - rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 - "$CXX" -DCRYPTOPP_ADHOC_MAIN -xO5 adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -xO5 adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ ("$?" -eq "0") ]]; then HAVE_O5=1 OPT_O5=-xO5 @@ -423,8 +423,8 @@ fi if [[ (-z "$HAVE_OS") ]]; then HAVE_OS=0 OPT_OS= - rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 - "$CXX" -DCRYPTOPP_ADHOC_MAIN -Os adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -Os adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ ("$?" 
-eq "0") ]]; then HAVE_OS=1 OPT_OS=-Os @@ -435,8 +435,8 @@ fi if [[ (-z "$HAVE_OFAST") ]]; then HAVE_OFAST=0 OPT_OFAST= - rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 - "$CXX" -DCRYPTOPP_ADHOC_MAIN -Ofast adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -Ofast adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ ("$?" -eq "0") ]]; then HAVE_OFAST=1 OPT_OFAST=-Ofast @@ -445,13 +445,13 @@ fi # Use a fallback strategy so OPT_G2 can be used with RELEASE_CXXFLAGS OPT_G2= -rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 -"$CXX" -DCRYPTOPP_ADHOC_MAIN -g2 adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 +rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 +"$CXX" -DCRYPTOPP_ADHOC_MAIN -g2 adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ ("$?" -eq "0") ]]; then OPT_G2=-g2 else - rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 - "$CXX" -DCRYPTOPP_ADHOC_MAIN -g adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -g adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ ("$?" -eq "0") ]]; then OPT_G2=-g fi @@ -459,13 +459,13 @@ fi # Use a fallback strategy so OPT_G3 can be used with DEBUG_CXXFLAGS OPT_G3= -rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 -"$CXX" -DCRYPTOPP_ADHOC_MAIN -g3 adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 +rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 +"$CXX" -DCRYPTOPP_ADHOC_MAIN -g3 adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ ("$?" -eq "0") ]]; then OPT_G3=-g3 else - rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 - "$CXX" -DCRYPTOPP_ADHOC_MAIN -g adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -g adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ ("$?" -eq "0") ]]; then OPT_G3=-g fi @@ -473,10 +473,10 @@ fi # Cygwin and noisy compiles OPT_PIC= -rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 +rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ (-z "$HAVE_PIC") ]]; then HAVE_PIC=0 - PIC_PROBLEMS=$("$CXX" -DCRYPTOPP_ADHOC_MAIN -fPIC adhoc.cpp -o "$TMP/adhoc.exe" 2>&1 | "$EGREP" -ic '(warning|error)') + PIC_PROBLEMS=$("$CXX" -DCRYPTOPP_ADHOC_MAIN -fPIC adhoc.cpp -o "$TMPDIR/adhoc.exe" 2>&1 | "$EGREP" -ic '(warning|error)') if [[ "$PIC_PROBLEMS" -eq "0" ]]; then HAVE_PIC=1 OPT_PIC=-fPIC @@ -484,12 +484,12 @@ if [[ (-z "$HAVE_PIC") ]]; then fi # GCC 4.8; Clang 3.4 -rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 +rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ (-z "$HAVE_UBSAN") ]]; then HAVE_UBSAN=0 - "$CXX" -DCRYPTOPP_ADHOC_MAIN -fsanitize=undefined adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -fsanitize=undefined adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ ("$?" -eq "0") ]]; then - "$TMP/adhoc.exe" > /dev/null 2>&1 + "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ ("$?" -eq "0") ]]; then HAVE_UBSAN=1 fi @@ -497,12 +497,12 @@ if [[ (-z "$HAVE_UBSAN") ]]; then fi # GCC 4.8; Clang 3.4 -rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 +rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ (-z "$HAVE_ASAN") ]]; then HAVE_ASAN=0 - "$CXX" -DCRYPTOPP_ADHOC_MAIN -fsanitize=address adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -fsanitize=address adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ ("$?" -eq "0") ]]; then - "$TMP/adhoc.exe" > /dev/null 2>&1 + "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ ("$?" 
-eq "0") ]]; then HAVE_ASAN=1 fi @@ -510,41 +510,41 @@ if [[ (-z "$HAVE_ASAN") ]]; then fi # GCC 6.0; maybe Clang -rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 +rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ (-z "$HAVE_BSAN") ]]; then HAVE_BSAN=0 - "$CXX" -DCRYPTOPP_ADHOC_MAIN -fsanitize=bounds-strict adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -fsanitize=bounds-strict adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ ("$?" -eq "0") ]]; then - "$TMP/adhoc.exe" > /dev/null 2>&1 + "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ ("$?" -eq "0") ]]; then HAVE_BSAN=1 fi fi fi -rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 +rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ (-z "$HAVE_OMP") ]]; then HAVE_OMP=0 if [[ "$GCC_COMPILER" -ne "0" ]]; then - "$CXX" -DCRYPTOPP_ADHOC_MAIN -fopenmp -O3 adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -fopenmp -O3 adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" -eq "0" ]]; then HAVE_OMP=1 OMP_FLAGS=(-fopenmp -O3) fi elif [[ "$INTEL_COMPILER" -ne "0" ]]; then - "$CXX" -DCRYPTOPP_ADHOC_MAIN -openmp -O3 adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -openmp -O3 adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" -eq "0" ]]; then HAVE_OMP=1 OMP_FLAGS=(-openmp -O3) fi elif [[ "$CLANG_COMPILER" -ne "0" ]]; then - "$CXX" -DCRYPTOPP_ADHOC_MAIN -fopenmp=libomp -O3 adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -fopenmp=libomp -O3 adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" -eq "0" ]]; then HAVE_OMP=1 OMP_FLAGS=(-fopenmp=libomp -O3) fi elif [[ "$SUN_COMPILER" -ne "0" ]]; then - "$CXX" -DCRYPTOPP_ADHOC_MAIN -xopenmp=parallel -xO3 adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -xopenmp=parallel -xO3 adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" -eq "0" ]]; then HAVE_OMP=1 OMP_FLAGS=(-xopenmp=parallel -xO3) @@ -552,33 +552,33 @@ if [[ (-z "$HAVE_OMP") ]]; then fi fi -rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 +rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ (-z "$HAVE_INTEL_MULTIARCH") ]]; then HAVE_INTEL_MULTIARCH=0 if [[ ("$IS_DARWIN" -ne "0") && ("$IS_X86" -ne "0" || "$IS_X64" -ne "0") ]]; then - "$CXX" -DCRYPTOPP_ADHOC_MAIN -arch i386 -arch x86_64 adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -arch i386 -arch x86_64 adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" -eq "0" ]]; then HAVE_INTEL_MULTIARCH=1 fi fi fi -rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 +rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ (-z "$HAVE_PPC_MULTIARCH") ]]; then HAVE_PPC_MULTIARCH=0 if [[ ("$IS_DARWIN" -ne "0") && ("$IS_PPC" -ne "0") ]]; then - "$CXX" -DCRYPTOPP_ADHOC_MAIN -arch ppc -arch ppc64 adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -arch ppc -arch ppc64 adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" -eq "0" ]]; then HAVE_PPC_MULTIARCH=1 fi fi fi -rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 +rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ (-z "$HAVE_X32") ]]; then HAVE_X32=0 if [[ "$IS_X32" -ne "0" ]]; then - "$CXX" -DCRYPTOPP_ADHOC_MAIN -mx32 adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -mx32 adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" 
-eq "0" ]]; then HAVE_X32=1 fi @@ -588,8 +588,8 @@ fi # Hit or miss, mostly hit if [[ (-z "$HAVE_NATIVE_ARCH") ]]; then HAVE_NATIVE_ARCH=0 - rm -f "$TMP/adhoc.exe" > /dev/null 2>&1 - "$CXX" -DCRYPTOPP_ADHOC_MAIN -march=native adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + rm -f "$TMPDIR/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -march=native adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ ("$?" -eq "0") ]]; then HAVE_NATIVE_ARCH=1 fi @@ -603,7 +603,7 @@ if [[ (-z "$HAVE_LDGOLD") ]]; then if [[ (! -z "$LD_GOLD") && (! -z "$ELF_FILE") ]]; then LD_GOLD=$(file "$LD_GOLD" | cut -d":" -f 2 | "$EGREP" -i -c "elf") if [[ ("$LD_GOLD" -ne "0") ]]; then - "$CXX" -DCRYPTOPP_ADHOC_MAIN -fuse-ld=gold adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -fuse-ld=gold adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" -eq "0" ]]; then HAVE_LDGOLD=1 fi @@ -688,10 +688,10 @@ fi # Used to disassemble object modules so we can verify some aspects of code generation if [[ (-z "$HAVE_DISASS") ]]; then - echo "int main(int argc, char* argv[]) {return 0;}" > "$TMP/test.cc" - "$CXX" "$TMP/test.cc" -o "$TMP/test.exe" > /dev/null 2>&1 + echo "int main(int argc, char* argv[]) {return 0;}" > "$TMPDIR/test.cc" + "$CXX" "$TMPDIR/test.cc" -o "$TMPDIR/test.exe" > /dev/null 2>&1 if [[ "$?" -eq "0" ]]; then - "$DISASS" "${DISASSARGS[@]}" "$TMP/test.exe" > /dev/null 2>&1 + "$DISASS" "${DISASSARGS[@]}" "$TMPDIR/test.exe" > /dev/null 2>&1 if [[ "$?" -eq "0" ]]; then HAVE_DISASS=1 else @@ -1167,7 +1167,7 @@ if [[ ("$HAVE_DISASS" -ne "0" && ("$IS_X86" -ne "0" || "$IS_X64" -ne "0")) ]]; t echo OBJFILE=sha.o; rm -f "$OBJFILE" 2>/dev/null - CXX="$CXX" CXXFLAGS="$RELEASE_CXXFLAGS -DDISABLE_NATIVE_ARCH=1 -msse -msse2" "$MAKE" "${MAKEARGS[@]}" $OBJFILE 2>&1 | tee -a "$TEST_RESULTS" + CXX="$CXX" CXXFLAGS="$RELEASE_CXXFLAGS -DDISABLE_NATIVE_ARCH=1" "$MAKE" "${MAKEARGS[@]}" $OBJFILE 2>&1 | tee -a "$TEST_RESULTS" DISASS_TEXT=$("$DISASS" "${DISASSARGS[@]}" "$OBJFILE" 2>/dev/null) @@ -1201,7 +1201,7 @@ if [[ ("$HAVE_DISASS" -ne "0" && ("$IS_X86" -ne "0" || "$IS_X64" -ne "0")) ]]; t ############################################ # Test CRC-32C code generation - "$CXX" -DCRYPTOPP_ADHOC_MAIN -msse4.2 adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -msse4.2 adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" -eq "0" ]]; then X86_CRC32=1 fi @@ -1239,7 +1239,7 @@ if [[ ("$HAVE_DISASS" -ne "0" && ("$IS_X86" -ne "0" || "$IS_X64" -ne "0")) ]]; t ############################################ # Test AES-NI code generation - "$CXX" -DCRYPTOPP_ADHOC_MAIN -maes adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -maes adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" 
-eq "0" ]]; then X86_AESNI=1 fi @@ -1251,7 +1251,7 @@ if [[ ("$HAVE_DISASS" -ne "0" && ("$IS_X86" -ne "0" || "$IS_X64" -ne "0")) ]]; t echo OBJFILE=rijndael.o; rm -f "$OBJFILE" 2>/dev/null - CXX="$CXX" CXXFLAGS="$RELEASE_CXXFLAGS -DDISABLE_NATIVE_ARCH=1 -msse -msse2" "$MAKE" "${MAKEARGS[@]}" $OBJFILE 2>&1 | tee -a "$TEST_RESULTS" + CXX="$CXX" CXXFLAGS="$RELEASE_CXXFLAGS -DDISABLE_NATIVE_ARCH=1" "$MAKE" "${MAKEARGS[@]}" $OBJFILE 2>&1 | tee -a "$TEST_RESULTS" COUNT=0 FAILED=0 @@ -1301,7 +1301,7 @@ if [[ ("$HAVE_DISASS" -ne "0" && ("$IS_X86" -ne "0" || "$IS_X64" -ne "0")) ]]; t ############################################ # X86 carryless multiply code generation - "$CXX" -DCRYPTOPP_ADHOC_MAIN -mpclmul adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -mpclmul adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" -eq "0" ]]; then X86_PCLMUL=1 fi @@ -1313,7 +1313,7 @@ if [[ ("$HAVE_DISASS" -ne "0" && ("$IS_X86" -ne "0" || "$IS_X64" -ne "0")) ]]; t echo OBJFILE=gcm.o; rm -f "$OBJFILE" 2>/dev/null - CXX="$CXX" CXXFLAGS="$RELEASE_CXXFLAGS -DDISABLE_NATIVE_ARCH=1 -msse -msse2" "$MAKE" "${MAKEARGS[@]}" $OBJFILE 2>&1 | tee -a "$TEST_RESULTS" + CXX="$CXX" CXXFLAGS="$RELEASE_CXXFLAGS -DDISABLE_NATIVE_ARCH=1" "$MAKE" "${MAKEARGS[@]}" $OBJFILE 2>&1 | tee -a "$TEST_RESULTS" COUNT=0 FAILED=0 @@ -1339,11 +1339,11 @@ if [[ ("$HAVE_DISASS" -ne "0" && ("$IS_X86" -ne "0" || "$IS_X64" -ne "0")) ]]; t ############################################ # Test RDRAND and RDSEED code generation - "$CXX" -DCRYPTOPP_ADHOC_MAIN -mrdrnd adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -mrdrnd adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" -eq "0" ]]; then X86_RDRAND=1 fi - "$CXX" -DCRYPTOPP_ADHOC_MAIN -mrdseed adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -mrdseed adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" -eq "0" ]]; then X86_RDSEED=1 fi @@ -1355,7 +1355,7 @@ if [[ ("$HAVE_DISASS" -ne "0" && ("$IS_X86" -ne "0" || "$IS_X64" -ne "0")) ]]; t echo OBJFILE=rdrand.o; rm -f "$OBJFILE" 2>/dev/null - CXX="$CXX" CXXFLAGS="$RELEASE_CXXFLAGS -DDISABLE_NATIVE_ARCH=1 -msse -msse2" "$MAKE" "${MAKEARGS[@]}" $OBJFILE 2>&1 | tee -a "$TEST_RESULTS" + CXX="$CXX" CXXFLAGS="$RELEASE_CXXFLAGS -DDISABLE_NATIVE_ARCH=1" "$MAKE" "${MAKEARGS[@]}" $OBJFILE 2>&1 | tee -a "$TEST_RESULTS" COUNT=0 FAILED=0 @@ -1385,7 +1385,7 @@ if [[ ("$HAVE_DISASS" -ne "0" && ("$IS_X86" -ne "0" || "$IS_X64" -ne "0")) ]]; t ############################################ # X86 SHA code generation - "$CXX" -DCRYPTOPP_ADHOC_MAIN -msha adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -msha adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" 
-eq "0" ]]; then X86_SHA=1 fi @@ -1397,7 +1397,7 @@ if [[ ("$HAVE_DISASS" -ne "0" && ("$IS_X86" -ne "0" || "$IS_X64" -ne "0")) ]]; t echo OBJFILE=sha-simd.o; rm -f "$OBJFILE" 2>/dev/null - CXX="$CXX" CXXFLAGS="$RELEASE_CXXFLAGS -DDISABLE_NATIVE_ARCH=1 -msse -msse2" "$MAKE" "${MAKEARGS[@]}" $OBJFILE 2>&1 | tee -a "$TEST_RESULTS" + CXX="$CXX" CXXFLAGS="$RELEASE_CXXFLAGS -DDISABLE_NATIVE_ARCH=1" "$MAKE" "${MAKEARGS[@]}" $OBJFILE 2>&1 | tee -a "$TEST_RESULTS" COUNT=0 FAILED=0 @@ -1465,7 +1465,7 @@ if [[ ("$HAVE_DISASS" -ne "0" && ("$IS_ARM32" -ne "0" || "$IS_ARM64" -ne "0")) ] echo "Testing: ARM NEON code generation" | tee -a "$TEST_RESULTS" echo - OBJFILE=aria.o; rm -f "$OBJFILE" 2>/dev/null + OBJFILE=aria-simd.o; rm -f "$OBJFILE" 2>/dev/null CXX="$CXX" CXXFLAGS="$RELEASE_CXXFLAGS -DDISABLE_NATIVE_ARCH=1" "$MAKE" "${MAKEARGS[@]}" $OBJFILE 2>&1 | tee -a "$TEST_RESULTS" COUNT=0 @@ -1515,7 +1515,7 @@ if [[ ("$HAVE_DISASS" -ne "0" && ("$IS_ARM32" -ne "0" || "$IS_ARM64" -ne "0")) ] ############################################ # ARM CRC32 code generation - "$CXX" -DCRYPTOPP_ADHOC_MAIN -march=armv8-a+crc adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -march=armv8-a+crc adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" -eq "0" ]]; then ARM_CRC32=1 fi @@ -1565,7 +1565,7 @@ if [[ ("$HAVE_DISASS" -ne "0" && ("$IS_ARM32" -ne "0" || "$IS_ARM64" -ne "0")) ] ############################################ # ARM carryless multiply code generation - "$CXX" -DCRYPTOPP_ADHOC_MAIN -march=armv8-a+crypto adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -march=armv8-a+crypto adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" -eq "0" ]]; then ARM_PMULL=1 fi @@ -1603,7 +1603,7 @@ if [[ ("$HAVE_DISASS" -ne "0" && ("$IS_ARM32" -ne "0" || "$IS_ARM64" -ne "0")) ] ############################################ # ARM SHA code generation - "$CXX" -DCRYPTOPP_ADHOC_MAIN -march=armv8-a+crypto adhoc.cpp -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$CXX" -DCRYPTOPP_ADHOC_MAIN -march=armv8-a+crypto adhoc.cpp -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" -eq "0" ]]; then ARM_SHA=1 fi @@ -5098,7 +5098,7 @@ fi if [[ ("$CLANG_COMPILER" -eq "0") ]]; then CLANG_CXX=$(which clang++ 2>&1 | "$GREP" -v "no clang++" | head -1) - "$CLANG_CXX" -x c++ -DCRYPTOPP_ADHOC_MAIN adhoc.cpp.proto -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$CLANG_CXX" -x c++ -DCRYPTOPP_ADHOC_MAIN adhoc.cpp.proto -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" -eq "0" ]]; then ############################################ @@ -5133,7 +5133,7 @@ fi if [[ ("$GCC_COMPILER" -eq "0") ]]; then GCC_CXX=$(which g++ 2>&1 | "$GREP" -v "no g++" | head -1) - "$GCC_CXX" -x c++ -DCRYPTOPP_ADHOC_MAIN adhoc.cpp.proto -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$GCC_CXX" -x c++ -DCRYPTOPP_ADHOC_MAIN adhoc.cpp.proto -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" -eq "0" ]]; then ############################################ @@ -5171,7 +5171,7 @@ if [[ ("$INTEL_COMPILER" -eq "0") ]]; then if [[ (-z "$INTEL_CXX") ]]; then INTEL_CXX=$(find /opt/intel -name icpc 2>/dev/null | "$GREP" -iv composer | head -1) fi - "$INTEL_CXX" -x c++ -DCRYPTOPP_ADHOC_MAIN adhoc.cpp.proto -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$INTEL_CXX" -x c++ -DCRYPTOPP_ADHOC_MAIN adhoc.cpp.proto -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" 
-eq "0" ]]; then ############################################ @@ -5207,7 +5207,7 @@ if [[ ("$IS_DARWIN" -ne "0" && "$MACPORTS_COMPILER" -eq "0") ]]; then MACPORTS_CXX=$(find /opt/local/bin -name 'g++-mp-4*' 2>/dev/null | head -1) if [[ (! -z "$MACPORTS_CXX") ]]; then - "$MACPORTS_CXX" -x c++ -std=c++11 -DCRYPTOPP_ADHOC_MAIN adhoc.cpp.proto -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$MACPORTS_CXX" -x c++ -std=c++11 -DCRYPTOPP_ADHOC_MAIN adhoc.cpp.proto -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" -eq "0" ]]; then ############################################ @@ -5240,7 +5240,7 @@ if [[ ("$IS_DARWIN" -ne "0" && "$MACPORTS_COMPILER" -eq "0") ]]; then MACPORTS_CXX=$(find /opt/local/bin -name 'g++-mp-5*' 2>/dev/null | head -1) if [[ (! -z "$MACPORTS_CXX") ]]; then - "$MACPORTS_CXX" -x c++ -std=c++11 -DCRYPTOPP_ADHOC_MAIN adhoc.cpp.proto -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$MACPORTS_CXX" -x c++ -std=c++11 -DCRYPTOPP_ADHOC_MAIN adhoc.cpp.proto -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" -eq "0" ]]; then ############################################ @@ -5273,7 +5273,7 @@ if [[ ("$IS_DARWIN" -ne "0" && "$MACPORTS_COMPILER" -eq "0") ]]; then MACPORTS_CXX=$(find /opt/local/bin -name 'g++-mp-6*' 2>/dev/null | head -1) if [[ (! -z "$MACPORTS_CXX") ]]; then - "$MACPORTS_CXX" -x c++ -std=c++11 -DCRYPTOPP_ADHOC_MAIN adhoc.cpp.proto -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$MACPORTS_CXX" -x c++ -std=c++11 -DCRYPTOPP_ADHOC_MAIN adhoc.cpp.proto -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" -eq "0" ]]; then ############################################ @@ -5306,7 +5306,7 @@ if [[ ("$IS_DARWIN" -ne "0" && "$MACPORTS_COMPILER" -eq "0") ]]; then MACPORTS_CXX=$(find /opt/local/bin -name 'g++-mp-7*' 2>/dev/null | head -1) if [[ (! -z "$MACPORTS_CXX") ]]; then - "$MACPORTS_CXX" -x c++ -std=c++11 -DCRYPTOPP_ADHOC_MAIN adhoc.cpp.proto -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$MACPORTS_CXX" -x c++ -std=c++11 -DCRYPTOPP_ADHOC_MAIN adhoc.cpp.proto -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" -eq "0" ]]; then ############################################ @@ -5339,7 +5339,7 @@ if [[ ("$IS_DARWIN" -ne "0" && "$MACPORTS_COMPILER" -eq "0") ]]; then MACPORTS_CXX=$(find /opt/local/bin -name 'clang++-mp-3.7*' 2>/dev/null | head -1) if [[ (! -z "$MACPORTS_CXX") ]]; then - "$MACPORTS_CXX" -x c++ -std=c++11 -DCRYPTOPP_ADHOC_MAIN adhoc.cpp.proto -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$MACPORTS_CXX" -x c++ -std=c++11 -DCRYPTOPP_ADHOC_MAIN adhoc.cpp.proto -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" -eq "0" ]]; then ############################################ @@ -5371,7 +5371,7 @@ if [[ ("$IS_DARWIN" -ne "0" && "$MACPORTS_COMPILER" -eq "0") ]]; then MACPORTS_CXX=$(find /opt/local/bin -name 'clang++-mp-3.8*' 2>/dev/null | head -1) if [[ (! -z "$MACPORTS_CXX") ]]; then - "$MACPORTS_CXX" -x c++ -std=c++11 -DCRYPTOPP_ADHOC_MAIN adhoc.cpp.proto -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$MACPORTS_CXX" -x c++ -std=c++11 -DCRYPTOPP_ADHOC_MAIN adhoc.cpp.proto -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" -eq "0" ]]; then ############################################ @@ -5403,7 +5403,7 @@ if [[ ("$IS_DARWIN" -ne "0" && "$MACPORTS_COMPILER" -eq "0") ]]; then MACPORTS_CXX=$(find /opt/local/bin -name 'clang++-mp-3.9*' 2>/dev/null | head -1) if [[ (! -z "$MACPORTS_CXX") ]]; then - "$MACPORTS_CXX" -x c++ -std=c++11 -DCRYPTOPP_ADHOC_MAIN adhoc.cpp.proto -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$MACPORTS_CXX" -x c++ -std=c++11 -DCRYPTOPP_ADHOC_MAIN adhoc.cpp.proto -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" 
-eq "0" ]]; then ############################################ @@ -5435,7 +5435,7 @@ if [[ ("$IS_DARWIN" -ne "0" && "$MACPORTS_COMPILER" -eq "0") ]]; then MACPORTS_CXX=$(find /opt/local/bin -name 'clang++-mp-4*' 2>/dev/null | head -1) if [[ (! -z "$MACPORTS_CXX") ]]; then - "$MACPORTS_CXX" -x c++ -std=c++11 -DCRYPTOPP_ADHOC_MAIN adhoc.cpp.proto -o "$TMP/adhoc.exe" > /dev/null 2>&1 + "$MACPORTS_CXX" -x c++ -std=c++11 -DCRYPTOPP_ADHOC_MAIN adhoc.cpp.proto -o "$TMPDIR/adhoc.exe" > /dev/null 2>&1 if [[ "$?" -eq "0" ]]; then ############################################ diff --git a/cryptlib.vcxproj b/cryptlib.vcxproj index 142c88fb..b4d5b74b 100644 --- a/cryptlib.vcxproj +++ b/cryptlib.vcxproj @@ -158,6 +158,7 @@ + @@ -165,6 +166,7 @@ + diff --git a/gcm.cpp b/gcm.cpp index b55024a1..2c915125 100644 --- a/gcm.cpp +++ b/gcm.cpp @@ -211,7 +211,7 @@ inline static void SSE2_Xor16(byte *a, const byte *b, const byte *c) } #endif -#if CRYPTOPP_BOOL_NEON_INTRINSICS_AVAILABLE +#if CRYPTOPP_ARM_NEON_AVAILABLE inline static void NEON_Xor16(byte *a, const byte *b, const byte *c) { CRYPTOPP_ASSERT(IsAlignedOn(a,GetAlignmentOf())); @@ -437,7 +437,7 @@ void GCM_Base::SetKeyWithoutResync(const byte *userKey, size_t keylength, const for (k=1; k=64*1024) #if CRYPTOPP_BOOL_SSE2_ASM_AVAILABLE || defined(CRYPTOPP_X64_MASM_AVAILABLE) + HasSSE2() -//#elif CRYPTOPP_BOOL_NEON_INTRINSICS_AVAILABLE +//#elif CRYPTOPP_ARM_NEON_AVAILABLE // + HasNEON() #endif ) diff --git a/neon.cpp b/neon.cpp new file mode 100644 index 00000000..732a775f --- /dev/null +++ b/neon.cpp @@ -0,0 +1,102 @@ +// crc-simd.cpp - written and placed in the public domain by +// Jeffrey Walton, Uri Blumenthal and Marcel Raad. +// +// This source file uses intrinsics to gain access to ARMv7a and +// ARMv8a NEON instructions. A separate source file is needed +// because additional CXXFLAGS are required to enable the +// appropriate instructions sets in some build configurations. + +#include "pch.h" +#include "config.h" +#include "misc.h" + +#if (CRYPTOPP_ARM_NEON_AVAILABLE) && defined(__GNUC__) +# include "arm_neon.h" +#endif + +#ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY +# include +# include +#endif + +NAMESPACE_BEGIN(CryptoPP) + +#ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY +extern "C" { + typedef void (*SigHandler)(int); + + static jmp_buf s_jmpSIGILL; + static void SigIllHandler(int) + { + longjmp(s_jmpSIGILL, 1); + } +}; +#endif // Not CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY + +#if (CRYPTOPP_ARM_NEON_AVAILABLE) +bool CPU_TryNEON_ARM() +{ +# if defined(CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY) + volatile bool result = true; + __try + { + uint32_t v1[4] = {1,1,1,1}; + uint32x4_t x1 = vld1q_u32(v1); + uint64_t v2[2] = {1,1}; + uint64x2_t x2 = vld1q_u64(v2); + + uint32x4_t x3 = vdupq_n_u32(2); + x3 = vsetq_lane_u32(vgetq_lane_u32(x1,0),x3,0); + x3 = vsetq_lane_u32(vgetq_lane_u32(x1,3),x3,3); + uint64x2_t x4 = vdupq_n_u64(2); + x4 = vsetq_lane_u64(vgetq_lane_u64(x2,0),x4,0); + x4 = vsetq_lane_u64(vgetq_lane_u64(x2,1),x4,1); + + result = !!(vgetq_lane_u32(x3,0) | vgetq_lane_u64(x4,1)); + } + __except (EXCEPTION_EXECUTE_HANDLER) + { + return false; + } + return result; +# else + // longjmp and clobber warnings. Volatile is required. 
+ // http://github.com/weidai11/cryptopp/issues/24 and http://stackoverflow.com/q/7721854 + volatile bool result = true; + + volatile SigHandler oldHandler = signal(SIGILL, SigIllHandler); + if (oldHandler == SIG_ERR) + return false; + + volatile sigset_t oldMask; + if (sigprocmask(0, NULLPTR, (sigset_t*)&oldMask)) + return false; + + if (setjmp(s_jmpSIGILL)) + result = false; + else + { + uint32_t v1[4] = {1,1,1,1}; + uint32x4_t x1 = vld1q_u32(v1); + uint64_t v2[2] = {1,1}; + uint64x2_t x2 = vld1q_u64(v2); + + uint32x4_t x3 = {0,0,0,0}; + x3 = vsetq_lane_u32(vgetq_lane_u32(x1,0),x3,0); + x3 = vsetq_lane_u32(vgetq_lane_u32(x1,3),x3,3); + uint64x2_t x4 = {0,0}; + x4 = vsetq_lane_u64(vgetq_lane_u64(x2,0),x4,0); + x4 = vsetq_lane_u64(vgetq_lane_u64(x2,1),x4,1); + + // Hack... GCC optimizes away the code and returns true + result = !!(vgetq_lane_u32(x3,0) | vgetq_lane_u64(x4,1)); + } + + sigprocmask(SIG_SETMASK, (sigset_t*)&oldMask, NULLPTR); + signal(SIGILL, oldHandler); + return result; +# endif +} +#endif // CRYPTOPP_ARM_NEON_AVAILABLE + +NAMESPACE_END diff --git a/rdrand-masm.cmd b/rdrand-masm.cmd old mode 100755 new mode 100644 diff --git a/sha-simd.cpp b/sha-simd.cpp index 268d8c17..7dafbc96 100644 --- a/sha-simd.cpp +++ b/sha-simd.cpp @@ -730,7 +730,7 @@ void CRYPTOPP_FASTCALL SHA256_HashBlocks_ARMV8A(word32 *state, const word32 *dat STATE0 = vld1q_u32(&state[0]); STATE1 = vld1q_u32(&state[4]); - const size_t BLOCKSIZE = 6; + const size_t BLOCKSIZE = 64; while (length >= BLOCKSIZE) { // Save current hash
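Note on the runtime NEON detection added in neon.cpp above: CPU_TryNEON_ARM()
probes the CPU by executing a handful of NEON loads and lane operations and
trapping the fault that occurs when the extension is missing: structured
exception handling (__try/__except) under MSVC, and a SIGILL handler plus
setjmp/longjmp elsewhere. The following is a minimal, self-contained sketch of
the POSIX path for illustration only; ProbeNEON and HandleSigIll are
hypothetical names (not library APIs), and it assumes a POSIX system with NEON
intrinsics enabled at compile time (e.g. -mfpu=neon on ARMv7).

    #include <csetjmp>
    #include <csignal>
    #include <cstdio>
    #include <arm_neon.h>

    static jmp_buf s_env;

    // SIGILL lands here when the NEON instruction faults.
    static void HandleSigIll(int)
    {
        std::longjmp(s_env, 1);
    }

    // Returns true when a NEON instruction executes without raising SIGILL.
    static bool ProbeNEON()
    {
        // Volatile keeps 'result' out of registers that longjmp may clobber.
        volatile bool result = true;
        void (*oldHandler)(int) = std::signal(SIGILL, HandleSigIll);
        if (oldHandler == SIG_ERR)
            return false;

        if (setjmp(s_env))
            result = false;    // arrived here via HandleSigIll
        else
        {
            const uint32_t v[4] = {1, 2, 3, 4};
            uint32x4_t x = vld1q_u32(v);            // the probe instruction
            result = (vgetq_lane_u32(x, 0) == 1);   // use x so it is not elided
        }

        std::signal(SIGILL, oldHandler);
        return result;
    }

    int main()
    {
        std::printf("NEON available: %s\n", ProbeNEON() ? "yes" : "no");
        return 0;
    }

The library version above additionally saves and restores the signal mask via
sigprocmask(), since longjmp'ing out of a signal handler can leave SIGILL
blocked on some platforms, and it touches several vector lanes rather than one
so the optimizer cannot fold the whole probe away and report a false positive.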