diff --git a/GNUmakefile b/GNUmakefile
index 65babe10..c6a30a4f 100755
--- a/GNUmakefile
+++ b/GNUmakefile
@@ -666,6 +666,12 @@ ifeq ($(DETECT_FEATURES),1)
     endif
   endif
 
+  # Drop SIMON64 and SPECK64 to Power4 if Power7 is not available
+  ifeq ($(SIMON64_FLAG)$(SPECK64_FLAG),)
+    SPECK64_FLAG = $(ALTIVEC_FLAG)
+    SIMON64_FLAG = $(ALTIVEC_FLAG)
+  endif
+
   # IBM XL C/C++
   TPROG = TestPrograms/test_ppc_power8.cxx
   TOPT = -qarch=pwr8 -qaltivec
@@ -743,6 +749,12 @@ ifeq ($(DETECT_FEATURES),1)
     endif
   endif
 
+  # Drop SIMON64 and SPECK64 to Power4 if Power7 is not available
+  ifeq ($(SIMON64_FLAG)$(SPECK64_FLAG),)
+    SPECK64_FLAG = $(ALTIVEC_FLAG)
+    SIMON64_FLAG = $(ALTIVEC_FLAG)
+  endif
+
   ifeq ($(ALTIVEC_FLAG),)
     CXXFLAGS += -DCRYPTOPP_DISABLE_ALTIVEC
   else ifeq ($(POWER7_FLAG),)
diff --git a/adv_simd.h b/adv_simd.h
index 76954e99..c79a9f25 100644
--- a/adv_simd.h
+++ b/adv_simd.h
@@ -74,6 +74,14 @@
 # include "ppc_simd.h"
 #endif
 
+#ifndef CRYPTOPP_INLINE
+# if defined(CRYPTOPP_DEBUG)
+#  define CRYPTOPP_INLINE static
+# else
+#  define CRYPTOPP_INLINE inline
+# endif
+#endif
+
 // ************************ All block ciphers *********************** //
 
 ANONYMOUS_NAMESPACE_BEGIN
@@ -103,7 +111,7 @@ NAMESPACE_BEGIN(CryptoPP)
 /// \details The subkey type is usually word32 or word64. F2 and F6 must use the
 ///   same word type.
 template <typename F2, typename F6, typename W>
-inline size_t AdvancedProcessBlocks64_6x2_NEON(F2 func2, F6 func6,
+CRYPTOPP_INLINE size_t AdvancedProcessBlocks64_6x2_NEON(F2 func2, F6 func6,
     const W *subKeys, size_t rounds, const byte *inBlocks,
     const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
 {
@@ -348,7 +356,7 @@ inline size_t AdvancedProcessBlocks64_6x2_NEON(F2 func2, F6 func6,
 /// \details The subkey type is usually word32 or word64. F1 and F6 must use the
 ///   same word type.
 template <typename F1, typename F6, typename W>
-inline size_t AdvancedProcessBlocks128_6x1_NEON(F1 func1, F6 func6,
+CRYPTOPP_INLINE size_t AdvancedProcessBlocks128_6x1_NEON(F1 func1, F6 func6,
     const W *subKeys, size_t rounds, const byte *inBlocks,
     const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
 {
@@ -510,7 +518,7 @@ inline size_t AdvancedProcessBlocks128_6x1_NEON(F1 func1, F6 func6,
 ///   vector type. The V parameter is used to avoid template argument
 ///   deduction/substitution failures.
 template <typename F1, typename F4, typename W, typename V>
-inline size_t AdvancedProcessBlocks128_4x1_NEON(F1 func1, F4 func4,
+CRYPTOPP_INLINE size_t AdvancedProcessBlocks128_4x1_NEON(F1 func1, F4 func4,
     const V& unused, const W *subKeys, size_t rounds, const byte *inBlocks,
     const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
 {
@@ -651,7 +659,7 @@ inline size_t AdvancedProcessBlocks128_4x1_NEON(F1 func1, F4 func4,
 /// \details The subkey type is usually word32 or word64. F2 and F6 must use the
 ///   same word type.
 template <typename F2, typename F6, typename W>
-inline size_t AdvancedProcessBlocks128_6x2_NEON(F2 func2, F6 func6,
+CRYPTOPP_INLINE size_t AdvancedProcessBlocks128_6x2_NEON(F2 func2, F6 func6,
     const W *subKeys, size_t rounds, const byte *inBlocks,
     const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
 {
@@ -882,7 +890,7 @@ NAMESPACE_BEGIN(CryptoPP)
 /// \details The subkey type is usually word32 or word64. F1 and F2 must use the
 ///   same word type.
 template <typename F1, typename F2, typename W>
-inline size_t AdvancedProcessBlocks64_2x1_SSE(F1 func1, F2 func2,
+CRYPTOPP_INLINE size_t AdvancedProcessBlocks64_2x1_SSE(F1 func1, F2 func2,
     MAYBE_CONST W *subKeys, size_t rounds, const byte *inBlocks,
     const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
 {
@@ -1037,7 +1045,7 @@ inline size_t AdvancedProcessBlocks64_2x1_SSE(F1 func1, F2 func2,
 /// \details The subkey type is usually word32 or word64. F2 and F6 must use the
 ///   same word type.
 template <typename F2, typename F6, typename W>
-inline size_t AdvancedProcessBlocks64_6x2_SSE(F2 func2, F6 func6,
+CRYPTOPP_INLINE size_t AdvancedProcessBlocks64_6x2_SSE(F2 func2, F6 func6,
     MAYBE_CONST W *subKeys, size_t rounds, const byte *inBlocks,
     const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
 {
@@ -1288,7 +1296,7 @@ inline size_t AdvancedProcessBlocks64_6x2_SSE(F2 func2, F6 func6,
 /// \details The subkey type is usually word32 or word64. F2 and F6 must use the
 ///   same word type.
 template <typename F2, typename F6, typename W>
-inline size_t AdvancedProcessBlocks128_6x2_SSE(F2 func2, F6 func6,
+CRYPTOPP_INLINE size_t AdvancedProcessBlocks128_6x2_SSE(F2 func2, F6 func6,
     MAYBE_CONST W *subKeys, size_t rounds, const byte *inBlocks,
     const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
 {
@@ -1483,7 +1491,7 @@ inline size_t AdvancedProcessBlocks128_6x2_SSE(F2 func2, F6 func6,
 /// \details The subkey type is usually word32 or word64. F1 and F4 must use the
 ///   same word type.
 template <typename F1, typename F4, typename W>
-inline size_t AdvancedProcessBlocks128_4x1_SSE(F1 func1, F4 func4,
+CRYPTOPP_INLINE size_t AdvancedProcessBlocks128_4x1_SSE(F1 func1, F4 func4,
     MAYBE_CONST W *subKeys, size_t rounds, const byte *inBlocks,
     const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
 {
@@ -1614,7 +1622,7 @@ inline size_t AdvancedProcessBlocks128_4x1_SSE(F1 func1, F4 func4,
 /// \details The subkey type is usually word32 or word64. F1 and F4 must use the
 ///   same word type.
 template <typename F1, typename F4, typename W>
-inline size_t AdvancedProcessBlocks64_4x1_SSE(F1 func1, F4 func4,
+CRYPTOPP_INLINE size_t AdvancedProcessBlocks64_4x1_SSE(F1 func1, F4 func4,
     MAYBE_CONST W *subKeys, size_t rounds, const byte *inBlocks,
     const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
 {
@@ -1797,7 +1805,7 @@ NAMESPACE_BEGIN(CryptoPP)
 /// \details The subkey type is usually word32 or word64. F2 and F6 must use the
 ///   same word type.
 template <typename F2, typename F6, typename W>
-inline size_t AdvancedProcessBlocks64_6x2_ALTIVEC(F2 func2, F6 func6,
+CRYPTOPP_INLINE size_t AdvancedProcessBlocks64_6x2_ALTIVEC(F2 func2, F6 func6,
     const W *subKeys, size_t rounds, const byte *inBlocks,
     const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
 {
@@ -2068,7 +2076,7 @@ inline size_t AdvancedProcessBlocks64_6x2_ALTIVEC(F2 func2, F6 func6,
 /// \details The subkey type is usually word32 or word64. F1 and F4 must use the
 ///   same word type.
 template <typename F1, typename F4, typename W>
-inline size_t AdvancedProcessBlocks128_4x1_ALTIVEC(F1 func1, F4 func4,
+CRYPTOPP_INLINE size_t AdvancedProcessBlocks128_4x1_ALTIVEC(F1 func1, F4 func4,
     const W *subKeys, size_t rounds, const byte *inBlocks,
     const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
 {
@@ -2213,7 +2221,7 @@ inline size_t AdvancedProcessBlocks128_4x1_ALTIVEC(F1 func1, F4 func4,
 /// \details The subkey type is usually word32 or word64. F1 and F6 must use the
 ///   same word type.
 template <typename F1, typename F6, typename W>
-inline size_t AdvancedProcessBlocks128_6x1_ALTIVEC(F1 func1, F6 func6,
+CRYPTOPP_INLINE size_t AdvancedProcessBlocks128_6x1_ALTIVEC(F1 func1, F6 func6,
     const W *subKeys, size_t rounds, const byte *inBlocks,
     const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
 {
diff --git a/speck.cpp b/speck.cpp
index 9359ae9a..7f6eb31b 100644
--- a/speck.cpp
+++ b/speck.cpp
@@ -7,6 +7,14 @@
 #include "misc.h"
 #include "cpu.h"
 
+#ifndef CRYPTOPP_INLINE
+# if defined(CRYPTOPP_DEBUG)
+#  define CRYPTOPP_INLINE static
+# else
+#  define CRYPTOPP_INLINE inline
+# endif
+#endif
+
 // Uncomment for benchmarking C++ against SSE or NEON.
 // Do so in both speck.cpp and speck-simd.cpp.
 // #undef CRYPTOPP_SSSE3_AVAILABLE
@@ -28,7 +36,7 @@ using CryptoPP::rotrConstant;
 ///   additional template parameters also made calling SPECK_Encrypt and SPECK_Decrypt
 ///   kind of messy.
 template <class W>
-inline void TF83(W& x, W& y, const W k)
+CRYPTOPP_INLINE void TF83(W& x, W& y, const W k)
 {
     x = rotrConstant<8>(x);
     x += y; x ^= k;
@@ -44,7 +52,7 @@ inline void TF83(W& x, W& y, const W k)
 ///   additional template parameters also made calling SPECK_Encrypt and SPECK_Decrypt
 ///   kind of messy.
 template <class W>
-inline void TR83(W& x, W& y, const W k)
+CRYPTOPP_INLINE void TR83(W& x, W& y, const W k)
 {
     y ^= x;
     y = rotrConstant<3>(y);
@@ -59,7 +67,7 @@ inline void TR83(W& x, W& y, const W k)
 /// \param p input array
 /// \param k subkey array
 template <class W, unsigned int R>
-inline void SPECK_Encrypt(W c[2], const W p[2], const W k[R])
+CRYPTOPP_INLINE void SPECK_Encrypt(W c[2], const W p[2], const W k[R])
 {
     c[0]=p[0]; c[1]=p[1];
 
@@ -75,7 +83,7 @@ inline void SPECK_Encrypt(W c[2], const W p[2], const W k[R])
 /// \param c input array
 /// \param k subkey array
 template <class W, unsigned int R>
-inline void SPECK_Decrypt(W p[2], const W c[2], const W k[R])
+CRYPTOPP_INLINE void SPECK_Decrypt(W p[2], const W c[2], const W k[R])
 {
     p[0]=c[0]; p[1]=c[1];
 
@@ -91,7 +99,7 @@ inline void SPECK_Decrypt(W p[2], const W c[2], const W k[R])
 /// \param key empty subkey array
 /// \param k user key array
 template <class W, unsigned int R>
-inline void SPECK_ExpandKey_2W(W key[R], const W k[2])
+CRYPTOPP_INLINE void SPECK_ExpandKey_2W(W key[R], const W k[2])
 {
     CRYPTOPP_ASSERT(R==32);
     W i=0, B=k[0], A=k[1];
@@ -111,7 +119,7 @@ inline void SPECK_ExpandKey_2W(W key[R], const W k[2])
 /// \param key empty subkey array
 /// \param k user key array
 template <class W, unsigned int R>
-inline void SPECK_ExpandKey_3W(W key[R], const W k[3])
+CRYPTOPP_INLINE void SPECK_ExpandKey_3W(W key[R], const W k[3])
 {
     CRYPTOPP_ASSERT(R==33 || R==26);
     W i=0, C=k[0], B=k[1], A=k[2];
@@ -138,7 +146,7 @@ inline void SPECK_ExpandKey_3W(W key[R], const W k[3])
 /// \param key empty subkey array
 /// \param k user key array
 template <class W, unsigned int R>
-inline void SPECK_ExpandKey_4W(W key[R], const W k[4])
+CRYPTOPP_INLINE void SPECK_ExpandKey_4W(W key[R], const W k[4])
 {
     CRYPTOPP_ASSERT(R==34 || R==27);
     W i=0, D=k[0], C=k[1], B=k[2], A=k[3];
@@ -200,11 +208,11 @@ extern size_t SPECK128_Dec_AdvancedProcessBlocks_SSSE3(const word64* subKeys, si
     const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags);
 #endif
 
-#if defined(CRYPTOPP_POWER7_AVAILABLE)
-extern size_t SPECK64_Enc_AdvancedProcessBlocks_POWER7(const word32* subKeys, size_t rounds,
+#if defined(CRYPTOPP_ALTIVEC_AVAILABLE)
+extern size_t SPECK64_Enc_AdvancedProcessBlocks_ALTIVEC(const word32* subKeys, size_t rounds,
     const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags);
-extern size_t SPECK64_Dec_AdvancedProcessBlocks_POWER7(const word32* subKeys, size_t rounds,
+extern size_t SPECK64_Dec_AdvancedProcessBlocks_ALTIVEC(const word32* subKeys, size_t rounds,
     const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags);
 #endif
@@ -227,9 +235,9 @@ std::string SPECK64::Base::AlgorithmProvider() const
     if (HasNEON())
         return "NEON";
 # endif
-# if (CRYPTOPP_POWER7_AVAILABLE)
-    if (HasPower7())
-        return "Power7";
+# if (CRYPTOPP_ALTIVEC_AVAILABLE)
+    if (HasAltivec())
+        return "Altivec";
 # endif
 #endif
     return "C++";
@@ -264,6 +272,12 @@ void SPECK64::Base::UncheckedSetKey(const byte *userKey, unsigned int keyLength,
     default:
         CRYPTOPP_ASSERT(0);;
     }
+
+    // Altivec loads the current subkey as a 16-byte vector.
+    // The extra elements ensure memory backs the last subkey.
+#if CRYPTOPP_ALTIVEC_AVAILABLE
+    m_rkeys.Grow(m_rkeys.size()+4);
+#endif
 }
 
 void SPECK64::Enc::ProcessAndXorBlock(const byte *inBlock, const byte *xorBlock, byte *outBlock) const
@@ -435,9 +449,9 @@ size_t SPECK64::Enc::AdvancedProcessBlocks(const byte *inBlocks, const byte *xor
         return SPECK64_Enc_AdvancedProcessBlocks_NEON(m_rkeys, (size_t)m_rounds,
             inBlocks, xorBlocks, outBlocks, length, flags);
 #endif
-#if (CRYPTOPP_POWER7_AVAILABLE)
-    if (HasPower7())
-        return SPECK64_Enc_AdvancedProcessBlocks_POWER7(m_rkeys, (size_t)m_rounds,
+#if (CRYPTOPP_ALTIVEC_AVAILABLE)
+    if (HasAltivec())
+        return SPECK64_Enc_AdvancedProcessBlocks_ALTIVEC(m_rkeys, (size_t)m_rounds,
             inBlocks, xorBlocks, outBlocks, length, flags);
 #endif
     return BlockTransformation::AdvancedProcessBlocks(inBlocks, xorBlocks, outBlocks, length, flags);
@@ -456,9 +470,9 @@ size_t SPECK64::Dec::AdvancedProcessBlocks(const byte *inBlocks, const byte *xor
         return SPECK64_Dec_AdvancedProcessBlocks_NEON(m_rkeys, (size_t)m_rounds,
             inBlocks, xorBlocks, outBlocks, length, flags);
 #endif
-#if (CRYPTOPP_POWER7_AVAILABLE)
-    if (HasPower7())
-        return SPECK64_Dec_AdvancedProcessBlocks_POWER7(m_rkeys, (size_t)m_rounds,
+#if (CRYPTOPP_ALTIVEC_AVAILABLE)
+    if (HasAltivec())
+        return SPECK64_Dec_AdvancedProcessBlocks_ALTIVEC(m_rkeys, (size_t)m_rounds,
             inBlocks, xorBlocks, outBlocks, length, flags);
 #endif
     return BlockTransformation::AdvancedProcessBlocks(inBlocks, xorBlocks, outBlocks, length, flags);
diff --git a/speck128_simd.cpp b/speck128_simd.cpp
index 75e2c48f..14efeede 100644
--- a/speck128_simd.cpp
+++ b/speck128_simd.cpp
@@ -1,4 +1,4 @@
-// speck-simd.cpp - written and placed in the public domain by Jeffrey Walton
+// speck128_simd.cpp - written and placed in the public domain by Jeffrey Walton
 //
 // This source file uses intrinsics and built-ins to gain access to
 // SSSE3, ARM NEON and ARMv8a, and Power7 Altivec instructions. A separate
@@ -12,6 +12,14 @@
 #include "misc.h"
 #include "adv_simd.h"
 
+#ifndef CRYPTOPP_INLINE
+# if defined(CRYPTOPP_DEBUG)
+#  define CRYPTOPP_INLINE static
+# else
+#  define CRYPTOPP_INLINE inline
+# endif
+#endif
+
 // Uncomment for benchmarking C++ against SSE or NEON.
 // Do so in both speck.cpp and speck-simd.cpp.
 // #undef CRYPTOPP_SSSE3_AVAILABLE
@@ -42,7 +50,7 @@
 # include <arm_acle.h>
 #endif
 
-#if defined(CRYPTOPP_POWER7_AVAILABLE)
+#if defined(CRYPTOPP_POWER8_AVAILABLE)
 # include "ppc_simd.h"
 #endif
@@ -60,7 +68,7 @@ using CryptoPP::word64;
 #if (CRYPTOPP_ARM_NEON_AVAILABLE)
 
 template <class T>
-inline T UnpackHigh64(const T& a, const T& b)
+CRYPTOPP_INLINE T UnpackHigh64(const T& a, const T& b)
 {
     const uint64x1_t x(vget_high_u64((uint64x2_t)a));
     const uint64x1_t y(vget_high_u64((uint64x2_t)b));
@@ -68,7 +76,7 @@ inline T UnpackHigh64(const T& a, const T& b)
 template <class T>
-inline T UnpackLow64(const T& a, const T& b)
+CRYPTOPP_INLINE T UnpackLow64(const T& a, const T& b)
 {
     const uint64x1_t x(vget_low_u64((uint64x2_t)a));
     const uint64x1_t y(vget_low_u64((uint64x2_t)b));
@@ -76,7 +84,7 @@ inline T UnpackLow64(const T& a, const T& b)
 template <unsigned int R>
-inline uint64x2_t RotateLeft64(const uint64x2_t& val)
+CRYPTOPP_INLINE uint64x2_t RotateLeft64(const uint64x2_t& val)
 {
     const uint64x2_t a(vshlq_n_u64(val, R));
     const uint64x2_t b(vshrq_n_u64(val, 64 - R));
@@ -84,7 +92,7 @@ inline uint64x2_t RotateLeft64(const uint64x2_t& val)
 template <unsigned int R>
-inline uint64x2_t RotateRight64(const uint64x2_t& val)
+CRYPTOPP_INLINE uint64x2_t RotateRight64(const uint64x2_t& val)
 {
     const uint64x2_t a(vshlq_n_u64(val, 64 - R));
     const uint64x2_t b(vshrq_n_u64(val, R));
@@ -94,7 +102,7 @@ inline uint64x2_t RotateRight64(const uint64x2_t& val)
 #if defined(__aarch32__) || defined(__aarch64__)
 // Faster than two Shifts and an Or. Thanks to Louis Wingers and Bryan Weeks.
 template <>
-inline uint64x2_t RotateLeft64<8>(const uint64x2_t& val)
+CRYPTOPP_INLINE uint64x2_t RotateLeft64<8>(const uint64x2_t& val)
 {
 #if (CRYPTOPP_BIG_ENDIAN)
     const uint8_t maskb[16] = { 14,13,12,11, 10,9,8,15, 6,5,4,3, 2,1,0,7 };
@@ -110,7 +118,7 @@ inline uint64x2_t RotateLeft64<8>(const uint64x2_t& val)
 // Faster than two Shifts and an Or. Thanks to Louis Wingers and Bryan Weeks.
 template <>
-inline uint64x2_t RotateRight64<8>(const uint64x2_t& val)
+CRYPTOPP_INLINE uint64x2_t RotateRight64<8>(const uint64x2_t& val)
 {
 #if (CRYPTOPP_BIG_ENDIAN)
     const uint8_t maskb[16] = { 8,15,14,13, 12,11,10,9, 0,7,6,5, 4,3,2,1 };
@@ -125,7 +133,7 @@ inline uint64x2_t RotateRight64<8>(const uint64x2_t& val)
 }
 #endif
 
-inline void SPECK128_Enc_Block(uint64x2_t &block0, uint64x2_t &block1,
+CRYPTOPP_INLINE void SPECK128_Enc_Block(uint64x2_t &block0, uint64x2_t &block1,
     const word64 *subkeys, unsigned int rounds)
 {
     // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
@@ -148,7 +156,7 @@ inline void SPECK128_Enc_Block(uint64x2_t &block0, uint64x2_t &block1,
     block1 = UnpackHigh64(y1, x1);
 }
 
-inline void SPECK128_Enc_6_Blocks(uint64x2_t &block0, uint64x2_t &block1,
+CRYPTOPP_INLINE void SPECK128_Enc_6_Blocks(uint64x2_t &block0, uint64x2_t &block1,
     uint64x2_t &block2, uint64x2_t &block3, uint64x2_t &block4,
     uint64x2_t &block5, const word64 *subkeys, unsigned int rounds)
 {
@@ -190,7 +198,7 @@ inline void SPECK128_Enc_6_Blocks(uint64x2_t &block0, uint64x2_t &block1,
     block5 = UnpackHigh64(y3, x3);
 }
 
-inline void SPECK128_Dec_Block(uint64x2_t &block0, uint64x2_t &block1,
+CRYPTOPP_INLINE void SPECK128_Dec_Block(uint64x2_t &block0, uint64x2_t &block1,
     const word64 *subkeys, unsigned int rounds)
 {
     // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
@@ -213,7 +221,7 @@ inline void SPECK128_Dec_Block(uint64x2_t &block0, uint64x2_t &block1,
     block1 = UnpackHigh64(y1, x1);
 }
 
-inline void SPECK128_Dec_6_Blocks(uint64x2_t &block0, uint64x2_t &block1,
+CRYPTOPP_INLINE void SPECK128_Dec_6_Blocks(uint64x2_t &block0, uint64x2_t &block1,
     uint64x2_t &block2, uint64x2_t &block3, uint64x2_t &block4,
     uint64x2_t &block5, const word64 *subkeys, unsigned int rounds)
 {
@@ -278,7 +286,7 @@
 #endif
 
 template <unsigned int R>
-inline __m128i RotateLeft64(const __m128i& val)
+CRYPTOPP_INLINE __m128i RotateLeft64(const __m128i& val)
 {
 #if defined(CRYPTOPP_AVX512_ROTATE)
     return _mm_rol_epi64(val, R);
@@ -291,7 +299,7 @@ inline __m128i RotateLeft64(const __m128i& val)
 template <unsigned int R>
-inline __m128i RotateRight64(const __m128i& val)
+CRYPTOPP_INLINE __m128i RotateRight64(const __m128i& val)
 {
 #if defined(CRYPTOPP_AVX512_ROTATE)
     return _mm_ror_epi64(val, R);
@@ -305,7 +313,7 @@
 // Faster than two Shifts and an Or. Thanks to Louis Wingers and Bryan Weeks.
 template <>
-inline __m128i RotateLeft64<8>(const __m128i& val)
+CRYPTOPP_INLINE __m128i RotateLeft64<8>(const __m128i& val)
 {
 #if defined(__XOP__)
     return _mm_roti_epi64(val, 8);
@@ -317,7 +325,7 @@
 // Faster than two Shifts and an Or. Thanks to Louis Wingers and Bryan Weeks.
 template <>
-inline __m128i RotateRight64<8>(const __m128i& val)
+CRYPTOPP_INLINE __m128i RotateRight64<8>(const __m128i& val)
 {
 #if defined(__XOP__)
     return _mm_roti_epi64(val, 64-8);
@@ -327,7 +335,7 @@
 #endif
 }
 
-inline void SPECK128_Enc_Block(__m128i &block0, __m128i &block1,
+CRYPTOPP_INLINE void SPECK128_Enc_Block(__m128i &block0, __m128i &block1,
     const word64 *subkeys, unsigned int rounds)
 {
     // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
@@ -351,7 +359,7 @@ inline void SPECK128_Enc_Block(__m128i &block0, __m128i &block1,
     block1 = _mm_unpackhi_epi64(y1, x1);
 }
 
-inline void SPECK128_Enc_6_Blocks(__m128i &block0, __m128i &block1,
+CRYPTOPP_INLINE void SPECK128_Enc_6_Blocks(__m128i &block0, __m128i &block1,
     __m128i &block2, __m128i &block3, __m128i &block4,
     __m128i &block5, const word64 *subkeys, unsigned int rounds)
 {
@@ -394,7 +402,7 @@ inline void SPECK128_Enc_6_Blocks(__m128i &block0, __m128i &block1,
     block5 = _mm_unpackhi_epi64(y3, x3);
 }
 
-inline void SPECK128_Dec_Block(__m128i &block0, __m128i &block1,
+CRYPTOPP_INLINE void SPECK128_Dec_Block(__m128i &block0, __m128i &block1,
     const word64 *subkeys, unsigned int rounds)
 {
     // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
@@ -418,7 +426,7 @@ inline void SPECK128_Dec_Block(__m128i &block0, __m128i &block1,
     block1 = _mm_unpackhi_epi64(y1, x1);
 }
 
-inline void SPECK128_Dec_6_Blocks(__m128i &block0, __m128i &block1,
+CRYPTOPP_INLINE void SPECK128_Dec_6_Blocks(__m128i &block0, __m128i &block1,
     __m128i &block2, __m128i &block3, __m128i &block4,
     __m128i &block5, const word64 *subkeys, unsigned int rounds)
 {
@@ -477,7 +485,7 @@ using CryptoPP::VectorXor;
 
 // Rotate left by bit count
 template <unsigned int C>
-inline uint64x2_p RotateLeft64(const uint64x2_p val)
+CRYPTOPP_INLINE uint64x2_p RotateLeft64(const uint64x2_p val)
 {
     const uint64x2_p m = {C, C};
     return vec_rl(val, m);
@@ -485,7 +493,7 @@ inline uint64x2_p RotateLeft64(const uint64x2_p val)
 // Rotate right by bit count
 template <unsigned int C>
-inline uint64x2_p RotateRight64(const uint64x2_p val)
+CRYPTOPP_INLINE uint64x2_p RotateRight64(const uint64x2_p val)
 {
     const uint64x2_p m = {64-C, 64-C};
     return vec_rl(val, m);
diff --git a/speck64_simd.cpp b/speck64_simd.cpp
index 15019ddc..233e8d10 100644
--- a/speck64_simd.cpp
+++ b/speck64_simd.cpp
@@ -1,7 +1,7 @@
-// speck-simd.cpp - written and placed in the public domain by Jeffrey Walton
+// speck64_simd.cpp - written and placed in the public domain by Jeffrey Walton
 //
 // This source file uses intrinsics and built-ins to gain access to
-// SSSE3, ARM NEON and ARMv8a, and Power7 Altivec instructions. A separate
+// SSSE3, ARM NEON and ARMv8a, and Altivec instructions. A separate
 // source file is needed because additional CXXFLAGS are required to enable
 // the appropriate instructions sets in some build configurations.
@@ -46,10 +46,18 @@
 # include <arm_acle.h>
 #endif
 
-#if defined(CRYPTOPP_POWER7_AVAILABLE)
+#if defined(CRYPTOPP_ALTIVEC_AVAILABLE)
 # include "ppc_simd.h"
 #endif
 
+#ifndef CRYPTOPP_INLINE
+# if defined(CRYPTOPP_DEBUG)
+#  define CRYPTOPP_INLINE static
+# else
+#  define CRYPTOPP_INLINE inline
+# endif
+#endif
+
 // Squash MS LNK4221 and libtool warnings
 extern const char SPECK64_SIMD_FNAME[] = __FILE__;
@@ -64,7 +72,7 @@ using CryptoPP::word64;
 #if (CRYPTOPP_ARM_NEON_AVAILABLE)
 
 template <class T>
-inline T UnpackHigh32(const T& a, const T& b)
+CRYPTOPP_INLINE T UnpackHigh32(const T& a, const T& b)
 {
     const uint32x2_t x(vget_high_u32((uint32x4_t)a));
     const uint32x2_t y(vget_high_u32((uint32x4_t)b));
@@ -73,7 +81,7 @@ inline T UnpackHigh32(const T& a, const T& b)
 template <class T>
-inline T UnpackLow32(const T& a, const T& b)
+CRYPTOPP_INLINE T UnpackLow32(const T& a, const T& b)
 {
     const uint32x2_t x(vget_low_u32((uint32x4_t)a));
     const uint32x2_t y(vget_low_u32((uint32x4_t)b));
@@ -82,7 +90,7 @@ inline T UnpackLow32(const T& a, const T& b)
 template <unsigned int R>
-inline uint32x4_t RotateLeft32(const uint32x4_t& val)
+CRYPTOPP_INLINE uint32x4_t RotateLeft32(const uint32x4_t& val)
 {
     const uint32x4_t a(vshlq_n_u32(val, R));
     const uint32x4_t b(vshrq_n_u32(val, 32 - R));
@@ -90,7 +98,7 @@ inline uint32x4_t RotateLeft32(const uint32x4_t& val)
 template <unsigned int R>
-inline uint32x4_t RotateRight32(const uint32x4_t& val)
+CRYPTOPP_INLINE uint32x4_t RotateRight32(const uint32x4_t& val)
 {
     const uint32x4_t a(vshlq_n_u32(val, 32 - R));
     const uint32x4_t b(vshrq_n_u32(val, R));
@@ -100,7 +108,7 @@ inline uint32x4_t RotateRight32(const uint32x4_t& val)
 #if defined(__aarch32__) || defined(__aarch64__)
 // Faster than two Shifts and an Or. Thanks to Louis Wingers and Bryan Weeks.
 template <>
-inline uint32x4_t RotateLeft32<8>(const uint32x4_t& val)
+CRYPTOPP_INLINE uint32x4_t RotateLeft32<8>(const uint32x4_t& val)
 {
 #if (CRYPTOPP_BIG_ENDIAN)
     const uint8_t maskb[16] = { 14,13,12,15, 10,9,8,11, 6,5,4,7, 2,1,0,3 };
@@ -116,7 +124,7 @@ inline uint32x4_t RotateLeft32<8>(const uint32x4_t& val)
 // Faster than two Shifts and an Or. Thanks to Louis Wingers and Bryan Weeks.
 template <>
-inline uint32x4_t RotateRight32<8>(const uint32x4_t& val)
+CRYPTOPP_INLINE uint32x4_t RotateRight32<8>(const uint32x4_t& val)
 {
 #if (CRYPTOPP_BIG_ENDIAN)
     const uint8_t maskb[16] = { 12,15,14,13, 8,11,10,9, 4,7,6,5, 0,3,2,1 };
@@ -131,7 +139,7 @@ inline uint32x4_t RotateRight32<8>(const uint32x4_t& val)
 }
 #endif // Aarch32 or Aarch64
 
-inline void SPECK64_Enc_Block(uint32x4_t &block0, uint32x4_t &block1,
+CRYPTOPP_INLINE void SPECK64_Enc_Block(uint32x4_t &block0, uint32x4_t &block1,
     const word32 *subkeys, unsigned int rounds)
 {
     // [A1 A2 A3 A4][B1 B2 B3 B4] ... => [A1 A3 B1 B3][A2 A4 B2 B4] ...
@@ -154,7 +162,7 @@ inline void SPECK64_Enc_Block(uint32x4_t &block0, uint32x4_t &block1,
     block1 = UnpackHigh32(y1, x1);
 }
 
-inline void SPECK64_Dec_Block(uint32x4_t &block0, uint32x4_t &block1,
+CRYPTOPP_INLINE void SPECK64_Dec_Block(uint32x4_t &block0, uint32x4_t &block1,
     const word32 *subkeys, unsigned int rounds)
 {
     // [A1 A2 A3 A4][B1 B2 B3 B4] ... => [A1 A3 B1 B3][A2 A4 B2 B4] ...
@@ -177,7 +185,7 @@ inline void SPECK64_Dec_Block(uint32x4_t &block0, uint32x4_t &block1,
     block1 = UnpackHigh32(y1, x1);
 }
 
-inline void SPECK64_Enc_6_Blocks(uint32x4_t &block0, uint32x4_t &block1,
+CRYPTOPP_INLINE void SPECK64_Enc_6_Blocks(uint32x4_t &block0, uint32x4_t &block1,
     uint32x4_t &block2, uint32x4_t &block3, uint32x4_t &block4,
     uint32x4_t &block5, const word32 *subkeys, unsigned int rounds)
 {
@@ -219,7 +227,7 @@ inline void SPECK64_Enc_6_Blocks(uint32x4_t &block0, uint32x4_t &block1,
     block5 = UnpackHigh32(y3, x3);
 }
 
-inline void SPECK64_Dec_6_Blocks(uint32x4_t &block0, uint32x4_t &block1,
+CRYPTOPP_INLINE void SPECK64_Dec_6_Blocks(uint32x4_t &block0, uint32x4_t &block1,
     uint32x4_t &block2, uint32x4_t &block3, uint32x4_t &block4,
     uint32x4_t &block5, const word32 *subkeys, unsigned int rounds)
 {
@@ -268,7 +276,7 @@ inline void SPECK64_Dec_6_Blocks(uint32x4_t &block0, uint32x4_t &block1,
 #if defined(CRYPTOPP_SSE41_AVAILABLE)
 
 template <unsigned int R>
-inline __m128i RotateLeft32(const __m128i& val)
+CRYPTOPP_INLINE __m128i RotateLeft32(const __m128i& val)
 {
 #if defined(__XOP__)
     return _mm_roti_epi32(val, R);
@@ -279,7 +287,7 @@ inline __m128i RotateLeft32(const __m128i& val)
 template <unsigned int R>
-inline __m128i RotateRight32(const __m128i& val)
+CRYPTOPP_INLINE __m128i RotateRight32(const __m128i& val)
 {
 #if defined(__XOP__)
     return _mm_roti_epi32(val, 32-R);
@@ -291,7 +299,7 @@ inline __m128i RotateRight32(const __m128i& val)
 // Faster than two Shifts and an Or. Thanks to Louis Wingers and Bryan Weeks.
 template <>
-inline __m128i RotateLeft32<8>(const __m128i& val)
+CRYPTOPP_INLINE __m128i RotateLeft32<8>(const __m128i& val)
 {
 #if defined(__XOP__)
     return _mm_roti_epi32(val, 8);
@@ -303,7 +311,7 @@ inline __m128i RotateLeft32<8>(const __m128i& val)
 // Faster than two Shifts and an Or. Thanks to Louis Wingers and Bryan Weeks.
 template <>
-inline __m128i RotateRight32<8>(const __m128i& val)
+CRYPTOPP_INLINE __m128i RotateRight32<8>(const __m128i& val)
 {
 #if defined(__XOP__)
     return _mm_roti_epi32(val, 32-8);
@@ -313,7 +321,7 @@ inline __m128i RotateRight32<8>(const __m128i& val)
 #endif
 }
 
-inline void SPECK64_Enc_Block(__m128i &block0, __m128i &block1,
+CRYPTOPP_INLINE void SPECK64_Enc_Block(__m128i &block0, __m128i &block1,
     const word32 *subkeys, unsigned int rounds)
 {
     // [A1 A2 A3 A4][B1 B2 B3 B4] ... => [A1 A3 B1 B3][A2 A4 B2 B4] ...
@@ -339,7 +347,7 @@ inline void SPECK64_Enc_Block(__m128i &block0, __m128i &block1,
     block1 = _mm_unpackhi_epi32(y1, x1);
 }
 
-inline void SPECK64_Dec_Block(__m128i &block0, __m128i &block1,
+CRYPTOPP_INLINE void SPECK64_Dec_Block(__m128i &block0, __m128i &block1,
     const word32 *subkeys, unsigned int rounds)
 {
     // [A1 A2 A3 A4][B1 B2 B3 B4] ... => [A1 A3 B1 B3][A2 A4 B2 B4] ...
@@ -365,7 +373,7 @@ inline void SPECK64_Dec_Block(__m128i &block0, __m128i &block1,
     block1 = _mm_unpackhi_epi32(y1, x1);
 }
 
-inline void SPECK64_Enc_6_Blocks(__m128i &block0, __m128i &block1,
+CRYPTOPP_INLINE void SPECK64_Enc_6_Blocks(__m128i &block0, __m128i &block1,
     __m128i &block2, __m128i &block3, __m128i &block4,
     __m128i &block5, const word32 *subkeys, unsigned int rounds)
 {
@@ -416,7 +424,7 @@ inline void SPECK64_Enc_6_Blocks(__m128i &block0, __m128i &block1,
     block5 = _mm_unpackhi_epi32(y3, x3);
 }
 
-inline void SPECK64_Dec_6_Blocks(__m128i &block0, __m128i &block1,
+CRYPTOPP_INLINE void SPECK64_Dec_6_Blocks(__m128i &block0, __m128i &block1,
     __m128i &block2, __m128i &block3, __m128i &block4,
     __m128i &block5, const word32 *subkeys, unsigned int rounds)
 {
@@ -469,19 +477,20 @@ inline void SPECK64_Dec_6_Blocks(__m128i &block0, __m128i &block1,
 
 #endif // CRYPTOPP_SSE41_AVAILABLE
 
-// ***************************** Power7 ***************************** //
+// ***************************** Altivec ***************************** //
 
-#if defined(CRYPTOPP_POWER7_AVAILABLE)
+#if defined(CRYPTOPP_ALTIVEC_AVAILABLE)
 
 using CryptoPP::uint8x16_p;
 using CryptoPP::uint32x4_p;
 
 using CryptoPP::VectorAdd;
 using CryptoPP::VectorSub;
 using CryptoPP::VectorXor;
+using CryptoPP::VectorLoad;
 
 // Rotate left by bit count
 template <unsigned int C>
-inline uint32x4_p RotateLeft32(const uint32x4_p val)
+CRYPTOPP_INLINE uint32x4_p RotateLeft32(const uint32x4_p val)
 {
     const uint32x4_p m = {C, C, C, C};
     return vec_rl(val, m);
@@ -489,7 +498,7 @@ inline uint32x4_p RotateLeft32(const uint32x4_p val)
 // Rotate right by bit count
 template <unsigned int C>
-inline uint32x4_p RotateRight32(const uint32x4_p val)
+CRYPTOPP_INLINE uint32x4_p RotateRight32(const uint32x4_p val)
 {
     const uint32x4_p m = {32-C, 32-C, 32-C, 32-C};
     return vec_rl(val, m);
@@ -512,7 +521,14 @@ void SPECK64_Enc_Block(uint32x4_p &block0, uint32x4_p &block1,
 
     for (int i=0; i < static_cast<int>(rounds); ++i)
     {
+#if CRYPTOPP_POWER7_AVAILABLE
         const uint32x4_p rk = vec_splats(subkeys[i]);
+#else
+        // subkeys has extra elements so memory backs the last subkey
+        const uint8x16_p m = {0,1,2,3, 0,1,2,3, 0,1,2,3, 0,1,2,3};
+        uint32x4_p rk = VectorLoad(0, subkeys+i);
+        rk = vec_perm(rk, rk, m);
+#endif
 
         x1 = RotateRight32<8>(x1);
         x1 = VectorAdd(x1, y1);
@@ -552,7 +568,14 @@ void SPECK64_Dec_Block(uint32x4_p &block0, uint32x4_p &block1,
 
     for (int i = static_cast<int>(rounds-1); i >= 0; --i)
     {
+#if CRYPTOPP_POWER7_AVAILABLE
        const uint32x4_p rk = vec_splats(subkeys[i]);
+#else
+        // subkeys has extra elements so memory backs the last subkey
+        const uint8x16_p m = {0,1,2,3, 0,1,2,3, 0,1,2,3, 0,1,2,3};
+        uint32x4_p rk = VectorLoad(0, subkeys+i);
+        rk = vec_perm(rk, rk, m);
+#endif
 
         y1 = VectorXor(y1, x1);
         y1 = RotateRight32<3>(y1);
@@ -597,7 +620,14 @@ void SPECK64_Enc_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
 
     for (int i=0; i < static_cast<int>(rounds); ++i)
     {
+#if CRYPTOPP_POWER7_AVAILABLE
         const uint32x4_p rk = vec_splats(subkeys[i]);
+#else
+        // subkeys has extra elements so memory backs the last subkey
+        const uint8x16_p m = {0,1,2,3, 0,1,2,3, 0,1,2,3, 0,1,2,3};
+        uint32x4_p rk = VectorLoad(0, subkeys+i);
+        rk = vec_perm(rk, rk, m);
+#endif
 
         x1 = RotateRight32<8>(x1);
         x2 = RotateRight32<8>(x2);
@@ -659,7 +689,14 @@ void SPECK64_Dec_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
 
     for (int i = static_cast<int>(rounds-1); i >= 0; --i)
     {
+#if CRYPTOPP_POWER7_AVAILABLE
         const uint32x4_p rk = vec_splats(subkeys[i]);
+#else
+        // subkeys has extra elements so memory backs the last subkey
+        const uint8x16_p m = {0,1,2,3, 0,1,2,3, 0,1,2,3, 0,1,2,3};
+        uint32x4_p rk = VectorLoad(0, subkeys+i);
+        rk = vec_perm(rk, rk, m);
+#endif
 
         y1 = VectorXor(y1, x1);
         y2 = VectorXor(y2, x2);
@@ -699,7 +736,7 @@ void SPECK64_Dec_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
     block5 = (uint32x4_p)vec_perm(x3, y3, m4);
 }
 
-#endif // CRYPTOPP_POWER7_AVAILABLE
+#endif // CRYPTOPP_ALTIVEC_AVAILABLE
 
 ANONYMOUS_NAMESPACE_END
@@ -743,17 +780,17 @@ size_t SPECK64_Dec_AdvancedProcessBlocks_SSE41(const word32* subKeys, size_t rou
 }
 #endif
 
-// ***************************** Power7 ***************************** //
+// ***************************** Altivec ***************************** //
 
-#if defined(CRYPTOPP_POWER7_AVAILABLE)
-size_t SPECK64_Enc_AdvancedProcessBlocks_POWER7(const word32* subKeys, size_t rounds,
+#if defined(CRYPTOPP_ALTIVEC_AVAILABLE)
+size_t SPECK64_Enc_AdvancedProcessBlocks_ALTIVEC(const word32* subKeys, size_t rounds,
     const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
 {
     return AdvancedProcessBlocks64_6x2_ALTIVEC(SPECK64_Enc_Block, SPECK64_Enc_6_Blocks,
         subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
 }
 
-size_t SPECK64_Dec_AdvancedProcessBlocks_POWER7(const word32* subKeys, size_t rounds,
+size_t SPECK64_Dec_AdvancedProcessBlocks_ALTIVEC(const word32* subKeys, size_t rounds,
    const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
 {
     return AdvancedProcessBlocks64_6x2_ALTIVEC(SPECK64_Dec_Block, SPECK64_Dec_6_Blocks,
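
Reviewer note on the pre-Power7 subkey broadcast: vec_splats needs POWER7, so on plain Altivec the patch broadcasts subkeys[i] by loading a 16-byte vector starting at the subkey and replicating its first word with vec_perm. The standalone sketch below is only an illustration of that idiom and is not part of the patch: the names splat_sketch, LoadUnaligned, and SplatSubkey are hypothetical, it assumes a big-endian Altivec target built with GCC/Clang and -maltivec, and LoadUnaligned merely approximates what Crypto++'s VectorLoad does when POWER7's unaligned vector loads are missing.

// splat_sketch - hypothetical, illustrative only (not part of the patch).
#include <altivec.h>

typedef __vector unsigned char uint8x16_p;
typedef __vector unsigned int  uint32x4_p;

// Classic Altivec unaligned load: two aligned vec_ld's stitched together
// with a permute mask computed from the address by vec_lvsl. vec_ld only
// ever reads aligned 16-byte blocks, so the reads stay within the page
// that holds the requested 16 bytes.
static uint32x4_p LoadUnaligned(const unsigned int* ptr)
{
    const uint8x16_p perm = vec_lvsl(0, ptr);
    const uint8x16_p lo = (uint8x16_p)vec_ld(0, ptr);
    const uint8x16_p hi = (uint8x16_p)vec_ld(15, ptr);
    return (uint32x4_p)vec_perm(lo, hi, perm);
}

// Broadcast subkeys[i] into all four lanes. The 16-byte load covers
// subkeys[i..i+3], i.e. 12 bytes past the 4-byte subkey itself, which is
// why UncheckedSetKey grows m_rkeys by four extra elements above.
static uint32x4_p SplatSubkey(const unsigned int* subkeys, int i)
{
    const uint8x16_p m = {0,1,2,3, 0,1,2,3, 0,1,2,3, 0,1,2,3};
    const uint32x4_p rk = LoadUnaligned(subkeys + i);
    return vec_perm(rk, rk, m);
}

On big-endian POWER, bytes 0-3 of the loaded vector are exactly subkeys[i], so the {0,1,2,3,...} mask reproduces what vec_splats(subkeys[i]) computes on POWER7.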
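Reviewer note on CRYPTOPP_INLINE: mapping the macro to static under CRYPTOPP_DEBUG gives each translation unit its own non-merged copy of these helpers, which (presumably the motivation here) keeps the symbols visible and attributable in debuggers and profilers, while release builds keep plain inline. The #ifndef guard lets a project-wide definition of CRYPTOPP_INLINE take precedence over the per-file fallback repeated in adv_simd.h, speck.cpp, speck128_simd.cpp, and speck64_simd.cpp.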