diff --git a/cryptlib.vcxproj.filters b/cryptlib.vcxproj.filters
index 0dd63b56..104cf051 100644
--- a/cryptlib.vcxproj.filters
+++ b/cryptlib.vcxproj.filters
@@ -989,7 +989,5 @@
       <Filter>Miscellaneous</Filter>
     </None>
   </ItemGroup>
-
-
 </Project>
\ No newline at end of file
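The hunks below introduce and apply a CRYPTOPP_INLINE macro: helpers become `static` in debug builds (so each translation unit keeps its own out-of-line symbols, visible in stack traces and profilers) and plain `inline` otherwise. A minimal, self-contained sketch of the intended effect; the `Rotl32` helper is hypothetical and only stands in for the rotation helpers in the patch:

```cpp
#include <cstdio>

#define CRYPTOPP_DEBUG 1   // assume a debug build for this demo

// Same shape as the block added to simon128_simd.cpp below.
#ifndef CRYPTOPP_INLINE
# if defined(CRYPTOPP_DEBUG)
#  define CRYPTOPP_INLINE static   // internal linkage; stays out-of-line at -O0
# else
#  define CRYPTOPP_INLINE inline   // release builds: normal inlining candidate
# endif
#endif

// Hypothetical helper, analogous to the patch's rotation helpers.
// Valid for r in [1,31].
CRYPTOPP_INLINE unsigned int Rotl32(unsigned int v, unsigned int r)
{
    return (v << r) | (v >> (32 - r));
}

int main()
{
    std::printf("0x%08x\n", Rotl32(0x01020304u, 8));  // prints 0x02030401
    return 0;
}
```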
diff --git a/simon128_simd.cpp b/simon128_simd.cpp
index cd8a00c5..c1a80546 100644
--- a/simon128_simd.cpp
+++ b/simon128_simd.cpp
@@ -46,6 +46,14 @@
 # include "ppc_simd.h"
 #endif
 
+#ifndef CRYPTOPP_INLINE
+# if defined(CRYPTOPP_DEBUG)
+#  define CRYPTOPP_INLINE static
+# else
+#  define CRYPTOPP_INLINE inline
+# endif
+#endif
+
 // Squash MS LNK4221 and libtool warnings
 extern const char SIMON128_SIMD_FNAME[] = __FILE__;
 
@@ -63,7 +71,7 @@ using CryptoPP::vec_swap; // SunCC
 #if (CRYPTOPP_ARM_NEON_AVAILABLE)
 
 template <class T>
-inline T UnpackHigh64(const T& a, const T& b)
+CRYPTOPP_INLINE T UnpackHigh64(const T& a, const T& b)
 {
     const uint64x1_t x(vget_high_u64((uint64x2_t)a));
     const uint64x1_t y(vget_high_u64((uint64x2_t)b));
@@ -71,7 +79,7 @@ inline T UnpackHigh64(const T& a, const T& b)
 }
 
 template <class T>
-inline T UnpackLow64(const T& a, const T& b)
+CRYPTOPP_INLINE T UnpackLow64(const T& a, const T& b)
 {
     const uint64x1_t x(vget_low_u64((uint64x2_t)a));
     const uint64x1_t y(vget_low_u64((uint64x2_t)b));
@@ -79,7 +87,7 @@ inline T UnpackLow64(const T& a, const T& b)
 }
 
 template <unsigned int R>
-inline uint64x2_t RotateLeft64(const uint64x2_t& val)
+CRYPTOPP_INLINE uint64x2_t RotateLeft64(const uint64x2_t& val)
 {
     const uint64x2_t a(vshlq_n_u64(val, R));
     const uint64x2_t b(vshrq_n_u64(val, 64 - R));
@@ -87,7 +95,7 @@ inline uint64x2_t RotateLeft64(const uint64x2_t& val)
 }
 
 template <unsigned int R>
-inline uint64x2_t RotateRight64(const uint64x2_t& val)
+CRYPTOPP_INLINE uint64x2_t RotateRight64(const uint64x2_t& val)
 {
     const uint64x2_t a(vshlq_n_u64(val, 64 - R));
     const uint64x2_t b(vshrq_n_u64(val, R));
@@ -97,7 +105,7 @@ inline uint64x2_t RotateRight64(const uint64x2_t& val)
 #if defined(__aarch32__) || defined(__aarch64__)
 // Faster than two Shifts and an Or. Thanks to Louis Wingers and Bryan Weeks.
 template <>
-inline uint64x2_t RotateLeft64<8>(const uint64x2_t& val)
+CRYPTOPP_INLINE uint64x2_t RotateLeft64<8>(const uint64x2_t& val)
 {
 #if (CRYPTOPP_BIG_ENDIAN)
     const uint8_t maskb[16] = { 14,13,12,11, 10,9,8,15, 6,5,4,3, 2,1,0,7 };
@@ -113,7 +121,7 @@ inline uint64x2_t RotateLeft64<8>(const uint64x2_t& val)
 
 // Faster than two Shifts and an Or. Thanks to Louis Wingers and Bryan Weeks.
 template <>
-inline uint64x2_t RotateRight64<8>(const uint64x2_t& val)
+CRYPTOPP_INLINE uint64x2_t RotateRight64<8>(const uint64x2_t& val)
 {
 #if (CRYPTOPP_BIG_ENDIAN)
     const uint8_t maskb[16] = { 8,15,14,13, 12,11,10,9, 0,7,6,5, 4,3,2,1 };
@@ -128,13 +136,13 @@ inline uint64x2_t RotateRight64<8>(const uint64x2_t& val)
 }
 #endif
 
-inline uint64x2_t SIMON128_f(const uint64x2_t& val)
+CRYPTOPP_INLINE uint64x2_t SIMON128_f(const uint64x2_t& val)
 {
     return veorq_u64(RotateLeft64<2>(val),
         vandq_u64(RotateLeft64<1>(val), RotateLeft64<8>(val)));
 }
 
-inline void SIMON128_Enc_Block(uint64x2_t &block0, uint64x2_t &block1,
+CRYPTOPP_INLINE void SIMON128_Enc_Block(uint64x2_t &block0, uint64x2_t &block1,
     const word64 *subkeys, unsigned int rounds)
 {
     // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
@@ -163,7 +171,7 @@ inline void SIMON128_Enc_Block(uint64x2_t &block0, uint64x2_t &block1,
     block1 = UnpackHigh64(y1, x1);
 }
 
-inline void SIMON128_Enc_6_Blocks(uint64x2_t &block0, uint64x2_t &block1,
+CRYPTOPP_INLINE void SIMON128_Enc_6_Blocks(uint64x2_t &block0, uint64x2_t &block1,
     uint64x2_t &block2, uint64x2_t &block3, uint64x2_t &block4,
     uint64x2_t &block5, const word64 *subkeys, unsigned int rounds)
 {
@@ -207,7 +215,7 @@ inline void SIMON128_Enc_6_Blocks(uint64x2_t &block0, uint64x2_t &block1,
     block5 = UnpackHigh64(y3, x3);
 }
 
-inline void SIMON128_Dec_Block(uint64x2_t &block0, uint64x2_t &block1,
+CRYPTOPP_INLINE void SIMON128_Dec_Block(uint64x2_t &block0, uint64x2_t &block1,
     const word64 *subkeys, unsigned int rounds)
 {
     // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
@@ -237,7 +245,7 @@ inline void SIMON128_Dec_Block(uint64x2_t &block0, uint64x2_t &block1,
     block1 = UnpackHigh64(y1, x1);
 }
 
-inline void SIMON128_Dec_6_Blocks(uint64x2_t &block0, uint64x2_t &block1,
+CRYPTOPP_INLINE void SIMON128_Dec_6_Blocks(uint64x2_t &block0, uint64x2_t &block1,
     uint64x2_t &block2, uint64x2_t &block3, uint64x2_t &block4,
     uint64x2_t &block5, const word64 *subkeys, unsigned int rounds)
 {
@@ -304,7 +312,7 @@ inline void SIMON128_Dec_6_Blocks(uint64x2_t &block0, uint64x2_t &block1,
 # define CONST_DOUBLE_CAST(x) ((const double *)(const void *)(x))
 #endif
 
-inline void Swap128(__m128i& a,__m128i& b)
+CRYPTOPP_INLINE void Swap128(__m128i& a,__m128i& b)
 {
 #if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x5120)
     // __m128i is an unsigned long long[2], and support for swapping it was not added until C++11.
@@ -316,7 +324,7 @@ inline void Swap128(__m128i& a,__m128i& b)
 }
 
 template <unsigned int R>
-inline __m128i RotateLeft64(const __m128i& val)
+CRYPTOPP_INLINE __m128i RotateLeft64(const __m128i& val)
 {
 #if defined(CRYPTOPP_AVX512_ROTATE)
     return _mm_rol_epi64(val, R);
@@ -329,7 +337,7 @@ inline __m128i RotateLeft64(const __m128i& val)
 }
 
 template <unsigned int R>
-inline __m128i RotateRight64(const __m128i& val)
+CRYPTOPP_INLINE __m128i RotateRight64(const __m128i& val)
 {
 #if defined(CRYPTOPP_AVX512_ROTATE)
     return _mm_ror_epi64(val, R);
@@ -343,7 +351,7 @@ inline __m128i RotateRight64(const __m128i& val)
 
 // Faster than two Shifts and an Or. Thanks to Louis Wingers and Bryan Weeks.
 template <>
-inline __m128i RotateLeft64<8>(const __m128i& val)
+__m128i RotateLeft64<8>(const __m128i& val)
 {
 #if defined(__XOP__)
     return _mm_roti_epi64(val, 8);
@@ -355,7 +363,7 @@ inline __m128i RotateLeft64<8>(const __m128i& val)
 
 // Faster than two Shifts and an Or. Thanks to Louis Wingers and Bryan Weeks.
 template <>
-inline __m128i RotateRight64<8>(const __m128i& val)
+__m128i RotateRight64<8>(const __m128i& val)
 {
 #if defined(__XOP__)
     return _mm_roti_epi64(val, 64-8);
@@ -365,13 +373,13 @@ inline __m128i RotateRight64<8>(const __m128i& val)
 #endif
 }
 
-inline __m128i SIMON128_f(const __m128i& v)
+CRYPTOPP_INLINE __m128i SIMON128_f(const __m128i& v)
 {
     return _mm_xor_si128(RotateLeft64<2>(v),
         _mm_and_si128(RotateLeft64<1>(v), RotateLeft64<8>(v)));
 }
 
-inline void SIMON128_Enc_Block(__m128i &block0, __m128i &block1,
+CRYPTOPP_INLINE void SIMON128_Enc_Block(__m128i &block0, __m128i &block1,
     const word64 *subkeys, unsigned int rounds)
 {
     // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
@@ -403,7 +411,7 @@ inline void SIMON128_Enc_Block(__m128i &block0, __m128i &block1,
     block1 = _mm_unpackhi_epi64(y1, x1);
 }
 
-inline void SIMON128_Enc_6_Blocks(__m128i &block0, __m128i &block1,
+CRYPTOPP_INLINE void SIMON128_Enc_6_Blocks(__m128i &block0, __m128i &block1,
     __m128i &block2, __m128i &block3, __m128i &block4,
     __m128i &block5, const word64 *subkeys, unsigned int rounds)
 {
@@ -449,7 +457,7 @@ inline void SIMON128_Enc_6_Blocks(__m128i &block0, __m128i &block1,
     block5 = _mm_unpackhi_epi64(y3, x3);
 }
 
-inline void SIMON128_Dec_Block(__m128i &block0, __m128i &block1,
+CRYPTOPP_INLINE void SIMON128_Dec_Block(__m128i &block0, __m128i &block1,
     const word64 *subkeys, unsigned int rounds)
 {
     // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
@@ -482,7 +490,7 @@ inline void SIMON128_Dec_Block(__m128i &block0, __m128i &block1,
     block1 = _mm_unpackhi_epi64(y1, x1);
 }
 
-inline void SIMON128_Dec_6_Blocks(__m128i &block0, __m128i &block1,
+CRYPTOPP_INLINE void SIMON128_Dec_6_Blocks(__m128i &block0, __m128i &block1,
     __m128i &block2, __m128i &block3, __m128i &block4,
     __m128i &block5, const word64 *subkeys, unsigned int rounds)
 {
@@ -545,7 +553,7 @@ using CryptoPP::VectorXor;
 
 // Rotate left by bit count
 template <unsigned int C>
-inline uint64x2_p RotateLeft64(const uint64x2_p val)
+CRYPTOPP_INLINE uint64x2_p RotateLeft64(const uint64x2_p val)
 {
     const uint64x2_p m = {C, C};
     return vec_rl(val, m);
@@ -553,19 +561,19 @@ inline uint64x2_p RotateLeft64(const uint64x2_p val)
 }
 
 // Rotate right by bit count
 template <unsigned int C>
-inline uint64x2_p RotateRight64(const uint64x2_p val)
+CRYPTOPP_INLINE uint64x2_p RotateRight64(const uint64x2_p val)
 {
     const uint64x2_p m = {64-C, 64-C};
     return vec_rl(val, m);
 }
 
-inline uint64x2_p SIMON128_f(const uint64x2_p val)
+CRYPTOPP_INLINE uint64x2_p SIMON128_f(const uint64x2_p val)
 {
     return VectorXor(RotateLeft64<2>(val), VectorAnd(RotateLeft64<1>(val), RotateLeft64<8>(val)));
 }
 
-inline void SIMON128_Enc_Block(uint32x4_p &block, const word64 *subkeys, unsigned int rounds)
+CRYPTOPP_INLINE void SIMON128_Enc_Block(uint32x4_p &block, const word64 *subkeys, unsigned int rounds)
 {
 #if (CRYPTOPP_BIG_ENDIAN)
     const uint8x16_p m1 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
@@ -607,7 +615,7 @@ inline void SIMON128_Enc_Block(uint32x4_p &block, const word64 *subkeys, unsigne
     block = (uint32x4_p)vec_perm(x1, y1, m3);
 }
 
-inline void SIMON128_Dec_Block(uint32x4_p &block, const word64 *subkeys, unsigned int rounds)
+CRYPTOPP_INLINE void SIMON128_Dec_Block(uint32x4_p &block, const word64 *subkeys, unsigned int rounds)
 {
 #if (CRYPTOPP_BIG_ENDIAN)
     const uint8x16_p m1 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
@@ -650,7 +658,7 @@ inline void SIMON128_Dec_Block(uint32x4_p &block, const word64 *subkeys, unsigne
     block = (uint32x4_p)vec_perm(x1, y1, m3);
 }
 
-inline void SIMON128_Enc_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
+CRYPTOPP_INLINE void SIMON128_Enc_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
     uint32x4_p &block2, uint32x4_p &block3, uint32x4_p &block4,
     uint32x4_p &block5, const word64 *subkeys, unsigned int rounds)
 {
@@ -709,7 +717,7 @@ inline void SIMON128_Enc_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
     block5 = (uint32x4_p)vec_perm(x3, y3, m4);
 }
 
-inline void SIMON128_Dec_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
+CRYPTOPP_INLINE void SIMON128_Dec_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
     uint32x4_p &block2, uint32x4_p &block3, uint32x4_p &block4,
     uint32x4_p &block5, const word64 *subkeys, unsigned int rounds)
 {
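For orientation, the SIMON128_f helpers retargeted above all compute the SIMON round function f(x) = S^2(x) XOR (S^1(x) AND S^8(x)), where S^j is a left rotation by j bits, two 64-bit lanes at a time. A scalar sketch of what one lane does, with a single Feistel-style round on top (illustration only, not part of the patch; the subkey is hypothetical and the key schedule is omitted):

```cpp
#include <cstdint>
#include <cstdio>

static inline uint64_t rotl64(uint64_t v, unsigned int r)
{
    return (v << r) | (v >> (64 - r));   // r in [1,63]
}

// One lane of the vectorized SIMON128_f above:
// f(x) = S^2(x) ^ (S^1(x) & S^8(x)).
static inline uint64_t simon128_f(uint64_t x)
{
    return rotl64(x, 2) ^ (rotl64(x, 1) & rotl64(x, 8));
}

// One encryption round: (x, y) -> (y ^ f(x) ^ k, x).
static inline void simon128_round(uint64_t& x, uint64_t& y, uint64_t k)
{
    const uint64_t t = x;
    x = y ^ simon128_f(x) ^ k;
    y = t;
}

int main()
{
    uint64_t x = 0x0123456789abcdefULL, y = 0xfedcba9876543210ULL;
    simon128_round(x, y, /*hypothetical subkey*/ 0x1111111111111111ULL);
    std::printf("%016llx %016llx\n",
        (unsigned long long)x, (unsigned long long)y);
    return 0;
}
```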
diff --git a/simon64_simd.cpp b/simon64_simd.cpp
index 2a1a894e..6b1a1db6 100644
--- a/simon64_simd.cpp
+++ b/simon64_simd.cpp
@@ -75,7 +75,7 @@ using CryptoPP::vec_swap; // SunCC
 #if (CRYPTOPP_ARM_NEON_AVAILABLE)
 
 template <class T>
-inline T UnpackHigh32(const T& a, const T& b)
+CRYPTOPP_INLINE T UnpackHigh32(const T& a, const T& b)
 {
     const uint32x2_t x(vget_high_u32((uint32x4_t)a));
     const uint32x2_t y(vget_high_u32((uint32x4_t)b));
@@ -84,7 +84,7 @@ inline T UnpackHigh32(const T& a, const T& b)
 }
 
 template <class T>
-inline T UnpackLow32(const T& a, const T& b)
+CRYPTOPP_INLINE T UnpackLow32(const T& a, const T& b)
 {
     const uint32x2_t x(vget_low_u32((uint32x4_t)a));
     const uint32x2_t y(vget_low_u32((uint32x4_t)b));
@@ -93,7 +93,7 @@ inline T UnpackLow32(const T& a, const T& b)
 }
 
 template <unsigned int R>
-inline uint32x4_t RotateLeft32(const uint32x4_t& val)
+CRYPTOPP_INLINE uint32x4_t RotateLeft32(const uint32x4_t& val)
 {
     const uint32x4_t a(vshlq_n_u32(val, R));
     const uint32x4_t b(vshrq_n_u32(val, 32 - R));
@@ -101,7 +101,7 @@ inline uint32x4_t RotateLeft32(const uint32x4_t& val)
 }
 
 template <unsigned int R>
-inline uint32x4_t RotateRight32(const uint32x4_t& val)
+CRYPTOPP_INLINE uint32x4_t RotateRight32(const uint32x4_t& val)
 {
     const uint32x4_t a(vshlq_n_u32(val, 32 - R));
     const uint32x4_t b(vshrq_n_u32(val, R));
@@ -111,7 +111,7 @@ inline uint32x4_t RotateRight32(const uint32x4_t& val)
 #if defined(__aarch32__) || defined(__aarch64__)
 // Faster than two Shifts and an Or. Thanks to Louis Wingers and Bryan Weeks.
 template <>
-inline uint32x4_t RotateLeft32<8>(const uint32x4_t& val)
+CRYPTOPP_INLINE uint32x4_t RotateLeft32<8>(const uint32x4_t& val)
 {
 #if (CRYPTOPP_BIG_ENDIAN)
     const uint8_t maskb[16] = { 14,13,12,15, 10,9,8,11, 6,5,4,7, 2,1,0,3 };
@@ -127,7 +127,7 @@ inline uint32x4_t RotateLeft32<8>(const uint32x4_t& val)
 
 // Faster than two Shifts and an Or. Thanks to Louis Wingers and Bryan Weeks.
 template <>
-inline uint32x4_t RotateRight32<8>(const uint32x4_t& val)
+CRYPTOPP_INLINE uint32x4_t RotateRight32<8>(const uint32x4_t& val)
 {
 #if (CRYPTOPP_BIG_ENDIAN)
     const uint8_t maskb[16] = { 12,15,14,13, 8,11,10,9, 4,7,6,5, 0,3,2,1 };
@@ -142,13 +142,13 @@ inline uint32x4_t RotateRight32<8>(const uint32x4_t& val)
 }
 #endif
 
-inline uint32x4_t SIMON64_f(const uint32x4_t& val)
+CRYPTOPP_INLINE uint32x4_t SIMON64_f(const uint32x4_t& val)
 {
     return veorq_u32(RotateLeft32<2>(val),
         vandq_u32(RotateLeft32<1>(val), RotateLeft32<8>(val)));
 }
 
-inline void SIMON64_Enc_Block(uint32x4_t &block1, uint32x4_t &block0,
+CRYPTOPP_INLINE void SIMON64_Enc_Block(uint32x4_t &block1, uint32x4_t &block0,
     const word32 *subkeys, unsigned int rounds)
 {
     // [A1 A2 A3 A4][B1 B2 B3 B4] ... => [A1 A3 B1 B3][A2 A4 B2 B4] ...
@@ -177,7 +177,7 @@ inline void SIMON64_Enc_Block(uint32x4_t &block1, uint32x4_t &block0,
     block1 = UnpackHigh32(y1, x1);
 }
 
-inline void SIMON64_Dec_Block(uint32x4_t &block0, uint32x4_t &block1,
+CRYPTOPP_INLINE void SIMON64_Dec_Block(uint32x4_t &block0, uint32x4_t &block1,
     const word32 *subkeys, unsigned int rounds)
 {
     // [A1 A2 A3 A4][B1 B2 B3 B4] ... => [A1 A3 B1 B3][A2 A4 B2 B4] ...
@@ -207,7 +207,7 @@ inline void SIMON64_Dec_Block(uint32x4_t &block0, uint32x4_t &block1,
     block1 = UnpackHigh32(y1, x1);
 }
 
-inline void SIMON64_Enc_6_Blocks(uint32x4_t &block0, uint32x4_t &block1,
+CRYPTOPP_INLINE void SIMON64_Enc_6_Blocks(uint32x4_t &block0, uint32x4_t &block1,
     uint32x4_t &block2, uint32x4_t &block3, uint32x4_t &block4,
     uint32x4_t &block5, const word32 *subkeys, unsigned int rounds)
 {
@@ -251,7 +251,7 @@ inline void SIMON64_Enc_6_Blocks(uint32x4_t &block0, uint32x4_t &block1,
     block5 = UnpackHigh32(y3, x3);
 }
 
-inline void SIMON64_Dec_6_Blocks(uint32x4_t &block0, uint32x4_t &block1,
+CRYPTOPP_INLINE void SIMON64_Dec_6_Blocks(uint32x4_t &block0, uint32x4_t &block1,
     uint32x4_t &block2, uint32x4_t &block3, uint32x4_t &block4,
     uint32x4_t &block5, const word32 *subkeys, unsigned int rounds)
 {
@@ -302,7 +302,7 @@ inline void SIMON64_Dec_6_Blocks(uint32x4_t &block0, uint32x4_t &block1,
 
 #if defined(CRYPTOPP_SSE41_AVAILABLE)
 
-inline void Swap128(__m128i& a,__m128i& b)
+CRYPTOPP_INLINE void Swap128(__m128i& a,__m128i& b)
 {
 #if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x5120)
     // __m128i is an unsigned long long[2], and support for swapping it was not added until C++11.
@@ -314,7 +314,7 @@ inline void Swap128(__m128i& a,__m128i& b)
 }
 
 template <unsigned int R>
-inline __m128i RotateLeft32(const __m128i& val)
+CRYPTOPP_INLINE __m128i RotateLeft32(const __m128i& val)
 {
 #if defined(__XOP__)
     return _mm_roti_epi32(val, R);
@@ -325,7 +325,7 @@ inline __m128i RotateLeft32(const __m128i& val)
 }
 
 template <unsigned int R>
-inline __m128i RotateRight32(const __m128i& val)
+CRYPTOPP_INLINE __m128i RotateRight32(const __m128i& val)
 {
 #if defined(__XOP__)
     return _mm_roti_epi32(val, 32-R);
@@ -337,7 +337,7 @@ inline __m128i RotateRight32(const __m128i& val)
 
 // Faster than two Shifts and an Or. Thanks to Louis Wingers and Bryan Weeks.
 template <>
-inline __m128i RotateLeft32<8>(const __m128i& val)
+__m128i RotateLeft32<8>(const __m128i& val)
 {
 #if defined(__XOP__)
     return _mm_roti_epi32(val, 8);
@@ -349,7 +349,7 @@ inline __m128i RotateLeft32<8>(const __m128i& val)
 
 // Faster than two Shifts and an Or. Thanks to Louis Wingers and Bryan Weeks.
 template <>
-inline __m128i RotateRight32<8>(const __m128i& val)
+__m128i RotateRight32<8>(const __m128i& val)
 {
 #if defined(__XOP__)
     return _mm_roti_epi32(val, 32-8);
@@ -359,13 +359,13 @@ inline __m128i RotateRight32<8>(const __m128i& val)
 #endif
 }
 
-inline __m128i SIMON64_f(const __m128i& v)
+CRYPTOPP_INLINE __m128i SIMON64_f(const __m128i& v)
 {
     return _mm_xor_si128(RotateLeft32<2>(v),
         _mm_and_si128(RotateLeft32<1>(v), RotateLeft32<8>(v)));
 }
 
-inline void SIMON64_Enc_Block(__m128i &block0, __m128i &block1,
+CRYPTOPP_INLINE void SIMON64_Enc_Block(__m128i &block0, __m128i &block1,
     const word32 *subkeys, unsigned int rounds)
 {
     // [A1 A2 A3 A4][B1 B2 B3 B4] ... => [A1 A3 B1 B3][A2 A4 B2 B4] ...
@@ -395,7 +395,7 @@ inline void SIMON64_Enc_Block(__m128i &block0, __m128i &block1,
     block1 = _mm_unpackhi_epi32(y1, x1);
 }
 
-inline void SIMON64_Dec_Block(__m128i &block0, __m128i &block1,
+CRYPTOPP_INLINE void SIMON64_Dec_Block(__m128i &block0, __m128i &block1,
     const word32 *subkeys, unsigned int rounds)
 {
     // [A1 A2 A3 A4][B1 B2 B3 B4] ... => [A1 A3 B1 B3][A2 A4 B2 B4] ...
@@ -426,7 +426,7 @@ inline void SIMON64_Dec_Block(__m128i &block0, __m128i &block1,
     block1 = _mm_unpackhi_epi32(y1, x1);
 }
 
-inline void SIMON64_Enc_6_Blocks(__m128i &block0, __m128i &block1,
+CRYPTOPP_INLINE void SIMON64_Enc_6_Blocks(__m128i &block0, __m128i &block1,
     __m128i &block2, __m128i &block3, __m128i &block4,
     __m128i &block5, const word32 *subkeys, unsigned int rounds)
 {
@@ -477,7 +477,7 @@ inline void SIMON64_Enc_6_Blocks(__m128i &block0, __m128i &block1,
     block5 = _mm_unpackhi_epi32(y3, x3);
 }
 
-inline void SIMON64_Dec_6_Blocks(__m128i &block0, __m128i &block1,
+CRYPTOPP_INLINE void SIMON64_Dec_6_Blocks(__m128i &block0, __m128i &block1,
     __m128i &block2, __m128i &block3, __m128i &block4,
     __m128i &block5, const word32 *subkeys, unsigned int rounds)
 {
@@ -545,7 +545,7 @@ using CryptoPP::VectorLoadBE;
 
 // Rotate left by bit count
 template <unsigned int C>
-inline uint32x4_p RotateLeft32(const uint32x4_p val)
+CRYPTOPP_INLINE uint32x4_p RotateLeft32(const uint32x4_p val)
 {
     const uint32x4_p m = {C, C, C, C};
     return vec_rl(val, m);
@@ -553,19 +553,19 @@ inline uint32x4_p RotateLeft32(const uint32x4_p val)
 }
 
 // Rotate right by bit count
 template <unsigned int C>
-inline uint32x4_p RotateRight32(const uint32x4_p val)
+CRYPTOPP_INLINE uint32x4_p RotateRight32(const uint32x4_p val)
 {
     const uint32x4_p m = {32-C, 32-C, 32-C, 32-C};
     return vec_rl(val, m);
 }
 
-inline uint32x4_p SIMON64_f(const uint32x4_p val)
+CRYPTOPP_INLINE uint32x4_p SIMON64_f(const uint32x4_p val)
 {
     return VectorXor(RotateLeft32<2>(val), VectorAnd(RotateLeft32<1>(val), RotateLeft32<8>(val)));
 }
 
-inline void SIMON64_Enc_Block(uint32x4_p &block0, uint32x4_p &block1,
+CRYPTOPP_INLINE void SIMON64_Enc_Block(uint32x4_p &block0, uint32x4_p &block1,
     const word32 *subkeys, unsigned int rounds)
 {
 #if (CRYPTOPP_BIG_ENDIAN)
@@ -622,7 +622,7 @@ inline void SIMON64_Enc_Block(uint32x4_p &block0, uint32x4_p &block1,
     block1 = (uint32x4_p)vec_perm(x1, y1, m4);
 }
 
-inline void SIMON64_Dec_Block(uint32x4_p &block0, uint32x4_p &block1,
+CRYPTOPP_INLINE void SIMON64_Dec_Block(uint32x4_p &block0, uint32x4_p &block1,
     const word32 *subkeys, unsigned int rounds)
 {
 #if (CRYPTOPP_BIG_ENDIAN)
@@ -680,7 +680,7 @@ inline void SIMON64_Dec_Block(uint32x4_p &block0, uint32x4_p &block1,
     block1 = (uint32x4_p)vec_perm(x1, y1, m4);
 }
 
-inline void SIMON64_Enc_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
+CRYPTOPP_INLINE void SIMON64_Enc_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
     uint32x4_p &block2, uint32x4_p &block3, uint32x4_p &block4,
     uint32x4_p &block5, const word32 *subkeys, unsigned int rounds)
 {
@@ -753,7 +753,7 @@ inline void SIMON64_Enc_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
     block5 = (uint32x4_p)vec_perm(x3, y3, m4);
 }
 
-inline void SIMON64_Dec_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
+CRYPTOPP_INLINE void SIMON64_Dec_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
     uint32x4_p &block2, uint32x4_p &block3, uint32x4_p &block4,
     uint32x4_p &block5, const word32 *subkeys, unsigned int rounds)
 {
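The rotate-by-8 specializations in the SIMON files above and the SPECK files below replace the usual shift-shift-or sequence with a table-driven byte shuffle (vtbl on NEON, pshufb or XOP rotates on x86): a rotation by 8 bits only moves whole bytes, so one permutation does the job. A standalone check of that equivalence for a single 32-bit lane, assuming a little-endian host (illustration only; the 16-byte masks in the patch encode the same idea across a 128-bit vector):

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

// Conventional rotate-left-by-8: two shifts and an or.
static uint32_t RotlShift(uint32_t v)
{
    return (v << 8) | (v >> 24);
}

// The same rotation as a byte permutation, the way vtbl/pshufb consume
// an index table. Little-endian byte order assumed.
static uint32_t RotlShuffle(uint32_t v)
{
    uint8_t in[4], out[4];
    std::memcpy(in, &v, 4);
    const uint8_t tbl[4] = { 3, 0, 1, 2 };   // out[i] = in[tbl[i]]
    for (int i = 0; i < 4; ++i)
        out[i] = in[tbl[i]];
    uint32_t r;
    std::memcpy(&r, out, 4);
    return r;
}

int main()
{
    for (uint32_t v : { 0x01020304u, 0xDEADBEEFu, 0u, 0xFFFFFFFFu })
        assert(RotlShift(v) == RotlShuffle(v));
    return 0;
}
```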
diff --git a/speck128_simd.cpp b/speck128_simd.cpp
index 14efeede..dd1ef08c 100644
--- a/speck128_simd.cpp
+++ b/speck128_simd.cpp
@@ -313,7 +313,7 @@ CRYPTOPP_INLINE __m128i RotateRight64(const __m128i& val)
 
 // Faster than two Shifts and an Or. Thanks to Louis Wingers and Bryan Weeks.
 template <>
-CRYPTOPP_INLINE __m128i RotateLeft64<8>(const __m128i& val)
+__m128i RotateLeft64<8>(const __m128i& val)
 {
 #if defined(__XOP__)
     return _mm_roti_epi64(val, 8);
@@ -325,7 +325,7 @@ CRYPTOPP_INLINE __m128i RotateLeft64<8>(const __m128i& val)
 
 // Faster than two Shifts and an Or. Thanks to Louis Wingers and Bryan Weeks.
 template <>
-CRYPTOPP_INLINE __m128i RotateRight64<8>(const __m128i& val)
+__m128i RotateRight64<8>(const __m128i& val)
 {
 #if defined(__XOP__)
     return _mm_roti_epi64(val, 64-8);
diff --git a/speck64_simd.cpp b/speck64_simd.cpp
index 9f120f45..0ed4f8d3 100644
--- a/speck64_simd.cpp
+++ b/speck64_simd.cpp
@@ -299,7 +299,7 @@ CRYPTOPP_INLINE __m128i RotateRight32(const __m128i& val)
 
 // Faster than two Shifts and an Or. Thanks to Louis Wingers and Bryan Weeks.
 template <>
-CRYPTOPP_INLINE __m128i RotateLeft32<8>(const __m128i& val)
+__m128i RotateLeft32<8>(const __m128i& val)
 {
 #if defined(__XOP__)
     return _mm_roti_epi32(val, 8);
@@ -311,7 +311,7 @@ CRYPTOPP_INLINE __m128i RotateLeft32<8>(const __m128i& val)
 
 // Faster than two Shifts and an Or. Thanks to Louis Wingers and Bryan Weeks.
 template <>
-CRYPTOPP_INLINE __m128i RotateRight32<8>(const __m128i& val)
+__m128i RotateRight32<8>(const __m128i& val)
 {
 #if defined(__XOP__)
     return _mm_roti_epi32(val, 32-8);
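Note the asymmetry in the SSE hunks: the general rotate templates take CRYPTOPP_INLINE, but the `template <>` specializations lose the keyword altogether. A plausible reason (inferred, not stated in the patch) is that CRYPTOPP_INLINE can expand to `static`, and placing a storage class on an explicit specialization is rejected by GCC and Clang. A minimal illustration with a hypothetical `Rot` template:

```cpp
#include <cstdio>

template <unsigned int R>
inline unsigned int Rot(unsigned int v)
{
    return (v << R) | (v >> (32 - R));   // R in [1,31]
}

// OK: the specialization carries no storage class, matching the patch.
template <>
unsigned int Rot<8>(unsigned int v)
{
    return (v << 8) | (v >> 24);
}

// Ill-formed, which is what CRYPTOPP_INLINE would produce in debug builds:
// template <>
// static unsigned int Rot<16>(unsigned int v);
// GCC: "explicit template specialization cannot have a storage class"

int main()
{
    std::printf("%u\n", Rot<8>(1u));   // prints 256
    return 0;
}
```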