From 43b01973b1dc0c50cd394502f7475e2c1039ad35 Mon Sep 17 00:00:00 2001
From: Jeffrey Walton
Date: Sat, 8 Jun 2019 11:00:11 -0400
Subject: [PATCH] Clear lgtm findings

We did some refactoring and added sse_simd.h. Over time more SSE
functions will likely move into sse_simd.h.
---
 Filelist.txt             |  1 +
 asn.cpp                  | 16 ++++----
 chacha_avx.cpp           | 85 +++++++++++++++++++---------------------
 cryptlib.vcxproj         |  1 +
 cryptlib.vcxproj.filters |  3 ++
 gf2n_simd.cpp            | 26 ++++++------
 sse_simd.h               | 84 +++++++++++++++++++++++++++++++++++++++
 7 files changed, 149 insertions(+), 67 deletions(-)
 create mode 100644 sse_simd.h

diff --git a/Filelist.txt b/Filelist.txt
index 933d1226..141ec0e8 100644
--- a/Filelist.txt
+++ b/Filelist.txt
@@ -358,6 +358,7 @@ square.cpp
 square.h
 squaretb.cpp
 sse_simd.cpp
+sse_simd.h
 stdcpp.h
 strciphr.cpp
 strciphr.h
diff --git a/asn.cpp b/asn.cpp
index ec5a097d..1631f52e 100644
--- a/asn.cpp
+++ b/asn.cpp
@@ -395,25 +395,25 @@ void EncodedObjectFilter::Put(const byte *inString, size_t length)
 }
 
 BERGeneralDecoder::BERGeneralDecoder(BufferedTransformation &inQueue)
-    : m_inQueue(inQueue), m_finished(false)
+    : m_inQueue(inQueue), m_length(0), m_finished(false)
 {
     Init(DefaultTag);
 }
 
 BERGeneralDecoder::BERGeneralDecoder(BufferedTransformation &inQueue, byte asnTag)
-    : m_inQueue(inQueue), m_finished(false)
+    : m_inQueue(inQueue), m_length(0), m_finished(false)
 {
     Init(asnTag);
 }
 
 BERGeneralDecoder::BERGeneralDecoder(BERGeneralDecoder &inQueue)
-    : m_inQueue(inQueue), m_finished(false)
+    : m_inQueue(inQueue), m_length(0), m_finished(false)
 {
     Init(DefaultTag);
 }
 
 BERGeneralDecoder::BERGeneralDecoder(BERGeneralDecoder &inQueue, byte asnTag)
-    : m_inQueue(inQueue), m_finished(false)
+    : m_inQueue(inQueue), m_length(0), m_finished(false)
 {
     Init(asnTag);
 }
@@ -514,22 +514,22 @@ lword BERGeneralDecoder::ReduceLength(lword delta)
 }
 
 DERGeneralEncoder::DERGeneralEncoder(BufferedTransformation &outQueue)
-    : ByteQueue(), m_outQueue(outQueue), m_asnTag(DefaultTag), m_finished(false)
+    : m_outQueue(outQueue), m_asnTag(DefaultTag), m_finished(false)
 {
 }
 
 DERGeneralEncoder::DERGeneralEncoder(BufferedTransformation &outQueue, byte asnTag)
-    : ByteQueue(), m_outQueue(outQueue), m_asnTag(asnTag), m_finished(false)
+    : m_outQueue(outQueue), m_asnTag(asnTag), m_finished(false)
 {
 }
 
 DERGeneralEncoder::DERGeneralEncoder(DERGeneralEncoder &outQueue)
-    : ByteQueue(), m_outQueue(outQueue), m_asnTag(DefaultTag), m_finished(false)
+    : m_outQueue(outQueue), m_asnTag(DefaultTag), m_finished(false)
 {
 }
 
 DERGeneralEncoder::DERGeneralEncoder(DERGeneralEncoder &outQueue, byte asnTag)
-    : ByteQueue(), m_outQueue(outQueue), m_asnTag(asnTag), m_finished(false)
+    : m_outQueue(outQueue), m_asnTag(asnTag), m_finished(false)
 {
 }
 
diff --git a/chacha_avx.cpp b/chacha_avx.cpp
index 20693488..72dc42c2 100644
--- a/chacha_avx.cpp
+++ b/chacha_avx.cpp
@@ -24,6 +24,7 @@
 # include <xmmintrin.h>
 # include <emmintrin.h>
 # include <immintrin.h>
+# include "sse_simd.h"
 #endif
 
 // Squash MS LNK4221 and libtool warnings
@@ -91,14 +92,10 @@ NAMESPACE_BEGIN(CryptoPP)
 void ChaCha_OperateKeystream_AVX2(const word32 *state, const byte* input, byte *output, unsigned int rounds)
 {
-    MAYBE_CONST __m128i* state_mm = (MAYBE_CONST __m128i*)(state);
-    MAYBE_CONST __m256i* input_mm = (MAYBE_CONST __m256i*)(input);
-    __m256i* output_mm = reinterpret_cast<__m256i*>(output);
-
-    const __m256i state0 = _mm256_broadcastsi128_si256(_mm_loadu_si128(state_mm + 0));
-    const __m256i state1 = _mm256_broadcastsi128_si256(_mm_loadu_si128(state_mm + 1));
-    const __m256i state2 =
_mm256_broadcastsi128_si256(_mm_loadu_si128(state_mm + 2)); - const __m256i state3 = _mm256_broadcastsi128_si256(_mm_loadu_si128(state_mm + 3)); + const __m256i state0 = _mm256_broadcastsi128_si256(load_m128i<0>(state)); + const __m256i state1 = _mm256_broadcastsi128_si256(load_m128i<1>(state)); + const __m256i state2 = _mm256_broadcastsi128_si256(load_m128i<2>(state)); + const __m256i state3 = _mm256_broadcastsi128_si256(load_m128i<3>(state)); const __m256i CTR0 = _mm256_set_epi32(0, 0, 0, 0, 0, 0, 0, 4); const __m256i CTR1 = _mm256_set_epi32(0, 0, 0, 1, 0, 0, 0, 5); @@ -304,80 +301,80 @@ void ChaCha_OperateKeystream_AVX2(const word32 *state, const byte* input, byte * X3_3 = _mm256_add_epi32(X3_3, state3); X3_3 = _mm256_add_epi64(X3_3, CTR3); - if (input_mm) + if (input) { - _mm256_storeu_si256(output_mm + 0, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 0), + store_m256i<0>(output, _mm256_xor_si256(load_m256i<0>(input), _mm256_permute2x128_si256(X0_0, X0_1, 1 + (3 << 4)))); - _mm256_storeu_si256(output_mm + 1, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 1), + store_m256i<1>(output, _mm256_xor_si256(load_m256i<1>(input), _mm256_permute2x128_si256(X0_2, X0_3, 1 + (3 << 4)))); - _mm256_storeu_si256(output_mm + 2, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 2), + store_m256i<2>(output, _mm256_xor_si256(load_m256i<2>(input), _mm256_permute2x128_si256(X1_0, X1_1, 1 + (3 << 4)))); - _mm256_storeu_si256(output_mm + 3, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 3), + store_m256i<3>(output, _mm256_xor_si256(load_m256i<3>(input), _mm256_permute2x128_si256(X1_2, X1_3, 1 + (3 << 4)))); } else { - _mm256_storeu_si256(output_mm + 0, _mm256_permute2x128_si256(X0_0, X0_1, 1 + (3 << 4))); - _mm256_storeu_si256(output_mm + 1, _mm256_permute2x128_si256(X0_2, X0_3, 1 + (3 << 4))); - _mm256_storeu_si256(output_mm + 2, _mm256_permute2x128_si256(X1_0, X1_1, 1 + (3 << 4))); - _mm256_storeu_si256(output_mm + 3, _mm256_permute2x128_si256(X1_2, X1_3, 1 + (3 << 4))); + store_m256i<0>(output, _mm256_permute2x128_si256(X0_0, X0_1, 1 + (3 << 4))); + store_m256i<1>(output, _mm256_permute2x128_si256(X0_2, X0_3, 1 + (3 << 4))); + store_m256i<2>(output, _mm256_permute2x128_si256(X1_0, X1_1, 1 + (3 << 4))); + store_m256i<3>(output, _mm256_permute2x128_si256(X1_2, X1_3, 1 + (3 << 4))); } - if (input_mm) + if (input) { - _mm256_storeu_si256(output_mm + 4, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 4), + store_m256i<4>(output, _mm256_xor_si256(load_m256i<4>(input), _mm256_permute2x128_si256(X2_0, X2_1, 1 + (3 << 4)))); - _mm256_storeu_si256(output_mm + 5, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 5), + store_m256i<5>(output, _mm256_xor_si256(load_m256i<5>(input), _mm256_permute2x128_si256(X2_2, X2_3, 1 + (3 << 4)))); - _mm256_storeu_si256(output_mm + 6, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 6), + store_m256i<6>(output, _mm256_xor_si256(load_m256i<6>(input), _mm256_permute2x128_si256(X3_0, X3_1, 1 + (3 << 4)))); - _mm256_storeu_si256(output_mm + 7, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 7), + store_m256i<7>(output, _mm256_xor_si256(load_m256i<7>(input), _mm256_permute2x128_si256(X3_2, X3_3, 1 + (3 << 4)))); } else { - _mm256_storeu_si256(output_mm + 4, _mm256_permute2x128_si256(X2_0, X2_1, 1 + (3 << 4))); - _mm256_storeu_si256(output_mm + 5, _mm256_permute2x128_si256(X2_2, X2_3, 1 + (3 << 4))); - _mm256_storeu_si256(output_mm + 6, _mm256_permute2x128_si256(X3_0, X3_1, 1 + (3 << 4))); - _mm256_storeu_si256(output_mm + 7, _mm256_permute2x128_si256(X3_2, X3_3, 1 + (3 << 4))); + 
store_m256i<4>(output, _mm256_permute2x128_si256(X2_0, X2_1, 1 + (3 << 4))); + store_m256i<5>(output, _mm256_permute2x128_si256(X2_2, X2_3, 1 + (3 << 4))); + store_m256i<6>(output, _mm256_permute2x128_si256(X3_0, X3_1, 1 + (3 << 4))); + store_m256i<7>(output, _mm256_permute2x128_si256(X3_2, X3_3, 1 + (3 << 4))); } - if (input_mm) + if (input) { - _mm256_storeu_si256(output_mm + 8, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 8), + store_m256i<8>(output, _mm256_xor_si256(load_m256i<8>(input), _mm256_permute2x128_si256(X0_0, X0_1, 0 + (2 << 4)))); - _mm256_storeu_si256(output_mm + 9, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 9), + store_m256i<9>(output, _mm256_xor_si256(load_m256i<9>(input), _mm256_permute2x128_si256(X0_2, X0_3, 0 + (2 << 4)))); - _mm256_storeu_si256(output_mm + 10, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 10), + store_m256i<10>(output, _mm256_xor_si256(load_m256i<10>(input), _mm256_permute2x128_si256(X1_0, X1_1, 0 + (2 << 4)))); - _mm256_storeu_si256(output_mm + 11, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 11), + store_m256i<11>(output, _mm256_xor_si256(load_m256i<11>(input), _mm256_permute2x128_si256(X1_2, X1_3, 0 + (2 << 4)))); } else { - _mm256_storeu_si256(output_mm + 8, _mm256_permute2x128_si256(X0_0, X0_1, 0 + (2 << 4))); - _mm256_storeu_si256(output_mm + 9, _mm256_permute2x128_si256(X0_2, X0_3, 0 + (2 << 4))); - _mm256_storeu_si256(output_mm + 10, _mm256_permute2x128_si256(X1_0, X1_1, 0 + (2 << 4))); - _mm256_storeu_si256(output_mm + 11, _mm256_permute2x128_si256(X1_2, X1_3, 0 + (2 << 4))); + store_m256i<8>(output, _mm256_permute2x128_si256(X0_0, X0_1, 0 + (2 << 4))); + store_m256i<9>(output, _mm256_permute2x128_si256(X0_2, X0_3, 0 + (2 << 4))); + store_m256i<10>(output, _mm256_permute2x128_si256(X1_0, X1_1, 0 + (2 << 4))); + store_m256i<11>(output, _mm256_permute2x128_si256(X1_2, X1_3, 0 + (2 << 4))); } - if (input_mm) + if (input) { - _mm256_storeu_si256(output_mm + 12, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 12), + store_m256i<12>(output, _mm256_xor_si256(load_m256i<12>(input), _mm256_permute2x128_si256(X2_0, X2_1, 0 + (2 << 4)))); - _mm256_storeu_si256(output_mm + 13, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 13), + store_m256i<13>(output, _mm256_xor_si256(load_m256i<13>(input), _mm256_permute2x128_si256(X2_2, X2_3, 0 + (2 << 4)))); - _mm256_storeu_si256(output_mm + 14, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 14), + store_m256i<14>(output, _mm256_xor_si256(load_m256i<14>(input), _mm256_permute2x128_si256(X3_0, X3_1, 0 + (2 << 4)))); - _mm256_storeu_si256(output_mm + 15, _mm256_xor_si256(_mm256_loadu_si256(input_mm + 15), + store_m256i<15>(output, _mm256_xor_si256(load_m256i<15>(input), _mm256_permute2x128_si256(X3_2, X3_3, 0 + (2 << 4)))); } else { - _mm256_storeu_si256(output_mm + 12, _mm256_permute2x128_si256(X2_0, X2_1, 0 + (2 << 4))); - _mm256_storeu_si256(output_mm + 13, _mm256_permute2x128_si256(X2_2, X2_3, 0 + (2 << 4))); - _mm256_storeu_si256(output_mm + 14, _mm256_permute2x128_si256(X3_0, X3_1, 0 + (2 << 4))); - _mm256_storeu_si256(output_mm + 15, _mm256_permute2x128_si256(X3_2, X3_3, 0 + (2 << 4))); + store_m256i<12>(output, _mm256_permute2x128_si256(X2_0, X2_1, 0 + (2 << 4))); + store_m256i<13>(output, _mm256_permute2x128_si256(X2_2, X2_3, 0 + (2 << 4))); + store_m256i<14>(output, _mm256_permute2x128_si256(X3_0, X3_1, 0 + (2 << 4))); + store_m256i<15>(output, _mm256_permute2x128_si256(X3_2, X3_3, 0 + (2 << 4))); } // https://software.intel.com/en-us/articles/avoiding-avx-sse-transition-penalties diff --git 
a/cryptlib.vcxproj b/cryptlib.vcxproj
index 4985c096..1f81c046 100644
--- a/cryptlib.vcxproj
+++ b/cryptlib.vcxproj
@@ -538,6 +538,7 @@
+    <ClInclude Include="sse_simd.h" />
diff --git a/cryptlib.vcxproj.filters b/cryptlib.vcxproj.filters
index de140e33..7b91f87b 100644
--- a/cryptlib.vcxproj.filters
+++ b/cryptlib.vcxproj.filters
@@ -993,6 +993,9 @@
       <Filter>Header Files</Filter>
+    <ClInclude Include="sse_simd.h">
+      <Filter>Header Files</Filter>
+    </ClInclude>
       <Filter>Header Files</Filter>
diff --git a/gf2n_simd.cpp b/gf2n_simd.cpp
index 1d4d933f..3bd3cd0e 100644
--- a/gf2n_simd.cpp
+++ b/gf2n_simd.cpp
@@ -28,6 +28,7 @@
 #if (CRYPTOPP_CLMUL_AVAILABLE)
 # include <emmintrin.h>
 # include <wmmintrin.h>
+# include "sse_simd.h"
 #endif
 
 #if (CRYPTOPP_ARM_PMULL_AVAILABLE)
@@ -465,36 +466,31 @@ NAMESPACE_BEGIN(CryptoPP)
 void GF2NT_233_Multiply_Reduce_CLMUL(const word* pA, const word* pB, word* pC)
 {
-    const __m128i* pAA = reinterpret_cast<const __m128i*>(pA);
-    const __m128i* pBB = reinterpret_cast<const __m128i*>(pB);
-    __m128i a0 = _mm_loadu_si128(pAA+0);
-    __m128i a1 = _mm_loadu_si128(pAA+1);
-    __m128i b0 = _mm_loadu_si128(pBB+0);
-    __m128i b1 = _mm_loadu_si128(pBB+1);
+    __m128i a0 = load_m128i<0>(pA);
+    __m128i a1 = load_m128i<1>(pA);
+    __m128i b0 = load_m128i<0>(pB);
+    __m128i b1 = load_m128i<1>(pB);
 
     __m128i c0, c1, c2, c3;
     F2N_Multiply_256x256_CLMUL(c3, c2, c1, c0, a1, a0, b1, b0);
     GF2NT_233_Reduce_CLMUL(c3, c2, c1, c0);
 
-    __m128i* pCC = reinterpret_cast<__m128i*>(pC);
-    _mm_storeu_si128(pCC+0, c0);
-    _mm_storeu_si128(pCC+1, c1);
+    store_m128i<0>(pC, c0);
+    store_m128i<1>(pC, c1);
 }
 
 void GF2NT_233_Square_Reduce_CLMUL(const word* pA, word* pC)
 {
-    const __m128i* pAA = reinterpret_cast<const __m128i*>(pA);
-    __m128i a0 = _mm_loadu_si128(pAA+0);
-    __m128i a1 = _mm_loadu_si128(pAA+1);
+    __m128i a0 = load_m128i<0>(pA);
+    __m128i a1 = load_m128i<1>(pA);
 
     __m128i c0, c1, c2, c3;
     F2N_Square_256_CLMUL(c3, c2, c1, c0, a1, a0);
     GF2NT_233_Reduce_CLMUL(c3, c2, c1, c0);
 
-    __m128i* pCC = reinterpret_cast<__m128i*>(pC);
-    _mm_storeu_si128(pCC+0, c0);
-    _mm_storeu_si128(pCC+1, c1);
+    store_m128i<0>(pC, c0);
+    store_m128i<1>(pC, c1);
 }
 
 #elif (CRYPTOPP_ARM_PMULL_AVAILABLE)
diff --git a/sse_simd.h b/sse_simd.h
new file mode 100644
index 00000000..c77cbd82
--- /dev/null
+++ b/sse_simd.h
@@ -0,0 +1,84 @@
+// sse_simd.h - written and placed in public domain by Jeffrey Walton
+// Helper functions to work with SSE and above. The class file
+// was added after a scan by lgtm.com. We caught some findings
+// that were not problems, but we refactored to squash them.
+
+#ifndef CRYPTOPP_SSE_CRYPTO_H
+#define CRYPTOPP_SSE_CRYPTO_H
+
+#include "config.h"
+
+#if (CRYPTOPP_SSE2_INTRIN_AVAILABLE)
+# include <emmintrin.h>
+#endif
+
+#if (CRYPTOPP_AVX2_AVAILABLE)
+# include <immintrin.h>
+#endif
+
+NAMESPACE_BEGIN(CryptoPP)
+
+#if (CRYPTOPP_SSE2_INTRIN_AVAILABLE)
+
+// N specifies the nth 128-bit element
+template <unsigned int N, class T>
+inline __m128i load_m128i(T* ptr)
+{
+    enum { SCALE=sizeof(__m128i)/sizeof(T) };
+    return _mm_loadu_si128(
+        reinterpret_cast<__m128i*>(ptr+SCALE*N));
+}
+
+// N specifies the nth 128-bit element
+template <unsigned int N, class T>
+inline __m128i load_m128i(const T* ptr)
+{
+    enum { SCALE=sizeof(__m128i)/sizeof(T) };
+    return _mm_loadu_si128(
+        reinterpret_cast<const __m128i*>(ptr+SCALE*N));
+}
+
+// N specifies the nth 128-bit element
+template <unsigned int N, class T>
+inline void store_m128i(T* ptr, __m128i val)
+{
+    enum { SCALE=sizeof(__m128i)/sizeof(T) };
+    return _mm_storeu_si128(
+        reinterpret_cast<__m128i*>(ptr+SCALE*N), val);
+}
+#endif
+
+#if (CRYPTOPP_AVX2_AVAILABLE)
+
+// N specifies the nth 256-bit element
+template <unsigned int N, class T>
+inline __m256i load_m256i(T* ptr)
+{
+    enum { SCALE=sizeof(__m256i)/sizeof(T) };
+    return _mm256_loadu_si256(
+        reinterpret_cast<__m256i*>(ptr+SCALE*N));
+}
+
+// N specifies the nth 256-bit element
+template <unsigned int N, class T>
+inline __m256i load_m256i(const T* ptr)
+{
+    enum { SCALE=sizeof(__m256i)/sizeof(T) };
+    return _mm256_loadu_si256(
+        reinterpret_cast<const __m256i*>(ptr+SCALE*N));
+}
+
+// N specifies the nth 256-bit element
+template <unsigned int N, class T>
+inline void store_m256i(T* ptr, __m256i val)
+{
+    enum { SCALE=sizeof(__m256i)/sizeof(T) };
+    return _mm256_storeu_si256(
+        reinterpret_cast<__m256i*>(ptr+SCALE*N), val);
+}
+
+#endif
+
+NAMESPACE_END
+
+#endif // CRYPTOPP_SSE_CRYPTO_H
\ No newline at end of file
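
Usage sketch (illustrative only, not part of the patch): the helpers keep the reinterpret_casts inside sse_simd.h, so call sites such as gf2n_simd.cpp no longer cast word* to __m128i* themselves. The function Xor256 below is a hypothetical example, and an SSE2 build (CRYPTOPP_SSE2_INTRIN_AVAILABLE) is assumed.

// Illustrative only -- not part of the patch. Assumes an SSE2-capable build
// so that sse_simd.h provides load_m128i/store_m128i; Xor256 is hypothetical.
#include <emmintrin.h>
#include "sse_simd.h"

using CryptoPP::word;
using CryptoPP::load_m128i;
using CryptoPP::store_m128i;

// XOR two 256-bit word buffers into a third, one unaligned 128-bit block at
// a time, with the pointer casts handled by the sse_simd.h wrappers.
inline void Xor256(const word* a, const word* b, word* c)
{
    const __m128i a0 = load_m128i<0>(a);  // first 128-bit block of a
    const __m128i a1 = load_m128i<1>(a);  // second 128-bit block of a
    const __m128i b0 = load_m128i<0>(b);
    const __m128i b1 = load_m128i<1>(b);

    store_m128i<0>(c, _mm_xor_si128(a0, b0));
    store_m128i<1>(c, _mm_xor_si128(a1, b1));
}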