From 26597059d9a1f3559e0ed0ea93f60a244b37c585 Mon Sep 17 00:00:00 2001
From: Jeffrey Walton
Date: Sat, 23 Sep 2017 02:13:16 -0400
Subject: [PATCH] Move to anonymous namespaces in rijndael-simd.cpp

---
 ppc-crypto.h      |   4 +-
 rijndael-simd.cpp | 265 +++++++++++++++++++++++-----------------------
 2 files changed, 136 insertions(+), 133 deletions(-)

diff --git a/ppc-crypto.h b/ppc-crypto.h
index 9545a806..d5fc7987 100644
--- a/ppc-crypto.h
+++ b/ppc-crypto.h
@@ -42,7 +42,7 @@ typedef uint64x2_p8 VectorType;
 
 #if defined(CRYPTOPP_DOXYGEN_PROCESSING)
 //! \brief Default vector typedef
-//! \details IBM XL C/C++ provides equally good support for all vector types, 
+//! \details IBM XL C/C++ provides equally good support for all vector types,
 //! including uint8x16_p8. GCC provides good support for
 //! uint64x2_p8. VectorType is typedef'd accordingly to
 //! minimize casting to and from buit-in function calls.
@@ -345,7 +345,7 @@ inline T1 VectorAdd(const T1& vec1, const T2& vec2)
 //! of bytes. Both vec1 and vec2 are cast to uint8x16_p8. The return
 //! vector is the same type as vec1.
 //! \details On big endian machines VectorShiftLeft() is vec_sld(a, b,
-//! c). On little endian machines VectorShiftLeft() is translated to 
+//! c). On little endian machines VectorShiftLeft() is translated to
 //! vec_sld(b, a, 16-c). You should always call the function as
 //! if on a big endian machine as shown below.
 //!
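The rijndael-simd.cpp changes below hinge on two Crypto++ macros. A minimal
sketch of what they expand to, assuming the definitions in config.h (the
macros themselves are not part of this patch):

    // Assumed expansion; see config.h in the Crypto++ sources.
    #define ANONYMOUS_NAMESPACE_BEGIN namespace {
    #define ANONYMOUS_NAMESPACE_END   }

Wrapping the file-local tables (s_one, s_rcon, s_mask) and the per-block
helpers in an anonymous namespace gives them internal linkage, so identically
named definitions in other translation units cannot collide at link time.
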
diff --git a/rijndael-simd.cpp b/rijndael-simd.cpp
index 3a1f7647..beda89e0 100644
--- a/rijndael-simd.cpp
+++ b/rijndael-simd.cpp
@@ -159,6 +159,8 @@ bool CPU_ProbeAES()
 
 #if (CRYPTOPP_ARM_AES_AVAILABLE)
 
+ANONYMOUS_NAMESPACE_BEGIN
+
 #if defined(IS_LITTLE_ENDIAN)
 const word32 s_one[] = {0, 0, 0, 1<<24};  // uint32x4_t
 #else
@@ -333,6 +335,8 @@ static inline void ARMV8_Dec_6_Blocks(uint8x16_t &block0, uint8x16_t &block1, ui
 	block5 = veorq_u8(block5, key);
 }
 
+ANONYMOUS_NAMESPACE_END
+
 template <typename F1, typename F6>
 size_t Rijndael_AdvancedProcessBlocks_ARMV8(F1 func1, F6 func6, const word32 *subKeys, size_t rounds,
             const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
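
Rijndael_AdvancedProcessBlocks_ARMV8 itself stays outside the anonymous
namespace because the externally visible entry points instantiate it by name.
A sketch of the expected wrapper, mirroring the AESNI wrappers relocated later
in this patch (the helper names ARMV8_Enc_Block and ARMV8_Enc_6_Blocks are
assumptions, inferred from the six-block decrypt helper visible above):

    size_t Rijndael_Enc_AdvancedProcessBlocks_ARMV8(const word32 *subKeys, size_t rounds,
            const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
    {
        // Dispatch through the template with the one-block and six-block kernels
        return Rijndael_AdvancedProcessBlocks_ARMV8(ARMV8_Enc_Block, ARMV8_Enc_6_Blocks,
                    subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
    }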
@@ -471,6 +475,8 @@ size_t Rijndael_Dec_AdvancedProcessBlocks_ARMV8(const word32 *subKeys, size_t ro
 
 #if (CRYPTOPP_AESNI_AVAILABLE)
 
+ANONYMOUS_NAMESPACE_BEGIN
+
 CRYPTOPP_ALIGN_DATA(16)
 const word32 s_one[] = {0, 0, 0, 1<<24};
 
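s_one is the counter-mode increment constant: read as 16 bytes it is fifteen
0x00 bytes followed by a single 0x01, i.e. a big-endian 1 in the last position
of the counter block, which is why the final little-endian word32 is written
as 1<<24. A self-contained illustration (assumes a little-endian machine, as
the surrounding code does):

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    int main()
    {
        const uint32_t s_one[] = {0, 0, 0, 1u << 24};
        uint8_t b[16];
        std::memcpy(b, s_one, 16);                 // view the vector as bytes
        std::printf("%02x %02x\n", b[14], b[15]);  // prints "00 01"
        return 0;
    }
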
@@ -667,33 +673,11 @@ static inline size_t Rijndael_AdvancedProcessBlocks_AESNI(F1 func1, F4 func4,
 	return length;
 }
 
-size_t Rijndael_Enc_AdvancedProcessBlocks_AESNI(const word32 *subKeys, size_t rounds,
-        const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
-{
-	// SunCC workaround
-	MAYBE_CONST word32* sk = MAYBE_UNCONST_CAST(word32*, subKeys);
-	MAYBE_CONST   byte* ib = MAYBE_UNCONST_CAST(byte*,  inBlocks);
-	MAYBE_CONST   byte* xb = MAYBE_UNCONST_CAST(byte*, xorBlocks);
-
-	return Rijndael_AdvancedProcessBlocks_AESNI(AESNI_Enc_Block, AESNI_Enc_4_Blocks,
-                sk, rounds, ib, xb, outBlocks, length, flags);
-}
-
-size_t Rijndael_Dec_AdvancedProcessBlocks_AESNI(const word32 *subKeys, size_t rounds,
-        const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
-{
-	MAYBE_CONST word32* sk = MAYBE_UNCONST_CAST(word32*, subKeys);
-	MAYBE_CONST   byte* ib = MAYBE_UNCONST_CAST(byte*,  inBlocks);
-	MAYBE_CONST   byte* xb = MAYBE_UNCONST_CAST(byte*, xorBlocks);
-
-	return Rijndael_AdvancedProcessBlocks_AESNI(AESNI_Dec_Block, AESNI_Dec_4_Blocks,
-                sk, rounds, ib, xb, outBlocks, length, flags);
-}
+ANONYMOUS_NAMESPACE_END
 
 void Rijndael_UncheckedSetKey_SSE4_AESNI(const byte *userKey, size_t keyLen, word32 *rk, unsigned int rounds)
 {
-	const word32 *ro = s_rconLE, *rc = s_rconLE;
-	CRYPTOPP_UNUSED(ro);
+	const word32 *rc = s_rconLE;
 
 	__m128i temp = _mm_loadu_si128(M128_CAST(userKey+keyLen-16));
 	std::memcpy(rk, userKey, keyLen);
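
Two portability shims appear throughout this file. A sketch of their likely
definitions (assumptions; both live elsewhere in the library): M128_CAST
sidesteps strict-aliasing warnings when loading unaligned bytes with
_mm_loadu_si128, and the MAYBE_CONST pair drops const qualifiers only for
SunCC builds, which the "SunCC workaround" comment suggests mishandle the
const-qualified form.

    #define M128_CAST(x)       ((__m128i *)(void *)(x))
    #define CONST_M128_CAST(x) ((const __m128i *)(const void *)(x))

    #if defined(__SUNPRO_CC)
    # define MAYBE_CONST                          /* no const for SunCC */
    # define MAYBE_UNCONST_CAST(T, x) const_cast<MAYBE_CONST T>(x)
    #else
    # define MAYBE_CONST const
    # define MAYBE_UNCONST_CAST(T, x) (x)
    #endif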
@@ -704,7 +688,6 @@ void Rijndael_UncheckedSetKey_SSE4_AESNI(const byte *userKey, size_t keyLen, wor
 
 	while (true)
 	{
-		CRYPTOPP_ASSERT(rc < ro + COUNTOF(s_rconLE));
 		rk[keyLen/4] = rk[0] ^ _mm_extract_epi32(_mm_aeskeygenassist_si128(temp, 0), 3) ^ *(rc++);
 		rk[keyLen/4+1] = rk[1] ^ rk[keyLen/4];
 		rk[keyLen/4+2] = rk[2] ^ rk[keyLen/4+1];
@@ -717,25 +700,19 @@ void Rijndael_UncheckedSetKey_SSE4_AESNI(const byte *userKey, size_t keyLen, wor
 		{
 			rk[10] = rk[ 4] ^ rk[ 9];
 			rk[11] = rk[ 5] ^ rk[10];
-
-			CRYPTOPP_ASSERT(keySize >= 12);
 			temp = _mm_insert_epi32(temp, rk[11], 3);
 		}
 		else if (keyLen == 32)
 		{
-			CRYPTOPP_ASSERT(keySize >= 12);
 			temp = _mm_insert_epi32(temp, rk[11], 3);
 			rk[12] = rk[ 4] ^ _mm_extract_epi32(_mm_aeskeygenassist_si128(temp, 0), 2);
 			rk[13] = rk[ 5] ^ rk[12];
 			rk[14] = rk[ 6] ^ rk[13];
 			rk[15] = rk[ 7] ^ rk[14];
-
-			CRYPTOPP_ASSERT(keySize >= 16);
 			temp = _mm_insert_epi32(temp, rk[15], 3);
 		}
 		else
 		{
-			CRYPTOPP_ASSERT(keySize >= 8);
 			temp = _mm_insert_epi32(temp, rk[7], 3);
 		}
 
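The expansion loop above leans on _mm_aeskeygenassist_si128: per Intel's
documentation, dword 3 of its result is RotWord(SubWord(X3)) XOR rcon, where
X3 is dword 3 of the source, so with the immediate set to 0 the extract yields
RotWord(SubWord(temp[3])) and the code applies the round constant itself from
the s_rconLE table. The scalar equivalent looks roughly like this (a sketch;
Se stands for the AES forward S-box, which is not defined in this fragment):

    static inline word32 SubWord(word32 w)
    {
        // Apply the S-box to each byte of the word
        return  word32(Se[w & 0xff])                |
               (word32(Se[(w >>  8) & 0xff]) <<  8) |
               (word32(Se[(w >> 16) & 0xff]) << 16) |
               (word32(Se[(w >> 24) & 0xff]) << 24);
    }

    static inline word32 RotWord(word32 w)
    {
        return (w >> 8) | (w << 24);  // FIPS-197 RotWord on a little-endian word
    }

    // One step of the loop above, in scalar form:
    //   rk[keyLen/4] = rk[0] ^ RotWord(SubWord(rk[keyLen/4 - 1])) ^ rcon
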
@@ -764,14 +741,39 @@ void Rijndael_UncheckedSetKeyRev_AESNI(word32 *key, unsigned int rounds)
 
 	*M128_CAST(key+i) = _mm_aesimc_si128(*M128_CAST(key+i));
 }
+
+size_t Rijndael_Enc_AdvancedProcessBlocks_AESNI(const word32 *subKeys, size_t rounds,
+        const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
+{
+	// SunCC workaround
+	MAYBE_CONST word32* sk = MAYBE_UNCONST_CAST(word32*, subKeys);
+	MAYBE_CONST   byte* ib = MAYBE_UNCONST_CAST(byte*,  inBlocks);
+	MAYBE_CONST   byte* xb = MAYBE_UNCONST_CAST(byte*, xorBlocks);
+
+	return Rijndael_AdvancedProcessBlocks_AESNI(AESNI_Enc_Block, AESNI_Enc_4_Blocks,
+                sk, rounds, ib, xb, outBlocks, length, flags);
+}
+
+size_t Rijndael_Dec_AdvancedProcessBlocks_AESNI(const word32 *subKeys, size_t rounds,
+        const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
+{
+	MAYBE_CONST word32* sk = MAYBE_UNCONST_CAST(word32*, subKeys);
+	MAYBE_CONST   byte* ib = MAYBE_UNCONST_CAST(byte*,  inBlocks);
+	MAYBE_CONST   byte* xb = MAYBE_UNCONST_CAST(byte*, xorBlocks);
+
+	return Rijndael_AdvancedProcessBlocks_AESNI(AESNI_Dec_Block, AESNI_Dec_4_Blocks,
+                sk, rounds, ib, xb, outBlocks, length, flags);
+}
+
 #endif  // CRYPTOPP_AESNI_AVAILABLE
 
 // ***************************** Power 8 ***************************** //
 
 #if (CRYPTOPP_POWER8_AES_AVAILABLE)
 
+ANONYMOUS_NAMESPACE_BEGIN
+
 /* Round constants */
-CRYPTOPP_ALIGN_DATA(16)
 static const uint32_t s_rcon[3][4] = {
 #if defined(IS_LITTLE_ENDIAN)
 	{0x01,0x01,0x01,0x01},   /*  1 */
@@ -785,7 +787,6 @@ static const uint32_t s_rcon[3][4] = {
 };
 
 /* Permute mask */
-CRYPTOPP_ALIGN_DATA(16)
 static const uint32_t s_mask[4] = {
 #if defined(IS_LITTLE_ENDIAN)
 	0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d,0x0c0f0e0d
@@ -828,104 +829,6 @@ IncrementPointerAndStore(const uint8x16_p8& r, uint8_t* p)
 	return p;
 }
 
-// We still need rcon and Se to fallback to C/C++ for AES-192 and AES-256.
-// The IBM docs on AES sucks. Intel's docs on AESNI puts IBM to shame.
-void Rijndael_UncheckedSetKey_POWER8(const byte* userKey, size_t keyLen, word32* rk,
-                                     const word32* rc, const byte* Se)
-{
-	const size_t rounds = keyLen / 4 + 6;
-	if (keyLen == 16)
-	{
-		std::memcpy(rk, userKey, keyLen);
-		uint8_t* skptr = (uint8_t*)rk;
-
-		uint8x16_p8 r1 = (uint8x16_p8)VectorLoadKey(skptr);
-		uint8x16_p8 r4 = (uint8x16_p8)VectorLoadKey(s_rcon[0]);
-		uint8x16_p8 r5 = (uint8x16_p8)VectorLoadKey(s_mask);
-
-#if defined(IS_LITTLE_ENDIAN)
-		// Only the user key requires byte reversing.
-		// The subkeys are stored in proper endianess.
-		ReverseByteArrayLE(skptr);
-#endif
-		for (unsigned int i=0; i<rounds-1; i+=2)
-		for (unsigned int i=0; i