parent 2c79be7a54
commit 57e3ae309b

ppc-simd.h: 149 changed lines
@@ -1,7 +1,7 @@
 // ppc-simd.h - written and placed in public domain by Jeffrey Walton
 
 /// \file ppc-simd.h
-/// \brief Support functions for PowerPC and Power8 vector operations
+/// \brief Support functions for PowerPC and vector operations
 /// \details This header provides an agnostic interface into GCC and
 ///   IBM XL C/C++ compilers modulo their different built-in functions
 ///   for accessing vector intructions.
@@ -31,26 +31,12 @@ NAMESPACE_BEGIN(CryptoPP)
 
 typedef __vector unsigned char uint8x16_p;
 typedef __vector unsigned int uint32x4_p;
 
 #if defined(CRYPTOPP_POWER5_AVAILABLE)
 typedef __vector unsigned long long uint64x2_p;
 #endif
 
-// Use 8x16 for documentation because it is used frequently
-#if defined(CRYPTOPP_XLC_VERSION)
-typedef uint8x16_p VectorType;
-#elif defined(CRYPTOPP_GCC_VERSION)
-typedef uint64x2_p VectorType;
-#endif
-
-#if defined(CRYPTOPP_DOXYGEN_PROCESSING)
-/// \brief Default vector typedef
-/// \details IBM XL C/C++ provides equally good support for all vector types,
-///   including <tt>uint8x16_p</tt>. GCC provides good support for
-///   <tt>uint64x2_p</tt>. <tt>VectorType</tt> is typedef'd accordingly to
-///   minimize casting to and from buit-in function calls.
-# define VectorType ...
-#endif
-
+/// \brief Default vector type
+typedef uint32x4_p VectorType;
 
 #endif // CRYPTOPP_ALTIVEC_AVAILABLE
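For orientation, here is a minimal sketch of how the retained typedefs behave after this change. It is an editor's illustration rather than code from the commit, and it assumes a POWER8-capable GCC or IBM XL C/C++ toolchain with ppc-simd.h on the include path.

#include "ppc-simd.h"
using namespace CryptoPP;

void TypedefSketch()
{
    // The typedefs alias the compiler's native __vector types, so brace
    // initialization and element access work exactly as for the built-ins.
    uint8x16_p bytes = {0,1,2,3, 4,5,6,7, 8,9,10,11, 12,13,14,15};
    uint32x4_p words = {1, 2, 3, 4};

    // VectorType is now uint32x4_p under both compilers, so the two
    // names can be mixed without a cast.
    VectorType v = words;
    (void)bytes; (void)v;
}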
@@ -64,11 +50,11 @@ typedef uint64x2_p VectorType;
 inline void ReverseByteArrayLE(byte src[16])
 {
 #if defined(CRYPTOPP_XLC_VERSION) && defined(CRYPTOPP_LITTLE_ENDIAN)
     vec_st(vec_reve(vec_ld(0, src)), 0, src);
 #elif defined(CRYPTOPP_LITTLE_ENDIAN)
     const uint8x16_p mask = {15,14,13,12, 11,10,9,8, 7,6,5,4, 3,2,1,0};
     const uint8x16_p zero = {0};
     vec_vsx_st(vec_perm(vec_vsx_ld(0, src), zero, mask), 0, src);
 #endif
 }
 
@@ -81,9 +67,8 @@ inline void ReverseByteArrayLE(byte src[16])
 template <class T>
 inline T Reverse(const T& src)
 {
     const uint8x16_p mask = {15,14,13,12, 11,10,9,8, 7,6,5,4, 3,2,1,0};
-    const uint8x16_p zero = {0};
-    return vec_perm(src, zero, mask);
+    return vec_perm(src, src, mask);
 }
 
 /// \brief Loads a vector from a byte array
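The revised Reverse() permutes the vector with itself instead of with a zero vector; every index in the mask is below 16, so only the first vec_perm operand is ever read and the second operand no longer matters. A self-contained sketch of the same idea (hypothetical caller, assuming a POWER8 target):

#include "ppc-simd.h"
using namespace CryptoPP;

uint8x16_p ReverseBytesSketch(const uint8x16_p& src)
{
    // Indices 15..0 select the bytes of 'src' back to front.
    const uint8x16_p mask = {15,14,13,12, 11,10,9,8, 7,6,5,4, 3,2,1,0};
    return vec_perm(src, src, mask);
}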
@@ -93,15 +78,15 @@ inline T Reverse(const T& src)
 /// \note VectorLoadBE() does not require an aligned array.
 /// \sa Reverse(), VectorLoadBE(), VectorLoad(), VectorLoadKey()
 /// \since Crypto++ 6.0
-inline VectorType VectorLoadBE(const uint8_t src[16])
+inline uint32x4_p VectorLoadBE(const uint8_t src[16])
 {
 #if defined(CRYPTOPP_XLC_VERSION)
-    return (VectorType)vec_xl_be(0, (uint8_t*)src);
+    return (uint32x4_p)vec_xl_be(0, (uint8_t*)src);
 #else
 # if defined(CRYPTOPP_LITTLE_ENDIAN)
-    return (VectorType)Reverse(vec_vsx_ld(0, (uint8_t*)src));
+    return (uint32x4_p)Reverse(vec_vsx_ld(0, (uint8_t*)src));
 # else
-    return (VectorType)vec_vsx_ld(0, (uint8_t*)src);
+    return (uint32x4_p)vec_vsx_ld(0, (uint8_t*)src);
 # endif
 #endif
 }
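A short usage sketch for the retyped loader (editor's illustration, not part of the commit; assumes a POWER8 target):

#include "ppc-simd.h"
using namespace CryptoPP;

void LoadBESketch(const byte block[16])
{
    // VectorLoadBE returns the 16 bytes in big-endian element order on
    // either host endianness; on little-endian it byte-reverses after the
    // unaligned vec_vsx_ld (or lets vec_xl_be do so under XL C/C++).
    uint32x4_p v = VectorLoadBE((const uint8_t*)block);
    (void)v;
}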
@@ -114,15 +99,15 @@ inline VectorType VectorLoadBE(const uint8_t src[16])
 /// \note VectorLoadBE does not require an aligned array.
 /// \sa Reverse(), VectorLoadBE(), VectorLoad(), VectorLoadKey()
 /// \since Crypto++ 6.0
-inline VectorType VectorLoadBE(int off, const uint8_t src[16])
+inline uint32x4_p VectorLoadBE(int off, const uint8_t src[16])
 {
 #if defined(CRYPTOPP_XLC_VERSION)
-    return (VectorType)vec_xl_be(off, (uint8_t*)src);
+    return (uint32x4_p)vec_xl_be(off, (uint8_t*)src);
 #else
 # if defined(CRYPTOPP_LITTLE_ENDIAN)
-    return (VectorType)Reverse(vec_vsx_ld(off, (uint8_t*)src));
+    return (uint32x4_p)Reverse(vec_vsx_ld(off, (uint8_t*)src));
 # else
-    return (VectorType)vec_vsx_ld(off, (uint8_t*)src);
+    return (uint32x4_p)vec_vsx_ld(off, (uint8_t*)src);
 # endif
 #endif
 }
@@ -134,9 +119,9 @@ inline VectorType VectorLoadBE(int off, const uint8_t src[16])
 /// \note VectorLoad does not require an aligned array.
 /// \sa Reverse(), VectorLoadBE(), VectorLoad(), VectorLoadKey()
 /// \since Crypto++ 6.0
-inline VectorType VectorLoad(const byte src[16])
+inline uint32x4_p VectorLoad(const byte src[16])
 {
-    return (VectorType)VectorLoadBE((uint8_t*)src);
+    return (uint32x4_p)VectorLoadBE((uint8_t*)src);
 }
 
 /// \brief Loads a vector from a byte array
@@ -147,9 +132,9 @@ inline VectorType VectorLoad(const byte src[16])
 /// \note VectorLoad does not require an aligned array.
 /// \sa Reverse(), VectorLoadBE(), VectorLoad(), VectorLoadKey()
 /// \since Crypto++ 6.0
-inline VectorType VectorLoad(int off, const byte src[16])
+inline uint32x4_p VectorLoad(int off, const byte src[16])
 {
-    return (VectorType)VectorLoadBE(off, (uint8_t*)src);
+    return (uint32x4_p)VectorLoadBE(off, (uint8_t*)src);
 }
 
 /// \brief Loads a vector from a byte array
@@ -159,12 +144,12 @@ inline VectorType VectorLoad(int off, const byte src[16])
 /// \note VectorLoadKey does not require an aligned array.
 /// \sa Reverse(), VectorLoadBE(), VectorLoad(), VectorLoadKey()
 /// \since Crypto++ 6.0
-inline VectorType VectorLoadKey(const byte src[16])
+inline uint32x4_p VectorLoadKey(const byte src[16])
 {
 #if defined(CRYPTOPP_XLC_VERSION)
-    return (VectorType)vec_xl(0, (uint8_t*)src);
+    return (uint32x4_p)vec_xl(0, (uint8_t*)src);
 #else
-    return (VectorType)vec_vsx_ld(0, (uint8_t*)src);
+    return (uint32x4_p)vec_vsx_ld(0, (uint8_t*)src);
 #endif
 }
 
@@ -175,12 +160,12 @@ inline VectorType VectorLoadKey(const byte src[16])
 /// \note VectorLoadKey does not require an aligned array.
 /// \sa Reverse(), VectorLoadBE(), VectorLoad(), VectorLoadKey()
 /// \since Crypto++ 6.0
-inline VectorType VectorLoadKey(const word32 src[4])
+inline uint32x4_p VectorLoadKey(const word32 src[4])
 {
 #if defined(CRYPTOPP_XLC_VERSION)
-    return (VectorType)vec_xl(0, (uint8_t*)src);
+    return (uint32x4_p)vec_xl(0, (uint8_t*)src);
 #else
-    return (VectorType)vec_vsx_ld(0, (uint8_t*)src);
+    return (uint32x4_p)vec_vsx_ld(0, (uint8_t*)src);
 #endif
 }
 
@@ -192,12 +177,12 @@ inline VectorType VectorLoadKey(const word32 src[4])
 /// \note VectorLoadKey does not require an aligned array.
 /// \sa Reverse(), VectorLoadBE(), VectorLoad(), VectorLoadKey()
 /// \since Crypto++ 6.0
-inline VectorType VectorLoadKey(int off, const byte src[16])
+inline uint32x4_p VectorLoadKey(int off, const byte src[16])
 {
 #if defined(CRYPTOPP_XLC_VERSION)
-    return (VectorType)vec_xl(off, (uint8_t*)src);
+    return (uint32x4_p)vec_xl(off, (uint8_t*)src);
 #else
-    return (VectorType)vec_vsx_ld(off, (uint8_t*)src);
+    return (uint32x4_p)vec_vsx_ld(off, (uint8_t*)src);
 #endif
 }
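Unlike VectorLoadBE()/VectorLoad(), the VectorLoadKey() overloads perform a plain vec_xl/vec_vsx_ld with no Reverse() fix-up, so the data is used exactly as it sits in memory. A brief sketch (hypothetical round-key buffer, editor's illustration):

#include "ppc-simd.h"
using namespace CryptoPP;

uint32x4_p LoadRoundKeySketch(const word32 rk[4])
{
    // No endian conversion: the four words are taken in native order.
    return VectorLoadKey(rk);
}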
@@ -214,12 +199,12 @@ template <class T>
 inline void VectorStoreBE(const T& src, uint8_t dest[16])
 {
 #if defined(CRYPTOPP_XLC_VERSION)
     vec_xst_be((uint8x16_p)src, 0, (uint8_t*)dest);
 #else
 # if defined(CRYPTOPP_LITTLE_ENDIAN)
     vec_vsx_st(Reverse((uint8x16_p)src), 0, (uint8_t*)dest);
 # else
     vec_vsx_st((uint8x16_p)src, 0, (uint8_t*)dest);
 # endif
 #endif
 }
@@ -237,12 +222,12 @@ template <class T>
 inline void VectorStoreBE(const T& src, int off, uint8_t dest[16])
 {
 #if defined(CRYPTOPP_XLC_VERSION)
     vec_xst_be((uint8x16_p)src, off, (uint8_t*)dest);
 #else
 # if defined(CRYPTOPP_LITTLE_ENDIAN)
     vec_vsx_st(Reverse((uint8x16_p)src), off, (uint8_t*)dest);
 # else
     vec_vsx_st((uint8x16_p)src, off, (uint8_t*)dest);
 # endif
 #endif
 }
@@ -258,14 +243,14 @@ inline void VectorStoreBE(const T& src, int off, uint8_t dest[16])
 template<class T>
 inline void VectorStore(const T& src, byte dest[16])
 {
     // Do not call VectorStoreBE. It slows us down by about 0.5 cpb on LE.
 #if defined(CRYPTOPP_XLC_VERSION)
     vec_xst_be((uint8x16_p)src, 0, (uint8_t*)dest);
 #else
 # if defined(CRYPTOPP_LITTLE_ENDIAN)
     vec_vsx_st(Reverse((uint8x16_p)src), 0, (uint8_t*)dest);
 # else
     vec_vsx_st((uint8x16_p)src, 0, (uint8_t*)dest);
 # endif
 #endif
 }
@@ -282,14 +267,14 @@ inline void VectorStore(const T& src, byte dest[16])
 template<class T>
 inline void VectorStore(const T& src, int off, byte dest[16])
 {
     // Do not call VectorStoreBE. It slows us down by about 0.5 cpb on LE.
 #if defined(CRYPTOPP_XLC_VERSION)
     vec_xst_be((uint8x16_p)src, off, (uint8_t*)dest);
 #else
 # if defined(CRYPTOPP_LITTLE_ENDIAN)
     vec_vsx_st(Reverse((uint8x16_p)src), off, (uint8_t*)dest);
 # else
     vec_vsx_st((uint8x16_p)src, off, (uint8_t*)dest);
 # endif
 #endif
 }
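The load and store helpers are symmetric, so a load followed by a store reproduces the original bytes on both endiannesses. A round-trip sketch (editor's illustration, assuming a POWER8 target):

#include "ppc-simd.h"
using namespace CryptoPP;

void RoundTripSketch(const byte in[16], byte out[16])
{
    // On little-endian, VectorLoad reverses into big-endian element order
    // and VectorStore reverses back; on big-endian, neither touches the data.
    const uint32x4_p v = VectorLoad(in);
    VectorStore(v, out);   // out[] now equals in[]
}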
@@ -307,7 +292,7 @@ inline void VectorStore(const T& src, int off, byte dest[16])
 template <class T1, class T2>
 inline T1 VectorPermute(const T1& vec1, const T1& vec2, const T2& mask)
 {
     return (T1)vec_perm(vec1, vec2, (uint8x16_p)mask);
 }
 
 /// \brief XOR two vectors
@@ -321,7 +306,7 @@ inline T1 VectorPermute(const T1& vec1, const T1& vec2, const T2& mask)
 template <class T1, class T2>
 inline T1 VectorXor(const T1& vec1, const T2& vec2)
 {
     return (T1)vec_xor(vec1, (T1)vec2);
 }
 
 /// \brief Add two vector
@@ -336,7 +321,7 @@ inline T1 VectorXor(const T1& vec1, const T2& vec2)
 template <class T1, class T2>
 inline T1 VectorAdd(const T1& vec1, const T2& vec2)
 {
     return (T1)vec_add(vec1, (T1)vec2);
 }
 
 /// \brief Shift two vectors left
@@ -365,9 +350,9 @@ template <unsigned int C, class T1, class T2>
 inline T1 VectorShiftLeft(const T1& vec1, const T2& vec2)
 {
 #if defined(CRYPTOPP_LITTLE_ENDIAN)
     return (T1)vec_sld((uint8x16_p)vec2, (uint8x16_p)vec1, 16-C);
 #else
     return (T1)vec_sld((uint8x16_p)vec1, (uint8x16_p)vec2, C);
 #endif
 }
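VectorShiftLeft<C>() wraps vec_sld, which concatenates the two operands and shifts left by C bytes; on little-endian targets the operands are swapped and the count becomes 16-C so both endiannesses yield the same logical result. A call-shape sketch (editor's illustration; the concrete operands are hypothetical):

#include "ppc-simd.h"
using namespace CryptoPP;

uint8x16_p ShiftSketch(const uint8x16_p& a, const uint8x16_p& b)
{
    // In big-endian element terms: the last 12 bytes of 'a' followed by
    // the first 4 bytes of 'b'. The LE branch of VectorShiftLeft mirrors
    // the operands and uses 16-4 to produce the same logical answer.
    return VectorShiftLeft<4>(a, b);
}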
@@ -387,11 +372,11 @@ template <class T1, class T2>
 inline T1 VectorEncrypt(const T1& state, const T2& key)
 {
 #if defined(CRYPTOPP_XLC_VERSION)
-    return (T1)__vcipher((VectorType)state, (VectorType)key);
+    return (T1)__vcipher((uint32x4_p)state, (uint32x4_p)key);
 #elif defined(CRYPTOPP_GCC_VERSION)
-    return (T1)__builtin_crypto_vcipher((VectorType)state, (VectorType)key);
+    return (T1)__builtin_crypto_vcipher((uint32x4_p)state, (uint32x4_p)key);
 #else
     CRYPTOPP_ASSERT(0);
 #endif
 }
 
@@ -407,11 +392,11 @@ template <class T1, class T2>
 inline T1 VectorEncryptLast(const T1& state, const T2& key)
 {
 #if defined(CRYPTOPP_XLC_VERSION)
-    return (T1)__vcipherlast((VectorType)state, (VectorType)key);
+    return (T1)__vcipherlast((uint32x4_p)state, (uint32x4_p)key);
 #elif defined(CRYPTOPP_GCC_VERSION)
-    return (T1)__builtin_crypto_vcipherlast((VectorType)state, (VectorType)key);
+    return (T1)__builtin_crypto_vcipherlast((uint32x4_p)state, (uint32x4_p)key);
 #else
     CRYPTOPP_ASSERT(0);
 #endif
 }
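In use, VectorEncrypt() is applied once per middle round and VectorEncryptLast() closes the sequence, with each round key loaded via VectorLoadKey(). The condensed AES-128 sketch below is an editor's illustration under the assumption of a flat 11x4-word subkey table; it is not code from this commit.

#include "ppc-simd.h"
using namespace CryptoPP;

uint32x4_p EncryptBlockSketch(uint32x4_p block, const word32 subkeys[44])
{
    // Whiten with the first key, run nine full rounds, finish with the
    // final-round primitive (no MixColumns).
    block = VectorXor(block, VectorLoadKey(subkeys));
    for (int i = 1; i < 10; ++i)
        block = VectorEncrypt(block, VectorLoadKey(i*16, (const byte*)subkeys));
    return VectorEncryptLast(block, VectorLoadKey(10*16, (const byte*)subkeys));
}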
@@ -427,11 +412,11 @@ template <class T1, class T2>
 inline T1 VectorDecrypt(const T1& state, const T2& key)
 {
 #if defined(CRYPTOPP_XLC_VERSION)
-    return (T1)__vncipher((VectorType)state, (VectorType)key);
+    return (T1)__vncipher((uint32x4_p)state, (uint32x4_p)key);
 #elif defined(CRYPTOPP_GCC_VERSION)
-    return (T1)__builtin_crypto_vncipher((VectorType)state, (VectorType)key);
+    return (T1)__builtin_crypto_vncipher((uint32x4_p)state, (uint32x4_p)key);
 #else
     CRYPTOPP_ASSERT(0);
 #endif
 }
 
@@ -447,11 +432,11 @@ template <class T1, class T2>
 inline T1 VectorDecryptLast(const T1& state, const T2& key)
 {
 #if defined(CRYPTOPP_XLC_VERSION)
-    return (T1)__vncipherlast((VectorType)state, (VectorType)key);
+    return (T1)__vncipherlast((uint32x4_p)state, (uint32x4_p)key);
 #elif defined(CRYPTOPP_GCC_VERSION)
-    return (T1)__builtin_crypto_vncipherlast((VectorType)state, (VectorType)key);
+    return (T1)__builtin_crypto_vncipherlast((uint32x4_p)state, (uint32x4_p)key);
 #else
     CRYPTOPP_ASSERT(0);
 #endif
 }
@@ -467,11 +452,11 @@ template <int func, int subfunc, class T>
 inline T VectorSHA256(const T& vec)
 {
 #if defined(CRYPTOPP_XLC_VERSION)
     return (T)__vshasigmaw((uint32x4_p)vec, func, subfunc);
 #elif defined(CRYPTOPP_GCC_VERSION)
     return (T)__builtin_crypto_vshasigmaw((uint32x4_p)vec, func, subfunc);
 #else
     CRYPTOPP_ASSERT(0);
 #endif
 }
 
@@ -487,11 +472,11 @@ template <int func, int subfunc, class T>
 inline T VectorSHA512(const T& vec)
 {
 #if defined(CRYPTOPP_XLC_VERSION)
     return (T)__vshasigmad((uint64x2_p)vec, func, subfunc);
 #elif defined(CRYPTOPP_GCC_VERSION)
     return (T)__builtin_crypto_vshasigmad((uint64x2_p)vec, func, subfunc);
 #else
     CRYPTOPP_ASSERT(0);
 #endif
 }
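VectorSHA256() and VectorSHA512() forward their template parameters straight to the vshasigmaw/vshasigmad operands that select which sigma function is computed. A call-shape sketch only (editor's illustration; the <1,0> selector is a placeholder, not a recommendation from this commit):

#include "ppc-simd.h"
using namespace CryptoPP;

uint32x4_p SigmaSketch(const uint32x4_p& x)
{
    // func/subfunc are passed through to the builtin unchanged.
    return VectorSHA256<1,0>(x);
}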