From 2fe5177d07dd359bf0452a3ffbac053100e465e5 Mon Sep 17 00:00:00 2001
From: Jeffrey Walton
Date: Thu, 22 Nov 2018 16:54:28 -0500
Subject: [PATCH] Update documentation

---
 ppc_simd.h | 104 ++++++++++++++++++++++++++---------------------------
 1 file changed, 52 insertions(+), 52 deletions(-)

diff --git a/ppc_simd.h b/ppc_simd.h
index 666fc9df..b68c31f9 100644
--- a/ppc_simd.h
+++ b/ppc_simd.h
@@ -70,17 +70,17 @@ NAMESPACE_BEGIN(CryptoPP)
 #if defined(__ALTIVEC__) || defined(CRYPTOPP_DOXYGEN_PROCESSING)
 
 /// \brief Vector of 8-bit elements
-/// \para Wraps
+/// \par Wraps
 ///   __vector unsigned char
 /// \since Crypto++ 6.0
 typedef __vector unsigned char uint8x16_p;
 /// \brief Vector of 16-bit elements
-/// \para Wraps
+/// \par Wraps
 ///   __vector unsigned short
 /// \since Crypto++ 6.0
 typedef __vector unsigned short uint16x8_p;
 /// \brief Vector of 32-bit elements
-/// \para Wraps
+/// \par Wraps
 ///   __vector unsigned int
 /// \since Crypto++ 6.0
 typedef __vector unsigned int uint32x4_p;
@@ -88,7 +88,7 @@ typedef __vector unsigned int uint32x4_p;
 #if defined(_ARCH_PWR8) || defined(CRYPTOPP_DOXYGEN_PROCESSING)
 /// \brief Vector of 64-bit elements
 /// \details uint64x2_p is available on POWER8 and above.
-/// \para Wraps
+/// \par Wraps
 ///   __vector unsigned long long
 /// \since Crypto++ 6.0
 typedef __vector unsigned long long uint64x2_p;
@@ -117,7 +117,7 @@ inline uint32x4_p VecOne()
 /// \param data the vector
 /// \returns vector
 /// \details VecReverse() reverses the bytes in a vector
-/// \para Wraps
+/// \par Wraps
 ///   vec_perm
 /// \since Crypto++ 6.0
 template <class T>
@@ -139,7 +139,7 @@ inline T VecReverse(const T data)
 ///   provide aligned memory adresses.
 /// \details VecLoad_ALTIVEC() is used automatically when POWER7 or above
 ///   and unaligned loads is not available.
-/// \para Wraps
+/// \par Wraps
 ///   vec_ld, vec_lvsl, vec_perm
 /// \since Crypto++ 6.0
 inline uint32x4_p VecLoad_ALTIVEC(const byte src[16])
@@ -167,7 +167,7 @@ inline uint32x4_p VecLoad_ALTIVEC(const byte src[16])
 ///   otherwise.
 ///   vec_lvsl and vec_perm are relatively expensive so you should
 ///   provide aligned memory adresses.
-/// \para Wraps
+/// \par Wraps
 ///   vec_ld, vec_lvsl, vec_perm
 /// \since Crypto++ 6.0
 inline uint32x4_p VecLoad_ALTIVEC(int off, const byte src[16])
@@ -195,7 +195,7 @@ inline uint32x4_p VecLoad_ALTIVEC(int off, const byte src[16])
 ///   is not available. VecLoad_ALTIVEC() can be relatively expensive if
 ///   extra instructions are required to fix up unaligned memory
 ///   addresses.
-/// \para Wraps
+/// \par Wraps
 ///   vec_xlw4, vec_xld2, vec_xl, vec_vsx_ld (and Altivec load)
 /// \since Crypto++ 6.0
 inline uint32x4_p VecLoad(const byte src[16])
@@ -223,7 +223,7 @@ inline uint32x4_p VecLoad(const byte src[16])
 ///   is not available. VecLoad_ALTIVEC() can be relatively expensive if
 ///   extra instructions are required to fix up unaligned memory
 ///   addresses.
-/// \para Wraps
+/// \par Wraps
 ///   vec_xlw4, vec_xld2, vec_xl, vec_vsx_ld (and Altivec load)
 /// \since Crypto++ 6.0
 inline uint32x4_p VecLoad(int off, const byte src[16])
@@ -250,7 +250,7 @@ inline uint32x4_p VecLoad(int off, const byte src[16])
 ///   is not available. VecLoad_ALTIVEC() can be relatively expensive if
 ///   extra instructions are required to fix up unaligned memory
 ///   addresses.
-/// \para Wraps
+/// \par Wraps
 ///   vec_xlw4, vec_xld2, vec_xl, vec_vsx_ld (and Altivec load)
 /// \since Crypto++ 8.0
 inline uint32x4_p VecLoad(const word32 src[4])
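The hunks above cover the unaligned VecLoad overloads. A minimal usage sketch, assuming a Crypto++ source tree (ppc_simd.h on the include path) and a PowerPC compiler with Altivec enabled; the buffer names and contents are illustrative only:

    #include "ppc_simd.h"

    using CryptoPP::byte;
    using CryptoPP::word32;
    using CryptoPP::uint32x4_p;
    using CryptoPP::VecLoad;

    void LoadSketch()
    {
        byte buf[20] = {0};           // deliberately larger than one vector
        word32 w[4] = {1, 2, 3, 4};

        // Unaligned-capable loads on POWER7 and above; older hardware
        // falls back to the VecLoad_ALTIVEC() fixups described above.
        uint32x4_p a = VecLoad(buf);     // bytes 0..15
        uint32x4_p b = VecLoad(4, buf);  // bytes 4..19, byte-offset form
        uint32x4_p c = VecLoad(w);       // word32[4] overload, Crypto++ 8.0
        (void)a; (void)b; (void)c;
    }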
@@ -268,7 +268,7 @@ inline uint32x4_p VecLoad(const word32 src[4])
 ///   is not available. VecLoad_ALTIVEC() can be relatively expensive if
 ///   extra instructions are required to fix up unaligned memory
 ///   addresses.
-/// \para Wraps
+/// \par Wraps
 ///   vec_xlw4, vec_xld2, vec_xl, vec_vsx_ld (and Altivec load)
 /// \since Crypto++ 8.0
 inline uint32x4_p VecLoad(int off, const word32 src[4])
@@ -288,7 +288,7 @@ inline uint32x4_p VecLoad(int off, const word32 src[4])
 ///   extra instructions are required to fix up unaligned memory
 ///   addresses.
 /// \details VecLoad with 64-bit elements is available on POWER8 and above.
-/// \para Wraps
+/// \par Wraps
 ///   vec_xlw4, vec_xld2, vec_xl, vec_vsx_ld (and Altivec load)
 /// \since Crypto++ 8.0
 inline uint64x2_p VecLoad(const word64 src[2])
@@ -307,7 +307,7 @@ inline uint64x2_p VecLoad(const word64 src[2])
 ///   extra instructions are required to fix up unaligned memory
 ///   addresses.
 /// \details VecLoad with 64-bit elements is available on POWER8 and above.
-/// \para Wraps
+/// \par Wraps
 ///   vec_xlw4, vec_xld2, vec_xl, vec_vsx_ld (and Altivec load)
 /// \since Crypto++ 8.0
 inline uint64x2_p VecLoad(int off, const word64 src[2])
@@ -327,7 +327,7 @@ inline uint64x2_p VecLoad(int off, const word64 src[2])
 ///   is not available. VecLoad_ALTIVEC() can be relatively expensive if
 ///   extra instructions are required to fix up unaligned memory
 ///   addresses.
-/// \para Wraps
+/// \par Wraps
 ///   vec_xlw4, vec_xld2, vec_xl, vec_vsx_ld (and Altivec load)
 /// \since Crypto++ 6.0
 inline uint32x4_p VecLoadBE(const byte src[16])
@@ -368,7 +368,7 @@ inline uint32x4_p VecLoadBE(const byte src[16])
 ///   is not available. VecLoad_ALTIVEC() can be relatively expensive if
 ///   extra instructions are required to fix up unaligned memory
 ///   addresses.
-/// \para Wraps
+/// \par Wraps
 ///   vec_xlw4, vec_xld2, vec_xl, vec_vsx_ld (and Altivec load)
 /// \since Crypto++ 6.0
 inline uint32x4_p VecLoadBE(int off, const byte src[16])
@@ -411,7 +411,7 @@ inline uint32x4_p VecLoadBE(int off, const byte src[16])
 ///   memory adresses.
 /// \details VecStore_ALTIVEC() is used automatically when POWER7 or above
 ///   and unaligned loads is not available.
-/// \para Wraps
+/// \par Wraps
 ///   vec_st, vec_ste, vec_lvsr, vec_perm
 /// \since Crypto++ 8.0
 template <class T>
@@ -448,7 +448,7 @@ inline void VecStore_ALTIVEC(const T data, byte dest[16])
 ///   memory adresses.
 /// \details VecStore_ALTIVEC() is used automatically when POWER7 or above
 ///   and unaligned loads is not available.
-/// \para Wraps
+/// \par Wraps
 ///   vec_st, vec_ste, vec_lvsr, vec_perm
 /// \since Crypto++ 8.0
 template <class T>
@@ -484,7 +484,7 @@ inline void VecStore_ALTIVEC(const T data, int off, byte dest[16])
 ///   is not available. VecStore_ALTIVEC() can be relatively expensive if
 ///   extra instructions are required to fix up unaligned memory
 ///   addresses.
-/// \para Wraps
+/// \par Wraps
 ///   vec_xstw4, vec_xstld2, vec_xst, vec_vsx_st (and Altivec store)
 /// \since Crypto++ 6.0
 template <class T>
@@ -515,7 +515,7 @@ inline void VecStore(const T data, byte dest[16])
 ///   is not available. VecStore_ALTIVEC() can be relatively expensive if
 ///   extra instructions are required to fix up unaligned memory
 ///   addresses.
-/// \para Wraps
+/// \par Wraps
 ///   vec_xstw4, vec_xstld2, vec_xst, vec_vsx_st (and Altivec store)
 /// \since Crypto++ 6.0
 template <class T>
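A short round-trip sketch for the store side, under the same assumptions as the previous example; storing and then reloading a vector reproduces the original bytes whether or not the destination is 16-byte aligned:

    #include "ppc_simd.h"

    using CryptoPP::byte;
    using CryptoPP::uint32x4_p;
    using CryptoPP::VecLoad;
    using CryptoPP::VecStore;

    void StoreSketch(const byte src[16], byte dest[20])
    {
        const uint32x4_p v = VecLoad(src);
        VecStore(v, dest);      // bytes 0..15 of dest
        VecStore(v, 4, dest);   // bytes 4..19, byte-offset form
    }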
@@ -545,7 +545,7 @@ inline void VecStore(const T data, int off, byte dest[16])
 ///   is not available. VecStore_ALTIVEC() can be relatively expensive if
 ///   extra instructions are required to fix up unaligned memory
 ///   addresses.
-/// \para Wraps
+/// \par Wraps
 ///   vec_xstw4, vec_xstld2, vec_xst, vec_vsx_st (and Altivec store)
 /// \since Crypto++ 8.0
 template <class T>
@@ -566,7 +566,7 @@ inline void VecStore(const T data, word32 dest[4])
 ///   is not available. VecStore_ALTIVEC() can be relatively expensive if
 ///   extra instructions are required to fix up unaligned memory
 ///   addresses.
-/// \para Wraps
+/// \par Wraps
 ///   vec_xstw4, vec_xstld2, vec_xst, vec_vsx_st (and Altivec store)
 /// \since Crypto++ 8.0
 template <class T>
@@ -587,7 +587,7 @@ inline void VecStore(const T data, int off, word32 dest[4])
 ///   extra instructions are required to fix up unaligned memory
 ///   addresses.
 /// \details VecStore with 64-bit elements is available on POWER8 and above.
-/// \para Wraps
+/// \par Wraps
 ///   vec_xstw4, vec_xstld2, vec_xst, vec_vsx_st (and Altivec store)
 /// \since Crypto++ 8.0
 template <class T>
@@ -609,7 +609,7 @@ inline void VecStore(const T data, word64 dest[2])
 ///   extra instructions are required to fix up unaligned memory
 ///   addresses.
 /// \details VecStore with 64-bit elements is available on POWER8 and above.
-/// \para Wraps
+/// \par Wraps
 ///   vec_xstw4, vec_xstld2, vec_xst, vec_vsx_st (and Altivec store)
 /// \since Crypto++ 8.0
 template <class T>
@@ -630,7 +630,7 @@ inline void VecStore(const T data, int off, word64 dest[2])
 ///   is not available. VecStore_ALTIVEC() can be relatively expensive if
 ///   extra instructions are required to fix up unaligned memory
 ///   addresses.
-/// \para Wraps
+/// \par Wraps
 ///   vec_xstw4, vec_xstld2, vec_xst, vec_vsx_st (and Altivec store)
 /// \since Crypto++ 6.0
 template <class T>
@@ -674,7 +674,7 @@ inline void VecStoreBE(const T data, byte dest[16])
 ///   is not available. VecStore_ALTIVEC() can be relatively expensive if
 ///   extra instructions are required to fix up unaligned memory
 ///   addresses.
-/// \para Wraps
+/// \par Wraps
 ///   vec_xstw4, vec_xstld2, vec_xst, vec_vsx_st (and Altivec store)
 /// \since Crypto++ 6.0
 template <class T>
@@ -717,7 +717,7 @@ inline void VecStoreBE(const T data, int off, byte dest[16])
 ///   is not available. VecStore_ALTIVEC() can be relatively expensive if
 ///   extra instructions are required to fix up unaligned memory
 ///   addresses.
-/// \para Wraps
+/// \par Wraps
 ///   vec_xstw4, vec_xstld2, vec_xst, vec_vsx_st (and Altivec store)
 /// \since Crypto++ 8.0
 template <class T>
@@ -739,7 +739,7 @@ inline void VecStoreBE(const T data, word32 dest[4])
 ///   is not available. VecStore_ALTIVEC() can be relatively expensive if
 ///   extra instructions are required to fix up unaligned memory
 ///   addresses.
-/// \para Wraps
+/// \par Wraps
 ///   vec_xstw4, vec_xstld2, vec_xst, vec_vsx_st (and Altivec store)
 /// \since Crypto++ 8.0
 template <class T>
@@ -759,7 +759,7 @@ inline void VecStoreBE(const T data, int off, word32 dest[4])
 /// \details VecPermute returns a new vector from vec based on
 ///   mask. mask is an uint8x16_p type vector. The return
 ///   vector is the same type as vec.
-/// \para Wraps
+/// \par Wraps
 ///   vec_perm
 /// \since Crypto++ 6.0
 template <class T1, class T2>
@@ -778,7 +778,7 @@ inline T1 VecPermute(const T1 vec, const T2 mask)
 /// \details VecPermute returns a new vector from vec1 and vec2
 ///   based on mask. mask is an uint8x16_p type vector. The return
 ///   vector is the same type as vec1.
-/// \para Wraps
+/// \par Wraps
 ///   vec_perm
 /// \since Crypto++ 6.0
 template <class T1, class T2>
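The permute masks follow the vec_perm convention: byte indices 0..15 select from the first source vector and 16..31 from the second. A sketch with hand-built masks, shown with big endian element numbering (the endian caveats for vec_sld discussed in the \sa notes further down apply to vec_perm as well):

    #include "ppc_simd.h"

    using CryptoPP::uint8x16_p;
    using CryptoPP::VecPermute;

    // Reverse the 16 bytes of v -- the same idea VecReverse() uses.
    uint8x16_p ReverseBytes(const uint8x16_p v)
    {
        const uint8x16_p mask = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
        return VecPermute(v, mask);
    }

    // Two-vector form: indices 16..31 pull bytes from the second vector.
    uint8x16_p InterleaveLowBytes(const uint8x16_p a, const uint8x16_p b)
    {
        const uint8x16_p mask = {0,16,1,17,2,18,3,19,4,20,5,21,6,22,7,23};
        return VecPermute(a, b, mask);
    }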
@@ -795,7 +795,7 @@ inline T1 VecPermute(const T1 vec1, const T1 vec2, const T2 mask)
 /// \returns vector
 /// \details VecAnd returns a new vector from vec1 and vec2. The return
 ///   vector is the same type as vec1.
-/// \para Wraps
+/// \par Wraps
 ///   vec_and
 /// \since Crypto++ 6.0
 template <class T1, class T2>
@@ -812,7 +812,7 @@ inline T1 VecAnd(const T1 vec1, const T2 vec2)
 /// \returns vector
 /// \details VecOr returns a new vector from vec1 and vec2. The return
 ///   vector is the same type as vec1.
-/// \para Wraps
+/// \par Wraps
 ///   vec_or
 /// \since Crypto++ 6.0
 template <class T1, class T2>
@@ -829,7 +829,7 @@ inline T1 VecOr(const T1 vec1, const T2 vec2)
 /// \returns vector
 /// \details VecXor returns a new vector from vec1 and vec2. The return
 ///   vector is the same type as vec1.
-/// \para Wraps
+/// \par Wraps
 ///   vec_xor
 /// \since Crypto++ 6.0
 template <class T1, class T2>
@@ -847,7 +847,7 @@ inline T1 VecXor(const T1 vec1, const T2 vec2)
 /// \details VecAdd returns a new vector from vec1 and vec2.
 ///   vec2 is cast to the same type as vec1. The return vector
 ///   is the same type as vec1.
-/// \para Wraps
+/// \par Wraps
 ///   vec_add
 /// \since Crypto++ 6.0
 template <class T1, class T2>
@@ -864,7 +864,7 @@ inline T1 VecAdd(const T1 vec1, const T2 vec2)
 /// \details VecSub returns a new vector from vec1 and vec2.
 ///   vec2 is cast to the same type as vec1. The return vector
 ///   is the same type as vec1.
-/// \para Wraps
+/// \par Wraps
 ///   vec_sub
 /// \since Crypto++ 6.0
 template <class T1, class T2>
@@ -883,7 +883,7 @@ inline T1 VecSub(const T1 vec1, const T2 vec2)
 ///   vec1 and vec2 are added as if uint64x2_p vectors. On POWER7
 ///   and below VecAdd64 manages the carries from two elements in
 ///   a uint32x4_p vector.
-/// \para Wraps
+/// \par Wraps
 ///   vec_add for POWER8, vec_addc, vec_perm, vec_add for Altivec
 /// \since Crypto++ 8.0
 inline uint32x4_p VecAdd64(const uint32x4_p& vec1, const uint32x4_p& vec2)
@@ -919,7 +919,7 @@ inline uint32x4_p VecAdd64(const uint32x4_p& vec1, const uint32x4_p& vec2)
 ///   uint8x16_p x = VecLoad(ptr);
 ///   uint8x16_p y = VecShiftLeftOctet<12>(x);
 /// </pre>
-/// \para Wraps
+/// \par Wraps
 ///   vec_sld
 /// \sa Is vec_sld
 ///   endian sensitive? on Stack Overflow
@@ -966,7 +966,7 @@ inline T VecShiftLeftOctet(const T vec)
 ///   uint8x16_p x = VecLoad(ptr);
 ///   uint8x16_p y = VecShiftRightOctet<12>(y);
 /// </pre>
-/// \para Wraps
+/// \par Wraps
 ///   vec_sld
 /// \sa Is vec_sld
 ///   endian sensitive? on Stack Overflow
@@ -1005,7 +1005,7 @@ inline T VecShiftRightOctet(const T vec)
 /// \details VecRotateLeftOctet() returns a new vector after rotating the
 ///   concatenation of the source vector with itself by the specified
 ///   number of bytes. The return vector is the same type as vec.
-/// \para Wraps
+/// \par Wraps
 ///   vec_sld
 /// \sa Is vec_sld
 ///   endian sensitive? on Stack Overflow
@@ -1030,7 +1030,7 @@ inline T VecRotateLeftOctet(const T vec)
 /// \details VecRotateRightOctet() returns a new vector after rotating the
 ///   concatenation of the source vector with itself by the specified
 ///   number of bytes. The return vector is the same type as vec.
-/// \para Wraps
+/// \par Wraps
 ///   vec_sld
 /// \sa Is vec_sld
 ///   endian sensitive? on Stack Overflow
@@ -1052,7 +1052,7 @@ inline T VecRotateRightOctet(const T vec)
 /// \param vec the vector
 /// \returns vector
 /// \details VecRotateLeft rotates each element in a packed vector by bit count.
-/// \para Wraps
+/// \par Wraps
 ///   vec_rl
 /// \since Crypto++ 7.0
 template <unsigned int C>
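VecAdd, VecXor and the compile-time rotate above are the building blocks of ARX ciphers, so a ChaCha-style fragment makes a natural example. A hypothetical sketch (not the library's ChaCha implementation), operating on four 32-bit lanes at once:

    #include "ppc_simd.h"

    using CryptoPP::uint32x4_p;
    using CryptoPP::VecAdd;
    using CryptoPP::VecXor;
    using CryptoPP::VecRotateLeft;

    // One step of a ChaCha-like quarter round: a += b; d ^= a; d <<<= 16.
    // Each statement applies to all four 32-bit lanes simultaneously.
    void QuarterRoundStep(uint32x4_p& a, const uint32x4_p b, uint32x4_p& d)
    {
        a = VecAdd(a, b);
        d = VecXor(d, a);
        d = VecRotateLeft<16>(d);
    }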
@@ -1067,7 +1067,7 @@ inline uint32x4_p VecRotateLeft(const uint32x4_p vec)
 /// \param vec the vector
 /// \returns vector
 /// \details VecRotateRight rotates each element in a packed vector by bit count.
-/// \para Wraps
+/// \par Wraps
 ///   vec_rl
 /// \since Crypto++ 7.0
 template <unsigned int C>
@@ -1081,7 +1081,7 @@ inline uint32x4_p VecRotateRight(const uint32x4_p vec)
 /// \tparam T vector type
 /// \param vec the vector
 /// \returns vector
-/// \para Wraps
+/// \par Wraps
 ///   vec_sld
 /// \since Crypto++ 7.0
 template <class T>
@@ -1098,7 +1098,7 @@ inline T VecSwapWords(const T vec)
 ///   is composed of the least significant bits and occupies bytes 8 through 15
 ///   when viewed as a big endian array. The return vector is the same type as
 ///   the original vector and padded with 0's in the most significant bit positions.
-/// \para Wraps
+/// \par Wraps
 ///   vec_sld
 /// \since Crypto++ 7.0
 template <class T>
@@ -1118,7 +1118,7 @@ inline T VecGetLow(const T val)
 ///   is composed of the most significant bits and occupies bytes 0 through 7
 ///   when viewed as a big endian array. The return vector is the same type as
 ///   the original vector and padded with 0's in the most significant bit positions.
-/// \para Wraps
+/// \par Wraps
 ///   vec_sld
 /// \since Crypto++ 7.0
 template <class T>
@@ -1138,7 +1138,7 @@ inline T VecGetHigh(const T val)
 /// \returns true if vec1 equals vec2, false otherwise
 /// \details VecEqual performs a bitwise compare. The vector element types do
 ///   not matter.
-/// \para Wraps
+/// \par Wraps
 ///   vec_all_eq
 /// \since Crypto++ 8.0
 template <class T1, class T2>
@@ -1155,7 +1155,7 @@ inline bool VecEqual(const T1 vec1, const T2 vec2)
 /// \returns true if vec1 does not equal vec2, false otherwise
 /// \details VecEqual performs a bitwise compare. The vector element types do
 ///   not matter.
-/// \para Wraps
+/// \par Wraps
 ///   vec_all_eq
 /// \since Crypto++ 8.0
 template <class T1, class T2>
@@ -1176,7 +1176,7 @@ inline bool VecNotEqual(const T1 vec1, const T2 vec2)
 /// \details VecEncrypt performs one round of AES encryption of state
 ///   using subkey key. The return vector is the same type as vec1.
 /// \details VecEncrypt is available on POWER8 and above.
-/// \para Wraps
+/// \par Wraps
 ///   __vcipher, __builtin_altivec_crypto_vcipher, __builtin_crypto_vcipher
 /// \since GCC and XLC since Crypto++ 6.0, LLVM Clang since Crypto++ 8.0
 template <class T1, class T2>
@@ -1201,7 +1201,7 @@ inline T1 VecEncrypt(const T1 state, const T2 key)
 /// \details VecEncryptLast performs the final round of AES encryption
 ///   of state using subkey key. The return vector is the same type as vec1.
 /// \details VecEncryptLast is available on POWER8 and above.
-/// \para Wraps
+/// \par Wraps
 ///   __vcipherlast, __builtin_altivec_crypto_vcipherlast, __builtin_crypto_vcipherlast
 /// \since GCC and XLC since Crypto++ 6.0, LLVM Clang since Crypto++ 8.0
 template <class T1, class T2>
@@ -1226,7 +1226,7 @@ inline T1 VecEncryptLast(const T1 state, const T2 key)
 /// \details VecDecrypt performs one round of AES decryption of state
 ///   using subkey key. The return vector is the same type as vec1.
 /// \details VecDecrypt is available on POWER8 and above.
-/// \para Wraps
+/// \par Wraps
 ///   __vncipher, __builtin_altivec_crypto_vncipher, __builtin_crypto_vncipher
 /// \since GCC and XLC since Crypto++ 6.0, LLVM Clang since Crypto++ 8.0
 template <class T1, class T2>
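Ten applications of the round primitives above give a complete AES-128 block operation. A minimal sketch assuming POWER8 in-core crypto and round keys already expanded into subkeys[0..10] in the byte order the builtins expect (the library's rijndael_simd.cpp shows the production handling, including the little endian adjustments):

    #include "ppc_simd.h"

    using CryptoPP::uint8x16_p;
    using CryptoPP::VecXor;
    using CryptoPP::VecEncrypt;
    using CryptoPP::VecEncryptLast;

    // AES-128 encryption of one block from a caller-supplied,
    // pre-expanded key schedule (hypothetical helper, POWER8 only).
    uint8x16_p AES128Block(uint8x16_p block, const uint8x16_p subkeys[11])
    {
        block = VecXor(block, subkeys[0]);          // initial AddRoundKey
        for (unsigned int i = 1; i < 10; ++i)
            block = VecEncrypt(block, subkeys[i]);  // rounds 1..9
        return VecEncryptLast(block, subkeys[10]);  // final round
    }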
@@ -1251,7 +1251,7 @@ inline T1 VecDecrypt(const T1 state, const T2 key)
 /// \details VecDecryptLast performs the final round of AES decryption
 ///   of state using subkey key. The return vector is the same type as vec1.
 /// \details VecDecryptLast is available on POWER8 and above.
-/// \para Wraps
+/// \par Wraps
 ///   __vncipherlast, __builtin_altivec_crypto_vncipherlast, __builtin_crypto_vncipherlast
 /// \since GCC and XLC since Crypto++ 6.0, LLVM Clang since Crypto++ 8.0
 template <class T1, class T2>
@@ -1276,7 +1276,7 @@ inline T1 VecDecryptLast(const T1 state, const T2 key)
 /// \details VecSHA256 selects sigma0, sigma1, Sigma0, Sigma1 based on
 ///   func and subfunc. The return vector is the same type as vec.
 /// \details VecSHA256 is available on POWER8 and above.
-/// \para Wraps
+/// \par Wraps
 ///   __vshasigmaw, __builtin_altivec_crypto_vshasigmaw, __builtin_crypto_vshasigmaw
 /// \since GCC and XLC since Crypto++ 6.0, LLVM Clang since Crypto++ 8.0
 template <int func, int subfunc, class T>
@@ -1301,7 +1301,7 @@ inline T VecSHA256(const T vec)
 /// \details VecSHA512 selects sigma0, sigma1, Sigma0, Sigma1 based on
 ///   func and subfunc. The return vector is the same type as vec.
 /// \details VecSHA512 is available on POWER8 and above.
-/// \para Wraps
+/// \par Wraps
 ///   __vshasigmad, __builtin_altivec_crypto_vshasigmad, __builtin_crypto_vshasigmad
 /// \since GCC and XLC since Crypto++ 6.0, LLVM Clang since Crypto++ 8.0
 template <int func, int subfunc, class T>
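The func/subfunc template arguments select among the four SHA functions computed by vshasigmaw/vshasigmad. A sketch of one SHA-256 message-schedule word, W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16]; the selector values <0,0> for sigma0 and <0,0xf> for sigma1 are assumptions to verify against sha_simd.cpp and the compiler's vshasigmaw documentation:

    #include "ppc_simd.h"

    using CryptoPP::uint32x4_p;
    using CryptoPP::VecAdd;
    using CryptoPP::VecSHA256;

    // One SHA-256 schedule word, four lanes at a time (POWER8 only).
    uint32x4_p ScheduleWord(const uint32x4_p wm2,  const uint32x4_p wm7,
                            const uint32x4_p wm15, const uint32x4_p wm16)
    {
        const uint32x4_p s0 = VecSHA256<0,0>(wm15);    // sigma0
        const uint32x4_p s1 = VecSHA256<0,0xf>(wm2);   // sigma1
        return VecAdd(VecAdd(s1, wm7), VecAdd(s0, wm16));
    }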