diff --git a/gcm.cpp b/gcm.cpp
index 18067eac..9ea384dc 100644
--- a/gcm.cpp
+++ b/gcm.cpp
@@ -80,7 +80,10 @@ __m128i _mm_clmulepi64_si128(const __m128i &a, const __m128i &b, int i)
 inline static void SSE2_Xor16(byte *a, const byte *b, const byte *c)
 {
 #if CRYPTOPP_BOOL_SSE2_INTRINSICS_AVAILABLE
-	*(__m128i *)a = _mm_xor_si128(*(__m128i *)b, *(__m128i *)c);
+	assert(IsAlignedOn(a,GetAlignmentOf<__m128i>()));
+	assert(IsAlignedOn(b,GetAlignmentOf<__m128i>()));
+	assert(IsAlignedOn(c,GetAlignmentOf<__m128i>()));
+	*(__m128i *)(void *)a = _mm_xor_si128(*(__m128i *)(void *)b, *(__m128i *)(void *)c);
 #else
 	asm ("movdqa %1, %%xmm0; pxor %2, %%xmm0; movdqa %%xmm0, %0;" : "=m" (a[0]) : "m"(b[0]), "m"(c[0]));
 #endif
diff --git a/shark.cpp b/shark.cpp
index 592d0d91..8721574b 100644
--- a/shark.cpp
+++ b/shark.cpp
@@ -96,7 +96,7 @@ template struct SharkProcessAndXorBlock{
 // VC60 workaround: problem with template functions
 inline SharkProcessAndXorBlock(const word64 *roundKeys, unsigned int rounds, const byte *inBlock, const byte *xorBlock, byte *outBlock)
 {
-	assert(IsAlignedOn(input,GetAlignmentOf<word64>()));
+	assert(IsAlignedOn(inBlock,GetAlignmentOf<word64>()));
 	word64 tmp = *(word64 *)(void *)inBlock ^ roundKeys[0];
 
 	ByteOrder order = GetNativeByteOrder();
@@ -125,7 +125,7 @@ inline SharkProcessAndXorBlock(const word64 *roundKeys, unsigned int rounds, con
 		(sbox[GETBYTE(tmp, 1)])
 		(sbox[GETBYTE(tmp, 0)]);
 
-	assert(IsAlignedOn(output,GetAlignmentOf<word64>()));
+	assert(IsAlignedOn(outBlock,GetAlignmentOf<word64>()));
 	*(word64 *)(void *)outBlock ^= roundKeys[rounds];
 }};
 
diff --git a/tea.cpp b/tea.cpp
index 4597d16d..871a00b3 100644
--- a/tea.cpp
+++ b/tea.cpp
@@ -106,9 +106,12 @@ void XTEA::Dec::ProcessAndXorBlock(const byte *inBlock, const byte *xorBlock, by
 void BTEA::Enc::ProcessAndXorBlock(const byte *inBlock, const byte *xorBlock, byte *outBlock) const
 {
 	CRYPTOPP_UNUSED(xorBlock);
+	assert(IsAlignedOn(inBlock,GetAlignmentOf<word32>()));
+	assert(IsAlignedOn(outBlock,GetAlignmentOf<word32>()));
+
 	unsigned int n = m_blockSize / 4;
-	word32 *v = (word32*)outBlock;
-	ConditionalByteReverse(BIG_ENDIAN_ORDER, v, (const word32*)inBlock, m_blockSize);
+	word32 *v = (word32*)(void *)outBlock;
+	ConditionalByteReverse(BIG_ENDIAN_ORDER, v, (const word32*)(void *)inBlock, m_blockSize);
 	word32 y = v[0], z = v[n-1], e;
 	word32 p, q = 6+52/n;
 
@@ -133,9 +136,12 @@ void BTEA::Enc::ProcessAndXorBlock(const byte *inBlock, const byte *xorBlock, by
 void BTEA::Dec::ProcessAndXorBlock(const byte *inBlock, const byte *xorBlock, byte *outBlock) const
 {
 	CRYPTOPP_UNUSED(xorBlock);
+	assert(IsAlignedOn(inBlock,GetAlignmentOf<word32>()));
+	assert(IsAlignedOn(outBlock,GetAlignmentOf<word32>()));
+
 	unsigned int n = m_blockSize / 4;
-	word32 *v = (word32*)outBlock;
-	ConditionalByteReverse(BIG_ENDIAN_ORDER, v, (const word32*)inBlock, m_blockSize);
+	word32 *v = (word32*)(void *)outBlock;
+	ConditionalByteReverse(BIG_ENDIAN_ORDER, v, (const word32*)(void *)inBlock, m_blockSize);
 	word32 y = v[0], z = v[n-1], e;
 	word32 p, q = 6+52/n;
 