From 16cf591f3c0a290133016af1e0659e448b3e47e8 Mon Sep 17 00:00:00 2001
From: Jeffrey Walton
Date: Wed, 15 Aug 2018 13:25:53 -0400
Subject: [PATCH] Cut over to the BLAKE2 team implementation

The BLAKE2 team implementation runs a tad slower, but we want to push
maintenance onto them.
---
 blake2-simd.cpp | 2245 +++++++++++++++--------------------------------
 1 file changed, 717 insertions(+), 1528 deletions(-)

diff --git a/blake2-simd.cpp b/blake2-simd.cpp
index cff64f26..4c54de4a 100644
--- a/blake2-simd.cpp
+++ b/blake2-simd.cpp
@@ -67,1534 +67,723 @@ ANONYMOUS_NAMESPACE_END
 NAMESPACE_BEGIN(CryptoPP)

 #if CRYPTOPP_SSE41_AVAILABLE
+
+#define LOADU(p) _mm_loadu_si128( (const __m128i *)(const void*)(p) )
+#define STOREU(p,r) _mm_storeu_si128((__m128i *)(void*)(p), r)
+#define TOF(reg) _mm_castsi128_ps((reg))
+#define TOI(reg) _mm_castps_si128((reg))
+
 void BLAKE2_Compress32_SSE4(const byte* input, BLAKE2_State& state)
 {
-    __m128i row1, row2, row3, row4;
-    __m128i buf1, buf2, buf3, buf4;
-
-    __m128i t0, t1, t2;
-    __m128i ff0, ff1;
-
-    const __m128i r8 = _mm_set_epi8(12, 15, 14, 13, 8, 11, 10, 9, 4, 7, 6, 5, 0, 3, 2, 1);
-    const __m128i r16 = _mm_set_epi8(13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2);
-
-    const __m128i m0 = _mm_loadu_si128(CONST_M128_CAST(input + 00));
-    const __m128i m1 = _mm_loadu_si128(CONST_M128_CAST(input + 16));
-    const __m128i m2 = _mm_loadu_si128(CONST_M128_CAST(input + 32));
-    const __m128i m3 = _mm_loadu_si128(CONST_M128_CAST(input + 48));
-
-    row1 = ff0 = _mm_loadu_si128(CONST_M128_CAST(&state.h[0]));
-    row2 = ff1 = _mm_loadu_si128(CONST_M128_CAST(&state.h[4]));
-    row3 = _mm_setr_epi32(BLAKE2S_IV[0], BLAKE2S_IV[1], BLAKE2S_IV[2], BLAKE2S_IV[3]);
-    row4 = _mm_xor_si128(_mm_setr_epi32(BLAKE2S_IV[4], BLAKE2S_IV[5], BLAKE2S_IV[6], BLAKE2S_IV[7]), _mm_loadu_si128(CONST_M128_CAST(&state.t[0])));
-    buf1 = _mm_castps_si128((_mm_shuffle_ps(_mm_castsi128_ps((m0)), _mm_castsi128_ps((m1)), _MM_SHUFFLE(2,0,2,0))));
-
-    row1 = _mm_add_epi32(_mm_add_epi32(row1, buf1), row2);
-    row4 = _mm_xor_si128(row4, row1);
-    row4 = _mm_shuffle_epi8(row4,r16);
-    row3 = _mm_add_epi32(row3, row4);
-    row2 = _mm_xor_si128(row2, row3);
-    row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20));
-
-    buf2 = _mm_castps_si128((_mm_shuffle_ps(_mm_castsi128_ps((m0)), _mm_castsi128_ps((m1)), _MM_SHUFFLE(3,1,3,1))));
-
-    row1 = _mm_add_epi32(_mm_add_epi32(row1, buf2), row2);
-    row4 = _mm_xor_si128(row4, row1);
-    row4 = _mm_shuffle_epi8(row4,r8);
-    row3 = _mm_add_epi32(row3, row4);
-    row2 = _mm_xor_si128(row2, row3);
-    row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25));
-
-    row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(2,1,0,3));
-    row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2));
-    row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(0,3,2,1));
-
-    buf3 = _mm_castps_si128((_mm_shuffle_ps(_mm_castsi128_ps((m2)), _mm_castsi128_ps((m3)), _MM_SHUFFLE(2,0,2,0))));
-
-    row1 = _mm_add_epi32(_mm_add_epi32(row1, buf3), row2);
-    row4 = _mm_xor_si128(row4, row1);
-    row4 = _mm_shuffle_epi8(row4,r16);
-    row3 = _mm_add_epi32(row3, row4);
-    row2 = _mm_xor_si128(row2, row3);
-    row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20));
-
-    buf4 = _mm_castps_si128((_mm_shuffle_ps(_mm_castsi128_ps((m2)), _mm_castsi128_ps((m3)), _MM_SHUFFLE(3,1,3,1))));
-
-    row1 = _mm_add_epi32(_mm_add_epi32(row1, buf4), row2);
-    row4 = _mm_xor_si128(row4, row1);
-    row4 = _mm_shuffle_epi8(row4,r8);
-    row3 = _mm_add_epi32(row3, row4);
-    row2 = _mm_xor_si128(row2, row3);
-    row2 = _mm_xor_si128(_mm_srli_epi32(row2, 
7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(0,3,2,1)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(2,1,0,3)); - - t0 = _mm_blend_epi16(m1, m2, 0x0C); - t1 = _mm_slli_si128(m3, 4); - t2 = _mm_blend_epi16(t0, t1, 0xF0); - buf1 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,1,0,3)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf1), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_shuffle_epi32(m2,_MM_SHUFFLE(0,0,2,0)); - t1 = _mm_blend_epi16(m1,m3,0xC0); - t2 = _mm_blend_epi16(t0, t1, 0xF0); - buf2 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,3,0,1)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf2), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(2,1,0,3)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(0,3,2,1)); - - t0 = _mm_slli_si128(m1, 4); - t1 = _mm_blend_epi16(m2, t0, 0x30); - t2 = _mm_blend_epi16(m0, t1, 0xF0); - buf3 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,3,0,1)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf3), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_unpackhi_epi32(m0,m1); - t1 = _mm_slli_si128(m3, 4); - t2 = _mm_blend_epi16(t0, t1, 0x0C); - buf4 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,3,0,1)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf4), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(0,3,2,1)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(2,1,0,3)); - - t0 = _mm_unpackhi_epi32(m2,m3); - t1 = _mm_blend_epi16(m3,m1,0x0C); - t2 = _mm_blend_epi16(t0, t1, 0x0F); - buf1 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(3,1,0,2)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf1), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_unpacklo_epi32(m2,m0); - t1 = _mm_blend_epi16(t0, m0, 0xF0); - t2 = _mm_slli_si128(m3, 8); - buf2 = _mm_blend_epi16(t1, t2, 0xC0); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf2), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(2,1,0,3)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(0,3,2,1)); - - t0 = _mm_blend_epi16(m0, m2, 0x3C); - t1 = _mm_srli_si128(m1, 12); - t2 = _mm_blend_epi16(t0,t1,0x03); - buf3 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(1,0,3,2)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, 
buf3), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_slli_si128(m3, 4); - t1 = _mm_blend_epi16(m0, m1, 0x33); - t2 = _mm_blend_epi16(t1, t0, 0xC0); - buf4 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(0,1,2,3)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf4), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(0,3,2,1)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(2,1,0,3)); - - t0 = _mm_unpackhi_epi32(m0,m1); - t1 = _mm_unpackhi_epi32(t0, m2); - t2 = _mm_blend_epi16(t1, m3, 0x0C); - buf1 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(3,1,0,2)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf1), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_slli_si128(m2, 8); - t1 = _mm_blend_epi16(m3,m0,0x0C); - t2 = _mm_blend_epi16(t1, t0, 0xC0); - buf2 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,0,1,3)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf2), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(2,1,0,3)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(0,3,2,1)); - - t0 = _mm_blend_epi16(m0,m1,0x0F); - t1 = _mm_blend_epi16(t0, m3, 0xC0); - buf3 = _mm_shuffle_epi32(t1, _MM_SHUFFLE(3,0,1,2)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf3), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_unpacklo_epi32(m0,m2); - t1 = _mm_unpackhi_epi32(m1,m2); - buf4 = _mm_unpacklo_epi64(t1,t0); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf4), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(0,3,2,1)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(2,1,0,3)); - - t0 = _mm_unpacklo_epi64(m1,m2); - t1 = _mm_unpackhi_epi64(m0,m2); - t2 = _mm_blend_epi16(t0,t1,0x33); - buf1 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,0,1,3)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf1), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_unpackhi_epi64(m1,m3); - t1 = _mm_unpacklo_epi64(m0,m1); - buf2 = _mm_blend_epi16(t0,t1,0x33); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf2), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = 
_mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(2,1,0,3)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(0,3,2,1)); - - t0 = _mm_unpackhi_epi64(m3,m1); - t1 = _mm_unpackhi_epi64(m2,m0); - buf3 = _mm_blend_epi16(t1,t0,0x33); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf3), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_blend_epi16(m0,m2,0x03); - t1 = _mm_slli_si128(t0, 8); - t2 = _mm_blend_epi16(t1,m3,0x0F); - buf4 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(1,2,0,3)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf4), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(0,3,2,1)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(2,1,0,3)); - - t0 = _mm_unpackhi_epi32(m0,m1); - t1 = _mm_unpacklo_epi32(m0,m2); - buf1 = _mm_unpacklo_epi64(t0,t1); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf1), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_srli_si128(m2, 4); - t1 = _mm_blend_epi16(m0,m3,0x03); - buf2 = _mm_blend_epi16(t1,t0,0x3C); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf2), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(2,1,0,3)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(0,3,2,1)); - - t0 = _mm_blend_epi16(m1,m0,0x0C); - t1 = _mm_srli_si128(m3, 4); - t2 = _mm_blend_epi16(t0,t1,0x30); - buf3 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(1,2,3,0)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf3), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_unpacklo_epi64(m1,m2); - t1= _mm_shuffle_epi32(m3, _MM_SHUFFLE(0,2,0,1)); - buf4 = _mm_blend_epi16(t0,t1,0x33); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf4), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(0,3,2,1)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(2,1,0,3)); - - t0 = _mm_slli_si128(m1, 12); - t1 = _mm_blend_epi16(m0,m3,0x33); - buf1 = _mm_blend_epi16(t1,t0,0xC0); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf1), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, 
row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_blend_epi16(m3,m2,0x30); - t1 = _mm_srli_si128(m1, 4); - t2 = _mm_blend_epi16(t0,t1,0x03); - buf2 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,1,3,0)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf2), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(2,1,0,3)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(0,3,2,1)); - - t0 = _mm_unpacklo_epi64(m0,m2); - t1 = _mm_srli_si128(m1, 4); - buf3 = _mm_shuffle_epi32(_mm_blend_epi16(t0,t1,0x0C), _MM_SHUFFLE(2,3,1,0)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf3), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_unpackhi_epi32(m1,m2); - t1 = _mm_unpackhi_epi64(m0,t0); - buf4 = _mm_shuffle_epi32(t1, _MM_SHUFFLE(3,0,1,2)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf4), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(0,3,2,1)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(2,1,0,3)); - - t0 = _mm_unpackhi_epi32(m0,m1); - t1 = _mm_blend_epi16(t0,m3,0x0F); - buf1 = _mm_shuffle_epi32(t1,_MM_SHUFFLE(2,0,3,1)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf1), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_blend_epi16(m2,m3,0x30); - t1 = _mm_srli_si128(m0,4); - t2 = _mm_blend_epi16(t0,t1,0x03); - buf2 = _mm_shuffle_epi32(t2, _MM_SHUFFLE(1,0,2,3)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf2), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(2,1,0,3)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(0,3,2,1)); - - t0 = _mm_unpackhi_epi64(m0,m3); - t1 = _mm_unpacklo_epi64(m1,m2); - t2 = _mm_blend_epi16(t0,t1,0x3C); - buf3 = _mm_shuffle_epi32(t2,_MM_SHUFFLE(0,2,3,1)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf3), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_unpacklo_epi32(m0,m1); - t1 = _mm_unpackhi_epi32(m1,m2); - buf4 = _mm_unpacklo_epi64(t0,t1); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf4), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, 
_MM_SHUFFLE(0,3,2,1)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(2,1,0,3)); - - t0 = _mm_unpackhi_epi32(m1,m3); - t1 = _mm_unpacklo_epi64(t0,m0); - t2 = _mm_blend_epi16(t1,m2,0xC0); - buf1 = _mm_shufflehi_epi16(t2,_MM_SHUFFLE(1,0,3,2)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf1), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_unpackhi_epi32(m0,m3); - t1 = _mm_blend_epi16(m2,t0,0xF0); - buf2 = _mm_shuffle_epi32(t1,_MM_SHUFFLE(0,2,1,3)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf2), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(2,1,0,3)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(0,3,2,1)); - - t0 = _mm_blend_epi16(m2,m0,0x0C); - t1 = _mm_slli_si128(t0,4); - buf3 = _mm_blend_epi16(t1,m3,0x0F); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf3), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_blend_epi16(m1,m0,0x30); - buf4 = _mm_shuffle_epi32(t0,_MM_SHUFFLE(1,0,3,2)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf4), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(0,3,2,1)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(2,1,0,3)); - - t0 = _mm_blend_epi16(m0,m2,0x03); - t1 = _mm_blend_epi16(m1,m2,0x30); - t2 = _mm_blend_epi16(t1,t0,0x0F); - buf1 = _mm_shuffle_epi32(t2,_MM_SHUFFLE(1,3,0,2)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf1), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_slli_si128(m0,4); - t1 = _mm_blend_epi16(m1,t0,0xC0); - buf2 = _mm_shuffle_epi32(t1,_MM_SHUFFLE(1,2,0,3)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf2), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(2,1,0,3)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(0,3,2,1)); - - t0 = _mm_unpackhi_epi32(m0,m3); - t1 = _mm_unpacklo_epi32(m2,m3); - t2 = _mm_unpackhi_epi64(t0,t1); - buf3 = _mm_shuffle_epi32(t2,_MM_SHUFFLE(3,0,2,1)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf3), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r16); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 12),_mm_slli_epi32(row2, 20)); - - t0 = _mm_blend_epi16(m3,m2,0xC0); - 
t1 = _mm_unpacklo_epi32(m0,m3); - t2 = _mm_blend_epi16(t0,t1,0x0F); - buf4 = _mm_shuffle_epi32(t2,_MM_SHUFFLE(0,1,2,3)); - - row1 = _mm_add_epi32(_mm_add_epi32(row1, buf4), row2); - row4 = _mm_xor_si128(row4, row1); - row4 = _mm_shuffle_epi8(row4,r8); - row3 = _mm_add_epi32(row3, row4); - row2 = _mm_xor_si128(row2, row3); - row2 = _mm_xor_si128(_mm_srli_epi32(row2, 7),_mm_slli_epi32(row2, 25)); - - row4 = _mm_shuffle_epi32(row4, _MM_SHUFFLE(0,3,2,1)); - row3 = _mm_shuffle_epi32(row3, _MM_SHUFFLE(1,0,3,2)); - row2 = _mm_shuffle_epi32(row2, _MM_SHUFFLE(2,1,0,3)); - - _mm_storeu_si128(M128_CAST(&state.h[0]), _mm_xor_si128(ff0, _mm_xor_si128(row1, row3))); - _mm_storeu_si128(M128_CAST(&state.h[4]), _mm_xor_si128(ff1, _mm_xor_si128(row2, row4))); + #define BLAKE2S_LOAD_MSG_0_1(buf) \ + buf = TOI(_mm_shuffle_ps(TOF(m0), TOF(m1), _MM_SHUFFLE(2,0,2,0))); + + #define BLAKE2S_LOAD_MSG_0_2(buf) \ + buf = TOI(_mm_shuffle_ps(TOF(m0), TOF(m1), _MM_SHUFFLE(3,1,3,1))); + + #define BLAKE2S_LOAD_MSG_0_3(buf) \ + buf = TOI(_mm_shuffle_ps(TOF(m2), TOF(m3), _MM_SHUFFLE(2,0,2,0))); + + #define BLAKE2S_LOAD_MSG_0_4(buf) \ + buf = TOI(_mm_shuffle_ps(TOF(m2), TOF(m3), _MM_SHUFFLE(3,1,3,1))); + + #define BLAKE2S_LOAD_MSG_1_1(buf) \ + t0 = _mm_blend_epi16(m1, m2, 0x0C); \ + t1 = _mm_slli_si128(m3, 4); \ + t2 = _mm_blend_epi16(t0, t1, 0xF0); \ + buf = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,1,0,3)); + + #define BLAKE2S_LOAD_MSG_1_2(buf) \ + t0 = _mm_shuffle_epi32(m2,_MM_SHUFFLE(0,0,2,0)); \ + t1 = _mm_blend_epi16(m1,m3,0xC0); \ + t2 = _mm_blend_epi16(t0, t1, 0xF0); \ + buf = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,3,0,1)); + + #define BLAKE2S_LOAD_MSG_1_3(buf) \ + t0 = _mm_slli_si128(m1, 4); \ + t1 = _mm_blend_epi16(m2, t0, 0x30); \ + t2 = _mm_blend_epi16(m0, t1, 0xF0); \ + buf = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,3,0,1)); + + #define BLAKE2S_LOAD_MSG_1_4(buf) \ + t0 = _mm_unpackhi_epi32(m0,m1); \ + t1 = _mm_slli_si128(m3, 4); \ + t2 = _mm_blend_epi16(t0, t1, 0x0C); \ + buf = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,3,0,1)); + + #define BLAKE2S_LOAD_MSG_2_1(buf) \ + t0 = _mm_unpackhi_epi32(m2,m3); \ + t1 = _mm_blend_epi16(m3,m1,0x0C); \ + t2 = _mm_blend_epi16(t0, t1, 0x0F); \ + buf = _mm_shuffle_epi32(t2, _MM_SHUFFLE(3,1,0,2)); + + #define BLAKE2S_LOAD_MSG_2_2(buf) \ + t0 = _mm_unpacklo_epi32(m2,m0); \ + t1 = _mm_blend_epi16(t0, m0, 0xF0); \ + t2 = _mm_slli_si128(m3, 8); \ + buf = _mm_blend_epi16(t1, t2, 0xC0); + + #define BLAKE2S_LOAD_MSG_2_3(buf) \ + t0 = _mm_blend_epi16(m0, m2, 0x3C); \ + t1 = _mm_srli_si128(m1, 12); \ + t2 = _mm_blend_epi16(t0,t1,0x03); \ + buf = _mm_shuffle_epi32(t2, _MM_SHUFFLE(1,0,3,2)); + + #define BLAKE2S_LOAD_MSG_2_4(buf) \ + t0 = _mm_slli_si128(m3, 4); \ + t1 = _mm_blend_epi16(m0, m1, 0x33); \ + t2 = _mm_blend_epi16(t1, t0, 0xC0); \ + buf = _mm_shuffle_epi32(t2, _MM_SHUFFLE(0,1,2,3)); + + #define BLAKE2S_LOAD_MSG_3_1(buf) \ + t0 = _mm_unpackhi_epi32(m0,m1); \ + t1 = _mm_unpackhi_epi32(t0, m2); \ + t2 = _mm_blend_epi16(t1, m3, 0x0C); \ + buf = _mm_shuffle_epi32(t2, _MM_SHUFFLE(3,1,0,2)); + + #define BLAKE2S_LOAD_MSG_3_2(buf) \ + t0 = _mm_slli_si128(m2, 8); \ + t1 = _mm_blend_epi16(m3,m0,0x0C); \ + t2 = _mm_blend_epi16(t1, t0, 0xC0); \ + buf = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,0,1,3)); + + #define BLAKE2S_LOAD_MSG_3_3(buf) \ + t0 = _mm_blend_epi16(m0,m1,0x0F); \ + t1 = _mm_blend_epi16(t0, m3, 0xC0); \ + buf = _mm_shuffle_epi32(t1, _MM_SHUFFLE(3,0,1,2)); + + #define BLAKE2S_LOAD_MSG_3_4(buf) \ + t0 = _mm_unpacklo_epi32(m0,m2); \ + t1 = _mm_unpackhi_epi32(m1,m2); \ + buf = _mm_unpacklo_epi64(t1,t0); + + 
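// The BLAKE2S_LOAD_MSG_r_k macros above and below gather, for round r, the
// four message words fed to one vectorized G1/G2 step (one word per 32-bit
// lane), following BLAKE2s's SIGMA schedule (RFC 7693); the shuffles and
// blends bake that word selection into vector form. A minimal scalar sketch
// of the same selection; SIGMA32, w, and msg are illustrative names, not
// identifiers from this patch:
//
//   static const unsigned char SIGMA32[10][16] = {
//       {  0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15 },
//       { 14, 10,  4,  8,  9, 15, 13,  6,  1, 12,  0,  2, 11,  7,  5,  3 },
//       /* remaining eight rows per RFC 7693 */
//   };
//   for (unsigned i = 0; i < 16; i++)
//       msg[i] = w[SIGMA32[r][i]];  // w = the 16 words of the input block
//
// For example, BLAKE2S_LOAD_MSG_0_1 packs w[0], w[2], w[4], w[6], the first
// operands of round 0's four column G computations.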
#define BLAKE2S_LOAD_MSG_4_1(buf) \ + t0 = _mm_unpacklo_epi64(m1,m2); \ + t1 = _mm_unpackhi_epi64(m0,m2); \ + t2 = _mm_blend_epi16(t0,t1,0x33); \ + buf = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,0,1,3)); + + #define BLAKE2S_LOAD_MSG_4_2(buf) \ + t0 = _mm_unpackhi_epi64(m1,m3); \ + t1 = _mm_unpacklo_epi64(m0,m1); \ + buf = _mm_blend_epi16(t0,t1,0x33); + + #define BLAKE2S_LOAD_MSG_4_3(buf) \ + t0 = _mm_unpackhi_epi64(m3,m1); \ + t1 = _mm_unpackhi_epi64(m2,m0); \ + buf = _mm_blend_epi16(t1,t0,0x33); + + #define BLAKE2S_LOAD_MSG_4_4(buf) \ + t0 = _mm_blend_epi16(m0,m2,0x03); \ + t1 = _mm_slli_si128(t0, 8); \ + t2 = _mm_blend_epi16(t1,m3,0x0F); \ + buf = _mm_shuffle_epi32(t2, _MM_SHUFFLE(1,2,0,3)); + + #define BLAKE2S_LOAD_MSG_5_1(buf) \ + t0 = _mm_unpackhi_epi32(m0,m1); \ + t1 = _mm_unpacklo_epi32(m0,m2); \ + buf = _mm_unpacklo_epi64(t0,t1); + + #define BLAKE2S_LOAD_MSG_5_2(buf) \ + t0 = _mm_srli_si128(m2, 4); \ + t1 = _mm_blend_epi16(m0,m3,0x03); \ + buf = _mm_blend_epi16(t1,t0,0x3C); + + #define BLAKE2S_LOAD_MSG_5_3(buf) \ + t0 = _mm_blend_epi16(m1,m0,0x0C); \ + t1 = _mm_srli_si128(m3, 4); \ + t2 = _mm_blend_epi16(t0,t1,0x30); \ + buf = _mm_shuffle_epi32(t2, _MM_SHUFFLE(1,2,3,0)); + + #define BLAKE2S_LOAD_MSG_5_4(buf) \ + t0 = _mm_unpacklo_epi64(m1,m2); \ + t1= _mm_shuffle_epi32(m3, _MM_SHUFFLE(0,2,0,1)); \ + buf = _mm_blend_epi16(t0,t1,0x33); + + #define BLAKE2S_LOAD_MSG_6_1(buf) \ + t0 = _mm_slli_si128(m1, 12); \ + t1 = _mm_blend_epi16(m0,m3,0x33); \ + buf = _mm_blend_epi16(t1,t0,0xC0); + + #define BLAKE2S_LOAD_MSG_6_2(buf) \ + t0 = _mm_blend_epi16(m3,m2,0x30); \ + t1 = _mm_srli_si128(m1, 4); \ + t2 = _mm_blend_epi16(t0,t1,0x03); \ + buf = _mm_shuffle_epi32(t2, _MM_SHUFFLE(2,1,3,0)); + + #define BLAKE2S_LOAD_MSG_6_3(buf) \ + t0 = _mm_unpacklo_epi64(m0,m2); \ + t1 = _mm_srli_si128(m1, 4); \ + buf = _mm_shuffle_epi32(_mm_blend_epi16(t0,t1,0x0C), _MM_SHUFFLE(2,3,1,0)); + + #define BLAKE2S_LOAD_MSG_6_4(buf) \ + t0 = _mm_unpackhi_epi32(m1,m2); \ + t1 = _mm_unpackhi_epi64(m0,t0); \ + buf = _mm_shuffle_epi32(t1, _MM_SHUFFLE(3,0,1,2)); + + #define BLAKE2S_LOAD_MSG_7_1(buf) \ + t0 = _mm_unpackhi_epi32(m0,m1); \ + t1 = _mm_blend_epi16(t0,m3,0x0F); \ + buf = _mm_shuffle_epi32(t1,_MM_SHUFFLE(2,0,3,1)); + + #define BLAKE2S_LOAD_MSG_7_2(buf) \ + t0 = _mm_blend_epi16(m2,m3,0x30); \ + t1 = _mm_srli_si128(m0,4); \ + t2 = _mm_blend_epi16(t0,t1,0x03); \ + buf = _mm_shuffle_epi32(t2, _MM_SHUFFLE(1,0,2,3)); + + #define BLAKE2S_LOAD_MSG_7_3(buf) \ + t0 = _mm_unpackhi_epi64(m0,m3); \ + t1 = _mm_unpacklo_epi64(m1,m2); \ + t2 = _mm_blend_epi16(t0,t1,0x3C); \ + buf = _mm_shuffle_epi32(t2,_MM_SHUFFLE(0,2,3,1)); + + #define BLAKE2S_LOAD_MSG_7_4(buf) \ + t0 = _mm_unpacklo_epi32(m0,m1); \ + t1 = _mm_unpackhi_epi32(m1,m2); \ + buf = _mm_unpacklo_epi64(t0,t1); + + #define BLAKE2S_LOAD_MSG_8_1(buf) \ + t0 = _mm_unpackhi_epi32(m1,m3); \ + t1 = _mm_unpacklo_epi64(t0,m0); \ + t2 = _mm_blend_epi16(t1,m2,0xC0); \ + buf = _mm_shufflehi_epi16(t2,_MM_SHUFFLE(1,0,3,2)); + + #define BLAKE2S_LOAD_MSG_8_2(buf) \ + t0 = _mm_unpackhi_epi32(m0,m3); \ + t1 = _mm_blend_epi16(m2,t0,0xF0); \ + buf = _mm_shuffle_epi32(t1,_MM_SHUFFLE(0,2,1,3)); + + #define BLAKE2S_LOAD_MSG_8_3(buf) \ + t0 = _mm_blend_epi16(m2,m0,0x0C); \ + t1 = _mm_slli_si128(t0,4); \ + buf = _mm_blend_epi16(t1,m3,0x0F); + + #define BLAKE2S_LOAD_MSG_8_4(buf) \ + t0 = _mm_blend_epi16(m1,m0,0x30); \ + buf = _mm_shuffle_epi32(t0,_MM_SHUFFLE(1,0,3,2)); + + #define BLAKE2S_LOAD_MSG_9_1(buf) \ + t0 = _mm_blend_epi16(m0,m2,0x03); \ + t1 = _mm_blend_epi16(m1,m2,0x30); \ + t2 = 
_mm_blend_epi16(t1,t0,0x0F); \ + buf = _mm_shuffle_epi32(t2,_MM_SHUFFLE(1,3,0,2)); + + #define BLAKE2S_LOAD_MSG_9_2(buf) \ + t0 = _mm_slli_si128(m0,4); \ + t1 = _mm_blend_epi16(m1,t0,0xC0); \ + buf = _mm_shuffle_epi32(t1,_MM_SHUFFLE(1,2,0,3)); + + #define BLAKE2S_LOAD_MSG_9_3(buf) \ + t0 = _mm_unpackhi_epi32(m0,m3); \ + t1 = _mm_unpacklo_epi32(m2,m3); \ + t2 = _mm_unpackhi_epi64(t0,t1); \ + buf = _mm_shuffle_epi32(t2,_MM_SHUFFLE(3,0,2,1)); + + #define BLAKE2S_LOAD_MSG_9_4(buf) \ + t0 = _mm_blend_epi16(m3,m2,0xC0); \ + t1 = _mm_unpacklo_epi32(m0,m3); \ + t2 = _mm_blend_epi16(t0,t1,0x0F); \ + buf = _mm_shuffle_epi32(t2,_MM_SHUFFLE(0,1,2,3)); + +//#define _mm_roti_epi32(r, c) \ +// _mm_xor_si128(_mm_srli_epi32( (r), -(c) ), \ +// _mm_slli_epi32( (r), 32-(-(c)) )) + +#define _mm_roti_epi32(r, c) ( \ + (8==-(c)) ? _mm_shuffle_epi8(r,r8) \ + : (16==-(c)) ? _mm_shuffle_epi8(r,r16) \ + : _mm_xor_si128(_mm_srli_epi32( (r), -(c) ), \ + _mm_slli_epi32( (r), 32-(-(c)) )) ) + +#define BLAKE2S_G1(row1,row2,row3,row4,buf) \ + row1 = _mm_add_epi32( _mm_add_epi32( row1, buf), row2 ); \ + row4 = _mm_xor_si128( row4, row1 ); \ + row4 = _mm_roti_epi32(row4, -16); \ + row3 = _mm_add_epi32( row3, row4 ); \ + row2 = _mm_xor_si128( row2, row3 ); \ + row2 = _mm_roti_epi32(row2, -12); + +#define BLAKE2S_G2(row1,row2,row3,row4,buf) \ + row1 = _mm_add_epi32( _mm_add_epi32( row1, buf), row2 ); \ + row4 = _mm_xor_si128( row4, row1 ); \ + row4 = _mm_roti_epi32(row4, -8); \ + row3 = _mm_add_epi32( row3, row4 ); \ + row2 = _mm_xor_si128( row2, row3 ); \ + row2 = _mm_roti_epi32(row2, -7); + +#define DIAGONALIZE(row1,row2,row3,row4) \ + row4 = _mm_shuffle_epi32( row4, _MM_SHUFFLE(2,1,0,3) ); \ + row3 = _mm_shuffle_epi32( row3, _MM_SHUFFLE(1,0,3,2) ); \ + row2 = _mm_shuffle_epi32( row2, _MM_SHUFFLE(0,3,2,1) ); + +#define UNDIAGONALIZE(row1,row2,row3,row4) \ + row4 = _mm_shuffle_epi32( row4, _MM_SHUFFLE(0,3,2,1) ); \ + row3 = _mm_shuffle_epi32( row3, _MM_SHUFFLE(1,0,3,2) ); \ + row2 = _mm_shuffle_epi32( row2, _MM_SHUFFLE(2,1,0,3) ); + +#define BLAKE2S_ROUND(r) \ + BLAKE2S_LOAD_MSG_ ##r ##_1(buf1); \ + BLAKE2S_G1(row1,row2,row3,row4,buf1); \ + BLAKE2S_LOAD_MSG_ ##r ##_2(buf2); \ + BLAKE2S_G2(row1,row2,row3,row4,buf2); \ + DIAGONALIZE(row1,row2,row3,row4); \ + BLAKE2S_LOAD_MSG_ ##r ##_3(buf3); \ + BLAKE2S_G1(row1,row2,row3,row4,buf3); \ + BLAKE2S_LOAD_MSG_ ##r ##_4(buf4); \ + BLAKE2S_G2(row1,row2,row3,row4,buf4); \ + UNDIAGONALIZE(row1,row2,row3,row4); + + __m128i row1, row2, row3, row4; + __m128i buf1, buf2, buf3, buf4; + __m128i t0, t1, t2, ff0, ff1; + + const __m128i r8 = _mm_set_epi8( 12, 15, 14, 13, 8, 11, 10, 9, 4, 7, 6, 5, 0, 3, 2, 1 ); + const __m128i r16 = _mm_set_epi8( 13, 12, 15, 14, 9, 8, 11, 10, 5, 4, 7, 6, 1, 0, 3, 2 ); + + const __m128i m0 = LOADU( input + 00 ); + const __m128i m1 = LOADU( input + 16 ); + const __m128i m2 = LOADU( input + 32 ); + const __m128i m3 = LOADU( input + 48 ); + + row1 = ff0 = LOADU( &state.h[0] ); + row2 = ff1 = LOADU( &state.h[4] ); + row3 = LOADU( &BLAKE2S_IV[0] ); + row4 = _mm_xor_si128( LOADU( &BLAKE2S_IV[4] ), LOADU( &state.t[0] ) ); + BLAKE2S_ROUND( 0 ); + BLAKE2S_ROUND( 1 ); + BLAKE2S_ROUND( 2 ); + BLAKE2S_ROUND( 3 ); + BLAKE2S_ROUND( 4 ); + BLAKE2S_ROUND( 5 ); + BLAKE2S_ROUND( 6 ); + BLAKE2S_ROUND( 7 ); + BLAKE2S_ROUND( 8 ); + BLAKE2S_ROUND( 9 ); + STOREU( &state.h[0], _mm_xor_si128( ff0, _mm_xor_si128( row1, row3 ) ) ); + STOREU( &state.h[4], _mm_xor_si128( ff1, _mm_xor_si128( row2, row4 ) ) ); } void BLAKE2_Compress64_SSE4(const byte* input, BLAKE2_State& state) { - __m128i 
row1l, row1h; - __m128i row2l, row2h; - __m128i row3l, row3h; - __m128i row4l, row4h; - __m128i b0, b1, t0, t1; - - const __m128i r16 = _mm_setr_epi8(2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15, 8, 9); - const __m128i r24 = _mm_setr_epi8(3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14, 15, 8, 9, 10); - - const __m128i m0 = _mm_loadu_si128(CONST_M128_CAST(input + 00)); - const __m128i m1 = _mm_loadu_si128(CONST_M128_CAST(input + 16)); - const __m128i m2 = _mm_loadu_si128(CONST_M128_CAST(input + 32)); - const __m128i m3 = _mm_loadu_si128(CONST_M128_CAST(input + 48)); - const __m128i m4 = _mm_loadu_si128(CONST_M128_CAST(input + 64)); - const __m128i m5 = _mm_loadu_si128(CONST_M128_CAST(input + 80)); - const __m128i m6 = _mm_loadu_si128(CONST_M128_CAST(input + 96)); - const __m128i m7 = _mm_loadu_si128(CONST_M128_CAST(input + 112)); - - row1l = _mm_loadu_si128(CONST_M128_CAST(&state.h[0])); - row1h = _mm_loadu_si128(CONST_M128_CAST(&state.h[2])); - row2l = _mm_loadu_si128(CONST_M128_CAST(&state.h[4])); - row2h = _mm_loadu_si128(CONST_M128_CAST(&state.h[6])); - row3l = _mm_loadu_si128(CONST_M128_CAST(&BLAKE2B_IV[0])); - row3h = _mm_loadu_si128(CONST_M128_CAST(&BLAKE2B_IV[2])); - row4l = _mm_xor_si128(_mm_loadu_si128(CONST_M128_CAST(&BLAKE2B_IV[4])), _mm_loadu_si128(CONST_M128_CAST(&state.t[0]))); - row4h = _mm_xor_si128(_mm_loadu_si128(CONST_M128_CAST(&BLAKE2B_IV[6])), _mm_loadu_si128(CONST_M128_CAST(&state.f[0]))); - - b0 = _mm_unpacklo_epi64(m0, m1); - b1 = _mm_unpacklo_epi64(m2, m3); - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_unpackhi_epi64(m0, m1); - b1 = _mm_unpackhi_epi64(m2, m3); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2h, row2l, 8); - t1 = _mm_alignr_epi8(row2l, row2h, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4h, row4l, 8); - t1 = _mm_alignr_epi8(row4l, row4h, 8); - row4l = t1, row4h = t0; - - b0 = _mm_unpacklo_epi64(m4, m5); - b1 = _mm_unpacklo_epi64(m6, m7); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - 
row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_unpackhi_epi64(m4, m5); - b1 = _mm_unpackhi_epi64(m6, m7); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2l, row2h, 8); - t1 = _mm_alignr_epi8(row2h, row2l, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4l, row4h, 8); - t1 = _mm_alignr_epi8(row4h, row4l, 8); - row4l = t1, row4h = t0; - - b0 = _mm_unpacklo_epi64(m7, m2); - b1 = _mm_unpackhi_epi64(m4, m6); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_unpacklo_epi64(m5, m4); - b1 = _mm_alignr_epi8(m3, m7, 8); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2h, row2l, 8); - t1 = _mm_alignr_epi8(row2l, row2h, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4h, row4l, 8); - t1 = _mm_alignr_epi8(row4l, row4h, 8); - row4l = t1, row4h = t0; - - b0 = _mm_shuffle_epi32(m0, _MM_SHUFFLE(1,0,3,2)); - b1 = _mm_unpackhi_epi64(m5, m2); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_unpacklo_epi64(m6, m1); - b1 = _mm_unpackhi_epi64(m3, m1); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = 
_mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2l, row2h, 8); - t1 = _mm_alignr_epi8(row2h, row2l, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4l, row4h, 8); - t1 = _mm_alignr_epi8(row4h, row4l, 8); - row4l = t1, row4h = t0; - - b0 = _mm_alignr_epi8(m6, m5, 8); - b1 = _mm_unpackhi_epi64(m2, m7); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_unpacklo_epi64(m4, m0); - b1 = _mm_blend_epi16(m1, m6, 0xF0); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2h, row2l, 8); - t1 = _mm_alignr_epi8(row2l, row2h, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4h, row4l, 8); - t1 = _mm_alignr_epi8(row4l, row4h, 8); - row4l = t1, row4h = t0; - - b0 = _mm_blend_epi16(m5, m1, 0xF0); - b1 = _mm_unpackhi_epi64(m3, m4); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_unpacklo_epi64(m7, m3); - b1 = _mm_alignr_epi8(m2, m0, 8); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2l, row2h, 8); - t1 = _mm_alignr_epi8(row2h, row2l, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4l, row4h, 8); - 
t1 = _mm_alignr_epi8(row4h, row4l, 8); - row4l = t1, row4h = t0; - - b0 = _mm_unpackhi_epi64(m3, m1); - b1 = _mm_unpackhi_epi64(m6, m5); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_unpackhi_epi64(m4, m0); - b1 = _mm_unpacklo_epi64(m6, m7); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2h, row2l, 8); - t1 = _mm_alignr_epi8(row2l, row2h, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4h, row4l, 8); - t1 = _mm_alignr_epi8(row4l, row4h, 8); - row4l = t1, row4h = t0; - - b0 = _mm_blend_epi16(m1, m2, 0xF0); - b1 = _mm_blend_epi16(m2, m7, 0xF0); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_unpacklo_epi64(m3, m5); - b1 = _mm_unpacklo_epi64(m0, m4); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2l, row2h, 8); - t1 = _mm_alignr_epi8(row2h, row2l, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4l, row4h, 8); - t1 = _mm_alignr_epi8(row4h, row4l, 8); - row4l = t1, row4h = t0; - - b0 = _mm_unpackhi_epi64(m4, m2); - b1 = _mm_unpacklo_epi64(m1, m5); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = 
_mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_blend_epi16(m0, m3, 0xF0); - b1 = _mm_blend_epi16(m2, m7, 0xF0); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2h, row2l, 8); - t1 = _mm_alignr_epi8(row2l, row2h, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4h, row4l, 8); - t1 = _mm_alignr_epi8(row4l, row4h, 8); - row4l = t1, row4h = t0; - - b0 = _mm_blend_epi16(m7, m5, 0xF0); - b1 = _mm_blend_epi16(m3, m1, 0xF0); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_alignr_epi8(m6, m0, 8); - b1 = _mm_blend_epi16(m4, m6, 0xF0); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2l, row2h, 8); - t1 = _mm_alignr_epi8(row2h, row2l, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4l, row4h, 8); - t1 = _mm_alignr_epi8(row4h, row4l, 8); - row4l = t1, row4h = t0; - - b0 = _mm_unpacklo_epi64(m1, m3); - b1 = _mm_unpacklo_epi64(m0, m4); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_unpacklo_epi64(m6, m5); - b1 = _mm_unpackhi_epi64(m5, m1); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, 
row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2h, row2l, 8); - t1 = _mm_alignr_epi8(row2l, row2h, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4h, row4l, 8); - t1 = _mm_alignr_epi8(row4l, row4h, 8); - row4l = t1, row4h = t0; - - b0 = _mm_blend_epi16(m2, m3, 0xF0); - b1 = _mm_unpackhi_epi64(m7, m0); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_unpackhi_epi64(m6, m2); - b1 = _mm_blend_epi16(m7, m4, 0xF0); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = _mm_alignr_epi8(row2l, row2h, 8); - t1 = _mm_alignr_epi8(row2h, row2l, 8); - row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0; - t0 = _mm_alignr_epi8(row4l, row4h, 8); - t1 = _mm_alignr_epi8(row4h, row4l, 8); - row4l = t1, row4h = t0; - - b0 = _mm_blend_epi16(m6, m0, 0xF0); - b1 = _mm_unpacklo_epi64(m7, m2); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1)); - row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1)); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_shuffle_epi8(row2l, r24); - row2h = _mm_shuffle_epi8(row2h, r24); - - b0 = _mm_unpackhi_epi64(m2, m7); - b1 = _mm_alignr_epi8(m5, m6, 8); - - row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); - row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); - row4l = _mm_xor_si128(row4l, row1l); - row4h = _mm_xor_si128(row4h, row1h); - row4l = _mm_shuffle_epi8(row4l, r16); - row4h = _mm_shuffle_epi8(row4h, r16); - row3l = _mm_add_epi64(row3l, row4l); - row3h = _mm_add_epi64(row3h, row4h); - row2l = _mm_xor_si128(row2l, row3l); - row2h = _mm_xor_si128(row2h, row3h); - row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l)); - row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h)); - - t0 = 
_mm_alignr_epi8(row2h, row2l, 8);
-    t1 = _mm_alignr_epi8(row2l, row2h, 8);
-    row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0;
-    t0 = _mm_alignr_epi8(row4h, row4l, 8);
-    t1 = _mm_alignr_epi8(row4l, row4h, 8);
-    row4l = t1, row4h = t0;
-
-    b0 = _mm_unpacklo_epi64(m0, m3);
-    b1 = _mm_shuffle_epi32(m4, _MM_SHUFFLE(1,0,3,2));
-
-    row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l);
-    row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h);
-    row4l = _mm_xor_si128(row4l, row1l);
-    row4h = _mm_xor_si128(row4h, row1h);
-    row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1));
-    row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1));
-    row3l = _mm_add_epi64(row3l, row4l);
-    row3h = _mm_add_epi64(row3h, row4h);
-    row2l = _mm_xor_si128(row2l, row3l);
-    row2h = _mm_xor_si128(row2h, row3h);
-    row2l = _mm_shuffle_epi8(row2l, r24);
-    row2h = _mm_shuffle_epi8(row2h, r24);
-
-    b0 = _mm_unpackhi_epi64(m3, m1);
-    b1 = _mm_blend_epi16(m1, m5, 0xF0);
-
-    row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l);
-    row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h);
-    row4l = _mm_xor_si128(row4l, row1l);
-    row4h = _mm_xor_si128(row4h, row1h);
-    row4l = _mm_shuffle_epi8(row4l, r16);
-    row4h = _mm_shuffle_epi8(row4h, r16);
-    row3l = _mm_add_epi64(row3l, row4l);
-    row3h = _mm_add_epi64(row3h, row4h);
-    row2l = _mm_xor_si128(row2l, row3l);
-    row2h = _mm_xor_si128(row2h, row3h);
-    row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l));
-    row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h));
-
-    t0 = _mm_alignr_epi8(row2l, row2h, 8);
-    t1 = _mm_alignr_epi8(row2h, row2l, 8);
-    row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0;
-    t0 = _mm_alignr_epi8(row4l, row4h, 8);
-    t1 = _mm_alignr_epi8(row4h, row4l, 8);
-    row4l = t1, row4h = t0;
-
-    b0 = _mm_unpackhi_epi64(m6, m3);
-    b1 = _mm_blend_epi16(m6, m1, 0xF0);
-
-    row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l);
-    row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h);
-    row4l = _mm_xor_si128(row4l, row1l);
-    row4h = _mm_xor_si128(row4h, row1h);
-    row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1));
-    row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1));
-    row3l = _mm_add_epi64(row3l, row4l);
-    row3h = _mm_add_epi64(row3h, row4h);
-    row2l = _mm_xor_si128(row2l, row3l);
-    row2h = _mm_xor_si128(row2h, row3h);
-    row2l = _mm_shuffle_epi8(row2l, r24);
-    row2h = _mm_shuffle_epi8(row2h, r24);
-
-    b0 = _mm_alignr_epi8(m7, m5, 8);
-    b1 = _mm_unpackhi_epi64(m0, m4);
-
-    row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l);
-    row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h);
-    row4l = _mm_xor_si128(row4l, row1l);
-    row4h = _mm_xor_si128(row4h, row1h);
-    row4l = _mm_shuffle_epi8(row4l, r16);
-    row4h = _mm_shuffle_epi8(row4h, r16);
-    row3l = _mm_add_epi64(row3l, row4l);
-    row3h = _mm_add_epi64(row3h, row4h);
-    row2l = _mm_xor_si128(row2l, row3l);
-    row2h = _mm_xor_si128(row2h, row3h);
-    row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l));
-    row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h));
-
-    t0 = _mm_alignr_epi8(row2h, row2l, 8);
-    t1 = _mm_alignr_epi8(row2l, row2h, 8);
-    row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0;
-    t0 = _mm_alignr_epi8(row4h, row4l, 8);
-    t1 = _mm_alignr_epi8(row4l, row4h, 8);
-    row4l = t1, row4h = t0;
-
-    b0 = _mm_unpackhi_epi64(m2, m7);
-    b1 = _mm_unpacklo_epi64(m4, m1);
-
-    row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l);
-    row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h);
-    row4l = _mm_xor_si128(row4l, row1l);
-    row4h = _mm_xor_si128(row4h, row1h);
-    row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1));
-    row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1));
-    row3l = _mm_add_epi64(row3l, row4l);
-    row3h = _mm_add_epi64(row3h, row4h);
-    row2l = _mm_xor_si128(row2l, row3l);
-    row2h = _mm_xor_si128(row2h, row3h);
-    row2l = _mm_shuffle_epi8(row2l, r24);
-    row2h = _mm_shuffle_epi8(row2h, r24);
-
-    b0 = _mm_unpacklo_epi64(m0, m2);
-    b1 = _mm_unpacklo_epi64(m3, m5);
-
-    row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l);
-    row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h);
-    row4l = _mm_xor_si128(row4l, row1l);
-    row4h = _mm_xor_si128(row4h, row1h);
-    row4l = _mm_shuffle_epi8(row4l, r16);
-    row4h = _mm_shuffle_epi8(row4h, r16);
-    row3l = _mm_add_epi64(row3l, row4l);
-    row3h = _mm_add_epi64(row3h, row4h);
-    row2l = _mm_xor_si128(row2l, row3l);
-    row2h = _mm_xor_si128(row2h, row3h);
-    row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l));
-    row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h));
-
-    t0 = _mm_alignr_epi8(row2l, row2h, 8);
-    t1 = _mm_alignr_epi8(row2h, row2l, 8);
-    row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0;
-    t0 = _mm_alignr_epi8(row4l, row4h, 8);
-    t1 = _mm_alignr_epi8(row4h, row4l, 8);
-    row4l = t1, row4h = t0;
-
-    b0 = _mm_unpacklo_epi64(m3, m7);
-    b1 = _mm_alignr_epi8(m0, m5, 8);
-
-    row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l);
-    row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h);
-    row4l = _mm_xor_si128(row4l, row1l);
-    row4h = _mm_xor_si128(row4h, row1h);
-    row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1));
-    row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1));
-    row3l = _mm_add_epi64(row3l, row4l);
-    row3h = _mm_add_epi64(row3h, row4h);
-    row2l = _mm_xor_si128(row2l, row3l);
-    row2h = _mm_xor_si128(row2h, row3h);
-    row2l = _mm_shuffle_epi8(row2l, r24);
-    row2h = _mm_shuffle_epi8(row2h, r24);
-
-    b0 = _mm_unpackhi_epi64(m7, m4);
-    b1 = _mm_alignr_epi8(m4, m1, 8);
-
-    row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l);
-    row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h);
-    row4l = _mm_xor_si128(row4l, row1l);
-    row4h = _mm_xor_si128(row4h, row1h);
-    row4l = _mm_shuffle_epi8(row4l, r16);
-    row4h = _mm_shuffle_epi8(row4h, r16);
-    row3l = _mm_add_epi64(row3l, row4l);
-    row3h = _mm_add_epi64(row3h, row4h);
-    row2l = _mm_xor_si128(row2l, row3l);
-    row2h = _mm_xor_si128(row2h, row3h);
-    row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l));
-    row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h));
-
-    t0 = _mm_alignr_epi8(row2h, row2l, 8);
-    t1 = _mm_alignr_epi8(row2l, row2h, 8);
-    row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0;
-    t0 = _mm_alignr_epi8(row4h, row4l, 8);
-    t1 = _mm_alignr_epi8(row4l, row4h, 8);
-    row4l = t1, row4h = t0;
-
-    b0 = m6;
-    b1 = _mm_alignr_epi8(m5, m0, 8);
-
-    row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l);
-    row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h);
-    row4l = _mm_xor_si128(row4l, row1l);
-    row4h = _mm_xor_si128(row4h, row1h);
-    row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1));
-    row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1));
-    row3l = _mm_add_epi64(row3l, row4l);
-    row3h = _mm_add_epi64(row3h, row4h);
-    row2l = _mm_xor_si128(row2l, row3l);
-    row2h = _mm_xor_si128(row2h, row3h);
-    row2l = _mm_shuffle_epi8(row2l, r24);
-    row2h = _mm_shuffle_epi8(row2h, r24);
-
-    b0 = _mm_blend_epi16(m1, m3, 0xF0);
-    b1 = m2;
-
-    row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l);
-    row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h);
-    row4l = _mm_xor_si128(row4l, row1l);
-    row4h = _mm_xor_si128(row4h, row1h);
-    row4l = _mm_shuffle_epi8(row4l, r16);
-    row4h = _mm_shuffle_epi8(row4h, r16);
-    row3l = _mm_add_epi64(row3l, row4l);
-    row3h = _mm_add_epi64(row3h, row4h);
-    row2l = _mm_xor_si128(row2l, row3l);
-    row2h = _mm_xor_si128(row2h, row3h);
-    row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l));
-    row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h));
-
-    t0 = _mm_alignr_epi8(row2l, row2h, 8);
-    t1 = _mm_alignr_epi8(row2h, row2l, 8);
-    row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0;
-    t0 = _mm_alignr_epi8(row4l, row4h, 8);
-    t1 = _mm_alignr_epi8(row4h, row4l, 8);
-    row4l = t1, row4h = t0;
-
-    b0 = _mm_unpacklo_epi64(m5, m4);
-    b1 = _mm_unpackhi_epi64(m3, m0);
-
-    row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l);
-    row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h);
-    row4l = _mm_xor_si128(row4l, row1l);
-    row4h = _mm_xor_si128(row4h, row1h);
-    row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1));
-    row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1));
-    row3l = _mm_add_epi64(row3l, row4l);
-    row3h = _mm_add_epi64(row3h, row4h);
-    row2l = _mm_xor_si128(row2l, row3l);
-    row2h = _mm_xor_si128(row2h, row3h);
-    row2l = _mm_shuffle_epi8(row2l, r24);
-    row2h = _mm_shuffle_epi8(row2h, r24);
-
-    b0 = _mm_unpacklo_epi64(m1, m2);
-    b1 = _mm_blend_epi16(m3, m2, 0xF0);
-
-    row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l);
-    row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h);
-    row4l = _mm_xor_si128(row4l, row1l);
-    row4h = _mm_xor_si128(row4h, row1h);
-    row4l = _mm_shuffle_epi8(row4l, r16);
-    row4h = _mm_shuffle_epi8(row4h, r16);
-    row3l = _mm_add_epi64(row3l, row4l);
-    row3h = _mm_add_epi64(row3h, row4h);
-    row2l = _mm_xor_si128(row2l, row3l);
-    row2h = _mm_xor_si128(row2h, row3h);
-    row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l));
-    row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h));
-
-    t0 = _mm_alignr_epi8(row2h, row2l, 8);
-    t1 = _mm_alignr_epi8(row2l, row2h, 8);
-    row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0;
-    t0 = _mm_alignr_epi8(row4h, row4l, 8);
-    t1 = _mm_alignr_epi8(row4l, row4h, 8);
-    row4l = t1, row4h = t0;
-
-    b0 = _mm_unpackhi_epi64(m7, m4);
-    b1 = _mm_unpackhi_epi64(m1, m6);
-
-    row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l);
-    row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h);
-    row4l = _mm_xor_si128(row4l, row1l);
-    row4h = _mm_xor_si128(row4h, row1h);
-    row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1));
-    row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1));
-    row3l = _mm_add_epi64(row3l, row4l);
-    row3h = _mm_add_epi64(row3h, row4h);
-    row2l = _mm_xor_si128(row2l, row3l);
-    row2h = _mm_xor_si128(row2h, row3h);
-    row2l = _mm_shuffle_epi8(row2l, r24);
-    row2h = _mm_shuffle_epi8(row2h, r24);
-
-    b0 = _mm_alignr_epi8(m7, m5, 8);
-    b1 = _mm_unpacklo_epi64(m6, m0);
-
-    row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l);
-    row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h);
-    row4l = _mm_xor_si128(row4l, row1l);
-    row4h = _mm_xor_si128(row4h, row1h);
-    row4l = _mm_shuffle_epi8(row4l, r16);
-    row4h = _mm_shuffle_epi8(row4h, r16);
-    row3l = _mm_add_epi64(row3l, row4l);
-    row3h = _mm_add_epi64(row3h, row4h);
-    row2l = _mm_xor_si128(row2l, row3l);
-    row2h = _mm_xor_si128(row2h, row3h);
-    row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l));
-    row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h));
-
-    t0 = _mm_alignr_epi8(row2l, row2h, 8);
-    t1 = _mm_alignr_epi8(row2h, row2l, 8);
-    row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0;
-    t0 = _mm_alignr_epi8(row4l, row4h, 8);
-    t1 = _mm_alignr_epi8(row4h, row4l, 8);
-    row4l = t1, row4h = t0;
-
-    b0 = _mm_unpacklo_epi64(m0, m1);
-    b1 = _mm_unpacklo_epi64(m2, m3);
-
-    row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l);
-    row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h);
-    row4l = _mm_xor_si128(row4l, row1l);
-    row4h = _mm_xor_si128(row4h, row1h);
-    row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1));
-    row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1));
-    row3l = _mm_add_epi64(row3l, row4l);
-    row3h = _mm_add_epi64(row3h, row4h);
-    row2l = _mm_xor_si128(row2l, row3l);
-    row2h = _mm_xor_si128(row2h, row3h);
-    row2l = _mm_shuffle_epi8(row2l, r24);
-    row2h = _mm_shuffle_epi8(row2h, r24);
-
-    b0 = _mm_unpackhi_epi64(m0, m1);
-    b1 = _mm_unpackhi_epi64(m2, m3);
-
-    row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l);
-    row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h);
-    row4l = _mm_xor_si128(row4l, row1l);
-    row4h = _mm_xor_si128(row4h, row1h);
-    row4l = _mm_shuffle_epi8(row4l, r16);
-    row4h = _mm_shuffle_epi8(row4h, r16);
-    row3l = _mm_add_epi64(row3l, row4l);
-    row3h = _mm_add_epi64(row3h, row4h);
-    row2l = _mm_xor_si128(row2l, row3l);
-    row2h = _mm_xor_si128(row2h, row3h);
-    row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l));
-    row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h));
-
-    t0 = _mm_alignr_epi8(row2h, row2l, 8);
-    t1 = _mm_alignr_epi8(row2l, row2h, 8);
-    row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0;
-    t0 = _mm_alignr_epi8(row4h, row4l, 8);
-    t1 = _mm_alignr_epi8(row4l, row4h, 8);
-    row4l = t1, row4h = t0;
-
-    b0 = _mm_unpacklo_epi64(m4, m5);
-    b1 = _mm_unpacklo_epi64(m6, m7);
-
-    row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l);
-    row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h);
-    row4l = _mm_xor_si128(row4l, row1l);
-    row4h = _mm_xor_si128(row4h, row1h);
-    row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1));
-    row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1));
-    row3l = _mm_add_epi64(row3l, row4l);
-    row3h = _mm_add_epi64(row3h, row4h);
-    row2l = _mm_xor_si128(row2l, row3l);
-    row2h = _mm_xor_si128(row2h, row3h);
-    row2l = _mm_shuffle_epi8(row2l, r24);
-    row2h = _mm_shuffle_epi8(row2h, r24);
-
-    b0 = _mm_unpackhi_epi64(m4, m5);
-    b1 = _mm_unpackhi_epi64(m6, m7);
-
-    row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l);
-    row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h);
-    row4l = _mm_xor_si128(row4l, row1l);
-    row4h = _mm_xor_si128(row4h, row1h);
-    row4l = _mm_shuffle_epi8(row4l, r16);
-    row4h = _mm_shuffle_epi8(row4h, r16);
-    row3l = _mm_add_epi64(row3l, row4l);
-    row3h = _mm_add_epi64(row3h, row4h);
-    row2l = _mm_xor_si128(row2l, row3l);
-    row2h = _mm_xor_si128(row2h, row3h);
-    row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l));
-    row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h));
-
-    t0 = _mm_alignr_epi8(row2l, row2h, 8);
-    t1 = _mm_alignr_epi8(row2h, row2l, 8);
-    row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0;
-    t0 = _mm_alignr_epi8(row4l, row4h, 8);
-    t1 = _mm_alignr_epi8(row4h, row4l, 8);
-    row4l = t1, row4h = t0;
-
-    b0 = _mm_unpacklo_epi64(m7, m2);
-    b1 = _mm_unpackhi_epi64(m4, m6);
-
-    row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l);
-    row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h);
-    row4l = _mm_xor_si128(row4l, row1l);
-    row4h = _mm_xor_si128(row4h, row1h);
-    row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1));
-    row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1));
-    row3l = _mm_add_epi64(row3l, row4l);
-    row3h = _mm_add_epi64(row3h, row4h);
-    row2l = _mm_xor_si128(row2l, row3l);
-    row2h = _mm_xor_si128(row2h, row3h);
-    row2l = _mm_shuffle_epi8(row2l, r24);
-    row2h = _mm_shuffle_epi8(row2h, r24);
-
-    b0 = _mm_unpacklo_epi64(m5, m4);
-    b1 = _mm_alignr_epi8(m3, m7, 8);
-
-    row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l);
-    row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h);
-    row4l = _mm_xor_si128(row4l, row1l);
-    row4h = _mm_xor_si128(row4h, row1h);
-    row4l = _mm_shuffle_epi8(row4l, r16);
-    row4h = _mm_shuffle_epi8(row4h, r16);
-    row3l = _mm_add_epi64(row3l, row4l);
-    row3h = _mm_add_epi64(row3h, row4h);
-    row2l = _mm_xor_si128(row2l, row3l);
-    row2h = _mm_xor_si128(row2h, row3h);
-    row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l));
-    row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h));
-
-    t0 = _mm_alignr_epi8(row2h, row2l, 8);
-    t1 = _mm_alignr_epi8(row2l, row2h, 8);
-    row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0;
-    t0 = _mm_alignr_epi8(row4h, row4l, 8);
-    t1 = _mm_alignr_epi8(row4l, row4h, 8);
-    row4l = t1, row4h = t0;
-
-    b0 = _mm_shuffle_epi32(m0, _MM_SHUFFLE(1,0,3,2));
-    b1 = _mm_unpackhi_epi64(m5, m2);
-
-    row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l);
-    row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h);
-    row4l = _mm_xor_si128(row4l, row1l);
-    row4h = _mm_xor_si128(row4h, row1h);
-    row4l = _mm_shuffle_epi32(row4l, _MM_SHUFFLE(2,3,0,1));
-    row4h = _mm_shuffle_epi32(row4h, _MM_SHUFFLE(2,3,0,1));
-    row3l = _mm_add_epi64(row3l, row4l);
-    row3h = _mm_add_epi64(row3h, row4h);
-    row2l = _mm_xor_si128(row2l, row3l);
-    row2h = _mm_xor_si128(row2h, row3h);
-    row2l = _mm_shuffle_epi8(row2l, r24);
-    row2h = _mm_shuffle_epi8(row2h, r24);
-
-    b0 = _mm_unpacklo_epi64(m6, m1);
-    b1 = _mm_unpackhi_epi64(m3, m1);
-
-    row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l);
-    row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h);
-    row4l = _mm_xor_si128(row4l, row1l);
-    row4h = _mm_xor_si128(row4h, row1h);
-    row4l = _mm_shuffle_epi8(row4l, r16);
-    row4h = _mm_shuffle_epi8(row4h, r16);
-    row3l = _mm_add_epi64(row3l, row4l);
-    row3h = _mm_add_epi64(row3h, row4h);
-    row2l = _mm_xor_si128(row2l, row3l);
-    row2h = _mm_xor_si128(row2h, row3h);
-    row2l = _mm_xor_si128(_mm_srli_epi64(row2l, 63), _mm_add_epi64(row2l, row2l));
-    row2h = _mm_xor_si128(_mm_srli_epi64(row2h, 63), _mm_add_epi64(row2h, row2h));
-
-    t0 = _mm_alignr_epi8(row2l, row2h, 8);
-    t1 = _mm_alignr_epi8(row2h, row2l, 8);
-    row2l = t0, row2h = t1, t0 = row3l, row3l = row3h, row3h = t0;
-    t0 = _mm_alignr_epi8(row4l, row4h, 8);
-    t1 = _mm_alignr_epi8(row4h, row4l, 8);
-    row4l = t1, row4h = t0;
-
-    row1l = _mm_xor_si128(row3l, row1l);
-    row1h = _mm_xor_si128(row3h, row1h);
-    _mm_storeu_si128(M128_CAST(&state.h[0]), _mm_xor_si128(_mm_loadu_si128(CONST_M128_CAST(&state.h[0])), row1l));
-    _mm_storeu_si128(M128_CAST(&state.h[2]), _mm_xor_si128(_mm_loadu_si128(CONST_M128_CAST(&state.h[2])), row1h));
-
-    row2l = _mm_xor_si128(row4l, row2l);
-    row2h = _mm_xor_si128(row4h, row2h);
-    _mm_storeu_si128(M128_CAST(&state.h[4]), _mm_xor_si128(_mm_loadu_si128(CONST_M128_CAST(&state.h[4])), row2l));
-    _mm_storeu_si128(M128_CAST(&state.h[6]), _mm_xor_si128(_mm_loadu_si128(CONST_M128_CAST(&state.h[6])), row2h));
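+    // The BLAKE2B_LOAD_MSG_<r>_<i> macros below gather the message words
+    // selected by BLAKE2b's sigma permutation for round <r>, G-pair <i>,
+    // using unpack/blend/alignr instead of scalar loads. BLAKE2b runs 12
+    // rounds but sigma has only 10 entries, so rounds 10 and 11 repeat the
+    // round 0 and 1 schedules (the _10_*/_11_* macros match _0_*/_1_*).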
+    #define BLAKE2B_LOAD_MSG_0_1(b0, b1) \
+    do { \
+      b0 = _mm_unpacklo_epi64(m0, m1); \
+      b1 = _mm_unpacklo_epi64(m2, m3); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_0_2(b0, b1) \
+    do { \
+      b0 = _mm_unpackhi_epi64(m0, m1); \
+      b1 = _mm_unpackhi_epi64(m2, m3); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_0_3(b0, b1) \
+    do { \
+      b0 = _mm_unpacklo_epi64(m4, m5); \
+      b1 = _mm_unpacklo_epi64(m6, m7); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_0_4(b0, b1) \
+    do { \
+      b0 = _mm_unpackhi_epi64(m4, m5); \
+      b1 = _mm_unpackhi_epi64(m6, m7); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_1_1(b0, b1) \
+    do { \
+      b0 = _mm_unpacklo_epi64(m7, m2); \
+      b1 = _mm_unpackhi_epi64(m4, m6); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_1_2(b0, b1) \
+    do { \
+      b0 = _mm_unpacklo_epi64(m5, m4); \
+      b1 = _mm_alignr_epi8(m3, m7, 8); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_1_3(b0, b1) \
+    do { \
+      b0 = _mm_shuffle_epi32(m0, _MM_SHUFFLE(1,0,3,2)); \
+      b1 = _mm_unpackhi_epi64(m5, m2); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_1_4(b0, b1) \
+    do { \
+      b0 = _mm_unpacklo_epi64(m6, m1); \
+      b1 = _mm_unpackhi_epi64(m3, m1); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_2_1(b0, b1) \
+    do { \
+      b0 = _mm_alignr_epi8(m6, m5, 8); \
+      b1 = _mm_unpackhi_epi64(m2, m7); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_2_2(b0, b1) \
+    do { \
+      b0 = _mm_unpacklo_epi64(m4, m0); \
+      b1 = _mm_blend_epi16(m1, m6, 0xF0); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_2_3(b0, b1) \
+    do { \
+      b0 = _mm_blend_epi16(m5, m1, 0xF0); \
+      b1 = _mm_unpackhi_epi64(m3, m4); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_2_4(b0, b1) \
+    do { \
+      b0 = _mm_unpacklo_epi64(m7, m3); \
+      b1 = _mm_alignr_epi8(m2, m0, 8); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_3_1(b0, b1) \
+    do { \
+      b0 = _mm_unpackhi_epi64(m3, m1); \
+      b1 = _mm_unpackhi_epi64(m6, m5); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_3_2(b0, b1) \
+    do { \
+      b0 = _mm_unpackhi_epi64(m4, m0); \
+      b1 = _mm_unpacklo_epi64(m6, m7); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_3_3(b0, b1) \
+    do { \
+      b0 = _mm_blend_epi16(m1, m2, 0xF0); \
+      b1 = _mm_blend_epi16(m2, m7, 0xF0); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_3_4(b0, b1) \
+    do { \
+      b0 = _mm_unpacklo_epi64(m3, m5); \
+      b1 = _mm_unpacklo_epi64(m0, m4); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_4_1(b0, b1) \
+    do { \
+      b0 = _mm_unpackhi_epi64(m4, m2); \
+      b1 = _mm_unpacklo_epi64(m1, m5); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_4_2(b0, b1) \
+    do { \
+      b0 = _mm_blend_epi16(m0, m3, 0xF0); \
+      b1 = _mm_blend_epi16(m2, m7, 0xF0); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_4_3(b0, b1) \
+    do { \
+      b0 = _mm_blend_epi16(m7, m5, 0xF0); \
+      b1 = _mm_blend_epi16(m3, m1, 0xF0); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_4_4(b0, b1) \
+    do { \
+      b0 = _mm_alignr_epi8(m6, m0, 8); \
+      b1 = _mm_blend_epi16(m4, m6, 0xF0); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_5_1(b0, b1) \
+    do { \
+      b0 = _mm_unpacklo_epi64(m1, m3); \
+      b1 = _mm_unpacklo_epi64(m0, m4); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_5_2(b0, b1) \
+    do { \
+      b0 = _mm_unpacklo_epi64(m6, m5); \
+      b1 = _mm_unpackhi_epi64(m5, m1); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_5_3(b0, b1) \
+    do { \
+      b0 = _mm_blend_epi16(m2, m3, 0xF0); \
+      b1 = _mm_unpackhi_epi64(m7, m0); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_5_4(b0, b1) \
+    do { \
+      b0 = _mm_unpackhi_epi64(m6, m2); \
+      b1 = _mm_blend_epi16(m7, m4, 0xF0); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_6_1(b0, b1) \
+    do { \
+      b0 = _mm_blend_epi16(m6, m0, 0xF0); \
+      b1 = _mm_unpacklo_epi64(m7, m2); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_6_2(b0, b1) \
+    do { \
+      b0 = _mm_unpackhi_epi64(m2, m7); \
+      b1 = _mm_alignr_epi8(m5, m6, 8); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_6_3(b0, b1) \
+    do { \
+      b0 = _mm_unpacklo_epi64(m0, m3); \
+      b1 = _mm_shuffle_epi32(m4, _MM_SHUFFLE(1,0,3,2)); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_6_4(b0, b1) \
+    do { \
+      b0 = _mm_unpackhi_epi64(m3, m1); \
+      b1 = _mm_blend_epi16(m1, m5, 0xF0); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_7_1(b0, b1) \
+    do { \
+      b0 = _mm_unpackhi_epi64(m6, m3); \
+      b1 = _mm_blend_epi16(m6, m1, 0xF0); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_7_2(b0, b1) \
+    do { \
+      b0 = _mm_alignr_epi8(m7, m5, 8); \
+      b1 = _mm_unpackhi_epi64(m0, m4); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_7_3(b0, b1) \
+    do { \
+      b0 = _mm_unpackhi_epi64(m2, m7); \
+      b1 = _mm_unpacklo_epi64(m4, m1); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_7_4(b0, b1) \
+    do { \
+      b0 = _mm_unpacklo_epi64(m0, m2); \
+      b1 = _mm_unpacklo_epi64(m3, m5); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_8_1(b0, b1) \
+    do { \
+      b0 = _mm_unpacklo_epi64(m3, m7); \
+      b1 = _mm_alignr_epi8(m0, m5, 8); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_8_2(b0, b1) \
+    do { \
+      b0 = _mm_unpackhi_epi64(m7, m4); \
+      b1 = _mm_alignr_epi8(m4, m1, 8); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_8_3(b0, b1) \
+    do { \
+      b0 = m6; \
+      b1 = _mm_alignr_epi8(m5, m0, 8); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_8_4(b0, b1) \
+    do { \
+      b0 = _mm_blend_epi16(m1, m3, 0xF0); \
+      b1 = m2; \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_9_1(b0, b1) \
+    do { \
+      b0 = _mm_unpacklo_epi64(m5, m4); \
+      b1 = _mm_unpackhi_epi64(m3, m0); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_9_2(b0, b1) \
+    do { \
+      b0 = _mm_unpacklo_epi64(m1, m2); \
+      b1 = _mm_blend_epi16(m3, m2, 0xF0); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_9_3(b0, b1) \
+    do { \
+      b0 = _mm_unpackhi_epi64(m7, m4); \
+      b1 = _mm_unpackhi_epi64(m1, m6); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_9_4(b0, b1) \
+    do { \
+      b0 = _mm_alignr_epi8(m7, m5, 8); \
+      b1 = _mm_unpacklo_epi64(m6, m0); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_10_1(b0, b1) \
+    do { \
+      b0 = _mm_unpacklo_epi64(m0, m1); \
+      b1 = _mm_unpacklo_epi64(m2, m3); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_10_2(b0, b1) \
+    do { \
+      b0 = _mm_unpackhi_epi64(m0, m1); \
+      b1 = _mm_unpackhi_epi64(m2, m3); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_10_3(b0, b1) \
+    do { \
+      b0 = _mm_unpacklo_epi64(m4, m5); \
+      b1 = _mm_unpacklo_epi64(m6, m7); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_10_4(b0, b1) \
+    do { \
+      b0 = _mm_unpackhi_epi64(m4, m5); \
+      b1 = _mm_unpackhi_epi64(m6, m7); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_11_1(b0, b1) \
+    do { \
+      b0 = _mm_unpacklo_epi64(m7, m2); \
+      b1 = _mm_unpackhi_epi64(m4, m6); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_11_2(b0, b1) \
+    do { \
+      b0 = _mm_unpacklo_epi64(m5, m4); \
+      b1 = _mm_alignr_epi8(m3, m7, 8); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_11_3(b0, b1) \
+    do { \
+      b0 = _mm_shuffle_epi32(m0, _MM_SHUFFLE(1,0,3,2)); \
+      b1 = _mm_unpackhi_epi64(m5, m2); \
+    } while(0)
+
+    #define BLAKE2B_LOAD_MSG_11_4(b0, b1) \
+    do { \
+      b0 = _mm_unpacklo_epi64(m6, m1); \
+      b1 = _mm_unpackhi_epi64(m3, m1); \
+    } while(0)
+
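+// Rotate a packed 64-bit lane right by the constant -(c). BLAKE2b only
+// rotates by 32, 24, 16 and 63: 32 is a 32-bit word shuffle, 24 and 16 are
+// byte shuffles through r24/r16, and 63 is (x >> 63) ^ (x + x), since a
+// left shift by one bit is an addition. The commented-out shift/xor form
+// below is the generic fallback.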
+//#define _mm_roti_epi64(r, c) \
+//    _mm_xor_si128(_mm_srli_epi64( (r), -(c) ), \
+//    _mm_slli_epi64( (r), 64-(-(c)) ))
+
+#define _mm_roti_epi64(x, c) \
+    (-(c) == 32) ? _mm_shuffle_epi32((x), _MM_SHUFFLE(2,3,0,1)) \
+    : (-(c) == 24) ? _mm_shuffle_epi8((x), r24) \
+    : (-(c) == 16) ? _mm_shuffle_epi8((x), r16) \
+    : (-(c) == 63) ? _mm_xor_si128(_mm_srli_epi64((x), -(c)), _mm_add_epi64((x), (x))) \
+    : _mm_xor_si128(_mm_srli_epi64((x), -(c)), _mm_slli_epi64((x), 64-(-(c))))
+
+#define BLAKE2B_G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1) \
+  row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); \
+  row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); \
+  \
+  row4l = _mm_xor_si128(row4l, row1l); \
+  row4h = _mm_xor_si128(row4h, row1h); \
+  \
+  row4l = _mm_roti_epi64(row4l, -32); \
+  row4h = _mm_roti_epi64(row4h, -32); \
+  \
+  row3l = _mm_add_epi64(row3l, row4l); \
+  row3h = _mm_add_epi64(row3h, row4h); \
+  \
+  row2l = _mm_xor_si128(row2l, row3l); \
+  row2h = _mm_xor_si128(row2h, row3h); \
+  \
+  row2l = _mm_roti_epi64(row2l, -24); \
+  row2h = _mm_roti_epi64(row2h, -24);
+
+#define G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1) \
+  row1l = _mm_add_epi64(_mm_add_epi64(row1l, b0), row2l); \
+  row1h = _mm_add_epi64(_mm_add_epi64(row1h, b1), row2h); \
+  \
+  row4l = _mm_xor_si128(row4l, row1l); \
+  row4h = _mm_xor_si128(row4h, row1h); \
+  \
+  row4l = _mm_roti_epi64(row4l, -16); \
+  row4h = _mm_roti_epi64(row4h, -16); \
+  \
+  row3l = _mm_add_epi64(row3l, row4l); \
+  row3h = _mm_add_epi64(row3h, row4h); \
+  \
+  row2l = _mm_xor_si128(row2l, row3l); \
+  row2h = _mm_xor_si128(row2h, row3h); \
+  \
+  row2l = _mm_roti_epi64(row2l, -63); \
+  row2h = _mm_roti_epi64(row2h, -63); \
+
+#define BLAKE2B_DIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h) \
+  t0 = row4l;\
+  t1 = row2l;\
+  row4l = row3l;\
+  row3l = row3h;\
+  row3h = row4l;\
+  row4l = _mm_unpackhi_epi64(row4h, _mm_unpacklo_epi64(t0, t0)); \
+  row4h = _mm_unpackhi_epi64(t0, _mm_unpacklo_epi64(row4h, row4h)); \
+  row2l = _mm_unpackhi_epi64(row2l, _mm_unpacklo_epi64(row2h, row2h)); \
+  row2h = _mm_unpackhi_epi64(row2h, _mm_unpacklo_epi64(t1, t1))
+
+#define BLAKE2B_UNDIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h) \
+  t0 = row3l;\
+  row3l = row3h;\
+  row3h = t0;\
+  t0 = row2l;\
+  t1 = row4l;\
+  row2l = _mm_unpackhi_epi64(row2h, _mm_unpacklo_epi64(row2l, row2l)); \
+  row2h = _mm_unpackhi_epi64(t0, _mm_unpacklo_epi64(row2h, row2h)); \
+  row4l = _mm_unpackhi_epi64(row4l, _mm_unpacklo_epi64(row4h, row4h)); \
+  row4h = _mm_unpackhi_epi64(row4h, _mm_unpacklo_epi64(t1, t1))
+
+#define BLAKE2B_ROUND(r) \
+  BLAKE2B_LOAD_MSG_ ##r ##_1(b0, b1); \
+  BLAKE2B_G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \
+  BLAKE2B_LOAD_MSG_ ##r ##_2(b0, b1); \
+  G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \
+  BLAKE2B_DIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); \
+  BLAKE2B_LOAD_MSG_ ##r ##_3(b0, b1); \
+  BLAKE2B_G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \
+  BLAKE2B_LOAD_MSG_ ##r ##_4(b0, b1); \
+  G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \
+  BLAKE2B_UNDIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h);
+
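+  // One BLAKE2B_ROUND applies G to the four columns of the 4x4 word state
+  // (BLAKE2B_G1 then G2), shifts the state onto its diagonals
+  // (BLAKE2B_DIAGONALIZE), applies G again, and shifts back
+  // (BLAKE2B_UNDIAGONALIZE). Each row*l/row*h pair holds one 4-word row.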
+  __m128i row1l, row1h;
+  __m128i row2l, row2h;
+  __m128i row3l, row3h;
+  __m128i row4l, row4h;
+  __m128i b0, b1;
+  __m128i t0, t1;
+
+  const __m128i r16 = _mm_setr_epi8( 2, 3, 4, 5, 6, 7, 0, 1, 10, 11, 12, 13, 14, 15, 8, 9 );
+  const __m128i r24 = _mm_setr_epi8( 3, 4, 5, 6, 7, 0, 1, 2, 11, 12, 13, 14, 15, 8, 9, 10 );
+
+  const __m128i m0 = LOADU( input + 00 );
+  const __m128i m1 = LOADU( input + 16 );
+  const __m128i m2 = LOADU( input + 32 );
+  const __m128i m3 = LOADU( input + 48 );
+  const __m128i m4 = LOADU( input + 64 );
+  const __m128i m5 = LOADU( input + 80 );
+  const __m128i m6 = LOADU( input + 96 );
+  const __m128i m7 = LOADU( input + 112 );
+
+  row1l = LOADU( &state.h[0] );
+  row1h = LOADU( &state.h[2] );
+  row2l = LOADU( &state.h[4] );
+  row2h = LOADU( &state.h[6] );
+  row3l = LOADU( &BLAKE2B_IV[0] );
+  row3h = LOADU( &BLAKE2B_IV[2] );
+  row4l = _mm_xor_si128( LOADU( &BLAKE2B_IV[4] ), LOADU( &state.t[0] ) );
+  row4h = _mm_xor_si128( LOADU( &BLAKE2B_IV[6] ), LOADU( &state.f[0] ) );
+  BLAKE2B_ROUND( 0 );
+  BLAKE2B_ROUND( 1 );
+  BLAKE2B_ROUND( 2 );
+  BLAKE2B_ROUND( 3 );
+  BLAKE2B_ROUND( 4 );
+  BLAKE2B_ROUND( 5 );
+  BLAKE2B_ROUND( 6 );
+  BLAKE2B_ROUND( 7 );
+  BLAKE2B_ROUND( 8 );
+  BLAKE2B_ROUND( 9 );
+  BLAKE2B_ROUND( 10 );
+  BLAKE2B_ROUND( 11 );
+  row1l = _mm_xor_si128( row3l, row1l );
+  row1h = _mm_xor_si128( row3h, row1h );
+  STOREU( &state.h[0], _mm_xor_si128( LOADU( &state.h[0] ), row1l ) );
+  STOREU( &state.h[2], _mm_xor_si128( LOADU( &state.h[2] ), row1h ) );
+  row2l = _mm_xor_si128( row4l, row2l );
+  row2h = _mm_xor_si128( row4h, row2h );
+  STOREU( &state.h[4], _mm_xor_si128( LOADU( &state.h[4] ), row2l ) );
+  STOREU( &state.h[6], _mm_xor_si128( LOADU( &state.h[6] ), row2h ) );
 }
 
 #endif // CRYPTOPP_SSE41_AVAILABLE
@@ -2077,7 +1266,7 @@ void BLAKE2_Compress64_NEON(const byte* input, BLAKE2_State& state
 
     #define vrorq_n_u64_63(x) veorq_u64(vaddq_u64(x, x), vshrq_n_u64(x, 63))
 
-    #define BLAKE2B_G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1) \
+    #define G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1) \
     do { \
       row1l = vaddq_u64(vaddq_u64(row1l, b0), row2l); \
       row1h = vaddq_u64(vaddq_u64(row1h, b1), row2h); \
@@ -2088,7 +1277,7 @@ void BLAKE2_Compress64_NEON(const byte* input, BLAKE2_State& state
       row2l = vrorq_n_u64_24(row2l); row2h = vrorq_n_u64_24(row2h); \
     } while(0)
 
-    #define BLAKE2B_G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1) \
+    #define G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1) \
    do { \
       row1l = vaddq_u64(vaddq_u64(row1l, b0), row2l); \
       row1h = vaddq_u64(vaddq_u64(row1h, b1), row2h); \
@@ -2121,14 +1310,14 @@ void BLAKE2_Compress64_NEON(const byte* input, BLAKE2_State& state
     do { \
       uint64x2_t b0, b1; \
       BLAKE2B_LOAD_MSG_ ##r ##_1(b0, b1); \
-      BLAKE2B_G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \
+      G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \
       BLAKE2B_LOAD_MSG_ ##r ##_2(b0, b1); \
-      BLAKE2B_G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \
+      G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \
       BLAKE2B_DIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); \
       BLAKE2B_LOAD_MSG_ ##r ##_3(b0, b1); \
-      BLAKE2B_G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \
+      G1(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \
      BLAKE2B_LOAD_MSG_ ##r ##_4(b0, b1); \
-      BLAKE2B_G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \
+      G2(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h,b0,b1); \
       BLAKE2B_UNDIAGONALIZE(row1l,row2l,row3l,row4l,row1h,row2h,row3h,row4h); \
     } while(0)
 