#if defined(CRYPTOPP_DISABLE_GCM_ASM)
# undef CRYPTOPP_X86_ASM_AVAILABLE
# undef CRYPTOPP_X32_ASM_AVAILABLE
# undef CRYPTOPP_X64_ASM_AVAILABLE
# undef CRYPTOPP_SSE2_ASM_AVAILABLE
#endif

#if (CRYPTOPP_SSE2_INTRIN_AVAILABLE)
# include <emmintrin.h>
# include <xmmintrin.h>
#endif

#if (CRYPTOPP_CLMUL_AVAILABLE)
# include <tmmintrin.h>
# include <wmmintrin.h>
#endif

#if (CRYPTOPP_ARM_NEON_AVAILABLE) && !defined(_M_ARM64)
# include <arm_neon.h>
#endif

#if (CRYPTOPP_ARM_ACLE_AVAILABLE)
# include <stdint.h>
# include <arm_acle.h>
#endif

#if defined(CRYPTOPP_ARM_PMULL_AVAILABLE)
# include "arm_simd.h"
#endif

#if defined(CRYPTOPP_ALTIVEC_AVAILABLE)
# include "ppc_simd.h"
#endif

#ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY
# include <signal.h>
# include <setjmp.h>
#endif

#ifndef EXCEPTION_EXECUTE_HANDLER
# define EXCEPTION_EXECUTE_HANDLER 1
#endif

// Clang workaround: route intrinsic pointer casts through void*
#define M128_CAST(x) ((__m128i *)(void *)(x))
#define CONST_M128_CAST(x) ((const __m128i *)(const void *)(x))

// GCC cast warning
#define UINT64X2_CAST(x) ((uint64x2_t *)(void *)(x))
#define CONST_UINT64X2_CAST(x) ((const uint64x2_t *)(const void *)(x))

// Squash MS LNK4221 and libtool warnings
extern const char GCM_SIMD_FNAME[] = __FILE__;

NAMESPACE_BEGIN(CryptoPP)
#ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY
typedef void (*SigHandler)(int);

static jmp_buf s_jmpSIGILL;
static void SigIllHandler(int)
{
    longjmp(s_jmpSIGILL, 1);
}
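// s_jmpSIGILL and SigIllHandler back the CPU_ProbePMULL() feature probes
// below: a probe installs the handler, executes a candidate carryless
// multiply, and longjmp's back here if the CPU raises SIGILL, in which case
// the instruction set is reported as unavailable.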
#endif  // Not CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY

#if (CRYPTOPP_BOOL_ARM32 || CRYPTOPP_BOOL_ARMV8)
bool CPU_ProbePMULL()
{
#if defined(CRYPTOPP_NO_CPU_FEATURE_PROBES)
    return false;
#elif (CRYPTOPP_ARM_PMULL_AVAILABLE)
# if defined(CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY)
    volatile bool result = true;
    __try
    {
        const uint64_t wa1[]={0,0x9090909090909090}, wb1[]={0,0xb0b0b0b0b0b0b0b0};
        const uint64x2_t a1=vld1q_u64(wa1), b1=vld1q_u64(wb1);

        const uint8_t wa2[]={0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,
                             0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0},
                      wb2[]={0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,
                             0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0};
        const uint8x16_t a2=vld1q_u8(wa2), b2=vld1q_u8(wb2);

        const uint64x2_t r1 = PMULL_00(a1, b1);
        const uint64x2_t r2 = PMULL_11(vreinterpretq_u64_u8(a2),
                                       vreinterpretq_u64_u8(b2));

        result = !!(vgetq_lane_u64(r1,0) == 0x5300530053005300 &&
                    vgetq_lane_u64(r1,1) == 0x5300530053005300 &&
                    vgetq_lane_u64(r2,0) == 0x6c006c006c006c00 &&
                    vgetq_lane_u64(r2,1) == 0x6c006c006c006c00);
    }
    __except (EXCEPTION_EXECUTE_HANDLER)
    {
        return false;
    }
    return result;
# else
    // longjmp and clobber warnings; volatile is required
    volatile bool result = true;

    volatile SigHandler oldHandler = signal(SIGILL, SigIllHandler);
    if (oldHandler == SIG_ERR)
        return false;

    volatile sigset_t oldMask;
    if (sigprocmask(0, NULLPTR, (sigset_t*)&oldMask))
    {
        signal(SIGILL, oldHandler);
        return false;
    }

    if (setjmp(s_jmpSIGILL))
        result = false;
    else
    {
        const uint64_t wa1[]={0,0x9090909090909090}, wb1[]={0,0xb0b0b0b0b0b0b0b0};
        const uint64x2_t a1=vld1q_u64(wa1), b1=vld1q_u64(wb1);

        const uint8_t wa2[]={0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,
                             0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0},
                      wb2[]={0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,
                             0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0};
        const uint8x16_t a2=vld1q_u8(wa2), b2=vld1q_u8(wb2);

        const uint64x2_t r1 = PMULL_00(a1, b1);
        const uint64x2_t r2 = PMULL_11(vreinterpretq_u64_u8(a2),
                                       vreinterpretq_u64_u8(b2));

        result = !!(vgetq_lane_u64(r1,0) == 0x5300530053005300 &&
                    vgetq_lane_u64(r1,1) == 0x5300530053005300 &&
                    vgetq_lane_u64(r2,0) == 0x6c006c006c006c00 &&
                    vgetq_lane_u64(r2,1) == 0x6c006c006c006c00);
    }
    sigprocmask(SIG_SETMASK, (sigset_t*)&oldMask, NULLPTR);
    signal(SIGILL, oldHandler);
    return result;
# endif
#else
    return false;
#endif  // CRYPTOPP_ARM_PMULL_AVAILABLE
}
#endif  // ARM32 or ARM64

#if (CRYPTOPP_BOOL_PPC32 || CRYPTOPP_BOOL_PPC64)
bool CPU_ProbePMULL()
{
#if defined(CRYPTOPP_NO_CPU_FEATURE_PROBES)
    return false;
#elif (CRYPTOPP_POWER8_VMULL_AVAILABLE)
    // longjmp and clobber warnings; volatile is required
    volatile bool result = true;

    volatile SigHandler oldHandler = signal(SIGILL, SigIllHandler);
    if (oldHandler == SIG_ERR)
        return false;

    volatile sigset_t oldMask;
    if (sigprocmask(0, NULLPTR, (sigset_t*)&oldMask))
    {
        signal(SIGILL, oldHandler);
        return false;
    }

    if (setjmp(s_jmpSIGILL))
        result = false;
    else
    {
        const uint64_t wa1[]={0,W64LIT(0x9090909090909090)},
                       wb1[]={0,W64LIT(0xb0b0b0b0b0b0b0b0)};
        const uint64x2_p a1=(uint64x2_p)VecLoad((const byte*)wa1),
                         b1=(uint64x2_p)VecLoad((const byte*)wb1);

        const uint8_t wa2[]={0x80,0x80,0x80,0x80,0x80,0x80,0x80,0x80,
                             0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0,0xa0},
                      wb2[]={0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,0xc0,
                             0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0,0xe0};
        const uint64x2_p a2=(uint64x2_p)VecLoad((const byte*)wa2),
                         b2=(uint64x2_p)VecLoad((const byte*)wb2);

        const uint64x2_p r1 = VecPolyMultiply00LE(a1, b1);
        const uint64x2_p r2 = VecPolyMultiply11LE(a2, b2);

        const uint64_t wc1[]={W64LIT(0x5300530053005300), W64LIT(0x5300530053005300)},
                       wc2[]={W64LIT(0x6c006c006c006c00), W64LIT(0x6c006c006c006c00)};
        const uint64x2_p e1=(uint64x2_p)VecLoad((const byte*)wc1),
                         e2=(uint64x2_p)VecLoad((const byte*)wc2);

        result = !!(VecEqual(r1, e1) && VecEqual(r2, e2));
    }

    sigprocmask(SIG_SETMASK, (sigset_t*)&oldMask, NULLPTR);
    signal(SIGILL, oldHandler);
    return result;
#else
    return false;
#endif  // CRYPTOPP_POWER8_VMULL_AVAILABLE
}
#endif  // PPC32 or PPC64

#if CRYPTOPP_ARM_NEON_AVAILABLE
void GCM_Xor16_NEON(byte *a, const byte *b, const byte *c)
{
    *UINT64X2_CAST(a) = veorq_u64(*CONST_UINT64X2_CAST(b), *CONST_UINT64X2_CAST(c));
}
#endif  // CRYPTOPP_ARM_NEON_AVAILABLE

#if CRYPTOPP_ARM_PMULL_AVAILABLE

// Swap the high and low 64-bit words of the vector
inline uint64x2_t SwapWords(const uint64x2_t& data)
{
    return (uint64x2_t)vcombine_u64(
        vget_high_u64(data), vget_low_u64(data));
}
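// GCM_Reduce_PMULL folds a 256-bit product (c2:c1:c0, with c1 holding the
// middle/cross terms) back to 128 bits modulo the GHASH polynomial
// x^128 + x^7 + x^2 + x + 1. GHASH stores polynomials bit-reflected, so the
// constant r carries the reduction polynomial in the two reflected forms
// (0xe1...00 and 0xc2...00) consumed by the carryless multiplies below.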
uint64x2_t GCM_Reduce_PMULL(uint64x2_t c0, uint64x2_t c1, uint64x2_t c2, const uint64x2_t &r)
{
    c1 = veorq_u64(c1, VEXT_U8<8>(vdupq_n_u64(0), c0));
    c1 = veorq_u64(c1, PMULL_01(c0, r));
    c0 = VEXT_U8<8>(c0, vdupq_n_u64(0));
    c0 = vshlq_n_u64(veorq_u64(c0, c1), 1);
    c0 = PMULL_00(c0, r);
    c2 = veorq_u64(c2, c0);
    c2 = veorq_u64(c2, VEXT_U8<8>(c1, vdupq_n_u64(0)));
    c1 = vshrq_n_u64(vcombine_u64(vget_low_u64(c1), vget_low_u64(c2)), 63);
    c2 = vshlq_n_u64(c2, 1);

    return veorq_u64(c2, c1);
}
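// GCM_Multiply_PMULL forms the full 256-bit carryless product of x and h from
// four 64x64-bit multiplies, schoolbook style (addition in GF(2) is XOR):
//   (x1*2^64 + x0) * (h1*2^64 + h0)
//     = (x1*h1)*2^128 + (x1*h0 ^ x0*h1)*2^64 + (x0*h0)
//     =   c2  *2^128  +        c1      *2^64 +   c0
// and then reduces the result with GCM_Reduce_PMULL.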
uint64x2_t GCM_Multiply_PMULL(const uint64x2_t &x, const uint64x2_t &h, const uint64x2_t &r)
{
    const uint64x2_t c0 = PMULL_00(x, h);
    const uint64x2_t c1 = veorq_u64(PMULL_10(x, h), PMULL_01(x, h));
    const uint64x2_t c2 = PMULL_11(x, h);

    return GCM_Reduce_PMULL(c0, c1, c2, r);
}
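// The key setup below fills mulTable with successive powers of the hash key H
// (h walks through H, H^2, H^3, ... by repeated GF(2^128) multiplication),
// two powers per 32-byte stride. The mix of full-vector and half-vector
// stores to overlapping offsets packs the 64-bit halves into the interleaved
// layout that GCM_AuthenticateBlocks_PMULL loads directly.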
void GCM_SetKeyWithoutResync_PMULL(const byte *hashKey, byte *mulTable, unsigned int tableSize)
{
    const uint64x2_t r = {0xe100000000000000ull, 0xc200000000000000ull};
    const uint64x2_t t = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(hashKey)));
    const uint64x2_t h0 = vextq_u64(t, t, 1);

    uint64x2_t h = h0;
    unsigned int i;
    for (i=0; i<tableSize-32; i+=32)
    {
        const uint64x2_t h1 = GCM_Multiply_PMULL(h, h0, r);
        vst1_u64((uint64_t *)(mulTable+i), vget_low_u64(h));
        vst1q_u64((uint64_t *)(mulTable+i+16), h1);
        vst1q_u64((uint64_t *)(mulTable+i+8), h);
        vst1_u64((uint64_t *)(mulTable+i+8), vget_low_u64(h1));
        h = GCM_Multiply_PMULL(h1, h0, r);
    }

    const uint64x2_t h1 = GCM_Multiply_PMULL(h, h0, r);
    vst1_u64((uint64_t *)(mulTable+i), vget_low_u64(h));
    vst1q_u64((uint64_t *)(mulTable+i+16), h1);
    vst1q_u64((uint64_t *)(mulTable+i+8), h);
    vst1_u64((uint64_t *)(mulTable+i+8), vget_low_u64(h1));
}
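// The authentication loop below aggregates up to eight 16-byte blocks per
// reduction: partial products accumulate into c0/c1/c2 against the
// precomputed powers of H from mulTable, and GCM_Reduce_PMULL runs once per
// group rather than once per block. The vrev64q_u8 byte reversals map the
// big-endian GHASH byte order onto the little-endian vector lanes.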
size_t GCM_AuthenticateBlocks_PMULL(const byte *data, size_t len, const byte *mtable, byte *hbuffer)
{
    const uint64x2_t r = {0xe100000000000000ull, 0xc200000000000000ull};
    uint64x2_t x = vreinterpretq_u64_u8(vld1q_u8(hbuffer));

    while (len >= 16)
    {
        size_t i=0, s = UnsignedMin(len/16U, 8U);
        uint64x2_t d1, d2 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-1)*16U)));
        uint64x2_t c0 = vdupq_n_u64(0);
        uint64x2_t c1 = vdupq_n_u64(0);
        uint64x2_t c2 = vdupq_n_u64(0);

        while (true)
        {
            const uint64x2_t h0 = vld1q_u64((const uint64_t*)(mtable+(i+0)*16));
            const uint64x2_t h1 = vld1q_u64((const uint64_t*)(mtable+(i+1)*16));
            const uint64x2_t h2 = veorq_u64(h0, h1);

            if (++i == s)
            {
                const uint64x2_t t1 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data)));
                d1 = veorq_u64(vextq_u64(t1, t1, 1), x);
                c0 = veorq_u64(c0, PMULL_00(d1, h0));
                c2 = veorq_u64(c2, PMULL_10(d1, h1));
                d1 = veorq_u64(d1, SwapWords(d1));
                c1 = veorq_u64(c1, PMULL_00(d1, h2));

                break;
            }

            d1 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-i)*16-8)));
            c0 = veorq_u64(c0, PMULL_10(d2, h0));
            c2 = veorq_u64(c2, PMULL_10(d1, h1));
            d2 = veorq_u64(d2, d1);
            c1 = veorq_u64(c1, PMULL_10(d2, h2));

            if (++i == s)
            {
                const uint64x2_t t2 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data)));
                d1 = veorq_u64(vextq_u64(t2, t2, 1), x);
                c0 = veorq_u64(c0, PMULL_01(d1, h0));
                c2 = veorq_u64(c2, PMULL_11(d1, h1));
                d1 = veorq_u64(d1, SwapWords(d1));
                c1 = veorq_u64(c1, PMULL_01(d1, h2));

                break;
            }

            const uint64x2_t t3 = vreinterpretq_u64_u8(vrev64q_u8(vld1q_u8(data+(s-i)*16-8)));
            d2 = vextq_u64(t3, t3, 1);
            c0 = veorq_u64(c0, PMULL_01(d1, h0));
            c2 = veorq_u64(c2, PMULL_01(d2, h1));
            d1 = veorq_u64(d1, d2);
            c1 = veorq_u64(c1, PMULL_01(d1, h2));
        }
        data += s*16;
        len -= s*16;

        c1 = veorq_u64(veorq_u64(c1, c0), c2);
        x = GCM_Reduce_PMULL(c0, c1, c2, r);
    }

    vst1q_u64(reinterpret_cast<uint64_t *>(hbuffer), x);
    return len;
}
void GCM_ReverseHashBufferIfNeeded_PMULL(byte *hashBuffer)
{
    if (GetNativeByteOrder() != BIG_ENDIAN_ORDER)
    {
        const uint8x16_t x = vrev64q_u8(vld1q_u8(hashBuffer));
        vst1q_u8(hashBuffer, vextq_u8(x, x, 8));
    }
}
#endif  // CRYPTOPP_ARM_PMULL_AVAILABLE

#if CRYPTOPP_SSE2_INTRIN_AVAILABLE || CRYPTOPP_SSE2_ASM_AVAILABLE
void GCM_Xor16_SSE2(byte *a, const byte *b, const byte *c)
{
# if CRYPTOPP_SSE2_ASM_AVAILABLE && defined(__GNUC__)
    asm ("movdqa %1, %%xmm0; pxor %2, %%xmm0; movdqa %%xmm0, %0;"
         : "=m" (a[0]) : "m"(b[0]), "m"(c[0]));
# else  // CRYPTOPP_SSE2_INTRIN_AVAILABLE
    _mm_store_si128(M128_CAST(a), _mm_xor_si128(
        _mm_load_si128(CONST_M128_CAST(b)),
        _mm_load_si128(CONST_M128_CAST(c))));
# endif
}
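// Both paths of GCM_Xor16_SSE2 above compute a = b ^ c over 16 bytes. The
// inline-assembly path uses movdqa and the intrinsic path uses
// _mm_load_si128/_mm_store_si128, so all three pointers are expected to be
// 16-byte aligned.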
#endif  // CRYPTOPP_SSE2_ASM_AVAILABLE

#if CRYPTOPP_CLMUL_AVAILABLE

#if 0
// Reference implementations kept for testing; not compiled
void gcm_gf_mult(const unsigned char *a, const unsigned char *b, unsigned char *c)
{
    word64 Z0=0, Z1=0, V0, V1;

    typedef BlockGetAndPut<word64, BigEndian> Block;
    Block::Get(a)(V0)(V1);

    for (int i=0; i<16; i++)
    {
        for (int j=0x80; j!=0; j>>=1)
        {
            int x = b[i] & j;
            Z0 ^= x ? V0 : 0;
            Z1 ^= x ? V1 : 0;
            x = (int)V1 & 1;
            V1 = (V1>>1) | (V0<<63);
            V0 = (V0>>1) ^ (x ? W64LIT(0xe1) << 56 : 0);
        }
    }

    Block::Put(NULLPTR, c)(Z0)(Z1);
}
__m128i _mm_clmulepi64_si128(const __m128i &a, const __m128i &b, int i)
{
    word64 A[1] = {ByteReverse(((word64*)&a)[i&1])};
    word64 B[1] = {ByteReverse(((word64*)&b)[i>>4])};

    PolynomialMod2 pa((byte *)A, 8);
    PolynomialMod2 pb((byte *)B, 8);
    PolynomialMod2 c = pa*pb;

    __m128i output;
    for (int i=0; i<16; i++)
        ((byte *)&output)[i] = c.GetByte(i);
    return output;
}
#endif  // Testing
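// The reference gcm_gf_mult above is the plain bit-at-a-time GHASH multiply:
// for each bit of b (most significant first) it conditionally XORs V into the
// accumulator Z, then divides V by x, folding the shifted-out bit back in
// with the reduction constant 0xe1 << 56. It serves as the scalar baseline
// the SIMD paths can be checked against.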
inline __m128i SwapWords(const __m128i& val)
{
    return _mm_shuffle_epi32(val, _MM_SHUFFLE(1, 0, 3, 2));
}
inline __m128i GCM_Reduce_CLMUL(__m128i c0, __m128i c1, __m128i c2, const __m128i& r)
{
    c1 = _mm_xor_si128(c1, _mm_slli_si128(c0, 8));
    c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(c0, r, 0x10));
    c0 = _mm_xor_si128(c1, _mm_srli_si128(c0, 8));
    c0 = _mm_slli_epi64(c0, 1);
    c0 = _mm_clmulepi64_si128(c0, r, 0);
    c2 = _mm_xor_si128(c2, c0);
    c2 = _mm_xor_si128(c2, _mm_srli_si128(c1, 8));
    c1 = _mm_unpacklo_epi64(c1, c2);
    c1 = _mm_srli_epi64(c1, 63);
    c2 = _mm_slli_epi64(c2, 1);
    return _mm_xor_si128(c2, c1);
}
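// For _mm_clmulepi64_si128(a, b, imm), bit 0 of imm selects the 64-bit half
// of a and bit 4 selects the half of b:
//   0x00 -> a.lo * b.lo,  0x01 -> a.hi * b.lo,
//   0x10 -> a.lo * b.hi,  0x11 -> a.hi * b.hi.
// GCM_Multiply_CLMUL below uses all four to build the 256-bit product
// c2:c1:c0, mirroring the PMULL_00/01/10/11 calls in the ARM path.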
__m128i GCM_Multiply_CLMUL(const __m128i &x, const __m128i &h, const __m128i &r)
{
    const __m128i c0 = _mm_clmulepi64_si128(x,h,0);
    const __m128i c1 = _mm_xor_si128(_mm_clmulepi64_si128(x,h,1), _mm_clmulepi64_si128(x,h,0x10));
    const __m128i c2 = _mm_clmulepi64_si128(x,h,0x11);

    return GCM_Reduce_CLMUL(c0, c1, c2, r);
}
void GCM_SetKeyWithoutResync_CLMUL(const byte *hashKey, byte *mulTable, unsigned int tableSize)
{
    const __m128i r = _mm_set_epi32(0xc2000000, 0x00000000, 0xe1000000, 0x00000000);
    const __m128i m = _mm_set_epi32(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f);
    __m128i h0 = _mm_shuffle_epi8(_mm_load_si128(CONST_M128_CAST(hashKey)), m), h = h0;

    unsigned int i;
    for (i=0; i<tableSize-32; i+=32)
    {
        const __m128i h1 = GCM_Multiply_CLMUL(h, h0, r);
        _mm_storel_epi64(M128_CAST(mulTable+i), h);
        _mm_storeu_si128(M128_CAST(mulTable+i+16), h1);
        _mm_storeu_si128(M128_CAST(mulTable+i+8), h);
        _mm_storel_epi64(M128_CAST(mulTable+i+8), h1);
        h = GCM_Multiply_CLMUL(h1, h0, r);
    }

    const __m128i h1 = GCM_Multiply_CLMUL(h, h0, r);
    _mm_storel_epi64(M128_CAST(mulTable+i), h);
    _mm_storeu_si128(M128_CAST(mulTable+i+16), h1);
    _mm_storeu_si128(M128_CAST(mulTable+i+8), h);
    _mm_storel_epi64(M128_CAST(mulTable+i+8), h1);
}
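// The CLMUL authentication loop mirrors the PMULL version: up to eight
// blocks are folded into c0/c1/c2 and reduced once per group. Shuffle mask m1
// reverses all 16 bytes of a block (big-endian GHASH order to register
// order), while m2 reverses the bytes within each 64-bit half and leaves the
// halves in place.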
size_t GCM_AuthenticateBlocks_CLMUL(const byte *data, size_t len, const byte *mtable, byte *hbuffer)
{
    const __m128i r = _mm_set_epi32(0xc2000000, 0x00000000, 0xe1000000, 0x00000000);
    const __m128i m1 = _mm_set_epi32(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f);
    const __m128i m2 = _mm_set_epi32(0x08090a0b, 0x0c0d0e0f, 0x00010203, 0x04050607);
    __m128i x = _mm_load_si128(M128_CAST(hbuffer));

    while (len >= 16)
    {
        size_t i=0, s = UnsignedMin(len/16, 8U);
        __m128i d1 = _mm_loadu_si128(CONST_M128_CAST(data+(s-1)*16));
        __m128i d2 = _mm_shuffle_epi8(d1, m2);
        __m128i c0 = _mm_setzero_si128();
        __m128i c1 = _mm_setzero_si128();
        __m128i c2 = _mm_setzero_si128();

        while (true)
        {
            const __m128i h0 = _mm_load_si128(CONST_M128_CAST(mtable+(i+0)*16));
            const __m128i h1 = _mm_load_si128(CONST_M128_CAST(mtable+(i+1)*16));
            const __m128i h2 = _mm_xor_si128(h0, h1);

            if (++i == s)
            {
                d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data)), m1);
                d1 = _mm_xor_si128(d1, x);
                c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0));
                c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 1));
                d1 = _mm_xor_si128(d1, SwapWords(d1));
                c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0));

                break;
            }

            d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data+(s-i)*16-8)), m2);
            c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d2, h0, 1));
            c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 1));
            d2 = _mm_xor_si128(d2, d1);
            c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d2, h2, 1));

            if (++i == s)
            {
                d1 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data)), m1);
                d1 = _mm_xor_si128(d1, x);
                c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0x10));
                c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d1, h1, 0x11));
                d1 = _mm_xor_si128(d1, SwapWords(d1));
                c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0x10));

                break;
            }

            d2 = _mm_shuffle_epi8(_mm_loadu_si128(CONST_M128_CAST(data+(s-i)*16-8)), m1);
            c0 = _mm_xor_si128(c0, _mm_clmulepi64_si128(d1, h0, 0x10));
            c2 = _mm_xor_si128(c2, _mm_clmulepi64_si128(d2, h1, 0x10));
            d1 = _mm_xor_si128(d1, d2);
            c1 = _mm_xor_si128(c1, _mm_clmulepi64_si128(d1, h2, 0x10));
        }
        data += s*16;
        len -= s*16;

        c1 = _mm_xor_si128(_mm_xor_si128(c1, c0), c2);
        x = GCM_Reduce_CLMUL(c0, c1, c2, r);
    }

    _mm_store_si128(M128_CAST(hbuffer), x);
    return len;
}
void GCM_ReverseHashBufferIfNeeded_CLMUL(byte *hashBuffer)
{
    // SSSE3 byte shuffle; reverses the 16 bytes of the hash buffer
    const __m128i mask = _mm_set_epi32(0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f);
    _mm_storeu_si128(M128_CAST(hashBuffer), _mm_shuffle_epi8(
        _mm_loadu_si128(CONST_M128_CAST(hashBuffer)), mask));
}
#endif  // CRYPTOPP_CLMUL_AVAILABLE

#if CRYPTOPP_POWER8_AVAILABLE
void GCM_Xor16_POWER8(byte *a, const byte *b, const byte *c)
{
    VecStore(VecXor(VecLoad(b), VecLoad(c)), a);
}
#endif  // CRYPTOPP_POWER8_AVAILABLE

#if CRYPTOPP_POWER8_VMULL_AVAILABLE

uint64x2_p GCM_Reduce_VMULL(uint64x2_p c0, uint64x2_p c1, uint64x2_p c2, uint64x2_p r)
{
    const uint64x2_p m1 = {1,1}, m63 = {63,63};

    c1 = VecXor(c1, VecShiftRightOctet<8>(c0));
    c1 = VecXor(c1, VecPolyMultiply10LE(c0, r));
    c0 = VecXor(c1, VecShiftLeftOctet<8>(c0));
    c0 = VecPolyMultiply00LE(vec_sl(c0, m1), r);
    c2 = VecXor(c2, c0);
    c2 = VecXor(c2, VecShiftLeftOctet<8>(c1));
    c1 = vec_sr(vec_mergeh(c1, c2), m63);
    c2 = vec_sl(c2, m1);

    return VecXor(c2, c1);
}

inline uint64x2_p GCM_Multiply_VMULL(uint64x2_p x, uint64x2_p h, uint64x2_p r)
{
    const uint64x2_p c0 = VecPolyMultiply00LE(x, h);
    const uint64x2_p c1 = VecXor(VecPolyMultiply01LE(x, h), VecPolyMultiply10LE(x, h));
    const uint64x2_p c2 = VecPolyMultiply11LE(x, h);

    return GCM_Reduce_VMULL(c0, c1, c2, r);
}
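// The POWER8 path is a direct port of the CLMUL/PMULL code above:
// VecPolyMultiply00/01/10/11LE provide the four half-width carryless
// multiplies (the POWER8 vpmsumd instruction), and GCM_Reduce_VMULL performs
// the same folding modulo the GHASH polynomial using octet shifts and
// single-bit vector shifts.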
inline uint64x2_p LoadHashKey(const byte *hashKey)
{
#if (CRYPTOPP_BIG_ENDIAN)
    const uint64x2_p key = (uint64x2_p)VecLoad(hashKey);
    const uint8x16_p mask = {8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7};
    return VecPermute(key, mask);
#else
    const uint64x2_p key = (uint64x2_p)VecLoad(hashKey);
    const uint8x16_p mask = {15,14,13,12, 11,10,9,8, 7,6,5,4, 3,2,1,0};
    return VecPermute(key, mask);
#endif
}
void GCM_SetKeyWithoutResync_VMULL(const byte *hashKey, byte *mulTable, unsigned int tableSize)
{
    const uint64x2_p r = {0xe100000000000000ull, 0xc200000000000000ull};
    uint64x2_p h = LoadHashKey(hashKey), h0 = h;

    unsigned int i;
    uint64_t temp[2];

    for (i=0; i<tableSize-32; i+=32)
    {
        const uint64x2_p h1 = GCM_Multiply_VMULL(h, h0, r);
        VecStore(h, (byte*)temp);
        std::memcpy(mulTable+i, temp+0, 8);
        VecStore(h1, mulTable+i+16);
        VecStore(h, mulTable+i+8);
        VecStore(h1, (byte*)temp);
        std::memcpy(mulTable+i+8, temp+0, 8);
        h = GCM_Multiply_VMULL(h1, h0, r);
    }

    const uint64x2_p h1 = GCM_Multiply_VMULL(h, h0, r);
    VecStore(h, (byte*)temp);
    std::memcpy(mulTable+i, temp+0, 8);
    VecStore(h1, mulTable+i+16);
    VecStore(h, mulTable+i+8);
    VecStore(h1, (byte*)temp);
    std::memcpy(mulTable+i+8, temp+0, 8);
}
template <class T>
inline T SwapWords(const T& data)
{
    return (T)VecRotateLeftOctet<8>(data);
}
inline uint64x2_p LoadBuffer1(const byte *dataBuffer)
{
#if (CRYPTOPP_BIG_ENDIAN)
    return (uint64x2_p)VecLoad(dataBuffer);
#else
    const uint64x2_p data = (uint64x2_p)VecLoad(dataBuffer);
    const uint8x16_p mask = {7,6,5,4, 3,2,1,0, 15,14,13,12, 11,10,9,8};
    return VecPermute(data, mask);
#endif
}
inline uint64x2_p LoadBuffer2(const byte *dataBuffer)
{
#if (CRYPTOPP_BIG_ENDIAN)
    return (uint64x2_p)SwapWords(VecLoadBE(dataBuffer));
#else
    return (uint64x2_p)VecLoadBE(dataBuffer);
#endif
}

size_t GCM_AuthenticateBlocks_VMULL(const byte *data, size_t len, const byte *mtable, byte *hbuffer)
{
    const uint64x2_p r = {0xe100000000000000ull, 0xc200000000000000ull};
    uint64x2_p x = (uint64x2_p)VecLoad(hbuffer);

    // Same block structure as GCM_AuthenticateBlocks_CLMUL above
    while (len >= 16)
    {
        size_t i=0, s = UnsignedMin(len/16, 8U);
        uint64x2_p d1, d2 = LoadBuffer1(data+(s-1)*16);
        uint64x2_p c0 = {0,0}, c1 = {0,0}, c2 = {0,0};

        while (true)
        {
            const uint64x2_p h0 = (uint64x2_p)VecLoad(mtable+(i+0)*16);
            const uint64x2_p h1 = (uint64x2_p)VecLoad(mtable+(i+1)*16);
            const uint64x2_p h2 = VecXor(h0, h1);

            if (++i == s)
            {
                d1 = LoadBuffer2(data);
                d1 = VecXor(d1, x);
                c0 = VecXor(c0, VecPolyMultiply00LE(d1, h0));
                c2 = VecXor(c2, VecPolyMultiply01LE(d1, h1));
                d1 = VecXor(d1, SwapWords(d1));
                c1 = VecXor(c1, VecPolyMultiply00LE(d1, h2));

                break;
            }

            d1 = LoadBuffer1(data+(s-i)*16-8);
            c0 = VecXor(c0, VecPolyMultiply01LE(d2, h0));
            c2 = VecXor(c2, VecPolyMultiply01LE(d1, h1));
            d2 = VecXor(d2, d1);
            c1 = VecXor(c1, VecPolyMultiply01LE(d2, h2));

            if (++i == s)
            {
                d1 = LoadBuffer2(data);
                d1 = VecXor(d1, x);
                c0 = VecXor(c0, VecPolyMultiply10LE(d1, h0));
                c2 = VecXor(c2, VecPolyMultiply11LE(d1, h1));
                d1 = VecXor(d1, SwapWords(d1));
                c1 = VecXor(c1, VecPolyMultiply10LE(d1, h2));

                break;
            }

            d2 = LoadBuffer2(data+(s-i)*16-8);
            c0 = VecXor(c0, VecPolyMultiply10LE(d1, h0));
            c2 = VecXor(c2, VecPolyMultiply10LE(d2, h1));
            d1 = VecXor(d1, d2);
            c1 = VecXor(c1, VecPolyMultiply10LE(d1, h2));
        }
        data += s*16;
        len -= s*16;

        c1 = VecXor(VecXor(c1, c0), c2);
        x = GCM_Reduce_VMULL(c0, c1, c2, r);
    }

    VecStore(x, hbuffer);
    return len;
}
void GCM_ReverseHashBufferIfNeeded_VMULL(byte *hashBuffer)
{
    const uint64x2_p mask = {0x08090a0b0c0d0e0full, 0x0001020304050607ull};
    VecStore(VecPermute(VecLoad(hashBuffer), (uint8x16_p)mask), hashBuffer);
}
#endif  // CRYPTOPP_POWER8_VMULL_AVAILABLE

NAMESPACE_END