#if defined(CRYPTOPP_DISABLE_SHA_ASM)
# undef CRYPTOPP_X86_ASM_AVAILABLE
# undef CRYPTOPP_X32_ASM_AVAILABLE
# undef CRYPTOPP_X64_ASM_AVAILABLE
# undef CRYPTOPP_SSE2_ASM_AVAILABLE
#endif

#if (CRYPTOPP_SHANI_AVAILABLE)
# include <nmmintrin.h>
# include <immintrin.h>
#endif

// <arm_neon.h> is not available under MSVC ARM64 builds
#if (CRYPTOPP_ARM_NEON_AVAILABLE) && !defined(_M_ARM64)
# include <arm_neon.h>
#endif

#if (CRYPTOPP_ARM_ACLE_AVAILABLE)
# include <stdint.h>
# include <arm_acle.h>
#endif

#if CRYPTOPP_POWER8_SHA_AVAILABLE
# include "ppc_simd.h"
#endif

#ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY
# include <signal.h>
# include <setjmp.h>
#endif

#ifndef EXCEPTION_EXECUTE_HANDLER
# define EXCEPTION_EXECUTE_HANDLER 1
#endif

#define M128_CAST(x) ((__m128i *)(void *)(x))
#define CONST_M128_CAST(x) ((const __m128i *)(const void *)(x))

// Squash MS LNK4221 and libtool warnings
extern const char SHA_SIMD_FNAME[] = __FILE__;

NAMESPACE_BEGIN(CryptoPP)

extern const word32 SHA256_K[64];
extern const word64 SHA512_K[80];
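// The CPU_Probe functions below test for ARM SHA support by executing an SHA
// instruction under a guard: structured exception handling on Windows, or a
// SIGILL handler plus longjmp elsewhere. If the instruction faults, the probe
// reports the feature as unavailable.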
#ifdef CRYPTOPP_GNU_STYLE_INLINE_ASSEMBLY

typedef void (*SigHandler)(int);

static jmp_buf s_jmpSIGILL;
static void SigIllHandler(int)
{
    longjmp(s_jmpSIGILL, 1);
}

#endif  // Not CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY

#if (CRYPTOPP_BOOL_ARM32 || CRYPTOPP_BOOL_ARMV8)

bool CPU_ProbeSHA1()
{
#if defined(CRYPTOPP_NO_CPU_FEATURE_PROBES)
    return false;
#elif (CRYPTOPP_ARM_SHA1_AVAILABLE)
# if defined(CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY)
    volatile bool result = true;
    __try
    {
        unsigned int w[] = {1,2,3,4, 5,6,7,8, 9,10,11,12};
        uint32x4_t data1 = vld1q_u32(w+0);
        uint32x4_t data2 = vld1q_u32(w+4);
        uint32x4_t data3 = vld1q_u32(w+8);

        uint32x4_t r1 = vsha1cq_u32 (data1, 0, data2);
        uint32x4_t r2 = vsha1mq_u32 (data1, 0, data2);
        uint32x4_t r3 = vsha1pq_u32 (data1, 0, data2);
        uint32x4_t r4 = vsha1su0q_u32 (data1, data2, data3);
        uint32x4_t r5 = vsha1su1q_u32 (data1, data2);

        result = !!(vgetq_lane_u32(r1,0) | vgetq_lane_u32(r2,1) |
                    vgetq_lane_u32(r3,2) | vgetq_lane_u32(r4,3) |
                    vgetq_lane_u32(r5,0));
    }
    __except (EXCEPTION_EXECUTE_HANDLER)
    {
        return false;
    }
    return result;
# else
    // longjmp and clobber warnings. Volatile is required.
    volatile bool result = true;
    volatile SigHandler oldHandler = signal(SIGILL, SigIllHandler);
    if (oldHandler == SIG_ERR)
        return false;

    volatile sigset_t oldMask;
    if (sigprocmask(0, NULLPTR, (sigset_t*)&oldMask))
    {
        signal(SIGILL, oldHandler);
        return false;
    }

    if (setjmp(s_jmpSIGILL))
        result = false;
    else
    {
        unsigned int w[] = {1,2,3,4, 5,6,7,8, 9,10,11,12};
        uint32x4_t data1 = vld1q_u32(w+0);
        uint32x4_t data2 = vld1q_u32(w+4);
        uint32x4_t data3 = vld1q_u32(w+8);

        uint32x4_t r1 = vsha1cq_u32 (data1, 0, data2);
        uint32x4_t r2 = vsha1mq_u32 (data1, 0, data2);
        uint32x4_t r3 = vsha1pq_u32 (data1, 0, data2);
        uint32x4_t r4 = vsha1su0q_u32 (data1, data2, data3);
        uint32x4_t r5 = vsha1su1q_u32 (data1, data2);

        result = !!(vgetq_lane_u32(r1,0) | vgetq_lane_u32(r2,1) |
                    vgetq_lane_u32(r3,2) | vgetq_lane_u32(r4,3) |
                    vgetq_lane_u32(r5,0));
    }

    sigprocmask(SIG_SETMASK, (sigset_t*)&oldMask, NULLPTR);
    signal(SIGILL, oldHandler);
    return result;
# endif
#else
    return false;
#endif  // CRYPTOPP_ARM_SHA1_AVAILABLE
}

bool CPU_ProbeSHA2()
{
#if defined(CRYPTOPP_NO_CPU_FEATURE_PROBES)
    return false;
#elif (CRYPTOPP_ARM_SHA2_AVAILABLE)
# if defined(CRYPTOPP_MS_STYLE_INLINE_ASSEMBLY)
    volatile bool result = true;
    __try
    {
        unsigned int w[] = {1,2,3,4, 5,6,7,8, 9,10,11,12};
        uint32x4_t data1 = vld1q_u32(w+0);
        uint32x4_t data2 = vld1q_u32(w+4);
        uint32x4_t data3 = vld1q_u32(w+8);

        uint32x4_t r1 = vsha256hq_u32 (data1, data2, data3);
        uint32x4_t r2 = vsha256h2q_u32 (data1, data2, data3);
        uint32x4_t r3 = vsha256su0q_u32 (data1, data2);
        uint32x4_t r4 = vsha256su1q_u32 (data1, data2, data3);

        result = !!(vgetq_lane_u32(r1,0) | vgetq_lane_u32(r2,1) |
                    vgetq_lane_u32(r3,2) | vgetq_lane_u32(r4,3));
    }
    __except (EXCEPTION_EXECUTE_HANDLER)
    {
        return false;
    }
    return result;
# else
    // longjmp and clobber warnings. Volatile is required.
    volatile bool result = true;
    volatile SigHandler oldHandler = signal(SIGILL, SigIllHandler);
    if (oldHandler == SIG_ERR)
        return false;

    volatile sigset_t oldMask;
    if (sigprocmask(0, NULLPTR, (sigset_t*)&oldMask))
    {
        signal(SIGILL, oldHandler);
        return false;
    }

    if (setjmp(s_jmpSIGILL))
        result = false;
    else
    {
        unsigned int w[] = {1,2,3,4, 5,6,7,8, 9,10,11,12};
        uint32x4_t data1 = vld1q_u32(w+0);
        uint32x4_t data2 = vld1q_u32(w+4);
        uint32x4_t data3 = vld1q_u32(w+8);

        uint32x4_t r1 = vsha256hq_u32 (data1, data2, data3);
        uint32x4_t r2 = vsha256h2q_u32 (data1, data2, data3);
        uint32x4_t r3 = vsha256su0q_u32 (data1, data2);
        uint32x4_t r4 = vsha256su1q_u32 (data1, data2, data3);

        result = !!(vgetq_lane_u32(r1,0) | vgetq_lane_u32(r2,1) |
                    vgetq_lane_u32(r3,2) | vgetq_lane_u32(r4,3));
    }

    sigprocmask(SIG_SETMASK, (sigset_t*)&oldMask, NULLPTR);
    signal(SIGILL, oldHandler);
    return result;
# endif
#else
    return false;
#endif  // CRYPTOPP_ARM_SHA2_AVAILABLE
}

#endif  // ARM32 or ARM64

// ***************** Intel x86 SHA ********************

#if CRYPTOPP_SHANI_AVAILABLE

void SHA1_HashMultipleBlocks_SHANI(word32 *state, const word32 *data, size_t length, ByteOrder order)
{
    CRYPTOPP_ASSERT(state);
    CRYPTOPP_ASSERT(data);
    CRYPTOPP_ASSERT(length >= SHA1::BLOCKSIZE);
    __m128i ABCD, ABCD_SAVE, E0, E0_SAVE, E1;
    __m128i MASK, MSG0, MSG1, MSG2, MSG3;

    // Load initial values
    ABCD = _mm_loadu_si128(CONST_M128_CAST(state));
    E0 = _mm_set_epi32(state[4], 0, 0, 0);
    ABCD = _mm_shuffle_epi32(ABCD, 0x1B);

    // Determine whether the message needs byte-swapping
    MASK = (order == BIG_ENDIAN_ORDER ?
        _mm_set_epi8(0,1,2,3, 4,5,6,7, 8,9,10,11, 12,13,14,15) :
        _mm_set_epi8(3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12));
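    // SHA-NI layout notes: ABCD is kept word-reversed (the 0x1B shuffle) and E
    // travels in the high lane of E0/E1. _mm_sha1rnds4_epu32 performs four SHA-1
    // rounds; its immediate (0..3) selects the round function and constant for
    // rounds 0-19, 20-39, 40-59 and 60-79. _mm_sha1nexte_epu32 folds the rotated
    // A value into the next E.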
    while (length >= SHA1::BLOCKSIZE)
    {
        // Save current state
        ABCD_SAVE = ABCD;
        E0_SAVE = E0;

        // Rounds 0-3
        MSG0 = _mm_loadu_si128(CONST_M128_CAST(data+0));
        MSG0 = _mm_shuffle_epi8(MSG0, MASK);
        E0 = _mm_add_epi32(E0, MSG0);
        E1 = ABCD;
        ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 0);

        // Rounds 4-7
        MSG1 = _mm_loadu_si128(CONST_M128_CAST(data+4));
        MSG1 = _mm_shuffle_epi8(MSG1, MASK);
        E1 = _mm_sha1nexte_epu32(E1, MSG1);
        E0 = ABCD;
        ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 0);
        MSG0 = _mm_sha1msg1_epu32(MSG0, MSG1);

        // Rounds 8-11
        MSG2 = _mm_loadu_si128(CONST_M128_CAST(data+8));
        MSG2 = _mm_shuffle_epi8(MSG2, MASK);
        E0 = _mm_sha1nexte_epu32(E0, MSG2);
        E1 = ABCD;
        ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 0);
        MSG1 = _mm_sha1msg1_epu32(MSG1, MSG2);
        MSG0 = _mm_xor_si128(MSG0, MSG2);

        // Rounds 12-15
        MSG3 = _mm_loadu_si128(CONST_M128_CAST(data+12));
        MSG3 = _mm_shuffle_epi8(MSG3, MASK);
        E1 = _mm_sha1nexte_epu32(E1, MSG3);
        E0 = ABCD;
        MSG0 = _mm_sha1msg2_epu32(MSG0, MSG3);
        ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 0);
        MSG2 = _mm_sha1msg1_epu32(MSG2, MSG3);
        MSG1 = _mm_xor_si128(MSG1, MSG3);
        // Rounds 16-19
        E0 = _mm_sha1nexte_epu32(E0, MSG0);
        E1 = ABCD;
        MSG1 = _mm_sha1msg2_epu32(MSG1, MSG0);
        ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 0);
        MSG3 = _mm_sha1msg1_epu32(MSG3, MSG0);
        MSG2 = _mm_xor_si128(MSG2, MSG0);

        // Rounds 20-23
        E1 = _mm_sha1nexte_epu32(E1, MSG1);
        E0 = ABCD;
        MSG2 = _mm_sha1msg2_epu32(MSG2, MSG1);
        ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 1);
        MSG0 = _mm_sha1msg1_epu32(MSG0, MSG1);
        MSG3 = _mm_xor_si128(MSG3, MSG1);

        // Rounds 24-27
        E0 = _mm_sha1nexte_epu32(E0, MSG2);
        E1 = ABCD;
        MSG3 = _mm_sha1msg2_epu32(MSG3, MSG2);
        ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 1);
        MSG1 = _mm_sha1msg1_epu32(MSG1, MSG2);
        MSG0 = _mm_xor_si128(MSG0, MSG2);

        // Rounds 28-31
        E1 = _mm_sha1nexte_epu32(E1, MSG3);
        E0 = ABCD;
        MSG0 = _mm_sha1msg2_epu32(MSG0, MSG3);
        ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 1);
        MSG2 = _mm_sha1msg1_epu32(MSG2, MSG3);
        MSG1 = _mm_xor_si128(MSG1, MSG3);

        // Rounds 32-35
        E0 = _mm_sha1nexte_epu32(E0, MSG0);
        E1 = ABCD;
        MSG1 = _mm_sha1msg2_epu32(MSG1, MSG0);
        ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 1);
        MSG3 = _mm_sha1msg1_epu32(MSG3, MSG0);
        MSG2 = _mm_xor_si128(MSG2, MSG0);

        // Rounds 36-39
        E1 = _mm_sha1nexte_epu32(E1, MSG1);
        E0 = ABCD;
        MSG2 = _mm_sha1msg2_epu32(MSG2, MSG1);
        ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 1);
        MSG0 = _mm_sha1msg1_epu32(MSG0, MSG1);
        MSG3 = _mm_xor_si128(MSG3, MSG1);

        // Rounds 40-43
        E0 = _mm_sha1nexte_epu32(E0, MSG2);
        E1 = ABCD;
        MSG3 = _mm_sha1msg2_epu32(MSG3, MSG2);
        ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 2);
        MSG1 = _mm_sha1msg1_epu32(MSG1, MSG2);
        MSG0 = _mm_xor_si128(MSG0, MSG2);

        // Rounds 44-47
        E1 = _mm_sha1nexte_epu32(E1, MSG3);
        E0 = ABCD;
        MSG0 = _mm_sha1msg2_epu32(MSG0, MSG3);
        ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 2);
        MSG2 = _mm_sha1msg1_epu32(MSG2, MSG3);
        MSG1 = _mm_xor_si128(MSG1, MSG3);

        // Rounds 48-51
        E0 = _mm_sha1nexte_epu32(E0, MSG0);
        E1 = ABCD;
        MSG1 = _mm_sha1msg2_epu32(MSG1, MSG0);
        ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 2);
        MSG3 = _mm_sha1msg1_epu32(MSG3, MSG0);
        MSG2 = _mm_xor_si128(MSG2, MSG0);

        // Rounds 52-55
        E1 = _mm_sha1nexte_epu32(E1, MSG1);
        E0 = ABCD;
        MSG2 = _mm_sha1msg2_epu32(MSG2, MSG1);
        ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 2);
        MSG0 = _mm_sha1msg1_epu32(MSG0, MSG1);
        MSG3 = _mm_xor_si128(MSG3, MSG1);

        // Rounds 56-59
        E0 = _mm_sha1nexte_epu32(E0, MSG2);
        E1 = ABCD;
        MSG3 = _mm_sha1msg2_epu32(MSG3, MSG2);
        ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 2);
        MSG1 = _mm_sha1msg1_epu32(MSG1, MSG2);
        MSG0 = _mm_xor_si128(MSG0, MSG2);

        // Rounds 60-63
        E1 = _mm_sha1nexte_epu32(E1, MSG3);
        E0 = ABCD;
        MSG0 = _mm_sha1msg2_epu32(MSG0, MSG3);
        ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 3);
        MSG2 = _mm_sha1msg1_epu32(MSG2, MSG3);
        MSG1 = _mm_xor_si128(MSG1, MSG3);

        // Rounds 64-67
        E0 = _mm_sha1nexte_epu32(E0, MSG0);
        E1 = ABCD;
        MSG1 = _mm_sha1msg2_epu32(MSG1, MSG0);
        ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 3);
        MSG3 = _mm_sha1msg1_epu32(MSG3, MSG0);
        MSG2 = _mm_xor_si128(MSG2, MSG0);

        // Rounds 68-71
        E1 = _mm_sha1nexte_epu32(E1, MSG1);
        E0 = ABCD;
        MSG2 = _mm_sha1msg2_epu32(MSG2, MSG1);
        ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 3);
        MSG3 = _mm_xor_si128(MSG3, MSG1);

        // Rounds 72-75
        E0 = _mm_sha1nexte_epu32(E0, MSG2);
        E1 = ABCD;
        MSG3 = _mm_sha1msg2_epu32(MSG3, MSG2);
        ABCD = _mm_sha1rnds4_epu32(ABCD, E0, 3);

        // Rounds 76-79
        E1 = _mm_sha1nexte_epu32(E1, MSG3);
        E0 = ABCD;
        ABCD = _mm_sha1rnds4_epu32(ABCD, E1, 3);
        // Combine state
        E0 = _mm_sha1nexte_epu32(E0, E0_SAVE);
        ABCD = _mm_add_epi32(ABCD, ABCD_SAVE);

        data += SHA1::BLOCKSIZE/sizeof(word32);
        length -= SHA1::BLOCKSIZE;
    }

    // Save state
    ABCD = _mm_shuffle_epi32(ABCD, 0x1B);
    _mm_storeu_si128(M128_CAST(state), ABCD);
    state[4] = _mm_extract_epi32(E0, 3);
}
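// Callers normally select this routine at runtime after a CPU feature test.
// A sketch, assuming HasSHA() from cpu.h; m_state and input are hypothetical
// names for the caller's digest state and message buffer:
//
//   if (HasSHA())
//       SHA1_HashMultipleBlocks_SHANI(m_state, input, length, BIG_ENDIAN_ORDER);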
void SHA256_HashMultipleBlocks_SHANI(word32 *state, const word32 *data, size_t length, ByteOrder order)
{
    CRYPTOPP_ASSERT(state);
    CRYPTOPP_ASSERT(data);
    CRYPTOPP_ASSERT(length >= SHA256::BLOCKSIZE);

    __m128i STATE0, STATE1;
    __m128i MSG, TMP, MASK;
    __m128i TMSG0, TMSG1, TMSG2, TMSG3;
    __m128i ABEF_SAVE, CDGH_SAVE;

    // Load initial values
    TMP = _mm_loadu_si128(M128_CAST(&state[0]));
    STATE1 = _mm_loadu_si128(M128_CAST(&state[4]));

    // Determine whether the message needs byte-swapping
    MASK = (order == BIG_ENDIAN_ORDER ?
        _mm_set_epi8(12,13,14,15, 8,9,10,11, 4,5,6,7, 0,1,2,3) :
        _mm_set_epi8(15,14,13,12, 11,10,9,8, 7,6,5,4, 3,2,1,0));

    TMP = _mm_shuffle_epi32(TMP, 0xB1);          // CDAB
    STATE1 = _mm_shuffle_epi32(STATE1, 0x1B);    // EFGH
    STATE0 = _mm_alignr_epi8(TMP, STATE1, 8);    // ABEF
    STATE1 = _mm_blend_epi16(STATE1, TMP, 0xF0); // CDGH
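    // _mm_sha256rnds2_epu32 performs two SHA-256 rounds per call, which is why
    // the state is repacked into ABEF/CDGH form above. Each call consumes the
    // round-constant pair in the low 64 bits of MSG; the 0x0E shuffle below
    // moves the high pair down for the second rnds2 of each four-round group.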
    while (length >= SHA256::BLOCKSIZE)
    {
        // Save current state
        ABEF_SAVE = STATE0;
        CDGH_SAVE = STATE1;

        // Rounds 0-3
        MSG = _mm_loadu_si128(CONST_M128_CAST(data+0));
        TMSG0 = _mm_shuffle_epi8(MSG, MASK);
        MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(W64LIT(0xE9B5DBA5B5C0FBCF), W64LIT(0x71374491428A2F98)));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);

        // Rounds 4-7
        TMSG1 = _mm_loadu_si128(CONST_M128_CAST(data+4));
        TMSG1 = _mm_shuffle_epi8(TMSG1, MASK);
        MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(W64LIT(0xAB1C5ED5923F82A4), W64LIT(0x59F111F13956C25B)));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        TMSG0 = _mm_sha256msg1_epu32(TMSG0, TMSG1);

        // Rounds 8-11
        TMSG2 = _mm_loadu_si128(CONST_M128_CAST(data+8));
        TMSG2 = _mm_shuffle_epi8(TMSG2, MASK);
        MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(W64LIT(0x550C7DC3243185BE), W64LIT(0x12835B01D807AA98)));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        TMSG1 = _mm_sha256msg1_epu32(TMSG1, TMSG2);

        // Rounds 12-15
        TMSG3 = _mm_loadu_si128(CONST_M128_CAST(data+12));
        TMSG3 = _mm_shuffle_epi8(TMSG3, MASK);
        MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(W64LIT(0xC19BF1749BDC06A7), W64LIT(0x80DEB1FE72BE5D74)));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(TMSG3, TMSG2, 4);
        TMSG0 = _mm_add_epi32(TMSG0, TMP);
        TMSG0 = _mm_sha256msg2_epu32(TMSG0, TMSG3);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        TMSG2 = _mm_sha256msg1_epu32(TMSG2, TMSG3);
        // Rounds 16-19
        MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(W64LIT(0x240CA1CC0FC19DC6), W64LIT(0xEFBE4786E49B69C1)));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(TMSG0, TMSG3, 4);
        TMSG1 = _mm_add_epi32(TMSG1, TMP);
        TMSG1 = _mm_sha256msg2_epu32(TMSG1, TMSG0);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        TMSG3 = _mm_sha256msg1_epu32(TMSG3, TMSG0);

        // Rounds 20-23
        MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(W64LIT(0x76F988DA5CB0A9DC), W64LIT(0x4A7484AA2DE92C6F)));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(TMSG1, TMSG0, 4);
        TMSG2 = _mm_add_epi32(TMSG2, TMP);
        TMSG2 = _mm_sha256msg2_epu32(TMSG2, TMSG1);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        TMSG0 = _mm_sha256msg1_epu32(TMSG0, TMSG1);

        // Rounds 24-27
        MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(W64LIT(0xBF597FC7B00327C8), W64LIT(0xA831C66D983E5152)));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(TMSG2, TMSG1, 4);
        TMSG3 = _mm_add_epi32(TMSG3, TMP);
        TMSG3 = _mm_sha256msg2_epu32(TMSG3, TMSG2);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        TMSG1 = _mm_sha256msg1_epu32(TMSG1, TMSG2);

        // Rounds 28-31
        MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(W64LIT(0x1429296706CA6351), W64LIT(0xD5A79147C6E00BF3)));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(TMSG3, TMSG2, 4);
        TMSG0 = _mm_add_epi32(TMSG0, TMP);
        TMSG0 = _mm_sha256msg2_epu32(TMSG0, TMSG3);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        TMSG2 = _mm_sha256msg1_epu32(TMSG2, TMSG3);

        // Rounds 32-35
        MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(W64LIT(0x53380D134D2C6DFC), W64LIT(0x2E1B213827B70A85)));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(TMSG0, TMSG3, 4);
        TMSG1 = _mm_add_epi32(TMSG1, TMP);
        TMSG1 = _mm_sha256msg2_epu32(TMSG1, TMSG0);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        TMSG3 = _mm_sha256msg1_epu32(TMSG3, TMSG0);

        // Rounds 36-39
        MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(W64LIT(0x92722C8581C2C92E), W64LIT(0x766A0ABB650A7354)));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(TMSG1, TMSG0, 4);
        TMSG2 = _mm_add_epi32(TMSG2, TMP);
        TMSG2 = _mm_sha256msg2_epu32(TMSG2, TMSG1);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        TMSG0 = _mm_sha256msg1_epu32(TMSG0, TMSG1);

        // Rounds 40-43
        MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(W64LIT(0xC76C51A3C24B8B70), W64LIT(0xA81A664BA2BFE8A1)));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(TMSG2, TMSG1, 4);
        TMSG3 = _mm_add_epi32(TMSG3, TMP);
        TMSG3 = _mm_sha256msg2_epu32(TMSG3, TMSG2);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        TMSG1 = _mm_sha256msg1_epu32(TMSG1, TMSG2);

        // Rounds 44-47
        MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(W64LIT(0x106AA070F40E3585), W64LIT(0xD6990624D192E819)));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(TMSG3, TMSG2, 4);
        TMSG0 = _mm_add_epi32(TMSG0, TMP);
        TMSG0 = _mm_sha256msg2_epu32(TMSG0, TMSG3);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        TMSG2 = _mm_sha256msg1_epu32(TMSG2, TMSG3);

        // Rounds 48-51
        MSG = _mm_add_epi32(TMSG0, _mm_set_epi64x(W64LIT(0x34B0BCB52748774C), W64LIT(0x1E376C0819A4C116)));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(TMSG0, TMSG3, 4);
        TMSG1 = _mm_add_epi32(TMSG1, TMP);
        TMSG1 = _mm_sha256msg2_epu32(TMSG1, TMSG0);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        TMSG3 = _mm_sha256msg1_epu32(TMSG3, TMSG0);

        // Rounds 52-55
        MSG = _mm_add_epi32(TMSG1, _mm_set_epi64x(W64LIT(0x682E6FF35B9CCA4F), W64LIT(0x4ED8AA4A391C0CB3)));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(TMSG1, TMSG0, 4);
        TMSG2 = _mm_add_epi32(TMSG2, TMP);
        TMSG2 = _mm_sha256msg2_epu32(TMSG2, TMSG1);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);

        // Rounds 56-59
        MSG = _mm_add_epi32(TMSG2, _mm_set_epi64x(W64LIT(0x8CC7020884C87814), W64LIT(0x78A5636F748F82EE)));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        TMP = _mm_alignr_epi8(TMSG2, TMSG1, 4);
        TMSG3 = _mm_add_epi32(TMSG3, TMP);
        TMSG3 = _mm_sha256msg2_epu32(TMSG3, TMSG2);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);

        // Rounds 60-63
        MSG = _mm_add_epi32(TMSG3, _mm_set_epi64x(W64LIT(0xC67178F2BEF9A3F7), W64LIT(0xA4506CEB90BEFFFA)));
        STATE1 = _mm_sha256rnds2_epu32(STATE1, STATE0, MSG);
        MSG = _mm_shuffle_epi32(MSG, 0x0E);
        STATE0 = _mm_sha256rnds2_epu32(STATE0, STATE1, MSG);
        // Combine state
        STATE0 = _mm_add_epi32(STATE0, ABEF_SAVE);
        STATE1 = _mm_add_epi32(STATE1, CDGH_SAVE);

        data += SHA256::BLOCKSIZE/sizeof(word32);
        length -= SHA256::BLOCKSIZE;
    }

    // Repack into ABCDEFGH order before storing
    TMP = _mm_shuffle_epi32(STATE0, 0x1B);
    STATE1 = _mm_shuffle_epi32(STATE1, 0xB1);
    STATE0 = _mm_blend_epi16(TMP, STATE1, 0xF0);
    STATE1 = _mm_alignr_epi8(STATE1, TMP, 8);

    // Save state
    _mm_storeu_si128(M128_CAST(&state[0]), STATE0);
    _mm_storeu_si128(M128_CAST(&state[4]), STATE1);
}

#endif  // CRYPTOPP_SHANI_AVAILABLE

// ***************** ARMv8 SHA ********************

#if CRYPTOPP_ARM_SHA1_AVAILABLE
void SHA1_HashMultipleBlocks_ARMV8(word32 *state, const word32 *data, size_t length, ByteOrder order)
{
    CRYPTOPP_ASSERT(state);
    CRYPTOPP_ASSERT(data);
    CRYPTOPP_ASSERT(length >= SHA1::BLOCKSIZE);
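    // ARMv8 SHA-1 intrinsics: vsha1cq_u32, vsha1pq_u32 and vsha1mq_u32 each
    // perform four rounds with the Choose, Parity and Majority functions
    // respectively; vsha1h_u32 computes the rotated E value for the next group,
    // and vsha1su0q/vsha1su1q expand the message schedule.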
    uint32x4_t C0, C1, C2, C3;
    uint32x4_t ABCD, ABCD_SAVED;
    uint32x4_t MSG0, MSG1, MSG2, MSG3;
    uint32x4_t TMP0, TMP1;
    uint32_t   E0, E0_SAVED, E1;

    // Round constants for rounds 0-19, 20-39, 40-59 and 60-79
    C0 = vdupq_n_u32(0x5A827999);
    C1 = vdupq_n_u32(0x6ED9EBA1);
    C2 = vdupq_n_u32(0x8F1BBCDC);
    C3 = vdupq_n_u32(0xCA62C1D6);

    // Load initial values
    ABCD = vld1q_u32(&state[0]);
    E0 = state[4];

    while (length >= SHA1::BLOCKSIZE)
    {
        // Save current state
        ABCD_SAVED = ABCD;
        E0_SAVED = E0;
        // Load the message
        MSG0 = vld1q_u32(data +  0);
        MSG1 = vld1q_u32(data +  4);
        MSG2 = vld1q_u32(data +  8);
        MSG3 = vld1q_u32(data + 12);

        if (order == BIG_ENDIAN_ORDER)  // Byte-swap the message words
        {
            MSG0 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG0)));
            MSG1 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG1)));
            MSG2 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG2)));
            MSG3 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG3)));
        }

        TMP0 = vaddq_u32(MSG0, C0);
        TMP1 = vaddq_u32(MSG1, C0);
        // Rounds 0-3
        E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1cq_u32(ABCD, E0, TMP0);
        TMP0 = vaddq_u32(MSG2, C0);
        MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);

        // Rounds 4-7
        E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1cq_u32(ABCD, E1, TMP1);
        TMP1 = vaddq_u32(MSG3, C0);
        MSG0 = vsha1su1q_u32(MSG0, MSG3);
        MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3);

        // Rounds 8-11
        E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1cq_u32(ABCD, E0, TMP0);
        TMP0 = vaddq_u32(MSG0, C0);
        MSG1 = vsha1su1q_u32(MSG1, MSG0);
        MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0);

        // Rounds 12-15
        E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1cq_u32(ABCD, E1, TMP1);
        TMP1 = vaddq_u32(MSG1, C1);
        MSG2 = vsha1su1q_u32(MSG2, MSG1);
        MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1);

        // Rounds 16-19
        E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1cq_u32(ABCD, E0, TMP0);
        TMP0 = vaddq_u32(MSG2, C1);
        MSG3 = vsha1su1q_u32(MSG3, MSG2);
        MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);

        // Rounds 20-23
        E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1pq_u32(ABCD, E1, TMP1);
        TMP1 = vaddq_u32(MSG3, C1);
        MSG0 = vsha1su1q_u32(MSG0, MSG3);
        MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3);

        // Rounds 24-27
        E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1pq_u32(ABCD, E0, TMP0);
        TMP0 = vaddq_u32(MSG0, C1);
        MSG1 = vsha1su1q_u32(MSG1, MSG0);
        MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0);

        // Rounds 28-31
        E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1pq_u32(ABCD, E1, TMP1);
        TMP1 = vaddq_u32(MSG1, C1);
        MSG2 = vsha1su1q_u32(MSG2, MSG1);
        MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1);

        // Rounds 32-35
        E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1pq_u32(ABCD, E0, TMP0);
        TMP0 = vaddq_u32(MSG2, C2);
        MSG3 = vsha1su1q_u32(MSG3, MSG2);
        MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);

        // Rounds 36-39
        E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1pq_u32(ABCD, E1, TMP1);
        TMP1 = vaddq_u32(MSG3, C2);
        MSG0 = vsha1su1q_u32(MSG0, MSG3);
        MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3);

        // Rounds 40-43
        E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1mq_u32(ABCD, E0, TMP0);
        TMP0 = vaddq_u32(MSG0, C2);
        MSG1 = vsha1su1q_u32(MSG1, MSG0);
        MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0);

        // Rounds 44-47
        E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1mq_u32(ABCD, E1, TMP1);
        TMP1 = vaddq_u32(MSG1, C2);
        MSG2 = vsha1su1q_u32(MSG2, MSG1);
        MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1);

        // Rounds 48-51
        E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1mq_u32(ABCD, E0, TMP0);
        TMP0 = vaddq_u32(MSG2, C2);
        MSG3 = vsha1su1q_u32(MSG3, MSG2);
        MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);

        // Rounds 52-55
        E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1mq_u32(ABCD, E1, TMP1);
        TMP1 = vaddq_u32(MSG3, C3);
        MSG0 = vsha1su1q_u32(MSG0, MSG3);
        MSG1 = vsha1su0q_u32(MSG1, MSG2, MSG3);

        // Rounds 56-59
        E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1mq_u32(ABCD, E0, TMP0);
        TMP0 = vaddq_u32(MSG0, C3);
        MSG1 = vsha1su1q_u32(MSG1, MSG0);
        MSG2 = vsha1su0q_u32(MSG2, MSG3, MSG0);

        // Rounds 60-63
        E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1pq_u32(ABCD, E1, TMP1);
        TMP1 = vaddq_u32(MSG1, C3);
        MSG2 = vsha1su1q_u32(MSG2, MSG1);
        MSG3 = vsha1su0q_u32(MSG3, MSG0, MSG1);

        // Rounds 64-67
        E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1pq_u32(ABCD, E0, TMP0);
        TMP0 = vaddq_u32(MSG2, C3);
        MSG3 = vsha1su1q_u32(MSG3, MSG2);
        MSG0 = vsha1su0q_u32(MSG0, MSG1, MSG2);

        // Rounds 68-71
        E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1pq_u32(ABCD, E1, TMP1);
        TMP1 = vaddq_u32(MSG3, C3);
        MSG0 = vsha1su1q_u32(MSG0, MSG3);

        // Rounds 72-75
        E1 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1pq_u32(ABCD, E0, TMP0);

        // Rounds 76-79
        E0 = vsha1h_u32(vgetq_lane_u32(ABCD, 0));
        ABCD = vsha1pq_u32(ABCD, E1, TMP1);
        // Combine state
        E0 += E0_SAVED;
        ABCD = vaddq_u32(ABCD_SAVED, ABCD);

        data += SHA1::BLOCKSIZE/sizeof(word32);
        length -= SHA1::BLOCKSIZE;
    }

    // Save state
    vst1q_u32(&state[0], ABCD);
    state[4] = E0;
}
#endif  // CRYPTOPP_ARM_SHA1_AVAILABLE

#if CRYPTOPP_ARM_SHA2_AVAILABLE
void SHA256_HashMultipleBlocks_ARMV8(word32 *state, const word32 *data, size_t length, ByteOrder order)
{
    CRYPTOPP_ASSERT(state);
    CRYPTOPP_ASSERT(data);
    CRYPTOPP_ASSERT(length >= SHA256::BLOCKSIZE);
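    // vsha256hq_u32 updates the ABCD half of the state and vsha256h2q_u32 the
    // EFGH half; each pair of calls consumes one vector of four message words
    // plus round constants (TMP0/TMP1) and advances the hash by four rounds.
    // TMP2 preserves STATE0 across the first call, since vsha256h2q_u32 needs
    // the pre-round value.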
    uint32x4_t STATE0, STATE1, ABEF_SAVE, CDGH_SAVE;
    uint32x4_t MSG0, MSG1, MSG2, MSG3;
    uint32x4_t TMP0, TMP1, TMP2;

    // Load initial values
    STATE0 = vld1q_u32(&state[0]);
    STATE1 = vld1q_u32(&state[4]);

    while (length >= SHA256::BLOCKSIZE)
    {
        // Save current state
        ABEF_SAVE = STATE0;
        CDGH_SAVE = STATE1;
        // Load the message
        MSG0 = vld1q_u32(data +  0);
        MSG1 = vld1q_u32(data +  4);
        MSG2 = vld1q_u32(data +  8);
        MSG3 = vld1q_u32(data + 12);

        if (order == BIG_ENDIAN_ORDER)  // Byte-swap the message words
        {
            MSG0 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG0)));
            MSG1 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG1)));
            MSG2 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG2)));
            MSG3 = vreinterpretq_u32_u8(vrev32q_u8(vreinterpretq_u8_u32(MSG3)));
        }
        TMP0 = vaddq_u32(MSG0, vld1q_u32(&SHA256_K[0x00]));

        // Rounds 0-3
        MSG0 = vsha256su0q_u32(MSG0, MSG1);
        TMP2 = STATE0;
        TMP1 = vaddq_u32(MSG1, vld1q_u32(&SHA256_K[0x04]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
        MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);

        // Rounds 4-7
        MSG1 = vsha256su0q_u32(MSG1, MSG2);
        TMP2 = STATE0;
        TMP0 = vaddq_u32(MSG2, vld1q_u32(&SHA256_K[0x08]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
        MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);

        // Rounds 8-11
        MSG2 = vsha256su0q_u32(MSG2, MSG3);
        TMP2 = STATE0;
        TMP1 = vaddq_u32(MSG3, vld1q_u32(&SHA256_K[0x0c]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
        MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);

        // Rounds 12-15
        MSG3 = vsha256su0q_u32(MSG3, MSG0);
        TMP2 = STATE0;
        TMP0 = vaddq_u32(MSG0, vld1q_u32(&SHA256_K[0x10]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
        MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);

        // Rounds 16-19
        MSG0 = vsha256su0q_u32(MSG0, MSG1);
        TMP2 = STATE0;
        TMP1 = vaddq_u32(MSG1, vld1q_u32(&SHA256_K[0x14]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
        MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);

        // Rounds 20-23
        MSG1 = vsha256su0q_u32(MSG1, MSG2);
        TMP2 = STATE0;
        TMP0 = vaddq_u32(MSG2, vld1q_u32(&SHA256_K[0x18]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
        MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);

        // Rounds 24-27
        MSG2 = vsha256su0q_u32(MSG2, MSG3);
        TMP2 = STATE0;
        TMP1 = vaddq_u32(MSG3, vld1q_u32(&SHA256_K[0x1c]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
        MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);

        // Rounds 28-31
        MSG3 = vsha256su0q_u32(MSG3, MSG0);
        TMP2 = STATE0;
        TMP0 = vaddq_u32(MSG0, vld1q_u32(&SHA256_K[0x20]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
        MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);

        // Rounds 32-35
        MSG0 = vsha256su0q_u32(MSG0, MSG1);
        TMP2 = STATE0;
        TMP1 = vaddq_u32(MSG1, vld1q_u32(&SHA256_K[0x24]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
        MSG0 = vsha256su1q_u32(MSG0, MSG2, MSG3);

        // Rounds 36-39
        MSG1 = vsha256su0q_u32(MSG1, MSG2);
        TMP2 = STATE0;
        TMP0 = vaddq_u32(MSG2, vld1q_u32(&SHA256_K[0x28]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
        MSG1 = vsha256su1q_u32(MSG1, MSG3, MSG0);

        // Rounds 40-43
        MSG2 = vsha256su0q_u32(MSG2, MSG3);
        TMP2 = STATE0;
        TMP1 = vaddq_u32(MSG3, vld1q_u32(&SHA256_K[0x2c]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);
        MSG2 = vsha256su1q_u32(MSG2, MSG0, MSG1);

        // Rounds 44-47
        MSG3 = vsha256su0q_u32(MSG3, MSG0);
        TMP2 = STATE0;
        TMP0 = vaddq_u32(MSG0, vld1q_u32(&SHA256_K[0x30]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
        MSG3 = vsha256su1q_u32(MSG3, MSG1, MSG2);

        // Rounds 48-51
        TMP2 = STATE0;
        TMP1 = vaddq_u32(MSG1, vld1q_u32(&SHA256_K[0x34]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);

        // Rounds 52-55
        TMP2 = STATE0;
        TMP0 = vaddq_u32(MSG2, vld1q_u32(&SHA256_K[0x38]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);

        // Rounds 56-59
        TMP2 = STATE0;
        TMP1 = vaddq_u32(MSG3, vld1q_u32(&SHA256_K[0x3c]));
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP0);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP0);

        // Rounds 60-63
        TMP2 = STATE0;
        STATE0 = vsha256hq_u32(STATE0, STATE1, TMP1);
        STATE1 = vsha256h2q_u32(STATE1, TMP2, TMP1);
        // Combine state
        STATE0 = vaddq_u32(STATE0, ABEF_SAVE);
        STATE1 = vaddq_u32(STATE1, CDGH_SAVE);

        data += SHA256::BLOCKSIZE/sizeof(word32);
        length -= SHA256::BLOCKSIZE;
    }

    // Save state
    vst1q_u32(&state[0], STATE0);
    vst1q_u32(&state[4], STATE1);
}
#endif  // CRYPTOPP_ARM_SHA2_AVAILABLE

// ***************** Power8 SHA ********************

#if CRYPTOPP_POWER8_SHA_AVAILABLE

// Indexes into the S[] array
enum {A=0, B=1, C, D, E, F, G, H};
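// The POWER8 code keeps one working variable per vector register: after the
// VecShiftLeftOctet shifts below, element 0 of S[A]..S[H] holds a..h and the
// remaining lanes are don't-care values. Every round then runs as ordinary
// vector arithmetic on lane 0, and VectorPack gathers the lane-0 words back
// into contiguous state vectors at the end of each block.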
inline
uint32x4_p VecLoad32(const word32* data, int offset)
{
#if (CRYPTOPP_LITTLE_ENDIAN)
    const uint8x16_p mask = {3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12};
    const uint32x4_p val = VecLoad(offset, data);
    return VecPermute(val, val, mask);
#else
    return VecLoad(offset, data);
#endif
}

template <class T> inline
void VecStore32(const T data, word32 dest[4])
{
    VecStore(data, dest);
}

inline
uint32x4_p VectorCh(const uint32x4_p x, const uint32x4_p y, const uint32x4_p z)
{
    // vec_sel(z,y,x) computes Ch(x,y,z) = (x & y) ^ (~x & z)
    return vec_sel(z,y,x);
}

inline
uint32x4_p VectorMaj(const uint32x4_p x, const uint32x4_p y, const uint32x4_p z)
{
    // vec_sel(y,z,x^y) computes Maj(x,y,z) = (x & y) ^ (x & z) ^ (y & z)
    return vec_sel(y, z, VecXor(x, y));
}

inline
uint32x4_p Vector_sigma0(const uint32x4_p val)
{
    return VecSHA256<0,0>(val);
}

inline
uint32x4_p Vector_sigma1(const uint32x4_p val)
{
    return VecSHA256<0,0xf>(val);
}

inline
uint32x4_p VectorSigma0(const uint32x4_p val)
{
    return VecSHA256<1,0>(val);
}

inline
uint32x4_p VectorSigma1(const uint32x4_p val)
{
    return VecSHA256<1,0xf>(val);
}

inline
uint32x4_p VectorPack(const uint32x4_p a, const uint32x4_p b,
                      const uint32x4_p c, const uint32x4_p d)
{
    const uint8x16_p m1 = {0,1,2,3, 16,17,18,19, 0,0,0,0, 0,0,0,0};
    const uint8x16_p m2 = {0,1,2,3, 4,5,6,7, 16,17,18,19, 20,21,22,23};
    return VecPermute(VecPermute(a,b,m1), VecPermute(c,d,m1), m2);
}
template <unsigned int R> inline
void SHA256_ROUND1(uint32x4_p W[16], uint32x4_p S[8], const uint32x4_p K, const uint32x4_p M)
{
    uint32x4_p T1, T2;

    W[R] = M;
    T1 = S[H] + VectorSigma1(S[E]) + VectorCh(S[E],S[F],S[G]) + K + M;
    T2 = VectorSigma0(S[A]) + VectorMaj(S[A],S[B],S[C]);

    S[H] = S[G]; S[G] = S[F]; S[F] = S[E];
    S[E] = S[D] + T1;
    S[D] = S[C]; S[C] = S[B]; S[B] = S[A];
    S[A] = T1 + T2;
}

template <unsigned int R> inline
void SHA256_ROUND2(uint32x4_p W[16], uint32x4_p S[8], const uint32x4_p K)
{
    // Indexes into the circular message schedule
    enum {IDX0=(R+0)&0xf, IDX1=(R+1)&0xf, IDX9=(R+9)&0xf, IDX14=(R+14)&0xf};

    const uint32x4_p s0 = Vector_sigma0(W[IDX1]);
    const uint32x4_p s1 = Vector_sigma1(W[IDX14]);

    uint32x4_p T1 = (W[IDX0] += s0 + s1 + W[IDX9]);
    T1 += S[H] + VectorSigma1(S[E]) + VectorCh(S[E],S[F],S[G]) + K;
    uint32x4_p T2 = VectorSigma0(S[A]) + VectorMaj(S[A],S[B],S[C]);

    S[H] = S[G]; S[G] = S[F]; S[F] = S[E];
    S[E] = S[D] + T1;
    S[D] = S[C]; S[C] = S[B]; S[B] = S[A];
    S[A] = T1 + T2;
}
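// SHA256_ROUND2 implements the FIPS 180-4 message schedule
//   W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16]
// over a 16-entry circular buffer: with t = R (mod 16), the offsets t-16,
// t-15, t-7 and t-2 reduce to IDX0, IDX1, IDX9 and IDX14 above.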
void SHA256_HashMultipleBlocks_POWER8(word32 *state, const word32 *data, size_t length, ByteOrder order)
{
    CRYPTOPP_ASSERT(state);
    CRYPTOPP_ASSERT(data);
    CRYPTOPP_ASSERT(length >= SHA256::BLOCKSIZE);
    CRYPTOPP_UNUSED(order);

    const uint32_t* k = reinterpret_cast<const uint32_t*>(SHA256_K);
    const uint32_t* m = reinterpret_cast<const uint32_t*>(data);

    uint32x4_p abcd = VecLoad(state+0);
    uint32x4_p efgh = VecLoad(state+4);
    uint32x4_p W[16], S[8], vm, vk;

    size_t blocks = length / SHA256::BLOCKSIZE;
    while (blocks--)
    {
        unsigned int offset = 0;

        S[A] = abcd; S[E] = efgh;
        S[B] = VecShiftLeftOctet<4>(S[A]);
        S[F] = VecShiftLeftOctet<4>(S[E]);
        S[C] = VecShiftLeftOctet<4>(S[B]);
        S[G] = VecShiftLeftOctet<4>(S[F]);
        S[D] = VecShiftLeftOctet<4>(S[C]);
        S[H] = VecShiftLeftOctet<4>(S[G]);
        // Rounds 0-15 consume the message and constants directly
        vk = VecLoad(offset, k);
        vm = VecLoad32(m, offset);
        SHA256_ROUND1<0>(W,S, vk,vm);
        offset+=16;

        vk = VecShiftLeftOctet<4>(vk);
        vm = VecShiftLeftOctet<4>(vm);
        SHA256_ROUND1<1>(W,S, vk,vm);

        vk = VecShiftLeftOctet<4>(vk);
        vm = VecShiftLeftOctet<4>(vm);
        SHA256_ROUND1<2>(W,S, vk,vm);

        vk = VecShiftLeftOctet<4>(vk);
        vm = VecShiftLeftOctet<4>(vm);
        SHA256_ROUND1<3>(W,S, vk,vm);

        vk = VecLoad(offset, k);
        vm = VecLoad32(m, offset);
        SHA256_ROUND1<4>(W,S, vk,vm);
        offset+=16;

        vk = VecShiftLeftOctet<4>(vk);
        vm = VecShiftLeftOctet<4>(vm);
        SHA256_ROUND1<5>(W,S, vk,vm);

        vk = VecShiftLeftOctet<4>(vk);
        vm = VecShiftLeftOctet<4>(vm);
        SHA256_ROUND1<6>(W,S, vk,vm);

        vk = VecShiftLeftOctet<4>(vk);
        vm = VecShiftLeftOctet<4>(vm);
        SHA256_ROUND1<7>(W,S, vk,vm);

        vk = VecLoad(offset, k);
        vm = VecLoad32(m, offset);
        SHA256_ROUND1<8>(W,S, vk,vm);
        offset+=16;

        vk = VecShiftLeftOctet<4>(vk);
        vm = VecShiftLeftOctet<4>(vm);
        SHA256_ROUND1<9>(W,S, vk,vm);

        vk = VecShiftLeftOctet<4>(vk);
        vm = VecShiftLeftOctet<4>(vm);
        SHA256_ROUND1<10>(W,S, vk,vm);

        vk = VecShiftLeftOctet<4>(vk);
        vm = VecShiftLeftOctet<4>(vm);
        SHA256_ROUND1<11>(W,S, vk,vm);

        vk = VecLoad(offset, k);
        vm = VecLoad32(m, offset);
        SHA256_ROUND1<12>(W,S, vk,vm);
        offset+=16;

        vk = VecShiftLeftOctet<4>(vk);
        vm = VecShiftLeftOctet<4>(vm);
        SHA256_ROUND1<13>(W,S, vk,vm);

        vk = VecShiftLeftOctet<4>(vk);
        vm = VecShiftLeftOctet<4>(vm);
        SHA256_ROUND1<14>(W,S, vk,vm);

        vk = VecShiftLeftOctet<4>(vk);
        vm = VecShiftLeftOctet<4>(vm);
        SHA256_ROUND1<15>(W,S, vk,vm);
        // Rounds 16-63 of the compression function
        for (unsigned int i=16; i<64; i+=16)
        {
            vk = VecLoad(i*4, k);
            SHA256_ROUND2<0>(W,S, vk);
            SHA256_ROUND2<1>(W,S, VecShiftLeftOctet<4>(vk));
            SHA256_ROUND2<2>(W,S, VecShiftLeftOctet<8>(vk));
            SHA256_ROUND2<3>(W,S, VecShiftLeftOctet<12>(vk));

            vk = VecLoad((i+4)*4, k);
            SHA256_ROUND2<4>(W,S, vk);
            SHA256_ROUND2<5>(W,S, VecShiftLeftOctet<4>(vk));
            SHA256_ROUND2<6>(W,S, VecShiftLeftOctet<8>(vk));
            SHA256_ROUND2<7>(W,S, VecShiftLeftOctet<12>(vk));

            vk = VecLoad((i+8)*4, k);
            SHA256_ROUND2<8>(W,S, vk);
            SHA256_ROUND2<9>(W,S, VecShiftLeftOctet<4>(vk));
            SHA256_ROUND2<10>(W,S, VecShiftLeftOctet<8>(vk));
            SHA256_ROUND2<11>(W,S, VecShiftLeftOctet<12>(vk));

            vk = VecLoad((i+12)*4, k);
            SHA256_ROUND2<12>(W,S, vk);
            SHA256_ROUND2<13>(W,S, VecShiftLeftOctet<4>(vk));
            SHA256_ROUND2<14>(W,S, VecShiftLeftOctet<8>(vk));
            SHA256_ROUND2<15>(W,S, VecShiftLeftOctet<12>(vk));
        }
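        // In the loop above the offsets passed to VecLoad are in bytes, so
        // i*4 addresses k[i]; e.g. i=16 loads K[16..19] from byte offset 64.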
        abcd += VectorPack(S[A],S[B],S[C],S[D]);
        efgh += VectorPack(S[E],S[F],S[G],S[H]);

        m += 16;  // Advance to the next 64-byte block
    }

    // Save state
    VecStore32(abcd, state+0);
    VecStore32(efgh, state+4);
}
inline
void VecStore64(const uint64x2_p val, word64* data)
{
    VecStore(val, data);
}

inline
uint64x2_p VecLoad64(const word64* data, int offset)
{
#if (CRYPTOPP_LITTLE_ENDIAN)
    const uint8x16_p mask = {0,1,2,3, 4,5,6,7, 8,9,10,11, 12,13,14,15};
    const uint64x2_p val = VecLoad(offset, data);
    return VecPermute(val, val, mask);
#else
    return VecLoad(offset, data);
#endif
}

inline
uint64x2_p VectorCh(const uint64x2_p x, const uint64x2_p y, const uint64x2_p z)
{
    // vec_sel(z,y,x) computes Ch(x,y,z) = (x & y) ^ (~x & z)
    return vec_sel(z,y,x);
}

inline
uint64x2_p VectorMaj(const uint64x2_p x, const uint64x2_p y, const uint64x2_p z)
{
    // vec_sel(y,z,x^y) computes Maj(x,y,z) = (x & y) ^ (x & z) ^ (y & z)
    return vec_sel(y, z, VecXor(x, y));
}

inline
uint64x2_p Vector_sigma0(const uint64x2_p val)
{
    return VecSHA512<0,0>(val);
}

inline
uint64x2_p Vector_sigma1(const uint64x2_p val)
{
    return VecSHA512<0,0xf>(val);
}

inline
uint64x2_p VectorSigma0(const uint64x2_p val)
{
    return VecSHA512<1,0>(val);
}

inline
uint64x2_p VectorSigma1(const uint64x2_p val)
{
    return VecSHA512<1,0xf>(val);
}

inline
uint64x2_p VectorPack(const uint64x2_p x, const uint64x2_p y)
{
    const uint8x16_p m = {0,1,2,3, 4,5,6,7, 16,17,18,19, 20,21,22,23};
    return VecPermute(x,y,m);
}
template <unsigned int R> inline
void SHA512_ROUND1(uint64x2_p W[16], uint64x2_p S[8], const uint64x2_p K, const uint64x2_p M)
{
    uint64x2_p T1, T2;

    W[R] = M;
    T1 = S[H] + VectorSigma1(S[E]) + VectorCh(S[E],S[F],S[G]) + K + M;
    T2 = VectorSigma0(S[A]) + VectorMaj(S[A],S[B],S[C]);

    S[H] = S[G]; S[G] = S[F]; S[F] = S[E];
    S[E] = S[D] + T1;
    S[D] = S[C]; S[C] = S[B]; S[B] = S[A];
    S[A] = T1 + T2;
}

template <unsigned int R> inline
void SHA512_ROUND2(uint64x2_p W[16], uint64x2_p S[8], const uint64x2_p K)
{
    // Indexes into the circular message schedule
    enum {IDX0=(R+0)&0xf, IDX1=(R+1)&0xf, IDX9=(R+9)&0xf, IDX14=(R+14)&0xf};

    const uint64x2_p s0 = Vector_sigma0(W[IDX1]);
    const uint64x2_p s1 = Vector_sigma1(W[IDX14]);

    uint64x2_p T1 = (W[IDX0] += s0 + s1 + W[IDX9]);
    T1 += S[H] + VectorSigma1(S[E]) + VectorCh(S[E],S[F],S[G]) + K;
    uint64x2_p T2 = VectorSigma0(S[A]) + VectorMaj(S[A],S[B],S[C]);

    S[H] = S[G]; S[G] = S[F]; S[F] = S[E];
    S[E] = S[D] + T1;
    S[D] = S[C]; S[C] = S[B]; S[B] = S[A];
    S[A] = T1 + T2;
}
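// The SHA-512 schedule has the same shape as the SHA-256 one above; only the
// word size (64 bits), the rotations selected by VecSHA512, and the round
// count (80) differ.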
void SHA512_HashMultipleBlocks_POWER8(word64 *state, const word64 *data, size_t length, ByteOrder order)
{
    CRYPTOPP_ASSERT(state);
    CRYPTOPP_ASSERT(data);
    CRYPTOPP_ASSERT(length >= SHA512::BLOCKSIZE);
    CRYPTOPP_UNUSED(order);

    const uint64_t* k = reinterpret_cast<const uint64_t*>(SHA512_K);
    const uint64_t* m = reinterpret_cast<const uint64_t*>(data);

    uint64x2_p ab = VecLoad(state+0);
    uint64x2_p cd = VecLoad(state+2);
    uint64x2_p ef = VecLoad(state+4);
    uint64x2_p gh = VecLoad(state+6);
    uint64x2_p W[16], S[8], vm, vk;

    size_t blocks = length / SHA512::BLOCKSIZE;
    while (blocks--)
    {
        unsigned int offset = 0;

        S[A] = ab; S[C] = cd;
        S[E] = ef; S[G] = gh;
        S[B] = VecShiftLeftOctet<8>(S[A]);
        S[D] = VecShiftLeftOctet<8>(S[C]);
        S[F] = VecShiftLeftOctet<8>(S[E]);
        S[H] = VecShiftLeftOctet<8>(S[G]);
        // Rounds 0-15 consume the message and constants directly
        vk = VecLoad(offset, k);
        vm = VecLoad64(m, offset);
        SHA512_ROUND1<0>(W,S, vk,vm);
        offset+=16;

        vk = VecShiftLeftOctet<8>(vk);
        vm = VecShiftLeftOctet<8>(vm);
        SHA512_ROUND1<1>(W,S, vk,vm);

        vk = VecLoad(offset, k);
        vm = VecLoad64(m, offset);
        SHA512_ROUND1<2>(W,S, vk,vm);
        offset+=16;

        vk = VecShiftLeftOctet<8>(vk);
        vm = VecShiftLeftOctet<8>(vm);
        SHA512_ROUND1<3>(W,S, vk,vm);

        vk = VecLoad(offset, k);
        vm = VecLoad64(m, offset);
        SHA512_ROUND1<4>(W,S, vk,vm);
        offset+=16;

        vk = VecShiftLeftOctet<8>(vk);
        vm = VecShiftLeftOctet<8>(vm);
        SHA512_ROUND1<5>(W,S, vk,vm);

        vk = VecLoad(offset, k);
        vm = VecLoad64(m, offset);
        SHA512_ROUND1<6>(W,S, vk,vm);
        offset+=16;

        vk = VecShiftLeftOctet<8>(vk);
        vm = VecShiftLeftOctet<8>(vm);
        SHA512_ROUND1<7>(W,S, vk,vm);

        vk = VecLoad(offset, k);
        vm = VecLoad64(m, offset);
        SHA512_ROUND1<8>(W,S, vk,vm);
        offset+=16;

        vk = VecShiftLeftOctet<8>(vk);
        vm = VecShiftLeftOctet<8>(vm);
        SHA512_ROUND1<9>(W,S, vk,vm);

        vk = VecLoad(offset, k);
        vm = VecLoad64(m, offset);
        SHA512_ROUND1<10>(W,S, vk,vm);
        offset+=16;

        vk = VecShiftLeftOctet<8>(vk);
        vm = VecShiftLeftOctet<8>(vm);
        SHA512_ROUND1<11>(W,S, vk,vm);

        vk = VecLoad(offset, k);
        vm = VecLoad64(m, offset);
        SHA512_ROUND1<12>(W,S, vk,vm);
        offset+=16;

        vk = VecShiftLeftOctet<8>(vk);
        vm = VecShiftLeftOctet<8>(vm);
        SHA512_ROUND1<13>(W,S, vk,vm);

        vk = VecLoad(offset, k);
        vm = VecLoad64(m, offset);
        SHA512_ROUND1<14>(W,S, vk,vm);
        offset+=16;

        vk = VecShiftLeftOctet<8>(vk);
        vm = VecShiftLeftOctet<8>(vm);
        SHA512_ROUND1<15>(W,S, vk,vm);
        // Rounds 16-79 of the compression function
        for (unsigned int i=16; i<80; i+=16)
        {
            vk = VecLoad(i*8, k);
            SHA512_ROUND2<0>(W,S, vk);
            SHA512_ROUND2<1>(W,S, VecShiftLeftOctet<8>(vk));

            vk = VecLoad((i+2)*8, k);
            SHA512_ROUND2<2>(W,S, vk);
            SHA512_ROUND2<3>(W,S, VecShiftLeftOctet<8>(vk));

            vk = VecLoad((i+4)*8, k);
            SHA512_ROUND2<4>(W,S, vk);
            SHA512_ROUND2<5>(W,S, VecShiftLeftOctet<8>(vk));

            vk = VecLoad((i+6)*8, k);
            SHA512_ROUND2<6>(W,S, vk);
            SHA512_ROUND2<7>(W,S, VecShiftLeftOctet<8>(vk));

            vk = VecLoad((i+8)*8, k);
            SHA512_ROUND2<8>(W,S, vk);
            SHA512_ROUND2<9>(W,S, VecShiftLeftOctet<8>(vk));

            vk = VecLoad((i+10)*8, k);
            SHA512_ROUND2<10>(W,S, vk);
            SHA512_ROUND2<11>(W,S, VecShiftLeftOctet<8>(vk));

            vk = VecLoad((i+12)*8, k);
            SHA512_ROUND2<12>(W,S, vk);
            SHA512_ROUND2<13>(W,S, VecShiftLeftOctet<8>(vk));

            vk = VecLoad((i+14)*8, k);
            SHA512_ROUND2<14>(W,S, vk);
            SHA512_ROUND2<15>(W,S, VecShiftLeftOctet<8>(vk));
        }
        ab += VectorPack(S[A],S[B]);
        cd += VectorPack(S[C],S[D]);
        ef += VectorPack(S[E],S[F]);
        gh += VectorPack(S[G],S[H]);

        m += 16;  // Advance to the next 128-byte block
    }

    // Save state
    VecStore64(ab, state+0);
    VecStore64(cd, state+2);
    VecStore64(ef, state+4);
    VecStore64(gh, state+6);
}

#endif  // CRYPTOPP_POWER8_SHA_AVAILABLE

NAMESPACE_END