#if (CRYPTOPP_SSSE3_AVAILABLE)
# include "adv_simd.h"
# include <pmmintrin.h>
# include <tmmintrin.h>
#endif

#if defined(__XOP__)
# include <ammintrin.h>
#endif

#if defined(__AVX512F__)
# define CRYPTOPP_AVX512_ROTATE 1
# include <immintrin.h>
#endif

#if (CRYPTOPP_ARM_NEON_AVAILABLE)
# include "adv_simd.h"
# include <arm_neon.h>
#endif

#if (CRYPTOPP_ARM_ACLE_AVAILABLE)
# include <stdint.h>
# include <arm_acle.h>
#endif

#if defined(CRYPTOPP_POWER8_AVAILABLE)
# include "adv_simd.h"
# include "ppc_simd.h"
#endif

// Squash MS LNK4221 and libtool warnings
extern const char SIMON128_SIMD_FNAME[] = __FILE__;
ANONYMOUS_NAMESPACE_BEGIN
using CryptoPP::word32;
using CryptoPP::word64;
// ***************************** ARM NEON ***************************** //

#if (CRYPTOPP_ARM_NEON_AVAILABLE)

// vld1q_dup_u64 is missing from MSVC's 32-bit ARM intrinsics, so
// provide a shim for those builds.
#if defined(_MSC_VER) && !defined(_M_ARM64)
inline uint64x2_t vld1q_dup_u64(const uint64_t* ptr)
{
    return vmovq_n_u64(*ptr);
}
#endif
template <class T>
inline T UnpackHigh64(const T& a, const T& b)
{
    const uint64x1_t x(vget_high_u64((uint64x2_t)a));
    const uint64x1_t y(vget_high_u64((uint64x2_t)b));
    return (T)vcombine_u64(x, y);
}
template <class T>
inline T UnpackLow64(const T& a, const T& b)
{
    const uint64x1_t x(vget_low_u64((uint64x2_t)a));
    const uint64x1_t y(vget_low_u64((uint64x2_t)b));
    return (T)vcombine_u64(x, y);
}
template <unsigned int R>
inline uint64x2_t RotateLeft64(const uint64x2_t& val)
{
    const uint64x2_t a(vshlq_n_u64(val, R));
    const uint64x2_t b(vshrq_n_u64(val, 64 - R));
    return vorrq_u64(a, b);
}
template <unsigned int R>
inline uint64x2_t RotateRight64(const uint64x2_t& val)
{
    const uint64x2_t a(vshlq_n_u64(val, 64 - R));
    const uint64x2_t b(vshrq_n_u64(val, R));
    return vorrq_u64(a, b);
}
#if defined(__aarch32__) || defined(__aarch64__)
// A rotate by a multiple of 8 bits is a byte permutation, and a single
// table lookup is faster than two shifts and an OR.
template <>
inline uint64x2_t RotateLeft64<8>(const uint64x2_t& val)
{
    const uint8_t maskb[16] = { 7,0,1,2, 3,4,5,6, 15,8,9,10, 11,12,13,14 };
    const uint8x16_t mask = vld1q_u8(maskb);

    return vreinterpretq_u64_u8(
        vqtbl1q_u8(vreinterpretq_u8_u64(val), mask));
}
template <>
inline uint64x2_t RotateRight64<8>(const uint64x2_t& val)
{
    const uint8_t maskb[16] = { 1,2,3,4, 5,6,7,0, 9,10,11,12, 13,14,15,8 };
    const uint8x16_t mask = vld1q_u8(maskb);

    return vreinterpretq_u64_u8(
        vqtbl1q_u8(vreinterpretq_u8_u64(val), mask));
}
#endif  // __aarch32__ or __aarch64__
// The SIMON-128 round function: f(x) = (x <<< 1 & x <<< 8) ^ (x <<< 2)
inline uint64x2_t SIMON128_f(const uint64x2_t& val)
{
    return veorq_u64(RotateLeft64<2>(val),
        vandq_u64(RotateLeft64<1>(val), RotateLeft64<8>(val)));
}
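
// For reference, a scalar sketch of what the vector code above computes
// per 64-bit lane (illustration only; SIMON128_f_ref is a hypothetical
// name, not part of this file). One vector evaluation applies f to the
// same half of two blocks at once:
//
//   word64 SIMON128_f_ref(word64 x)
//   {
//       const word64 rol1 = (x << 1) | (x >> 63);
//       const word64 rol2 = (x << 2) | (x >> 62);
//       const word64 rol8 = (x << 8) | (x >> 56);
//       return rol2 ^ (rol1 & rol8);
//   }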
inline void SIMON128_Enc_Block(uint64x2_t &block0, uint64x2_t &block1,
    const word64 *subkeys, unsigned int rounds)
{
    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    uint64x2_t x1 = UnpackHigh64(block0, block1);
    uint64x2_t y1 = UnpackLow64(block0, block1);

    for (int i = 0; i < static_cast<int>(rounds & ~1)-1; i += 2)
    {
        const uint64x2_t rk1 = vld1q_dup_u64(subkeys+i);
        y1 = veorq_u64(veorq_u64(y1, SIMON128_f(x1)), rk1);

        const uint64x2_t rk2 = vld1q_dup_u64(subkeys+i+1);
        x1 = veorq_u64(veorq_u64(x1, SIMON128_f(y1)), rk2);
    }

    if (rounds & 1)
    {
        const uint64x2_t rk = vld1q_dup_u64(subkeys+rounds-1);

        y1 = veorq_u64(veorq_u64(y1, SIMON128_f(x1)), rk);
        std::swap(x1, y1);
    }

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block0 = UnpackLow64(y1, x1);
    block1 = UnpackHigh64(y1, x1);
}
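
// The loop above processes rounds in pairs so each subkey load is reused
// across a full swap-free double round, and the (rounds & 1) tail handles
// a final odd round. Per the SIMON specification, SIMON-128 uses 68, 69
// or 72 rounds for 128-, 192- and 256-bit keys, so the tail is taken only
// for 192-bit keys.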
inline void SIMON128_Enc_6_Blocks(uint64x2_t &block0, uint64x2_t &block1,
    uint64x2_t &block2, uint64x2_t &block3, uint64x2_t &block4, uint64x2_t &block5,
    const word64 *subkeys, unsigned int rounds)
{
    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    uint64x2_t x1 = UnpackHigh64(block0, block1);
    uint64x2_t y1 = UnpackLow64(block0, block1);
    uint64x2_t x2 = UnpackHigh64(block2, block3);
    uint64x2_t y2 = UnpackLow64(block2, block3);
    uint64x2_t x3 = UnpackHigh64(block4, block5);
    uint64x2_t y3 = UnpackLow64(block4, block5);

    for (int i = 0; i < static_cast<int>(rounds & ~1) - 1; i += 2)
    {
        const uint64x2_t rk1 = vld1q_dup_u64(subkeys+i);
        y1 = veorq_u64(veorq_u64(y1, SIMON128_f(x1)), rk1);
        y2 = veorq_u64(veorq_u64(y2, SIMON128_f(x2)), rk1);
        y3 = veorq_u64(veorq_u64(y3, SIMON128_f(x3)), rk1);

        const uint64x2_t rk2 = vld1q_dup_u64(subkeys+i+1);
        x1 = veorq_u64(veorq_u64(x1, SIMON128_f(y1)), rk2);
        x2 = veorq_u64(veorq_u64(x2, SIMON128_f(y2)), rk2);
        x3 = veorq_u64(veorq_u64(x3, SIMON128_f(y3)), rk2);
    }

    if (rounds & 1)
    {
        const uint64x2_t rk = vld1q_dup_u64(subkeys + rounds - 1);

        y1 = veorq_u64(veorq_u64(y1, SIMON128_f(x1)), rk);
        y2 = veorq_u64(veorq_u64(y2, SIMON128_f(x2)), rk);
        y3 = veorq_u64(veorq_u64(y3, SIMON128_f(x3)), rk);
        std::swap(x1, y1); std::swap(x2, y2); std::swap(x3, y3);
    }

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block0 = UnpackLow64(y1, x1);
    block1 = UnpackHigh64(y1, x1);
    block2 = UnpackLow64(y2, x2);
    block3 = UnpackHigh64(y2, x2);
    block4 = UnpackLow64(y3, x3);
    block5 = UnpackHigh64(y3, x3);
}
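
// The 6-block variant keeps three independent (x,y) vector pairs in
// flight per subkey, giving out-of-order cores independent work to hide
// instruction latency. The round function itself is unchanged.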
inline void SIMON128_Dec_Block(uint64x2_t &block0, uint64x2_t &block1,
    const word64 *subkeys, unsigned int rounds)
{
    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    uint64x2_t x1 = UnpackHigh64(block0, block1);
    uint64x2_t y1 = UnpackLow64(block0, block1);

    if (rounds & 1)
    {
        std::swap(x1, y1);
        const uint64x2_t rk = vld1q_dup_u64(subkeys + rounds - 1);

        // XOR is associative, so y ^ rk ^ f(x) matches the encrypt order.
        y1 = veorq_u64(veorq_u64(y1, rk), SIMON128_f(x1));
        rounds--;
    }

    // Decryption walks the subkeys in reverse.
    for (int i = static_cast<int>(rounds-2); i >= 0; i -= 2)
    {
        const uint64x2_t rk1 = vld1q_dup_u64(subkeys+i+1);
        x1 = veorq_u64(veorq_u64(x1, SIMON128_f(y1)), rk1);

        const uint64x2_t rk2 = vld1q_dup_u64(subkeys+i);
        y1 = veorq_u64(veorq_u64(y1, SIMON128_f(x1)), rk2);
    }

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block0 = UnpackLow64(y1, x1);
    block1 = UnpackHigh64(y1, x1);
}
inline void SIMON128_Dec_6_Blocks(uint64x2_t &block0, uint64x2_t &block1,
    uint64x2_t &block2, uint64x2_t &block3, uint64x2_t &block4, uint64x2_t &block5,
    const word64 *subkeys, unsigned int rounds)
{
    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    uint64x2_t x1 = UnpackHigh64(block0, block1);
    uint64x2_t y1 = UnpackLow64(block0, block1);
    uint64x2_t x2 = UnpackHigh64(block2, block3);
    uint64x2_t y2 = UnpackLow64(block2, block3);
    uint64x2_t x3 = UnpackHigh64(block4, block5);
    uint64x2_t y3 = UnpackLow64(block4, block5);

    if (rounds & 1)
    {
        std::swap(x1, y1); std::swap(x2, y2); std::swap(x3, y3);
        const uint64x2_t rk = vld1q_dup_u64(subkeys + rounds - 1);

        y1 = veorq_u64(veorq_u64(y1, rk), SIMON128_f(x1));
        y2 = veorq_u64(veorq_u64(y2, rk), SIMON128_f(x2));
        y3 = veorq_u64(veorq_u64(y3, rk), SIMON128_f(x3));
        rounds--;
    }

    for (int i = static_cast<int>(rounds-2); i >= 0; i -= 2)
    {
        const uint64x2_t rk1 = vld1q_dup_u64(subkeys + i + 1);
        x1 = veorq_u64(veorq_u64(x1, SIMON128_f(y1)), rk1);
        x2 = veorq_u64(veorq_u64(x2, SIMON128_f(y2)), rk1);
        x3 = veorq_u64(veorq_u64(x3, SIMON128_f(y3)), rk1);

        const uint64x2_t rk2 = vld1q_dup_u64(subkeys + i);
        y1 = veorq_u64(veorq_u64(y1, SIMON128_f(x1)), rk2);
        y2 = veorq_u64(veorq_u64(y2, SIMON128_f(x2)), rk2);
        y3 = veorq_u64(veorq_u64(y3, SIMON128_f(x3)), rk2);
    }

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block0 = UnpackLow64(y1, x1);
    block1 = UnpackHigh64(y1, x1);
    block2 = UnpackLow64(y2, x2);
    block3 = UnpackHigh64(y2, x2);
    block4 = UnpackLow64(y3, x3);
    block5 = UnpackHigh64(y3, x3);
}
#endif  // CRYPTOPP_ARM_NEON_AVAILABLE

// ****************************** SSSE3 ****************************** //

#if defined(CRYPTOPP_SSSE3_AVAILABLE)

// Clang needs intrinsic pointer casts laundered through void*.
#ifndef M128_CAST
# define M128_CAST(x) ((__m128i *)(void *)(x))
#endif
#ifndef CONST_M128_CAST
# define CONST_M128_CAST(x) ((const __m128i *)(const void *)(x))
#endif

// GCC double casts, same idea as above.
#ifndef DOUBLE_CAST
# define DOUBLE_CAST(x) ((double *)(void *)(x))
#endif
#ifndef CONST_DOUBLE_CAST
# define CONST_DOUBLE_CAST(x) ((const double *)(const void *)(x))
#endif

inline void Swap128(__m128i& a,__m128i& b)
{
#if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x5120)
    // __m128i is an unsigned long long[2], and old SunCC does not
    // exchange the arrays properly with std::swap.
    CryptoPP::vec_swap(a, b);
#else
    std::swap(a, b);
#endif
}

template <unsigned int R>
inline __m128i RotateLeft64(const __m128i& val)
{
#if defined(CRYPTOPP_AVX512_ROTATE)
    return _mm_rol_epi64(val, R);
#elif defined(__XOP__)
    return _mm_roti_epi64(val, R);
#else
    return _mm_or_si128(
        _mm_slli_epi64(val, R), _mm_srli_epi64(val, 64-R));
#endif
}
template <unsigned int R>
inline __m128i RotateRight64(const __m128i& val)
{
#if defined(CRYPTOPP_AVX512_ROTATE)
    return _mm_ror_epi64(val, R);
#elif defined(__XOP__)
    return _mm_roti_epi64(val, 64-R);
#else
    return _mm_or_si128(
        _mm_slli_epi64(val, 64-R), _mm_srli_epi64(val, R));
#endif
}
// Faster than two shifts and an or.
template <>
inline __m128i RotateLeft64<8>(const __m128i& val)
{
#if defined(__XOP__)
    return _mm_roti_epi64(val, 8);
#else
    const __m128i mask = _mm_set_epi8(14,13,12,11, 10,9,8,15, 6,5,4,3, 2,1,0,7);
    return _mm_shuffle_epi8(val, mask);
#endif
}
// Faster than two shifts and an or.
template <>
inline __m128i RotateRight64<8>(const __m128i& val)
{
#if defined(__XOP__)
    return _mm_roti_epi64(val, 64-8);
#else
    const __m128i mask = _mm_set_epi8(8,15,14,13, 12,11,10,9, 0,7,6,5, 4,3,2,1);
    return _mm_shuffle_epi8(val, mask);
#endif
}
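
// A worked example of the shuffle-based rotates above (illustration
// only): for RotateLeft64<8>, every byte of a 64-bit lane moves up one
// position and the top byte wraps around to the bottom. With lane bytes
// b7..b0, _mm_shuffle_epi8 with the RotateLeft64<8> mask yields
// b6,b5,b4,b3,b2,b1,b0,b7, which equals (x << 8) | (x >> 56).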
// The SIMON-128 round function, as in the NEON path above.
inline __m128i SIMON128_f(const __m128i& v)
{
    return _mm_xor_si128(RotateLeft64<2>(v),
        _mm_and_si128(RotateLeft64<1>(v), RotateLeft64<8>(v)));
}
inline void SIMON128_Enc_Block(__m128i &block0, __m128i &block1,
    const word64 *subkeys, unsigned int rounds)
{
    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    __m128i x1 = _mm_unpackhi_epi64(block0, block1);
    __m128i y1 = _mm_unpacklo_epi64(block0, block1);

    for (int i = 0; i < static_cast<int>(rounds & ~1)-1; i += 2)
    {
        const __m128i rk1 = _mm_castpd_si128(
            _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys+i)));
        y1 = _mm_xor_si128(_mm_xor_si128(y1, SIMON128_f(x1)), rk1);

        const __m128i rk2 = _mm_castpd_si128(
            _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys+i+1)));
        x1 = _mm_xor_si128(_mm_xor_si128(x1, SIMON128_f(y1)), rk2);
    }

    if (rounds & 1)
    {
        const __m128i rk = _mm_castpd_si128(
            _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys+rounds-1)));

        y1 = _mm_xor_si128(_mm_xor_si128(y1, SIMON128_f(x1)), rk);
        Swap128(x1, y1);
    }

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block0 = _mm_unpacklo_epi64(y1, x1);
    block1 = _mm_unpackhi_epi64(y1, x1);
}
inline void SIMON128_Enc_6_Blocks(__m128i &block0, __m128i &block1,
    __m128i &block2, __m128i &block3, __m128i &block4, __m128i &block5,
    const word64 *subkeys, unsigned int rounds)
{
    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    __m128i x1 = _mm_unpackhi_epi64(block0, block1);
    __m128i y1 = _mm_unpacklo_epi64(block0, block1);
    __m128i x2 = _mm_unpackhi_epi64(block2, block3);
    __m128i y2 = _mm_unpacklo_epi64(block2, block3);
    __m128i x3 = _mm_unpackhi_epi64(block4, block5);
    __m128i y3 = _mm_unpacklo_epi64(block4, block5);

    for (int i = 0; i < static_cast<int>(rounds & ~1) - 1; i += 2)
    {
        const __m128i rk1 = _mm_castpd_si128(
            _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys + i)));
        y1 = _mm_xor_si128(_mm_xor_si128(y1, SIMON128_f(x1)), rk1);
        y2 = _mm_xor_si128(_mm_xor_si128(y2, SIMON128_f(x2)), rk1);
        y3 = _mm_xor_si128(_mm_xor_si128(y3, SIMON128_f(x3)), rk1);

        const __m128i rk2 = _mm_castpd_si128(
            _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys + i + 1)));
        x1 = _mm_xor_si128(_mm_xor_si128(x1, SIMON128_f(y1)), rk2);
        x2 = _mm_xor_si128(_mm_xor_si128(x2, SIMON128_f(y2)), rk2);
        x3 = _mm_xor_si128(_mm_xor_si128(x3, SIMON128_f(y3)), rk2);
    }

    if (rounds & 1)
    {
        const __m128i rk = _mm_castpd_si128(
            _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys + rounds - 1)));
        y1 = _mm_xor_si128(_mm_xor_si128(y1, SIMON128_f(x1)), rk);
        y2 = _mm_xor_si128(_mm_xor_si128(y2, SIMON128_f(x2)), rk);
        y3 = _mm_xor_si128(_mm_xor_si128(y3, SIMON128_f(x3)), rk);
        Swap128(x1, y1); Swap128(x2, y2); Swap128(x3, y3);
    }

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block0 = _mm_unpacklo_epi64(y1, x1);
    block1 = _mm_unpackhi_epi64(y1, x1);
    block2 = _mm_unpacklo_epi64(y2, x2);
    block3 = _mm_unpackhi_epi64(y2, x2);
    block4 = _mm_unpacklo_epi64(y3, x3);
    block5 = _mm_unpackhi_epi64(y3, x3);
}
inline void SIMON128_Dec_Block(__m128i &block0, __m128i &block1,
    const word64 *subkeys, unsigned int rounds)
{
    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    __m128i x1 = _mm_unpackhi_epi64(block0, block1);
    __m128i y1 = _mm_unpacklo_epi64(block0, block1);

    if (rounds & 1)
    {
        const __m128i rk = _mm_castpd_si128(
            _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys + rounds - 1)));

        Swap128(x1, y1);
        y1 = _mm_xor_si128(_mm_xor_si128(y1, rk), SIMON128_f(x1));
        rounds--;
    }

    for (int i = static_cast<int>(rounds-2); i >= 0; i -= 2)
    {
        const __m128i rk1 = _mm_castpd_si128(
            _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys+i+1)));
        x1 = _mm_xor_si128(_mm_xor_si128(x1, SIMON128_f(y1)), rk1);

        const __m128i rk2 = _mm_castpd_si128(
            _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys+i)));
        y1 = _mm_xor_si128(_mm_xor_si128(y1, SIMON128_f(x1)), rk2);
    }

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block0 = _mm_unpacklo_epi64(y1, x1);
    block1 = _mm_unpackhi_epi64(y1, x1);
}
inline void SIMON128_Dec_6_Blocks(__m128i &block0, __m128i &block1,
    __m128i &block2, __m128i &block3, __m128i &block4, __m128i &block5,
    const word64 *subkeys, unsigned int rounds)
{
    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    __m128i x1 = _mm_unpackhi_epi64(block0, block1);
    __m128i y1 = _mm_unpacklo_epi64(block0, block1);
    __m128i x2 = _mm_unpackhi_epi64(block2, block3);
    __m128i y2 = _mm_unpacklo_epi64(block2, block3);
    __m128i x3 = _mm_unpackhi_epi64(block4, block5);
    __m128i y3 = _mm_unpacklo_epi64(block4, block5);

    if (rounds & 1)
    {
        const __m128i rk = _mm_castpd_si128(
            _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys + rounds - 1)));

        Swap128(x1, y1); Swap128(x2, y2); Swap128(x3, y3);
        y1 = _mm_xor_si128(_mm_xor_si128(y1, rk), SIMON128_f(x1));
        y2 = _mm_xor_si128(_mm_xor_si128(y2, rk), SIMON128_f(x2));
        y3 = _mm_xor_si128(_mm_xor_si128(y3, rk), SIMON128_f(x3));
        rounds--;
    }

    for (int i = static_cast<int>(rounds-2); i >= 0; i -= 2)
    {
        const __m128i rk1 = _mm_castpd_si128(
            _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys + i + 1)));
        x1 = _mm_xor_si128(_mm_xor_si128(x1, SIMON128_f(y1)), rk1);
        x2 = _mm_xor_si128(_mm_xor_si128(x2, SIMON128_f(y2)), rk1);
        x3 = _mm_xor_si128(_mm_xor_si128(x3, SIMON128_f(y3)), rk1);

        const __m128i rk2 = _mm_castpd_si128(
            _mm_loaddup_pd(CONST_DOUBLE_CAST(subkeys + i)));
        y1 = _mm_xor_si128(_mm_xor_si128(y1, SIMON128_f(x1)), rk2);
        y2 = _mm_xor_si128(_mm_xor_si128(y2, SIMON128_f(x2)), rk2);
        y3 = _mm_xor_si128(_mm_xor_si128(y3, SIMON128_f(x3)), rk2);
    }

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block0 = _mm_unpacklo_epi64(y1, x1);
    block1 = _mm_unpackhi_epi64(y1, x1);
    block2 = _mm_unpacklo_epi64(y2, x2);
    block3 = _mm_unpackhi_epi64(y2, x2);
    block4 = _mm_unpacklo_epi64(y3, x3);
    block5 = _mm_unpackhi_epi64(y3, x3);
}
#endif  // CRYPTOPP_SSSE3_AVAILABLE

// ***************************** Power8 ***************************** //

#if defined(CRYPTOPP_POWER8_AVAILABLE)

using CryptoPP::uint8x16_p;
using CryptoPP::uint32x4_p;
using CryptoPP::uint64x2_p;

using CryptoPP::VecAnd;
using CryptoPP::VecXor;
using CryptoPP::VecPermute;

// Rotate left by bit count
template<unsigned int C>
inline uint64x2_p RotateLeft64(const uint64x2_p val)
{
    const uint64x2_p m = {C, C};
    return vec_rl(val, m);
}
// Rotate right by bit count
template<unsigned int C>
inline uint64x2_p RotateRight64(const uint64x2_p val)
{
    const uint64x2_p m = {64-C, 64-C};
    return vec_rl(val, m);
}
inline uint64x2_p SIMON128_f(const uint64x2_p val)
{
    return VecXor(RotateLeft64<2>(val),
        VecAnd(RotateLeft64<1>(val), RotateLeft64<8>(val)));
}
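
// Note: vec_rl rotates each 64-bit element left by the per-element count
// in m, so RotateRight64 above is expressed as a left rotation by 64-C.
// Unlike the NEON and SSE paths, no byte-shuffle specialization is needed
// because the rotate instruction already handles every shift count.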
inline void SIMON128_Enc_Block(uint32x4_p &block, const word64 *subkeys, unsigned int rounds)
{
#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p m1 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
    const uint8x16_p m2 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
#else
    const uint8x16_p m1 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
    const uint8x16_p m2 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
#endif

    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    uint64x2_p x1 = (uint64x2_p)VecPermute(block, block, m1);
    uint64x2_p y1 = (uint64x2_p)VecPermute(block, block, m2);

    for (int i = 0; i < static_cast<int>(rounds & ~1)-1; i += 2)
    {
        const uint64x2_p rk1 = vec_splats((unsigned long long)subkeys[i]);
        y1 = VecXor(VecXor(y1, SIMON128_f(x1)), rk1);

        const uint64x2_p rk2 = vec_splats((unsigned long long)subkeys[i+1]);
        x1 = VecXor(VecXor(x1, SIMON128_f(y1)), rk2);
    }

    if (rounds & 1)
    {
        const uint64x2_p rk = vec_splats((unsigned long long)subkeys[rounds-1]);

        y1 = VecXor(VecXor(y1, SIMON128_f(x1)), rk);
        std::swap(x1, y1);
    }

#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p m3 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
#else
    const uint8x16_p m3 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
#endif

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block = (uint32x4_p)VecPermute(x1, y1, m3);
}
inline void SIMON128_Dec_Block(uint32x4_p &block, const word64 *subkeys, unsigned int rounds)
{
#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p m1 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
    const uint8x16_p m2 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
#else
    const uint8x16_p m1 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
    const uint8x16_p m2 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
#endif

    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    uint64x2_p x1 = (uint64x2_p)VecPermute(block, block, m1);
    uint64x2_p y1 = (uint64x2_p)VecPermute(block, block, m2);

    if (rounds & 1)
    {
        std::swap(x1, y1);
        const uint64x2_p rk = vec_splats((unsigned long long)subkeys[rounds-1]);

        y1 = VecXor(VecXor(y1, rk), SIMON128_f(x1));
        rounds--;
    }

    for (int i = static_cast<int>(rounds-2); i >= 0; i -= 2)
    {
        const uint64x2_p rk1 = vec_splats((unsigned long long)subkeys[i+1]);
        x1 = VecXor(VecXor(x1, SIMON128_f(y1)), rk1);

        const uint64x2_p rk2 = vec_splats((unsigned long long)subkeys[i]);
        y1 = VecXor(VecXor(y1, SIMON128_f(x1)), rk2);
    }

#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p m3 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
#else
    const uint8x16_p m3 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
#endif

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block = (uint32x4_p)VecPermute(x1, y1, m3);
}
inline void SIMON128_Enc_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
    uint32x4_p &block2, uint32x4_p &block3, uint32x4_p &block4,
    uint32x4_p &block5, const word64 *subkeys, unsigned int rounds)
{
#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p m1 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
    const uint8x16_p m2 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
#else
    const uint8x16_p m1 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
    const uint8x16_p m2 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
#endif

    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    uint64x2_p x1 = (uint64x2_p)VecPermute(block0, block1, m1);
    uint64x2_p y1 = (uint64x2_p)VecPermute(block0, block1, m2);
    uint64x2_p x2 = (uint64x2_p)VecPermute(block2, block3, m1);
    uint64x2_p y2 = (uint64x2_p)VecPermute(block2, block3, m2);
    uint64x2_p x3 = (uint64x2_p)VecPermute(block4, block5, m1);
    uint64x2_p y3 = (uint64x2_p)VecPermute(block4, block5, m2);

    for (int i = 0; i < static_cast<int>(rounds & ~1)-1; i += 2)
    {
        const uint64x2_p rk1 = vec_splats((unsigned long long)subkeys[i]);
        y1 = VecXor(VecXor(y1, SIMON128_f(x1)), rk1);
        y2 = VecXor(VecXor(y2, SIMON128_f(x2)), rk1);
        y3 = VecXor(VecXor(y3, SIMON128_f(x3)), rk1);

        const uint64x2_p rk2 = vec_splats((unsigned long long)subkeys[i+1]);
        x1 = VecXor(VecXor(x1, SIMON128_f(y1)), rk2);
        x2 = VecXor(VecXor(x2, SIMON128_f(y2)), rk2);
        x3 = VecXor(VecXor(x3, SIMON128_f(y3)), rk2);
    }

    if (rounds & 1)
    {
        const uint64x2_p rk = vec_splats((unsigned long long)subkeys[rounds-1]);
        y1 = VecXor(VecXor(y1, SIMON128_f(x1)), rk);
        y2 = VecXor(VecXor(y2, SIMON128_f(x2)), rk);
        y3 = VecXor(VecXor(y3, SIMON128_f(x3)), rk);
        std::swap(x1, y1); std::swap(x2, y2); std::swap(x3, y3);
    }

#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p m3 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
    const uint8x16_p m4 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
#else
    const uint8x16_p m3 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
    const uint8x16_p m4 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
#endif

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block0 = (uint32x4_p)VecPermute(x1, y1, m3);
    block1 = (uint32x4_p)VecPermute(x1, y1, m4);
    block2 = (uint32x4_p)VecPermute(x2, y2, m3);
    block3 = (uint32x4_p)VecPermute(x2, y2, m4);
    block4 = (uint32x4_p)VecPermute(x3, y3, m3);
    block5 = (uint32x4_p)VecPermute(x3, y3, m4);
}
inline void SIMON128_Dec_6_Blocks(uint32x4_p &block0, uint32x4_p &block1,
    uint32x4_p &block2, uint32x4_p &block3, uint32x4_p &block4,
    uint32x4_p &block5, const word64 *subkeys, unsigned int rounds)
{
#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p m1 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
    const uint8x16_p m2 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
#else
    const uint8x16_p m1 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
    const uint8x16_p m2 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
#endif

    // [A1 A2][B1 B2] ... => [A1 B1][A2 B2] ...
    uint64x2_p x1 = (uint64x2_p)VecPermute(block0, block1, m1);
    uint64x2_p y1 = (uint64x2_p)VecPermute(block0, block1, m2);
    uint64x2_p x2 = (uint64x2_p)VecPermute(block2, block3, m1);
    uint64x2_p y2 = (uint64x2_p)VecPermute(block2, block3, m2);
    uint64x2_p x3 = (uint64x2_p)VecPermute(block4, block5, m1);
    uint64x2_p y3 = (uint64x2_p)VecPermute(block4, block5, m2);

    if (rounds & 1)
    {
        std::swap(x1, y1); std::swap(x2, y2); std::swap(x3, y3);
        const uint64x2_p rk = vec_splats((unsigned long long)subkeys[rounds-1]);

        y1 = VecXor(VecXor(y1, rk), SIMON128_f(x1));
        y2 = VecXor(VecXor(y2, rk), SIMON128_f(x2));
        y3 = VecXor(VecXor(y3, rk), SIMON128_f(x3));
        rounds--;
    }

    for (int i = static_cast<int>(rounds-2); i >= 0; i -= 2)
    {
        const uint64x2_p rk1 = vec_splats((unsigned long long)subkeys[i+1]);
        x1 = VecXor(VecXor(x1, SIMON128_f(y1)), rk1);
        x2 = VecXor(VecXor(x2, SIMON128_f(y2)), rk1);
        x3 = VecXor(VecXor(x3, SIMON128_f(y3)), rk1);

        const uint64x2_p rk2 = vec_splats((unsigned long long)subkeys[i]);
        y1 = VecXor(VecXor(y1, SIMON128_f(x1)), rk2);
        y2 = VecXor(VecXor(y2, SIMON128_f(x2)), rk2);
        y3 = VecXor(VecXor(y3, SIMON128_f(x3)), rk2);
    }

#if (CRYPTOPP_BIG_ENDIAN)
    const uint8x16_p m3 = {31,30,29,28,27,26,25,24, 15,14,13,12,11,10,9,8};
    const uint8x16_p m4 = {23,22,21,20,19,18,17,16, 7,6,5,4,3,2,1,0};
#else
    const uint8x16_p m3 = {7,6,5,4,3,2,1,0, 23,22,21,20,19,18,17,16};
    const uint8x16_p m4 = {15,14,13,12,11,10,9,8, 31,30,29,28,27,26,25,24};
#endif

    // [A1 B1][A2 B2] ... => [A1 A2][B1 B2] ...
    block0 = (uint32x4_p)VecPermute(x1, y1, m3);
    block1 = (uint32x4_p)VecPermute(x1, y1, m4);
    block2 = (uint32x4_p)VecPermute(x2, y2, m3);
    block3 = (uint32x4_p)VecPermute(x2, y2, m4);
    block4 = (uint32x4_p)VecPermute(x3, y3, m3);
    block5 = (uint32x4_p)VecPermute(x3, y3, m4);
}
#endif  // CRYPTOPP_POWER8_AVAILABLE

ANONYMOUS_NAMESPACE_END

NAMESPACE_BEGIN(CryptoPP)
// ***************************** ARM NEON ***************************** //

#if (CRYPTOPP_ARM_NEON_AVAILABLE)
size_t SIMON128_Enc_AdvancedProcessBlocks_NEON(const word64* subKeys, size_t rounds,
    const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    return AdvancedProcessBlocks128_6x2_NEON(SIMON128_Enc_Block, SIMON128_Enc_6_Blocks,
        subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
}

size_t SIMON128_Dec_AdvancedProcessBlocks_NEON(const word64* subKeys, size_t rounds,
    const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    return AdvancedProcessBlocks128_6x2_NEON(SIMON128_Dec_Block, SIMON128_Dec_6_Blocks,
        subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
}
#endif  // CRYPTOPP_ARM_NEON_AVAILABLE

// ****************************** SSSE3 ****************************** //

#if defined(CRYPTOPP_SSSE3_AVAILABLE)
size_t SIMON128_Enc_AdvancedProcessBlocks_SSSE3(const word64* subKeys, size_t rounds,
    const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    return AdvancedProcessBlocks128_6x2_SSE(SIMON128_Enc_Block, SIMON128_Enc_6_Blocks,
        subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
}

size_t SIMON128_Dec_AdvancedProcessBlocks_SSSE3(const word64* subKeys, size_t rounds,
    const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    return AdvancedProcessBlocks128_6x2_SSE(SIMON128_Dec_Block, SIMON128_Dec_6_Blocks,
        subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
}
#endif  // CRYPTOPP_SSSE3_AVAILABLE

// ***************************** Power8 ***************************** //

#if defined(CRYPTOPP_POWER8_AVAILABLE)
size_t SIMON128_Enc_AdvancedProcessBlocks_POWER8(const word64* subKeys, size_t rounds,
    const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    return AdvancedProcessBlocks128_6x1_ALTIVEC(SIMON128_Enc_Block, SIMON128_Enc_6_Blocks,
        subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
}

size_t SIMON128_Dec_AdvancedProcessBlocks_POWER8(const word64* subKeys, size_t rounds,
    const byte *inBlocks, const byte *xorBlocks, byte *outBlocks, size_t length, word32 flags)
{
    return AdvancedProcessBlocks128_6x1_ALTIVEC(SIMON128_Dec_Block, SIMON128_Dec_6_Blocks,
        subKeys, rounds, inBlocks, xorBlocks, outBlocks, length, flags);
}
#endif  // CRYPTOPP_POWER8_AVAILABLE

NAMESPACE_END
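
// A minimal sketch of how these entry points are typically selected at
// runtime (illustrative only; the actual guards and member names, such
// as m_rkeys and m_rounds, live in simon.cpp rather than in this file):
//
//   #if (CRYPTOPP_SSSE3_AVAILABLE)
//       if (HasSSSE3())
//           return SIMON128_Enc_AdvancedProcessBlocks_SSSE3(m_rkeys,
//               (size_t)m_rounds, inBlocks, xorBlocks, outBlocks, length, flags);
//   #endif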