#define AES_KEY_ROUNDS(x) (10 + x * 2)
#define AES_KEY_BYTES(x)  (16 + x * 8)
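/* With x = 0, 1, 2 for AES-128/192/256 the macros above yield 10/12/14
   rounds and 16/24/32 key bytes respectively. */

/* Lane-index vector 0..15 below; presumably used to build a per-byte mask
   for partial block loads/stores by comparing each lane index against
   n_bytes. */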
  0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
  return (u8x16) _mm_aesenc_si128 ((__m128i) a, (__m128i) k);
#elif defined (__ARM_FEATURE_CRYPTO)
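  /* NEON has no single AESENC equivalent: AESE with an all-zero key performs
     AddRoundKey(0) + SubBytes + ShiftRows, AESMC applies MixColumns, and the
     trailing XOR with k adds the round key, matching x86 _mm_aesenc_si128. */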
  return vaesmcq_u8 (vaeseq_u8 (a, u8x16_splat (0))) ^ k;
#if defined (__VAES__)
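/* The __VAES__ variants below operate on four independent 128-bit AES blocks
   packed into one 512-bit register, one round per instruction. */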
  return (u8x64) _mm512_aesenc_epi128 ((__m512i) a, (__m512i) k);
  return (u8x64) _mm512_aesenclast_epi128 ((__m512i) a, (__m512i) k);
  return (u8x64) _mm512_aesdec_epi128 ((__m512i) a, (__m512i) k);
  return (u8x64) _mm512_aesdeclast_epi128 ((__m512i) a, (__m512i) k);
  return (u8x16) _mm_aesenclast_si128 ((__m128i) a, (__m128i) k);
#elif defined (__ARM_FEATURE_CRYPTO)
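  /* The final AES round omits MixColumns, so only AESE (with a zero key)
     plus the round-key XOR is needed, matching x86 _mm_aesenclast_si128. */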
  return vaeseq_u8 (a, u8x16_splat (0)) ^ k;
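/* AESDEC/AESDECLAST implement the equivalent inverse cipher: the round keys
   fed to them must first be run through InvMixColumns (AESIMC), except for
   the first and last ones. */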
  return (u8x16) _mm_aesdec_si128 ((__m128i) a, (__m128i) k);
  return (u8x16) _mm_aesdeclast_si128 ((__m128i) a, (__m128i) k);
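/* AVX-512 byte-masked load: the mask (1 << n_bytes) - 1 selects only the
   first n_bytes bytes; the remaining lanes come from the zero source. */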
  return (u8x16) _mm_mask_loadu_epi8 (zero, (1 << n_bytes) - 1, p);
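/* Partial store: the AVX-512 byte-masked store writes exactly n_bytes bytes;
   the _mm_maskmoveu_si128 variant stores the bytes whose corresponding mask
   byte has its most significant bit set. */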
  _mm_mask_storeu_epi8 (p, (1 << n_bytes) - 1, (__m128i) r);
  _mm_maskmoveu_si128 ((__m128i) r, (__m128i) mask, p);
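/* Block encryption: XOR the initial round key, then apply one AES round per
   remaining key; the final round is presumably applied separately with the
   "last round" primitive, which skips MixColumns. */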
  block ^= round_keys[0];
  for (int i = 1; i < rounds; i += 1)
#if defined (__AES__)
  return (u8x16) _mm_aesimc_si128 ((__m128i) a);
#elif defined (__ARM_FEATURE_CRYPTO)
  return vaesimcq_u8 (a);
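/* Both branches above compute InvMixColumns on a round key; this is the
   transform that turns an encryption key schedule into one usable with the
   AESDEC-style decryption rounds. */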
#define aes_keygen_assist(a, b) \
  (u8x16) _mm_aeskeygenassist_si128 ((__m128i) a, b)
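/* _mm_aeskeygenassist_si128 requires its round-constant argument to be a
   compile-time immediate, hence a macro rather than an inline function. */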
  r1[0] ^= (u8x16) _mm_shuffle_epi32 ((__m128i) key_assist, 0x55);
  r2[0] ^= (u8x16) _mm_shuffle_epi32 ((__m128i) r1[0], 0xff);
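  /* _mm_shuffle_epi32 with imm 0x55 broadcasts 32-bit lane 1 and with 0xff
     lane 3 across all four lanes, selecting the word that gets folded into
     the next round-key words at this step. */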
  rk[1] = (u8x16) _mm_shuffle_pd ((__m128d) rk[1], (__m128d) r1, 0);
  rk[2] = (u8x16) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1);
  rk[4] = (u8x16) _mm_shuffle_pd ((__m128d) rk[4], (__m128d) r1, 0);
  rk[5] = (u8x16) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1);
  rk[7] = (u8x16) _mm_shuffle_pd ((__m128d) rk[7], (__m128d) r1, 0);
  rk[8] = (u8x16) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1);
  rk[10] = (u8x16) _mm_shuffle_pd ((__m128d) rk[10], (__m128d) r1, 0);
  rk[11] = (u8x16) _mm_shuffle_pd ((__m128d) r1, (__m128d) r2, 1);
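  /* AES-192 produces key material in 24-byte (6-word) chunks, so consecutive
     128-bit round keys straddle chunk boundaries.  _mm_shuffle_pd splices
     them from 64-bit halves: imm 0 takes the low halves of both operands,
     imm 1 the high half of the first and the low half of the second. */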
static const u8x16 aese_prep_mask1 =
  { 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12 };
static const u8x16 aese_prep_mask2 =
  { 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15, 12, 13, 14, 15 };
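/* These masks broadcast the last 32-bit word of the previous round key into
   all four lanes (mask1 also rotates it by one byte, i.e. RotWord).  With
   every column identical, the ShiftRows step inside AESE becomes a no-op, so
   AESE against a zero key effectively performs just SubWord on that word. */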
aes128_key_expand_round_neon (u8x16 *rk, u32 rcon)
  u8x16 r, t, last_round = rk[-1], z = { };
  r = vqtbl1q_u8 (last_round, aese_prep_mask1);
  r = vaeseq_u8 (r, z);
  r ^= (u8x16) vdupq_n_u32 (rcon);
  r ^= t = vextq_u8 (z, last_round, 12);
  r ^= t = vextq_u8 (z, t, 12);
  r ^= vextq_u8 (z, t, 12);
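  /* vextq_u8 (z, x, 12) shifts x left by four bytes, pulling in zeros, so
     the three XORs above propagate each newly formed 32-bit word into the
     next, producing all four words of the round key in one pass. */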
  aes128_key_expand_round_neon (rk + 1, 0x01);
  aes128_key_expand_round_neon (rk + 2, 0x02);
  aes128_key_expand_round_neon (rk + 3, 0x04);
  aes128_key_expand_round_neon (rk + 4, 0x08);
  aes128_key_expand_round_neon (rk + 5, 0x10);
  aes128_key_expand_round_neon (rk + 6, 0x20);
  aes128_key_expand_round_neon (rk + 7, 0x40);
  aes128_key_expand_round_neon (rk + 8, 0x80);
  aes128_key_expand_round_neon (rk + 9, 0x1b);
  aes128_key_expand_round_neon (rk + 10, 0x36);
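  /* Round constants 0x01 .. 0x80, 0x1b, 0x36 are successive doublings in
     GF(2^8); doubling 0x80 reduces to 0x1b modulo the AES polynomial. */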
aes192_key_expand_round_neon (u8x8 *rk, u32 rcon)
  u8x8 r, last_round = rk[-1], z = { };
  r2 = (u8x16) vdupq_lane_u64 ((uint64x1_t) last_round, 0);
  r2 = vqtbl1q_u8 (r2, aese_prep_mask1);
  r2 = vaeseq_u8 (r2, z2);
  r2 ^= (u8x16) vdupq_n_u32 (rcon);
  r = (u8x8) vdup_laneq_u64 ((u64x2) r2, 0);
  r ^= vext_u8 (z, rk[-3], 4);
  r = rk[-2] ^ vext_u8 (r, z, 4);
  r ^= vext_u8 (z, r, 4);
  r = rk[-1] ^ vext_u8 (r, z, 4);
  r ^= vext_u8 (z, r, 4);
  u8x8 *rk = (u8x8 *) ek;
  rk[2] = *(u8x8u *) (k + 1);
  aes192_key_expand_round_neon (rk + 3, 0x01);
  aes192_key_expand_round_neon (rk + 6, 0x02);
  aes192_key_expand_round_neon (rk + 9, 0x04);
  aes192_key_expand_round_neon (rk + 12, 0x08);
  aes192_key_expand_round_neon (rk + 15, 0x10);
  aes192_key_expand_round_neon (rk + 18, 0x20);
  aes192_key_expand_round_neon (rk + 21, 0x40);
  aes192_key_expand_round_neon (rk + 24, 0x80);
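  /* The AES-192 schedule advances in 24-byte steps, so the expanded key is
     viewed as 64-bit (u8x8) halves: each expansion call fills three of them
     and rk advances by 3 per call. */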
aes256_key_expand_round_neon (u8x16 *rk, u32 rcon)
  r = vqtbl1q_u8 (rk[-1], rcon ? aese_prep_mask1 : aese_prep_mask2);
  r = vaeseq_u8 (r, z);
  r ^= (u8x16) vdupq_n_u32 (rcon);
  r ^= t = vextq_u8 (z, rk[-2], 12);
  r ^= t = vextq_u8 (z, t, 12);
  r ^= vextq_u8 (z, t, 12);
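  /* AES-256 alternates two kinds of steps: when rcon is non-zero the
     schedule applies RotWord + SubWord + rcon (mask1); when rcon is zero
     only SubWord is applied (mask2, no rotation). */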
  aes256_key_expand_round_neon (rk + 2, 0x01);
  aes256_key_expand_round_neon (rk + 3, 0);
  aes256_key_expand_round_neon (rk + 4, 0x02);
  aes256_key_expand_round_neon (rk + 5, 0);
  aes256_key_expand_round_neon (rk + 6, 0x04);
  aes256_key_expand_round_neon (rk + 7, 0);
  aes256_key_expand_round_neon (rk + 8, 0x08);
  aes256_key_expand_round_neon (rk + 9, 0);
  aes256_key_expand_round_neon (rk + 10, 0x10);
  aes256_key_expand_round_neon (rk + 11, 0);
  aes256_key_expand_round_neon (rk + 12, 0x20);
  aes256_key_expand_round_neon (rk + 13, 0);
  aes256_key_expand_round_neon (rk + 14, 0x40);
  for (int i = 1; i < (rounds / 2); i++)
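  /* This loop most likely converts the encryption schedule into the
     decryption one: round keys are mirrored end-to-end and the inner ones
     are passed through InvMixColumns, so walking i from both ends up to
     rounds / 2 covers every pair. */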