aes_armv8.cpp
/*
* AES using ARMv8
* Contributed by Jeffrey Walton
*
* Further changes
* (C) 2017,2018 Jack Lloyd
*
* Botan is released under the Simplified BSD License (see license.txt)
*/

#include <botan/aes.h>
#include <botan/loadstor.h>
#include <arm_neon.h>

namespace Botan {

#define AES_ENC_4_ROUNDS(K)                \
   do                                      \
      {                                    \
      B0 = vaesmcq_u8(vaeseq_u8(B0, K));   \
      B1 = vaesmcq_u8(vaeseq_u8(B1, K));   \
      B2 = vaesmcq_u8(vaeseq_u8(B2, K));   \
      B3 = vaesmcq_u8(vaeseq_u8(B3, K));   \
      } while(0)
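
/*
* Note: vaeseq_u8 performs a single AES encryption round step
* (AddRoundKey with K, then SubBytes and ShiftRows) and vaesmcq_u8
* applies MixColumns. Each AESE/AESMC pair is kept adjacent because
* many ARMv8 cores fuse the two instructions into one operation.
*/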

#define AES_ENC_4_LAST_ROUNDS(K, K2)       \
   do                                      \
      {                                    \
      B0 = veorq_u8(vaeseq_u8(B0, K), K2); \
      B1 = veorq_u8(vaeseq_u8(B1, K), K2); \
      B2 = veorq_u8(vaeseq_u8(B2, K), K2); \
      B3 = veorq_u8(vaeseq_u8(B3, K), K2); \
      } while(0)
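
/*
* The final AES round omits MixColumns, so the last vaeseq_u8 is
* followed by a plain XOR (veorq_u8) with the last round key rather
* than by vaesmcq_u8.
*/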

#define AES_DEC_4_ROUNDS(K)                \
   do                                      \
      {                                    \
      B0 = vaesimcq_u8(vaesdq_u8(B0, K));  \
      B1 = vaesimcq_u8(vaesdq_u8(B1, K));  \
      B2 = vaesimcq_u8(vaesdq_u8(B2, K));  \
      B3 = vaesimcq_u8(vaesdq_u8(B3, K));  \
      } while(0)

#define AES_DEC_4_LAST_ROUNDS(K, K2)       \
   do                                      \
      {                                    \
      B0 = veorq_u8(vaesdq_u8(B0, K), K2); \
      B1 = veorq_u8(vaesdq_u8(B1, K), K2); \
      B2 = veorq_u8(vaesdq_u8(B2, K), K2); \
      B3 = veorq_u8(vaesdq_u8(B3, K), K2); \
      } while(0)
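
/*
* Decryption mirrors encryption: vaesdq_u8 applies AddRoundKey plus
* the inverse ShiftRows/SubBytes, and vaesimcq_u8 applies inverse
* MixColumns. This is the "equivalent inverse cipher" form of AES, so
* the decryption schedule loaded below (m_DK/m_MD) is presumably
* stored with InvMixColumns already applied to the middle round keys.
*/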

/*
* AES-128 Encryption
*/
BOTAN_FUNC_ISA("+crypto")
void AES_128::armv8_encrypt_n(const uint8_t in[], uint8_t out[], size_t blocks) const
   {
   BOTAN_ASSERT(m_EK.empty() == false, "Key was set");

   const uint8_t *skey = reinterpret_cast<const uint8_t*>(m_EK.data());
   const uint8_t *mkey = reinterpret_cast<const uint8_t*>(m_ME.data());
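
   // m_EK holds the first 10 round keys and m_ME the final round key;
   // both are stored as 32-bit words, hence the casts to byte pointers.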

   const uint8x16_t K0 = vld1q_u8(skey + 0);
   const uint8x16_t K1 = vld1q_u8(skey + 16);
   const uint8x16_t K2 = vld1q_u8(skey + 32);
   const uint8x16_t K3 = vld1q_u8(skey + 48);
   const uint8x16_t K4 = vld1q_u8(skey + 64);
   const uint8x16_t K5 = vld1q_u8(skey + 80);
   const uint8x16_t K6 = vld1q_u8(skey + 96);
   const uint8x16_t K7 = vld1q_u8(skey + 112);
   const uint8x16_t K8 = vld1q_u8(skey + 128);
   const uint8x16_t K9 = vld1q_u8(skey + 144);
   const uint8x16_t K10 = vld1q_u8(mkey);

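   // Process four blocks per iteration: the four independent AES
   // pipelines interleave, hiding the latency of the AES instructions.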
   while(blocks >= 4)
      {
      uint8x16_t B0 = vld1q_u8(in);
      uint8x16_t B1 = vld1q_u8(in+16);
      uint8x16_t B2 = vld1q_u8(in+32);
      uint8x16_t B3 = vld1q_u8(in+48);

      AES_ENC_4_ROUNDS(K0);
      AES_ENC_4_ROUNDS(K1);
      AES_ENC_4_ROUNDS(K2);
      AES_ENC_4_ROUNDS(K3);
      AES_ENC_4_ROUNDS(K4);
      AES_ENC_4_ROUNDS(K5);
      AES_ENC_4_ROUNDS(K6);
      AES_ENC_4_ROUNDS(K7);
      AES_ENC_4_ROUNDS(K8);
      AES_ENC_4_LAST_ROUNDS(K9, K10);

      vst1q_u8(out, B0);
      vst1q_u8(out+16, B1);
      vst1q_u8(out+32, B2);
      vst1q_u8(out+48, B3);

      in += 16*4;
      out += 16*4;
      blocks -= 4;
      }

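   // Handle any remaining blocks (fewer than four) one at a time.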
   for(size_t i = 0; i != blocks; ++i)
      {
      uint8x16_t B = vld1q_u8(in+16*i);
      B = vaesmcq_u8(vaeseq_u8(B, K0));
      B = vaesmcq_u8(vaeseq_u8(B, K1));
      B = vaesmcq_u8(vaeseq_u8(B, K2));
      B = vaesmcq_u8(vaeseq_u8(B, K3));
      B = vaesmcq_u8(vaeseq_u8(B, K4));
      B = vaesmcq_u8(vaeseq_u8(B, K5));
      B = vaesmcq_u8(vaeseq_u8(B, K6));
      B = vaesmcq_u8(vaeseq_u8(B, K7));
      B = vaesmcq_u8(vaeseq_u8(B, K8));
      B = veorq_u8(vaeseq_u8(B, K9), K10);
      vst1q_u8(out+16*i, B);
      }
   }

/*
* AES-128 Decryption
*/
BOTAN_FUNC_ISA("+crypto")
void AES_128::armv8_decrypt_n(const uint8_t in[], uint8_t out[], size_t blocks) const
   {
   BOTAN_ASSERT(m_DK.empty() == false, "Key was set");

   const uint8_t *skey = reinterpret_cast<const uint8_t*>(m_DK.data());
   const uint8_t *mkey = reinterpret_cast<const uint8_t*>(m_MD.data());

   const uint8x16_t K0 = vld1q_u8(skey + 0);
   const uint8x16_t K1 = vld1q_u8(skey + 16);
   const uint8x16_t K2 = vld1q_u8(skey + 32);
   const uint8x16_t K3 = vld1q_u8(skey + 48);
   const uint8x16_t K4 = vld1q_u8(skey + 64);
   const uint8x16_t K5 = vld1q_u8(skey + 80);
   const uint8x16_t K6 = vld1q_u8(skey + 96);
   const uint8x16_t K7 = vld1q_u8(skey + 112);
   const uint8x16_t K8 = vld1q_u8(skey + 128);
   const uint8x16_t K9 = vld1q_u8(skey + 144);
   const uint8x16_t K10 = vld1q_u8(mkey);

   while(blocks >= 4)
      {
      uint8x16_t B0 = vld1q_u8(in);
      uint8x16_t B1 = vld1q_u8(in+16);
      uint8x16_t B2 = vld1q_u8(in+32);
      uint8x16_t B3 = vld1q_u8(in+48);

      AES_DEC_4_ROUNDS(K0);
      AES_DEC_4_ROUNDS(K1);
      AES_DEC_4_ROUNDS(K2);
      AES_DEC_4_ROUNDS(K3);
      AES_DEC_4_ROUNDS(K4);
      AES_DEC_4_ROUNDS(K5);
      AES_DEC_4_ROUNDS(K6);
      AES_DEC_4_ROUNDS(K7);
      AES_DEC_4_ROUNDS(K8);
      AES_DEC_4_LAST_ROUNDS(K9, K10);

      vst1q_u8(out, B0);
      vst1q_u8(out+16, B1);
      vst1q_u8(out+32, B2);
      vst1q_u8(out+48, B3);

      in += 16*4;
      out += 16*4;
      blocks -= 4;
      }

   for(size_t i = 0; i != blocks; ++i)
      {
      uint8x16_t B = vld1q_u8(in+16*i);
      B = vaesimcq_u8(vaesdq_u8(B, K0));
      B = vaesimcq_u8(vaesdq_u8(B, K1));
      B = vaesimcq_u8(vaesdq_u8(B, K2));
      B = vaesimcq_u8(vaesdq_u8(B, K3));
      B = vaesimcq_u8(vaesdq_u8(B, K4));
      B = vaesimcq_u8(vaesdq_u8(B, K5));
      B = vaesimcq_u8(vaesdq_u8(B, K6));
      B = vaesimcq_u8(vaesdq_u8(B, K7));
      B = vaesimcq_u8(vaesdq_u8(B, K8));
      B = veorq_u8(vaesdq_u8(B, K9), K10);
      vst1q_u8(out+16*i, B);
      }
   }

/*
* AES-192 Encryption
*/
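/*
* Same structure as AES-128 above; AES-192 uses 12 rounds, so m_EK
* holds round keys K0..K11 and m_ME the final round key K12.
*/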
BOTAN_FUNC_ISA("+crypto")
void AES_192::armv8_encrypt_n(const uint8_t in[], uint8_t out[], size_t blocks) const
   {
   BOTAN_ASSERT(m_EK.empty() == false, "Key was set");

   const uint8_t *skey = reinterpret_cast<const uint8_t*>(m_EK.data());
   const uint8_t *mkey = reinterpret_cast<const uint8_t*>(m_ME.data());

   const uint8x16_t K0 = vld1q_u8(skey + 0);
   const uint8x16_t K1 = vld1q_u8(skey + 16);
   const uint8x16_t K2 = vld1q_u8(skey + 32);
   const uint8x16_t K3 = vld1q_u8(skey + 48);
   const uint8x16_t K4 = vld1q_u8(skey + 64);
   const uint8x16_t K5 = vld1q_u8(skey + 80);
   const uint8x16_t K6 = vld1q_u8(skey + 96);
   const uint8x16_t K7 = vld1q_u8(skey + 112);
   const uint8x16_t K8 = vld1q_u8(skey + 128);
   const uint8x16_t K9 = vld1q_u8(skey + 144);
   const uint8x16_t K10 = vld1q_u8(skey + 160);
   const uint8x16_t K11 = vld1q_u8(skey + 176);
   const uint8x16_t K12 = vld1q_u8(mkey);

   while(blocks >= 4)
      {
      uint8x16_t B0 = vld1q_u8(in);
      uint8x16_t B1 = vld1q_u8(in+16);
      uint8x16_t B2 = vld1q_u8(in+32);
      uint8x16_t B3 = vld1q_u8(in+48);

      AES_ENC_4_ROUNDS(K0);
      AES_ENC_4_ROUNDS(K1);
      AES_ENC_4_ROUNDS(K2);
      AES_ENC_4_ROUNDS(K3);
      AES_ENC_4_ROUNDS(K4);
      AES_ENC_4_ROUNDS(K5);
      AES_ENC_4_ROUNDS(K6);
      AES_ENC_4_ROUNDS(K7);
      AES_ENC_4_ROUNDS(K8);
      AES_ENC_4_ROUNDS(K9);
      AES_ENC_4_ROUNDS(K10);
      AES_ENC_4_LAST_ROUNDS(K11, K12);

      vst1q_u8(out, B0);
      vst1q_u8(out+16, B1);
      vst1q_u8(out+32, B2);
      vst1q_u8(out+48, B3);

      in += 16*4;
      out += 16*4;
      blocks -= 4;
      }

   for(size_t i = 0; i != blocks; ++i)
      {
      uint8x16_t B = vld1q_u8(in+16*i);
      B = vaesmcq_u8(vaeseq_u8(B, K0));
      B = vaesmcq_u8(vaeseq_u8(B, K1));
      B = vaesmcq_u8(vaeseq_u8(B, K2));
      B = vaesmcq_u8(vaeseq_u8(B, K3));
      B = vaesmcq_u8(vaeseq_u8(B, K4));
      B = vaesmcq_u8(vaeseq_u8(B, K5));
      B = vaesmcq_u8(vaeseq_u8(B, K6));
      B = vaesmcq_u8(vaeseq_u8(B, K7));
      B = vaesmcq_u8(vaeseq_u8(B, K8));
      B = vaesmcq_u8(vaeseq_u8(B, K9));
      B = vaesmcq_u8(vaeseq_u8(B, K10));
      B = veorq_u8(vaeseq_u8(B, K11), K12);
      vst1q_u8(out+16*i, B);
      }
   }

/*
* AES-192 Decryption
*/
BOTAN_FUNC_ISA("+crypto")
void AES_192::armv8_decrypt_n(const uint8_t in[], uint8_t out[], size_t blocks) const
   {
   BOTAN_ASSERT(m_DK.empty() == false, "Key was set");

   const uint8_t *skey = reinterpret_cast<const uint8_t*>(m_DK.data());
   const uint8_t *mkey = reinterpret_cast<const uint8_t*>(m_MD.data());

   const uint8x16_t K0 = vld1q_u8(skey + 0);
   const uint8x16_t K1 = vld1q_u8(skey + 16);
   const uint8x16_t K2 = vld1q_u8(skey + 32);
   const uint8x16_t K3 = vld1q_u8(skey + 48);
   const uint8x16_t K4 = vld1q_u8(skey + 64);
   const uint8x16_t K5 = vld1q_u8(skey + 80);
   const uint8x16_t K6 = vld1q_u8(skey + 96);
   const uint8x16_t K7 = vld1q_u8(skey + 112);
   const uint8x16_t K8 = vld1q_u8(skey + 128);
   const uint8x16_t K9 = vld1q_u8(skey + 144);
   const uint8x16_t K10 = vld1q_u8(skey + 160);
   const uint8x16_t K11 = vld1q_u8(skey + 176);
   const uint8x16_t K12 = vld1q_u8(mkey);

   while(blocks >= 4)
      {
      uint8x16_t B0 = vld1q_u8(in);
      uint8x16_t B1 = vld1q_u8(in+16);
      uint8x16_t B2 = vld1q_u8(in+32);
      uint8x16_t B3 = vld1q_u8(in+48);

      AES_DEC_4_ROUNDS(K0);
      AES_DEC_4_ROUNDS(K1);
      AES_DEC_4_ROUNDS(K2);
      AES_DEC_4_ROUNDS(K3);
      AES_DEC_4_ROUNDS(K4);
      AES_DEC_4_ROUNDS(K5);
      AES_DEC_4_ROUNDS(K6);
      AES_DEC_4_ROUNDS(K7);
      AES_DEC_4_ROUNDS(K8);
      AES_DEC_4_ROUNDS(K9);
      AES_DEC_4_ROUNDS(K10);
      AES_DEC_4_LAST_ROUNDS(K11, K12);

      vst1q_u8(out, B0);
      vst1q_u8(out+16, B1);
      vst1q_u8(out+32, B2);
      vst1q_u8(out+48, B3);

      in += 16*4;
      out += 16*4;
      blocks -= 4;
      }

   for(size_t i = 0; i != blocks; ++i)
      {
      uint8x16_t B = vld1q_u8(in+16*i);
      B = vaesimcq_u8(vaesdq_u8(B, K0));
      B = vaesimcq_u8(vaesdq_u8(B, K1));
      B = vaesimcq_u8(vaesdq_u8(B, K2));
      B = vaesimcq_u8(vaesdq_u8(B, K3));
      B = vaesimcq_u8(vaesdq_u8(B, K4));
      B = vaesimcq_u8(vaesdq_u8(B, K5));
      B = vaesimcq_u8(vaesdq_u8(B, K6));
      B = vaesimcq_u8(vaesdq_u8(B, K7));
      B = vaesimcq_u8(vaesdq_u8(B, K8));
      B = vaesimcq_u8(vaesdq_u8(B, K9));
      B = vaesimcq_u8(vaesdq_u8(B, K10));
      B = veorq_u8(vaesdq_u8(B, K11), K12);
      vst1q_u8(out+16*i, B);
      }
   }

/*
* AES-256 Encryption
*/
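/*
* AES-256 uses 14 rounds: m_EK holds round keys K0..K13 and m_ME the
* final round key K14.
*/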
BOTAN_FUNC_ISA("+crypto")
void AES_256::armv8_encrypt_n(const uint8_t in[], uint8_t out[], size_t blocks) const
   {
   BOTAN_ASSERT(m_EK.empty() == false, "Key was set");

   const uint8_t *skey = reinterpret_cast<const uint8_t*>(m_EK.data());
   const uint8_t *mkey = reinterpret_cast<const uint8_t*>(m_ME.data());

   const uint8x16_t K0 = vld1q_u8(skey + 0);
   const uint8x16_t K1 = vld1q_u8(skey + 16);
   const uint8x16_t K2 = vld1q_u8(skey + 32);
   const uint8x16_t K3 = vld1q_u8(skey + 48);
   const uint8x16_t K4 = vld1q_u8(skey + 64);
   const uint8x16_t K5 = vld1q_u8(skey + 80);
   const uint8x16_t K6 = vld1q_u8(skey + 96);
   const uint8x16_t K7 = vld1q_u8(skey + 112);
   const uint8x16_t K8 = vld1q_u8(skey + 128);
   const uint8x16_t K9 = vld1q_u8(skey + 144);
   const uint8x16_t K10 = vld1q_u8(skey + 160);
   const uint8x16_t K11 = vld1q_u8(skey + 176);
   const uint8x16_t K12 = vld1q_u8(skey + 192);
   const uint8x16_t K13 = vld1q_u8(skey + 208);
   const uint8x16_t K14 = vld1q_u8(mkey);

   while(blocks >= 4)
      {
      uint8x16_t B0 = vld1q_u8(in);
      uint8x16_t B1 = vld1q_u8(in+16);
      uint8x16_t B2 = vld1q_u8(in+32);
      uint8x16_t B3 = vld1q_u8(in+48);

      AES_ENC_4_ROUNDS(K0);
      AES_ENC_4_ROUNDS(K1);
      AES_ENC_4_ROUNDS(K2);
      AES_ENC_4_ROUNDS(K3);
      AES_ENC_4_ROUNDS(K4);
      AES_ENC_4_ROUNDS(K5);
      AES_ENC_4_ROUNDS(K6);
      AES_ENC_4_ROUNDS(K7);
      AES_ENC_4_ROUNDS(K8);
      AES_ENC_4_ROUNDS(K9);
      AES_ENC_4_ROUNDS(K10);
      AES_ENC_4_ROUNDS(K11);
      AES_ENC_4_ROUNDS(K12);
      AES_ENC_4_LAST_ROUNDS(K13, K14);

      vst1q_u8(out, B0);
      vst1q_u8(out+16, B1);
      vst1q_u8(out+32, B2);
      vst1q_u8(out+48, B3);

      in += 16*4;
      out += 16*4;
      blocks -= 4;
      }

   for(size_t i = 0; i != blocks; ++i)
      {
      uint8x16_t B = vld1q_u8(in+16*i);
      B = vaesmcq_u8(vaeseq_u8(B, K0));
      B = vaesmcq_u8(vaeseq_u8(B, K1));
      B = vaesmcq_u8(vaeseq_u8(B, K2));
      B = vaesmcq_u8(vaeseq_u8(B, K3));
      B = vaesmcq_u8(vaeseq_u8(B, K4));
      B = vaesmcq_u8(vaeseq_u8(B, K5));
      B = vaesmcq_u8(vaeseq_u8(B, K6));
      B = vaesmcq_u8(vaeseq_u8(B, K7));
      B = vaesmcq_u8(vaeseq_u8(B, K8));
      B = vaesmcq_u8(vaeseq_u8(B, K9));
      B = vaesmcq_u8(vaeseq_u8(B, K10));
      B = vaesmcq_u8(vaeseq_u8(B, K11));
      B = vaesmcq_u8(vaeseq_u8(B, K12));
      B = veorq_u8(vaeseq_u8(B, K13), K14);
      vst1q_u8(out+16*i, B);
      }
   }

/*
* AES-256 Decryption
*/
BOTAN_FUNC_ISA("+crypto")
void AES_256::armv8_decrypt_n(const uint8_t in[], uint8_t out[], size_t blocks) const
   {
   BOTAN_ASSERT(m_DK.empty() == false, "Key was set");

   const uint8_t *skey = reinterpret_cast<const uint8_t*>(m_DK.data());
   const uint8_t *mkey = reinterpret_cast<const uint8_t*>(m_MD.data());

   const uint8x16_t K0 = vld1q_u8(skey + 0);
   const uint8x16_t K1 = vld1q_u8(skey + 16);
   const uint8x16_t K2 = vld1q_u8(skey + 32);
   const uint8x16_t K3 = vld1q_u8(skey + 48);
   const uint8x16_t K4 = vld1q_u8(skey + 64);
   const uint8x16_t K5 = vld1q_u8(skey + 80);
   const uint8x16_t K6 = vld1q_u8(skey + 96);
   const uint8x16_t K7 = vld1q_u8(skey + 112);
   const uint8x16_t K8 = vld1q_u8(skey + 128);
   const uint8x16_t K9 = vld1q_u8(skey + 144);
   const uint8x16_t K10 = vld1q_u8(skey + 160);
   const uint8x16_t K11 = vld1q_u8(skey + 176);
   const uint8x16_t K12 = vld1q_u8(skey + 192);
   const uint8x16_t K13 = vld1q_u8(skey + 208);
   const uint8x16_t K14 = vld1q_u8(mkey);

   while(blocks >= 4)
      {
      uint8x16_t B0 = vld1q_u8(in);
      uint8x16_t B1 = vld1q_u8(in+16);
      uint8x16_t B2 = vld1q_u8(in+32);
      uint8x16_t B3 = vld1q_u8(in+48);

      AES_DEC_4_ROUNDS(K0);
      AES_DEC_4_ROUNDS(K1);
      AES_DEC_4_ROUNDS(K2);
      AES_DEC_4_ROUNDS(K3);
      AES_DEC_4_ROUNDS(K4);
      AES_DEC_4_ROUNDS(K5);
      AES_DEC_4_ROUNDS(K6);
      AES_DEC_4_ROUNDS(K7);
      AES_DEC_4_ROUNDS(K8);
      AES_DEC_4_ROUNDS(K9);
      AES_DEC_4_ROUNDS(K10);
      AES_DEC_4_ROUNDS(K11);
      AES_DEC_4_ROUNDS(K12);
      AES_DEC_4_LAST_ROUNDS(K13, K14);

      vst1q_u8(out, B0);
      vst1q_u8(out+16, B1);
      vst1q_u8(out+32, B2);
      vst1q_u8(out+48, B3);

      in += 16*4;
      out += 16*4;
      blocks -= 4;
      }

   for(size_t i = 0; i != blocks; ++i)
      {
      uint8x16_t B = vld1q_u8(in+16*i);
      B = vaesimcq_u8(vaesdq_u8(B, K0));
      B = vaesimcq_u8(vaesdq_u8(B, K1));
      B = vaesimcq_u8(vaesdq_u8(B, K2));
      B = vaesimcq_u8(vaesdq_u8(B, K3));
      B = vaesimcq_u8(vaesdq_u8(B, K4));
      B = vaesimcq_u8(vaesdq_u8(B, K5));
      B = vaesimcq_u8(vaesdq_u8(B, K6));
      B = vaesimcq_u8(vaesdq_u8(B, K7));
      B = vaesimcq_u8(vaesdq_u8(B, K8));
      B = vaesimcq_u8(vaesdq_u8(B, K9));
      B = vaesimcq_u8(vaesdq_u8(B, K10));
      B = vaesimcq_u8(vaesdq_u8(B, K11));
      B = vaesimcq_u8(vaesdq_u8(B, K12));
      B = veorq_u8(vaesdq_u8(B, K13), K14);
      vst1q_u8(out+16*i, B);
      }
   }

#undef AES_ENC_4_ROUNDS
#undef AES_ENC_4_LAST_ROUNDS
#undef AES_DEC_4_ROUNDS
#undef AES_DEC_4_LAST_ROUNDS

}
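
/*
* Illustrative usage, not part of the original file: a minimal
* known-answer check for the AES-128 path using the FIPS-197 example
* vector via Botan's public BlockCipher interface. This is a sketch;
* it assumes a build where the ARMv8 crypto extension is detected at
* runtime, so the functions above are the ones actually invoked.
*
*   #include <botan/block_cipher.h>
*   #include <botan/hex.h>
*   #include <cassert>
*   #include <vector>
*
*   void aes128_armv8_kat()
*      {
*      auto aes = Botan::BlockCipher::create_or_throw("AES-128");
*      const std::vector<uint8_t> key =
*         Botan::hex_decode("000102030405060708090A0B0C0D0E0F");
*      const std::vector<uint8_t> pt =
*         Botan::hex_decode("00112233445566778899AABBCCDDEEFF");
*      aes->set_key(key);
*      std::vector<uint8_t> ct(16);
*      aes->encrypt_n(pt.data(), ct.data(), 1);
*      assert(Botan::hex_encode(ct) == "69C4E0D86A7B0430D8CDB78070B4C55A");
*      }
*/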