Botan 3.10.0
Crypto and TLS for C++
aes_vperm.cpp
1/*
2* AES using vector permutes (SSSE3, NEON)
3* (C) 2010,2016,2019 Jack Lloyd
4*
5* Based on public domain x86-64 assembly written by Mike Hamburg,
6* described in "Accelerating AES with Vector Permute Instructions"
7* (CHES 2009). His original code is available at
8* https://crypto.stanford.edu/vpaes/
9*
10* Botan is released under the Simplified BSD License (see license.txt)
11*/
12
13#include <botan/internal/aes.h>
14
15#include <botan/internal/ct_utils.h>
16#include <botan/internal/isa_extn.h>
17#include <botan/internal/simd_4x32.h>
18#include <bit>
19
20namespace Botan {
21
22namespace {
23
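// shuffle() and masked_shuffle() wrap the SIMD byte shuffles so that table
// lookups behave identically regardless of host endianness: on big-endian
// targets the table and index vectors are byte-swapped around the shuffle.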
24inline SIMD_4x32 BOTAN_FN_ISA_SIMD_4X32 shuffle(SIMD_4x32 tbl, SIMD_4x32 idx) {
25 if constexpr(std::endian::native == std::endian::little) {
26 return SIMD_4x32::byte_shuffle(tbl, idx);
27 } else {
28 return SIMD_4x32::byte_shuffle(tbl.bswap(), idx.bswap()).bswap();
29 }
30}
31
32inline SIMD_4x32 BOTAN_FN_ISA_SIMD_4X32 masked_shuffle(SIMD_4x32 tbl, SIMD_4x32 idx) {
33 if constexpr(std::endian::native == std::endian::little) {
34 return SIMD_4x32::masked_byte_shuffle(tbl, idx);
35 } else {
36 return SIMD_4x32::masked_byte_shuffle(tbl.bswap(), idx.bswap()).bswap();
37 }
38}
39
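// Constant tables from the vpaes construction (Hamburg, CHES 2009), each
// expressed as four 32-bit words. k_ipt1/k_ipt2 move the input into the
// transformed basis used throughout; k_inv1/k_inv2 drive the GF(2^4)-based
// inversion underlying SubBytes; the sb*/sbo* pairs produce the S-box outputs
// used in the middle and final rounds.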
40const SIMD_4x32 k_ipt1 = SIMD_4x32(0x5A2A7000, 0xC2B2E898, 0x52227808, 0xCABAE090);
41const SIMD_4x32 k_ipt2 = SIMD_4x32(0x317C4D00, 0x4C01307D, 0xB0FDCC81, 0xCD80B1FC);
42
43const SIMD_4x32 k_inv1 = SIMD_4x32(0x0D080180, 0x0E05060F, 0x0A0B0C02, 0x04070309);
44const SIMD_4x32 k_inv2 = SIMD_4x32(0x0F0B0780, 0x01040A06, 0x02050809, 0x030D0E0C);
45
46const SIMD_4x32 sb1u = SIMD_4x32(0xCB503E00, 0xB19BE18F, 0x142AF544, 0xA5DF7A6E);
47const SIMD_4x32 sb1t = SIMD_4x32(0xFAE22300, 0x3618D415, 0x0D2ED9EF, 0x3BF7CCC1);
48const SIMD_4x32 sbou = SIMD_4x32(0x6FBDC700, 0xD0D26D17, 0xC502A878, 0x15AABF7A);
49const SIMD_4x32 sbot = SIMD_4x32(0x5FBB6A00, 0xCFE474A5, 0x412B35FA, 0x8E1E90D1);
50
51const SIMD_4x32 sboud = SIMD_4x32(0x7EF94000, 0x1387EA53, 0xD4943E2D, 0xC7AA6DB9);
52const SIMD_4x32 sbotd = SIMD_4x32(0x93441D00, 0x12D7560F, 0xD8C58E9C, 0xCA4B8159);
53
54const SIMD_4x32 mc_forward[4] = {SIMD_4x32(0x00030201, 0x04070605, 0x080B0A09, 0x0C0F0E0D),
55 SIMD_4x32(0x04070605, 0x080B0A09, 0x0C0F0E0D, 0x00030201),
56 SIMD_4x32(0x080B0A09, 0x0C0F0E0D, 0x00030201, 0x04070605),
57 SIMD_4x32(0x0C0F0E0D, 0x00030201, 0x04070605, 0x080B0A09)};
58
59const SIMD_4x32 vperm_sr[4] = {
60 SIMD_4x32(0x03020100, 0x07060504, 0x0B0A0908, 0x0F0E0D0C),
61 SIMD_4x32(0x0F0A0500, 0x030E0904, 0x07020D08, 0x0B06010C),
62 SIMD_4x32(0x0B020900, 0x0F060D04, 0x030A0108, 0x070E050C),
63 SIMD_4x32(0x070A0D00, 0x0B0E0104, 0x0F020508, 0x0306090C),
64};
65
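// Round constants for the key schedule. These match the constants from the
// vpaes reference code rather than the textbook 0x01, 0x02, ... sequence,
// presumably because the schedule keeps the key in the transformed basis.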
66const SIMD_4x32 rcon[10] = {
67 SIMD_4x32(0x00000070, 0x00000000, 0x00000000, 0x00000000),
68 SIMD_4x32(0x0000002A, 0x00000000, 0x00000000, 0x00000000),
69 SIMD_4x32(0x00000098, 0x00000000, 0x00000000, 0x00000000),
70 SIMD_4x32(0x00000008, 0x00000000, 0x00000000, 0x00000000),
71 SIMD_4x32(0x0000004D, 0x00000000, 0x00000000, 0x00000000),
72 SIMD_4x32(0x0000007C, 0x00000000, 0x00000000, 0x00000000),
73 SIMD_4x32(0x0000007D, 0x00000000, 0x00000000, 0x00000000),
74 SIMD_4x32(0x00000081, 0x00000000, 0x00000000, 0x00000000),
75 SIMD_4x32(0x0000001F, 0x00000000, 0x00000000, 0x00000000),
76 SIMD_4x32(0x00000083, 0x00000000, 0x00000000, 0x00000000),
77};
78
79const SIMD_4x32 sb2u = SIMD_4x32(0x0B712400, 0xE27A93C6, 0xBC982FCD, 0x5EB7E955);
80const SIMD_4x32 sb2t = SIMD_4x32(0x0AE12900, 0x69EB8840, 0xAB82234A, 0xC2A163C8);
81
82const SIMD_4x32 k_dipt1 = SIMD_4x32(0x0B545F00, 0x0F505B04, 0x114E451A, 0x154A411E);
83const SIMD_4x32 k_dipt2 = SIMD_4x32(0x60056500, 0x86E383E6, 0xF491F194, 0x12771772);
84
85const SIMD_4x32 sb9u = SIMD_4x32(0x9A86D600, 0x851C0353, 0x4F994CC9, 0xCAD51F50);
86const SIMD_4x32 sb9t = SIMD_4x32(0xECD74900, 0xC03B1789, 0xB2FBA565, 0x725E2C9E);
87
88const SIMD_4x32 sbeu = SIMD_4x32(0x26D4D000, 0x46F29296, 0x64B4F6B0, 0x22426004);
89const SIMD_4x32 sbet = SIMD_4x32(0xFFAAC100, 0x0C55A6CD, 0x98593E32, 0x9467F36B);
90
91const SIMD_4x32 sbdu = SIMD_4x32(0xE6B1A200, 0x7D57CCDF, 0x882A4439, 0xF56E9B13);
92const SIMD_4x32 sbdt = SIMD_4x32(0x24C6CB00, 0x3CE2FAF7, 0x15DEEFD3, 0x2931180D);
93
94const SIMD_4x32 sbbu = SIMD_4x32(0x96B44200, 0xD0226492, 0xB0F2D404, 0x602646F6);
95const SIMD_4x32 sbbt = SIMD_4x32(0xCD596700, 0xC19498A6, 0x3255AA6B, 0xF3FF0C3E);
96
97const SIMD_4x32 mcx[4] = {
98 SIMD_4x32(0x0C0F0E0D, 0x00030201, 0x04070605, 0x080B0A09),
99 SIMD_4x32(0x080B0A09, 0x0C0F0E0D, 0x00030201, 0x04070605),
100 SIMD_4x32(0x04070605, 0x080B0A09, 0x0C0F0E0D, 0x00030201),
101 SIMD_4x32(0x00030201, 0x04070605, 0x080B0A09, 0x0C0F0E0D),
102};
103
104const SIMD_4x32 mc_backward[4] = {
105 SIMD_4x32(0x02010003, 0x06050407, 0x0A09080B, 0x0E0D0C0F),
106 SIMD_4x32(0x0E0D0C0F, 0x02010003, 0x06050407, 0x0A09080B),
107 SIMD_4x32(0x0A09080B, 0x0E0D0C0F, 0x02010003, 0x06050407),
108 SIMD_4x32(0x06050407, 0x0A09080B, 0x0E0D0C0F, 0x02010003),
109};
110
111const SIMD_4x32 lo_nibs_mask = SIMD_4x32::splat_u8(0x0F);
112
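// Each byte is split into its low and high nibble so that the 16-entry tables
// above can be applied four bits at a time via byte shuffles.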
113inline SIMD_4x32 low_nibs(SIMD_4x32 x) {
114 return lo_nibs_mask & x;
115}
116
117inline SIMD_4x32 high_nibs(SIMD_4x32 x) {
118 return (x.shr<4>() & lo_nibs_mask);
119}
120
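// First round: the input basis change (k_ipt1/k_ipt2) fused with the initial
// AddRoundKey.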
121inline SIMD_4x32 BOTAN_FN_ISA_SIMD_4X32 aes_enc_first_round(SIMD_4x32 B, SIMD_4x32 K) {
122 return shuffle(k_ipt1, low_nibs(B)) ^ shuffle(k_ipt2, high_nibs(B)) ^ K;
123}
124
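// One middle encryption round. t5/t6 are the nibble-wise S-box intermediates
// derived through the k_inv tables; the sb1/sb2 lookups yield the S-box output
// and a scaled copy of it, and the closing shuffles by mc_forward/mc_backward
// combine rotated copies to realize MixColumns. ShiftRows is not applied
// explicitly here; it is folded into the round-dependent permutation indices
// and corrected at the end by vperm_sr.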
125inline SIMD_4x32 BOTAN_FN_ISA_SIMD_4X32 aes_enc_round(SIMD_4x32 B, SIMD_4x32 K, size_t r) {
126 const SIMD_4x32 Bh = high_nibs(B);
127 SIMD_4x32 Bl = low_nibs(B);
128 const SIMD_4x32 t2 = shuffle(k_inv2, Bl);
129 Bl ^= Bh;
130
131 const SIMD_4x32 t5 = Bl ^ masked_shuffle(k_inv1, t2 ^ shuffle(k_inv1, Bh));
132 const SIMD_4x32 t6 = Bh ^ masked_shuffle(k_inv1, t2 ^ shuffle(k_inv1, Bl));
133
134 const SIMD_4x32 t7 = masked_shuffle(sb1t, t6) ^ masked_shuffle(sb1u, t5) ^ K;
135 const SIMD_4x32 t8 = masked_shuffle(sb2t, t6) ^ masked_shuffle(sb2u, t5) ^ shuffle(t7, mc_forward[r % 4]);
136
137 return shuffle(t8, mc_forward[r % 4]) ^ shuffle(t7, mc_backward[r % 4]) ^ t8;
138}
139
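// Final round: S-box output via sbou/sbot plus AddRoundKey, followed by a
// single vperm_sr shuffle to settle the accumulated ShiftRows rotation (the
// last round has no MixColumns).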
140inline SIMD_4x32 BOTAN_FN_ISA_SIMD_4X32 aes_enc_last_round(SIMD_4x32 B, SIMD_4x32 K, size_t r) {
141 const SIMD_4x32 Bh = high_nibs(B);
142 SIMD_4x32 Bl = low_nibs(B);
143 const SIMD_4x32 t2 = shuffle(k_inv2, Bl);
144 Bl ^= Bh;
145
146 const SIMD_4x32 t5 = Bl ^ masked_shuffle(k_inv1, t2 ^ shuffle(k_inv1, Bh));
147 const SIMD_4x32 t6 = Bh ^ masked_shuffle(k_inv1, t2 ^ shuffle(k_inv1, Bl));
148
149 return shuffle(masked_shuffle(sbou, t5) ^ masked_shuffle(sbot, t6) ^ K, vperm_sr[r % 4]);
150}
151
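// Decryption mirrors the structure above: k_dipt1/k_dipt2 perform the input
// basis change, and the sb9/sbd/sbb/sbe table pairs appear to combine the
// inverse S-box with the InvMixColumns multiplications by 0x09, 0x0D, 0x0B
// and 0x0E.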
152inline SIMD_4x32 BOTAN_FN_ISA_SIMD_4X32 aes_dec_first_round(SIMD_4x32 B, SIMD_4x32 K) {
153 return shuffle(k_dipt1, low_nibs(B)) ^ shuffle(k_dipt2, high_nibs(B)) ^ K;
154}
155
156inline SIMD_4x32 BOTAN_FN_ISA_SIMD_4X32 aes_dec_round(SIMD_4x32 B, SIMD_4x32 K, size_t r) {
157 const SIMD_4x32 Bh = high_nibs(B);
158 B = low_nibs(B);
159 const SIMD_4x32 t2 = shuffle(k_inv2, B);
160
161 B ^= Bh;
162
163 const SIMD_4x32 t5 = B ^ masked_shuffle(k_inv1, t2 ^ shuffle(k_inv1, Bh));
164 const SIMD_4x32 t6 = Bh ^ masked_shuffle(k_inv1, t2 ^ shuffle(k_inv1, B));
165
166 const SIMD_4x32 mc = mcx[(r - 1) % 4];
167
168 const SIMD_4x32 t8 = masked_shuffle(sb9t, t6) ^ masked_shuffle(sb9u, t5) ^ K;
169 const SIMD_4x32 t9 = shuffle(t8, mc) ^ masked_shuffle(sbdu, t5) ^ masked_shuffle(sbdt, t6);
170 const SIMD_4x32 t12 = shuffle(t9, mc) ^ masked_shuffle(sbbu, t5) ^ masked_shuffle(sbbt, t6);
171 return shuffle(t12, mc) ^ masked_shuffle(sbeu, t5) ^ masked_shuffle(sbet, t6);
172}
173
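// Final decryption round. which_sr picks the vperm_sr permutation that undoes
// the remaining ShiftRows rotation; it depends only on the total round count
// (and therefore on the key size).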
174inline SIMD_4x32 BOTAN_FN_ISA_SIMD_4X32 aes_dec_last_round(SIMD_4x32 B, SIMD_4x32 K, size_t r) {
175 const uint32_t which_sr = ((((r - 1) << 4) ^ 48) & 48) / 16;
176
177 const SIMD_4x32 Bh = high_nibs(B);
178 B = low_nibs(B);
179 const SIMD_4x32 t2 = shuffle(k_inv2, B);
180
181 B ^= Bh;
182
183 const SIMD_4x32 t5 = B ^ masked_shuffle(k_inv1, t2 ^ shuffle(k_inv1, Bh));
184 const SIMD_4x32 t6 = Bh ^ masked_shuffle(k_inv1, t2 ^ shuffle(k_inv1, B));
185
186 const SIMD_4x32 x = masked_shuffle(sboud, t5) ^ masked_shuffle(sbotd, t6) ^ K;
187 return shuffle(x, vperm_sr[which_sr]);
188}
189
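// Blocks are processed two at a time to expose some instruction-level
// parallelism, with a scalar tail for an odd final block. CT::poison and
// CT::unpoison mark the buffers as secret/public for constant-time analysis
// tooling.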
190void BOTAN_FN_ISA_SIMD_4X32
191vperm_encrypt_blocks(const uint8_t in[], uint8_t out[], size_t blocks, const SIMD_4x32 K[], size_t rounds) {
192 CT::poison(in, blocks * 16);
193
194 const size_t blocks2 = blocks - (blocks % 2);
195
196 for(size_t i = 0; i != blocks2; i += 2) {
197 SIMD_4x32 B0 = SIMD_4x32::load_le(in + i * 16);
198 SIMD_4x32 B1 = SIMD_4x32::load_le(in + (i + 1) * 16);
199
200 B0 = aes_enc_first_round(B0, K[0]);
201 B1 = aes_enc_first_round(B1, K[0]);
202
203 for(size_t r = 1; r != rounds; ++r) {
204 B0 = aes_enc_round(B0, K[r], r);
205 B1 = aes_enc_round(B1, K[r], r);
206 }
207
208 B0 = aes_enc_last_round(B0, K[rounds], rounds);
209 B1 = aes_enc_last_round(B1, K[rounds], rounds);
210
211 B0.store_le(out + i * 16);
212 B1.store_le(out + (i + 1) * 16);
213 }
214
215 for(size_t i = blocks2; i < blocks; ++i) {
216 SIMD_4x32 B = SIMD_4x32::load_le(in + i * 16); // final block when the count is odd
217
218 B = aes_enc_first_round(B, K[0]);
219
220 for(size_t r = 1; r != rounds; ++r) {
221 B = aes_enc_round(B, K[r], r);
222 }
223
224 B = aes_enc_last_round(B, K[rounds], rounds);
225 B.store_le(out + i * 16);
226 }
227
228 CT::unpoison(in, blocks * 16);
229 CT::unpoison(out, blocks * 16);
230}
231
232void BOTAN_FN_ISA_SIMD_4X32
233vperm_decrypt_blocks(const uint8_t in[], uint8_t out[], size_t blocks, const SIMD_4x32 K[], size_t rounds) {
234 CT::poison(in, blocks * 16);
235
236 const size_t blocks2 = blocks - (blocks % 2);
237
238 for(size_t i = 0; i != blocks2; i += 2) {
239 SIMD_4x32 B0 = SIMD_4x32::load_le(in + i * 16);
240 SIMD_4x32 B1 = SIMD_4x32::load_le(in + (i + 1) * 16);
241
242 B0 = aes_dec_first_round(B0, K[0]);
243 B1 = aes_dec_first_round(B1, K[0]);
244
245 for(size_t r = 1; r != rounds; ++r) {
246 B0 = aes_dec_round(B0, K[r], r);
247 B1 = aes_dec_round(B1, K[r], r);
248 }
249
250 B0 = aes_dec_last_round(B0, K[rounds], rounds);
251 B1 = aes_dec_last_round(B1, K[rounds], rounds);
252
253 B0.store_le(out + i * 16);
254 B1.store_le(out + (i + 1) * 16);
255 }
256
257 for(size_t i = blocks2; i < blocks; ++i) {
258 SIMD_4x32 B = SIMD_4x32::load_le(in + i * 16); // final block when the count is odd
259
260 B = aes_dec_first_round(B, K[0]);
261
262 for(size_t r = 1; r != rounds; ++r) {
263 B = aes_dec_round(B, K[r], r);
264 }
265
266 B = aes_dec_last_round(B, K[rounds], rounds);
267 B.store_le(out + i * 16);
268 }
269
270 CT::unpoison(in, blocks * 16);
271 CT::unpoison(out, blocks * 16);
272}
273
274} // namespace
275
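// The per-key-size entry points below simply load the scheduled round keys
// into SIMD registers and dispatch to the shared block loops with the
// appropriate round count (10, 12 or 14).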
276void AES_128::vperm_encrypt_n(const uint8_t in[], uint8_t out[], size_t blocks) const {
277 const SIMD_4x32 K[11] = {
278 SIMD_4x32::load_le(&m_EK[4 * 0]),
279 SIMD_4x32::load_le(&m_EK[4 * 1]),
280 SIMD_4x32::load_le(&m_EK[4 * 2]),
281 SIMD_4x32::load_le(&m_EK[4 * 3]),
282 SIMD_4x32::load_le(&m_EK[4 * 4]),
283 SIMD_4x32::load_le(&m_EK[4 * 5]),
284 SIMD_4x32::load_le(&m_EK[4 * 6]),
285 SIMD_4x32::load_le(&m_EK[4 * 7]),
286 SIMD_4x32::load_le(&m_EK[4 * 8]),
287 SIMD_4x32::load_le(&m_EK[4 * 9]),
288 SIMD_4x32::load_le(&m_EK[4 * 10]),
289 };
290
291 return vperm_encrypt_blocks(in, out, blocks, K, 10);
292}
293
294void AES_128::vperm_decrypt_n(const uint8_t in[], uint8_t out[], size_t blocks) const {
295 const SIMD_4x32 K[11] = {
296 SIMD_4x32::load_le(&m_DK[4 * 0]),
297 SIMD_4x32::load_le(&m_DK[4 * 1]),
298 SIMD_4x32::load_le(&m_DK[4 * 2]),
299 SIMD_4x32::load_le(&m_DK[4 * 3]),
300 SIMD_4x32::load_le(&m_DK[4 * 4]),
301 SIMD_4x32::load_le(&m_DK[4 * 5]),
302 SIMD_4x32::load_le(&m_DK[4 * 6]),
303 SIMD_4x32::load_le(&m_DK[4 * 7]),
304 SIMD_4x32::load_le(&m_DK[4 * 8]),
305 SIMD_4x32::load_le(&m_DK[4 * 9]),
306 SIMD_4x32::load_le(&m_DK[4 * 10]),
307 };
308
309 return vperm_decrypt_blocks(in, out, blocks, K, 10);
310}
311
312void AES_192::vperm_encrypt_n(const uint8_t in[], uint8_t out[], size_t blocks) const {
313 const SIMD_4x32 K[13] = {
314 SIMD_4x32::load_le(&m_EK[4 * 0]),
315 SIMD_4x32::load_le(&m_EK[4 * 1]),
316 SIMD_4x32::load_le(&m_EK[4 * 2]),
317 SIMD_4x32::load_le(&m_EK[4 * 3]),
318 SIMD_4x32::load_le(&m_EK[4 * 4]),
319 SIMD_4x32::load_le(&m_EK[4 * 5]),
320 SIMD_4x32::load_le(&m_EK[4 * 6]),
321 SIMD_4x32::load_le(&m_EK[4 * 7]),
322 SIMD_4x32::load_le(&m_EK[4 * 8]),
323 SIMD_4x32::load_le(&m_EK[4 * 9]),
324 SIMD_4x32::load_le(&m_EK[4 * 10]),
325 SIMD_4x32::load_le(&m_EK[4 * 11]),
326 SIMD_4x32::load_le(&m_EK[4 * 12]),
327 };
328
329 return vperm_encrypt_blocks(in, out, blocks, K, 12);
330}
331
332void AES_192::vperm_decrypt_n(const uint8_t in[], uint8_t out[], size_t blocks) const {
333 const SIMD_4x32 K[13] = {
334 SIMD_4x32::load_le(&m_DK[4 * 0]),
335 SIMD_4x32::load_le(&m_DK[4 * 1]),
336 SIMD_4x32::load_le(&m_DK[4 * 2]),
337 SIMD_4x32::load_le(&m_DK[4 * 3]),
338 SIMD_4x32::load_le(&m_DK[4 * 4]),
339 SIMD_4x32::load_le(&m_DK[4 * 5]),
340 SIMD_4x32::load_le(&m_DK[4 * 6]),
341 SIMD_4x32::load_le(&m_DK[4 * 7]),
342 SIMD_4x32::load_le(&m_DK[4 * 8]),
343 SIMD_4x32::load_le(&m_DK[4 * 9]),
344 SIMD_4x32::load_le(&m_DK[4 * 10]),
345 SIMD_4x32::load_le(&m_DK[4 * 11]),
346 SIMD_4x32::load_le(&m_DK[4 * 12]),
347 };
348
349 return vperm_decrypt_blocks(in, out, blocks, K, 12);
350}
351
352void AES_256::vperm_encrypt_n(const uint8_t in[], uint8_t out[], size_t blocks) const {
353 const SIMD_4x32 K[15] = {
354 SIMD_4x32::load_le(&m_EK[4 * 0]),
355 SIMD_4x32::load_le(&m_EK[4 * 1]),
356 SIMD_4x32::load_le(&m_EK[4 * 2]),
357 SIMD_4x32::load_le(&m_EK[4 * 3]),
358 SIMD_4x32::load_le(&m_EK[4 * 4]),
359 SIMD_4x32::load_le(&m_EK[4 * 5]),
360 SIMD_4x32::load_le(&m_EK[4 * 6]),
361 SIMD_4x32::load_le(&m_EK[4 * 7]),
362 SIMD_4x32::load_le(&m_EK[4 * 8]),
363 SIMD_4x32::load_le(&m_EK[4 * 9]),
364 SIMD_4x32::load_le(&m_EK[4 * 10]),
365 SIMD_4x32::load_le(&m_EK[4 * 11]),
366 SIMD_4x32::load_le(&m_EK[4 * 12]),
367 SIMD_4x32::load_le(&m_EK[4 * 13]),
368 SIMD_4x32::load_le(&m_EK[4 * 14]),
369 };
370
371 return vperm_encrypt_blocks(in, out, blocks, K, 14);
372}
373
374void AES_256::vperm_decrypt_n(const uint8_t in[], uint8_t out[], size_t blocks) const {
375 const SIMD_4x32 K[15] = {
376 SIMD_4x32::load_le(&m_DK[4 * 0]),
377 SIMD_4x32::load_le(&m_DK[4 * 1]),
378 SIMD_4x32::load_le(&m_DK[4 * 2]),
379 SIMD_4x32::load_le(&m_DK[4 * 3]),
380 SIMD_4x32::load_le(&m_DK[4 * 4]),
381 SIMD_4x32::load_le(&m_DK[4 * 5]),
382 SIMD_4x32::load_le(&m_DK[4 * 6]),
383 SIMD_4x32::load_le(&m_DK[4 * 7]),
384 SIMD_4x32::load_le(&m_DK[4 * 8]),
385 SIMD_4x32::load_le(&m_DK[4 * 9]),
386 SIMD_4x32::load_le(&m_DK[4 * 10]),
387 SIMD_4x32::load_le(&m_DK[4 * 11]),
388 SIMD_4x32::load_le(&m_DK[4 * 12]),
389 SIMD_4x32::load_le(&m_DK[4 * 13]),
390 SIMD_4x32::load_le(&m_DK[4 * 14]),
391 };
392
393 return vperm_decrypt_blocks(in, out, blocks, K, 14);
394}
395
396namespace {
397
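// Key schedule helpers. aes_schedule_transform applies a table-pair transform
// (a basis change) to the input, one nibble at a time.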
398inline SIMD_4x32 BOTAN_FN_ISA_SIMD_4X32 aes_schedule_transform(SIMD_4x32 input, SIMD_4x32 table_1, SIMD_4x32 table_2) {
399 return shuffle(table_1, low_nibs(input)) ^ shuffle(table_2, high_nibs(input));
400}
401
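// "Mangles" an encryption round key before it is stored: XORs in the 0x5B
// constant, accumulates three mc_forward-rotated copies, then applies the
// per-round vperm_sr permutation, presumably so the stored key lines up with
// the rotated state used by the round functions.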
402SIMD_4x32 BOTAN_FN_ISA_SIMD_4X32 aes_schedule_mangle(SIMD_4x32 k, uint8_t round_no) {
403 const SIMD_4x32 mc_forward0(0x00030201, 0x04070605, 0x080B0A09, 0x0C0F0E0D);
404
405 SIMD_4x32 t = shuffle(k ^ SIMD_4x32::splat_u8(0x5B), mc_forward0);
406 SIMD_4x32 t2 = t;
407 t = shuffle(t, mc_forward0);
408 t2 = t ^ t2 ^ shuffle(t, mc_forward0);
409 return shuffle(t2, vperm_sr[round_no % 4]);
410}
411
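// Decryption counterpart: the dsk table pairs convert a (transformed)
// encryption round key into the matching decryption round key, presumably
// folding in the InvMixColumns adjustment, before the same per-round
// vperm_sr permutation is applied.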
412SIMD_4x32 BOTAN_FN_ISA_SIMD_4X32 aes_schedule_mangle_dec(SIMD_4x32 k, uint8_t round_no) {
413 const SIMD_4x32 mc_forward0(0x00030201, 0x04070605, 0x080B0A09, 0x0C0F0E0D);
414
415 const SIMD_4x32 dsk[8] = {
416 SIMD_4x32(0x7ED9A700, 0xB6116FC8, 0x82255BFC, 0x4AED9334),
417 SIMD_4x32(0x27143300, 0x45765162, 0xE9DAFDCE, 0x8BB89FAC),
418 SIMD_4x32(0xCCA86400, 0x27438FEB, 0xADC90561, 0x4622EE8A),
419 SIMD_4x32(0x4F92DD00, 0x815C13CE, 0xBD602FF2, 0x73AEE13C),
420 SIMD_4x32(0x01C6C700, 0x03C4C502, 0xFA3D3CFB, 0xF83F3EF9),
421 SIMD_4x32(0x38CFF700, 0xEE1921D6, 0x7384BC4B, 0xA5526A9D),
422 SIMD_4x32(0x53732000, 0xE3C390B0, 0x10306343, 0xA080D3F3),
423 SIMD_4x32(0x036982E8, 0xA0CA214B, 0x8CE60D67, 0x2F45AEC4),
424 };
425
426 SIMD_4x32 t = aes_schedule_transform(k, dsk[0], dsk[1]);
427 SIMD_4x32 output = shuffle(t, mc_forward0);
428
429 t = aes_schedule_transform(t, dsk[2], dsk[3]);
430 output = shuffle(t ^ output, mc_forward0);
431
432 t = aes_schedule_transform(t, dsk[4], dsk[5]);
433 output = shuffle(t ^ output, mc_forward0);
434
435 t = aes_schedule_transform(t, dsk[6], dsk[7]);
436 output = shuffle(t ^ output, mc_forward0);
437
438 return shuffle(output, vperm_sr[round_no % 4]);
439}
440
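// The very last round key instead receives the output transform
// (out_tr1/out_tr2), which presumably takes it back out of the transformed
// basis used by the schedule.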
441SIMD_4x32 BOTAN_FN_ISA_SIMD_4X32 aes_schedule_mangle_last(SIMD_4x32 k, uint8_t round_no) {
442 const SIMD_4x32 out_tr1(0xD6B66000, 0xFF9F4929, 0xDEBE6808, 0xF7974121);
443 const SIMD_4x32 out_tr2(0x50BCEC00, 0x01EDBD51, 0xB05C0CE0, 0xE10D5DB1);
444
445 k = shuffle(k, vperm_sr[round_no % 4]);
446 k ^= SIMD_4x32::splat_u8(0x5B);
447 return aes_schedule_transform(k, out_tr1, out_tr2);
448}
449
450SIMD_4x32 BOTAN_FN_ISA_SIMD_4X32 aes_schedule_mangle_last_dec(SIMD_4x32 k) {
451 const SIMD_4x32 deskew1(0x47A4E300, 0x07E4A340, 0x5DBEF91A, 0x1DFEB95A);
452 const SIMD_4x32 deskew2(0x83EA6900, 0x5F36B5DC, 0xF49D1E77, 0x2841C2AB);
453
454 k ^= SIMD_4x32::splat_u8(0x5B);
455 return aes_schedule_transform(k, deskew1, deskew2);
456}
457
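// Core key expansion step: input2 is "smeared" (XORed with shifted copies of
// itself and the 0x5B constant) and combined with the S-box of input1,
// computed with the same k_inv/sb1 tables as the encryption rounds.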
458SIMD_4x32 BOTAN_FN_ISA_SIMD_4X32 aes_schedule_round(SIMD_4x32 input1, SIMD_4x32 input2) {
459 SIMD_4x32 smeared = input2 ^ input2.shift_elems_left<1>();
460 smeared ^= smeared.shift_elems_left<2>();
461 smeared ^= SIMD_4x32::splat_u8(0x5B);
462
463 const SIMD_4x32 Bh = high_nibs(input1);
464 SIMD_4x32 Bl = low_nibs(input1);
465
466 const SIMD_4x32 t2 = shuffle(k_inv2, Bl);
467
468 Bl ^= Bh;
469
470 SIMD_4x32 t5 = Bl ^ masked_shuffle(k_inv1, t2 ^ shuffle(k_inv1, Bh));
471 SIMD_4x32 t6 = Bh ^ masked_shuffle(k_inv1, t2 ^ shuffle(k_inv1, Bl));
472
473 return smeared ^ masked_shuffle(sb1u, t5) ^ masked_shuffle(sb1t, t6);
474}
475
476SIMD_4x32 BOTAN_FN_ISA_SIMD_4X32 aes_schedule_round(SIMD_4x32 rc, SIMD_4x32 input1, SIMD_4x32 input2) {
477 // This byte shuffle is equivalent to alignr<1>(shuffle32(input1, (3,3,3,3)));
478 const SIMD_4x32 shuffle3333_15 = SIMD_4x32::splat(0x0C0F0E0D);
479 return aes_schedule_round(shuffle(input1, shuffle3333_15), input2 ^ rc);
480}
481
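// Used by the 192-bit schedule to spread partial key material across a full
// 128-bit block, combining shuffled copies of x and y with half of y masked
// off first.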
482SIMD_4x32 BOTAN_FN_ISA_SIMD_4X32 aes_schedule_192_smear(SIMD_4x32 x, SIMD_4x32 y) {
483 const SIMD_4x32 shuffle3332 = SIMD_4x32(0x0B0A0908, 0x0F0E0D0C, 0x0F0E0D0C, 0x0F0E0D0C);
484 const SIMD_4x32 shuffle2000 = SIMD_4x32(0x03020100, 0x03020100, 0x03020100, 0x0B0A0908);
485
486 const SIMD_4x32 zero_top_half(0, 0, 0xFFFFFFFF, 0xFFFFFFFF);
487 y &= zero_top_half;
488 return y ^ shuffle(x, shuffle3332) ^ shuffle(y, shuffle2000);
489}
490
491} // namespace
492
493// NOLINTBEGIN(readability-container-data-pointer)
494
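// The key schedules below fill both m_EK and m_DK. Round keys are stored
// pre-mangled into the form the round functions above expect, and the
// decryption keys are written in reverse round order.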
495void AES_128::vperm_key_schedule(const uint8_t keyb[], size_t /*unused*/) {
496 m_EK.resize(11 * 4);
497 m_DK.resize(11 * 4);
498
499 SIMD_4x32 key = SIMD_4x32::load_le(keyb);
500
501 shuffle(key, vperm_sr[2]).store_le(&m_DK[4 * 10]);
502
503 key = aes_schedule_transform(key, k_ipt1, k_ipt2);
504 key.store_le(&m_EK[0]);
505
506 for(size_t i = 1; i != 10; ++i) {
507 key = aes_schedule_round(rcon[i - 1], key, key);
508
509 aes_schedule_mangle(key, (12 - i) % 4).store_le(&m_EK[4 * i]);
510
511 aes_schedule_mangle_dec(key, (10 - i) % 4).store_le(&m_DK[4 * (10 - i)]);
512 }
513
514 key = aes_schedule_round(rcon[9], key, key);
515 aes_schedule_mangle_last(key, 2).store_le(&m_EK[4 * 10]);
516 aes_schedule_mangle_last_dec(key).store_le(&m_DK[0]);
517}
518
519void AES_192::vperm_key_schedule(const uint8_t keyb[], size_t /*unused*/) {
520 m_EK.resize(13 * 4);
521 m_DK.resize(13 * 4);
522
523 SIMD_4x32 key1 = SIMD_4x32::load_le(keyb);
524 SIMD_4x32 key2 = SIMD_4x32::load_le(keyb + 8);
525
526 shuffle(key1, vperm_sr[0]).store_le(&m_DK[12 * 4]);
527
528 key1 = aes_schedule_transform(key1, k_ipt1, k_ipt2);
529 key2 = aes_schedule_transform(key2, k_ipt1, k_ipt2);
530
531 key1.store_le(&m_EK[0]);
532
533 for(size_t i = 0; i != 4; ++i) {
534 // key2 with 8 high bytes masked off
535 SIMD_4x32 t = key2;
536 key2 = aes_schedule_round(rcon[2 * i], key2, key1);
537 const auto key2t = SIMD_4x32::alignr8(key2, t);
538
539 aes_schedule_mangle(key2t, (i + 3) % 4).store_le(&m_EK[4 * (3 * i + 1)]);
540 aes_schedule_mangle_dec(key2t, (i + 3) % 4).store_le(&m_DK[4 * (11 - 3 * i)]);
541
542 t = aes_schedule_192_smear(key2, t);
543
544 aes_schedule_mangle(t, (i + 2) % 4).store_le(&m_EK[4 * (3 * i + 2)]);
545 aes_schedule_mangle_dec(t, (i + 2) % 4).store_le(&m_DK[4 * (10 - 3 * i)]);
546
547 key2 = aes_schedule_round(rcon[2 * i + 1], t, key2);
548
549 if(i == 3) {
550 aes_schedule_mangle_last(key2, (i + 1) % 4).store_le(&m_EK[4 * (3 * i + 3)]);
551 aes_schedule_mangle_last_dec(key2).store_le(&m_DK[4 * (9 - 3 * i)]);
552 } else {
553 aes_schedule_mangle(key2, (i + 1) % 4).store_le(&m_EK[4 * (3 * i + 3)]);
554 aes_schedule_mangle_dec(key2, (i + 1) % 4).store_le(&m_DK[4 * (9 - 3 * i)]);
555 }
556
557 key1 = key2;
558 key2 = aes_schedule_192_smear(key2, t);
559 }
560}
561
562void AES_256::vperm_key_schedule(const uint8_t keyb[], size_t /*unused*/) {
563 m_EK.resize(15 * 4);
564 m_DK.resize(15 * 4);
565
566 SIMD_4x32 key1 = SIMD_4x32::load_le(keyb);
567 SIMD_4x32 key2 = SIMD_4x32::load_le(keyb + 16);
568
569 shuffle(key1, vperm_sr[2]).store_le(&m_DK[4 * 14]);
570
571 key1 = aes_schedule_transform(key1, k_ipt1, k_ipt2);
572 key2 = aes_schedule_transform(key2, k_ipt1, k_ipt2);
573
574 key1.store_le(&m_EK[0]);
575 aes_schedule_mangle(key2, 3).store_le(&m_EK[4]);
576
577 aes_schedule_mangle_dec(key2, 1).store_le(&m_DK[4 * 13]);
578
579 const SIMD_4x32 shuffle3333 = SIMD_4x32::splat(0x0F0E0D0C);
580
581 for(size_t i = 2; i != 14; i += 2) {
582 const SIMD_4x32 k_t = key2;
583 key1 = key2 = aes_schedule_round(rcon[(i / 2) - 1], key2, key1);
584
585 aes_schedule_mangle(key2, i % 4).store_le(&m_EK[4 * i]);
586 aes_schedule_mangle_dec(key2, (i + 2) % 4).store_le(&m_DK[4 * (14 - i)]);
587
588 key2 = aes_schedule_round(shuffle(key2, shuffle3333), k_t);
589
590 aes_schedule_mangle(key2, (i - 1) % 4).store_le(&m_EK[4 * (i + 1)]);
591 aes_schedule_mangle_dec(key2, (i + 1) % 4).store_le(&m_DK[4 * (13 - i)]);
592 }
593
594 key2 = aes_schedule_round(rcon[6], key2, key1);
595
596 aes_schedule_mangle_last(key2, 2).store_le(&m_EK[4 * 14]);
597 aes_schedule_mangle_last_dec(key2).store_le(&m_DK[0]);
598}
599
600// NOLINTEND(readability-container-data-pointer)
601
602} // namespace Botan