Botan 3.10.0
Crypto and TLS for C++
ghash_cpu.cpp
/*
* Hook for CLMUL/PMULL/VPMSUM
* (C) 2013,2017,2019,2020 Jack Lloyd
*
* Botan is released under the Simplified BSD License (see license.txt)
*/

#include <botan/internal/ghash.h>

#include <botan/internal/isa_extn.h>
#include <botan/internal/simd_4x32.h>
#include <botan/internal/target_info.h>

#if defined(BOTAN_SIMD_USE_SSSE3)
   #include <immintrin.h>
   #include <wmmintrin.h>
#endif

namespace Botan {

namespace {

// NOLINTBEGIN(portability-simd-intrinsics)

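// GHASH treats its 128-bit operands as big-endian byte strings, while the
// loads in this file are little-endian; reverse_vector performs the byte
// reversal so the field arithmetic can be done on the natural in-register
// representation.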
BOTAN_FORCE_INLINE BOTAN_FN_ISA_SIMD_4X32 SIMD_4x32 reverse_vector(const SIMD_4x32& in) {
#if defined(BOTAN_SIMD_USE_SSSE3)
   const __m128i BSWAP_MASK = _mm_set_epi8(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
   return SIMD_4x32(_mm_shuffle_epi8(in.raw(), BSWAP_MASK));
#elif defined(BOTAN_SIMD_USE_NEON)
   const uint8_t maskb[16] = {15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0};
   const uint8x16_t mask = vld1q_u8(maskb);
   return SIMD_4x32(vreinterpretq_u32_u8(vqtbl1q_u8(vreinterpretq_u8_u32(in.raw()), mask)));
#elif defined(BOTAN_SIMD_USE_ALTIVEC)
   const __vector unsigned char mask = {15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0};
   return SIMD_4x32(vec_perm(in.raw(), in.raw(), mask));
#endif
}

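// Carryless (polynomial) 64x64 -> 128 bit multiply. The mode M selects which
// 64-bit halves are multiplied, following the immediate-byte convention of
// _mm_clmulepi64_si128 as called below: the low nibble picks the half of x,
// the high nibble the half of H (0 = low, 1 = high). The NEON path extracts
// the selected lanes for vmull_p64; the AltiVec path masks or shifts the
// operands so that vpmsumd (which multiplies both lanes and xors the two
// products) yields only the selected product.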
template <int M>
BOTAN_FORCE_INLINE BOTAN_FN_ISA_CLMUL SIMD_4x32 clmul(const SIMD_4x32& H, const SIMD_4x32& x) {
   static_assert(M == 0x00 || M == 0x01 || M == 0x10 || M == 0x11, "Valid clmul mode");

#if defined(BOTAN_SIMD_USE_SSSE3)
   return SIMD_4x32(_mm_clmulepi64_si128(x.raw(), H.raw(), M));
#elif defined(BOTAN_SIMD_USE_NEON)
   const uint64_t a = vgetq_lane_u64(vreinterpretq_u64_u32(x.raw()), M & 0x01);
   const uint64_t b = vgetq_lane_u64(vreinterpretq_u64_u32(H.raw()), (M & 0x10) >> 4);

   #if defined(BOTAN_BUILD_COMPILER_IS_MSVC)
   __n64 a1 = {a}, b1 = {b};
   return SIMD_4x32(vmull_p64(a1, b1));
   #else
   return SIMD_4x32(reinterpret_cast<uint32x4_t>(vmull_p64(a, b)));
   #endif

#elif defined(BOTAN_SIMD_USE_ALTIVEC)
   const SIMD_4x32 mask_lo = SIMD_4x32(0, 0, 0xFFFFFFFF, 0xFFFFFFFF);
   constexpr uint8_t flip = (std::endian::native == std::endian::big) ? 0x11 : 0x00;

   SIMD_4x32 i1 = x;
   SIMD_4x32 i2 = H;

   if constexpr(std::endian::native == std::endian::big) {
      i1 = reverse_vector(i1).bswap();
      i2 = reverse_vector(i2).bswap();
   }

   if constexpr(M == (0x11 ^ flip)) {
      i1 &= mask_lo;
      i2 &= mask_lo;
   } else if constexpr(M == (0x10 ^ flip)) {
      i1 = i1.shift_elems_left<2>();
   } else if constexpr(M == (0x01 ^ flip)) {
      i2 = i2.shift_elems_left<2>();
   } else if constexpr(M == (0x00 ^ flip)) {
      i1 = mask_lo.andc(i1);
      i2 = mask_lo.andc(i2);
   }

   auto i1v = reinterpret_cast<__vector unsigned long long>(i1.raw());
   auto i2v = reinterpret_cast<__vector unsigned long long>(i2.raw());

   #if BOTAN_COMPILER_HAS_BUILTIN(__builtin_crypto_vpmsumd)
   auto rv = __builtin_crypto_vpmsumd(i1v, i2v);
   #else
   auto rv = __builtin_altivec_crypto_vpmsumd(i1v, i2v);
   #endif

   auto z = SIMD_4x32(reinterpret_cast<__vector unsigned int>(rv));

   if constexpr(std::endian::native == std::endian::big) {
      z = reverse_vector(z).bswap();
   }

   return z;
#endif
}

// NOLINTEND(portability-simd-intrinsics)

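// Reduce a 256-bit carryless product (B0 = high 128 bits, B1 = low 128 bits)
// modulo the GCM polynomial x^128 + x^7 + x^2 + x + 1. GHASH uses a
// bit-reflected representation, so the double-width product is first shifted
// left by one bit (the shl<1>/shr<31> carry chain below) before the high
// half is folded back into the low half.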
inline SIMD_4x32 BOTAN_FN_ISA_CLMUL gcm_reduce(const SIMD_4x32& B0, const SIMD_4x32& B1) {
   SIMD_4x32 X0 = B1.shr<31>();
   SIMD_4x32 X1 = B1.shl<1>();
   SIMD_4x32 X2 = B0.shr<31>();
   SIMD_4x32 X3 = B0.shl<1>();

   X3 |= X0.shift_elems_right<3>();
   X3 |= X2.shift_elems_left<1>();
   X1 |= X0.shift_elems_left<1>();

   X0 = X1.shl<31>() ^ X1.shl<30>() ^ X1.shl<25>();

   X1 ^= X0.shift_elems_left<3>();

   X0 = X1 ^ X3 ^ X0.shift_elems_right<1>();
   X0 ^= X1.shr<7>() ^ X1.shr<2>() ^ X1.shr<1>();
   return X0;
}

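// One 128x128 -> 128 bit GF(2^128) multiplication: four 64-bit carryless
// multiplies form the 256-bit schoolbook product (T0 the high half, T3 the
// low half, with the two cross terms T1/T2 folded into both), followed by a
// single reduction.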
inline SIMD_4x32 BOTAN_FN_ISA_CLMUL gcm_multiply(const SIMD_4x32& H, const SIMD_4x32& x) {
   SIMD_4x32 T0 = clmul<0x11>(H, x);
   SIMD_4x32 T1 = clmul<0x10>(H, x);
   SIMD_4x32 T2 = clmul<0x01>(H, x);
   SIMD_4x32 T3 = clmul<0x00>(H, x);

   T1 ^= T2;
   T0 ^= T1.shift_elems_right<2>();
   T3 ^= T1.shift_elems_left<2>();

   return gcm_reduce(T0, T3);
}

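// Four GHASH block multiplications sharing a single reduction: computes
// (H1*X1) ^ (H2*X2) ^ (H3*X3) ^ (H4*X4) mod the GCM polynomial. The cross
// terms use the Karatsuba identity, so each pair costs three carryless
// multiplies rather than four. T starts out zeroed (SIMD_4x32's default
// constructor is the all-zero vector).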
inline SIMD_4x32 BOTAN_FN_ISA_CLMUL gcm_multiply_x4(const SIMD_4x32& H1,
                                                    const SIMD_4x32& H2,
                                                    const SIMD_4x32& H3,
                                                    const SIMD_4x32& H4,
                                                    const SIMD_4x32& X1,
                                                    const SIMD_4x32& X2,
                                                    const SIMD_4x32& X3,
                                                    const SIMD_4x32& X4) {
   /*
   * Multiply with delayed reduction, algorithm by Krzysztof Jankowski
   * and Pierre Laurent of Intel
   */

   const SIMD_4x32 lo = (clmul<0x00>(H1, X1) ^ clmul<0x00>(H2, X2)) ^ (clmul<0x00>(H3, X3) ^ clmul<0x00>(H4, X4));

   const SIMD_4x32 hi = (clmul<0x11>(H1, X1) ^ clmul<0x11>(H2, X2)) ^ (clmul<0x11>(H3, X3) ^ clmul<0x11>(H4, X4));

   SIMD_4x32 T;

   T ^= clmul<0x00>(H1 ^ H1.shift_elems_right<2>(), X1 ^ X1.shift_elems_right<2>());
   T ^= clmul<0x00>(H2 ^ H2.shift_elems_right<2>(), X2 ^ X2.shift_elems_right<2>());
   T ^= clmul<0x00>(H3 ^ H3.shift_elems_right<2>(), X3 ^ X3.shift_elems_right<2>());
   T ^= clmul<0x00>(H4 ^ H4.shift_elems_right<2>(), X4 ^ X4.shift_elems_right<2>());
   T ^= lo;
   T ^= hi;

   return gcm_reduce(hi ^ T.shift_elems_right<2>(), lo ^ T.shift_elems_left<2>());
}

}  // namespace

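// Precompute H, H^2, H^3 and H^4 so ghash_multiply_cpu can process four
// blocks per reduction; note H^3 = H * H^2 and H^4 = H^2 * H^2.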
void BOTAN_FN_ISA_CLMUL GHASH::ghash_precompute_cpu(const uint8_t H_bytes[16], uint64_t H_pow[4 * 2]) {
   const SIMD_4x32 H1 = reverse_vector(SIMD_4x32::load_le(H_bytes));
   const SIMD_4x32 H2 = gcm_multiply(H1, H1);
   const SIMD_4x32 H3 = gcm_multiply(H1, H2);
   const SIMD_4x32 H4 = gcm_multiply(H2, H2);

   H1.store_le(H_pow);
   H2.store_le(H_pow + 2);
   H3.store_le(H_pow + 4);
   H4.store_le(H_pow + 6);
}

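// Fold `blocks` 16-byte blocks into the GHASH state x. The main loop applies
// Horner's rule with a 4-way aggregated product: after xoring the first
// block m0 into the accumulator a, a single call computes
//    a = a*H^4 ^ m1*H^3 ^ m2*H^2 ^ m3*H
// with one shared reduction. Leftover blocks (and short inputs) take the
// single-multiply path.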
void BOTAN_FN_ISA_CLMUL GHASH::ghash_multiply_cpu(uint8_t x[16],
                                                  const uint64_t H_pow[8],
                                                  const uint8_t input[],
                                                  size_t blocks) {
   /*
   * Algorithms 1 and 5 from Intel's CLMUL guide
   */
   const SIMD_4x32 H1 = SIMD_4x32::load_le(H_pow);

   SIMD_4x32 a = reverse_vector(SIMD_4x32::load_le(x));

   if(blocks >= 4) {
      const SIMD_4x32 H2 = SIMD_4x32::load_le(H_pow + 2);
      const SIMD_4x32 H3 = SIMD_4x32::load_le(H_pow + 4);
      const SIMD_4x32 H4 = SIMD_4x32::load_le(H_pow + 6);

      while(blocks >= 4) {
         const SIMD_4x32 m0 = reverse_vector(SIMD_4x32::load_le(input));
         const SIMD_4x32 m1 = reverse_vector(SIMD_4x32::load_le(input + 16 * 1));
         const SIMD_4x32 m2 = reverse_vector(SIMD_4x32::load_le(input + 16 * 2));
         const SIMD_4x32 m3 = reverse_vector(SIMD_4x32::load_le(input + 16 * 3));

         a ^= m0;
         a = gcm_multiply_x4(H1, H2, H3, H4, m3, m2, m1, a);

         input += 4 * 16;
         blocks -= 4;
      }
   }

   for(size_t i = 0; i != blocks; ++i) {
      const SIMD_4x32 m = reverse_vector(SIMD_4x32::load_le(input + 16 * i));

      a ^= m;
      a = gcm_multiply(H1, a);
   }

   a = reverse_vector(a);
   a.store_le(x);
}

}  // namespace Botan