Botan  1.11.15
sha1_sse2.cpp
/*
* SHA-1 using SSE2
* Based on public domain code by Dean Gaudet
* (http://arctic.org/~dean/crypto/sha1.html)
* (C) 2009-2011 Jack Lloyd
*
* Botan is released under the Simplified BSD License (see license.txt)
*/

#include <botan/internal/hash_utils.h>
#include <botan/sha1_sse2.h>
#include <botan/cpuid.h>
#include <emmintrin.h>

namespace Botan {

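/*
* Register this implementation with the algorithm registry under the name
* "SHA-160" with provider string "sse2", only if SSE2 is available at runtime.
*/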
BOTAN_REGISTER_HASH_NOARGS_IF(CPUID::has_sse2(), SHA_160_SSE2, "SHA-160", "sse2", 64);

namespace SHA1_SSE2_F {

namespace {

/*
* The first 16 words of W are just the message block and only need byte
* swapping. Preparing just means adding in the round constants.
*/

#define prep00_15(P, W) \
   do { \
      W = _mm_shufflehi_epi16(W, _MM_SHUFFLE(2, 3, 0, 1)); \
      W = _mm_shufflelo_epi16(W, _MM_SHUFFLE(2, 3, 0, 1)); \
      W = _mm_or_si128(_mm_slli_epi16(W, 8), \
                       _mm_srli_epi16(W, 8)); \
      P.u128 = _mm_add_epi32(W, K00_19); \
   } while(0)
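
/*
* For reference only: on a single 32-bit word w (loaded little-endian), the
* three shuffles above amount to a full byte swap, so prep00_15 effectively
* computes bswap32(w) + K. A scalar sketch of the same transform (variable
* names here are illustrative, not part of the code):
*
*   w = rotate_left(w, 16);                                // swap 16-bit halves
*   w = ((w & 0x00FF00FF) << 8) | ((w >> 8) & 0x00FF00FF); // swap bytes within halves
*   prepared = w + 0x5A827999;                             // add K for rounds 0..19
*/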

/*
For each multiple of 4, t, we want to calculate this:

W[t+0] = rol(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1);
W[t+1] = rol(W[t-2] ^ W[t-7] ^ W[t-13] ^ W[t-15], 1);
W[t+2] = rol(W[t-1] ^ W[t-6] ^ W[t-12] ^ W[t-14], 1);
W[t+3] = rol(W[t]   ^ W[t-5] ^ W[t-11] ^ W[t-13], 1);

we'll actually calculate this:

W[t+0] = rol(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1);
W[t+1] = rol(W[t-2] ^ W[t-7] ^ W[t-13] ^ W[t-15], 1);
W[t+2] = rol(W[t-1] ^ W[t-6] ^ W[t-12] ^ W[t-14], 1);
W[t+3] = rol(   0   ^ W[t-5] ^ W[t-11] ^ W[t-13], 1);
W[t+3] ^= rol(W[t+0], 1);

the parameters are:

W0 = &W[t-16];
W1 = &W[t-12];
W2 = &W[t- 8];
W3 = &W[t- 4];

and on output:
prepared = W0 + K
W0 = W[t]..W[t+3]
*/

/* Note that there is a step here where I want to do a rotate left by 1,
 * which normally would look like this:
 *
 *   r1 = psrld r0,$31
 *   r0 = pslld r0,$1
 *   r0 = por r0,r1
 *
 * but instead I do this:
 *
 *   r1 = pcmpltd r0,zero
 *   r0 = paddd r0,r0
 *   r0 = psubd r0,r1
 *
 * because pcmpltd and paddd are available in both MMX units on
 * Efficeon, Pentium M, and Opteron, while shifts are available in
 * only one unit.
 */
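
/*
* In scalar terms (illustrative only), the compare/add/subtract sequence
* computes a rotate left by 1:
*
*   mask = (x < 0) ? 0xFFFFFFFF : 0; // _mm_cmplt_epi32(x, 0): -1 where the top bit is set
*   x    = x + x;                    // shift left by 1, the top bit falls off
*   x    = x - mask;                 // subtracting -1 adds the carried-out bit back in
*
* which equals rotate_left(x, 1) for every 32-bit x.
*/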
#define prep(prep, XW0, XW1, XW2, XW3, K) \
   do { \
      __m128i r0, r1, r2, r3; \
      \
      /* load W[t-4] 16-byte aligned, and shift */ \
      r3 = _mm_srli_si128((XW3), 4); \
      r0 = (XW0); \
      /* get high 64-bits of XW0 into low 64-bits */ \
      r1 = _mm_shuffle_epi32((XW0), _MM_SHUFFLE(1,0,3,2)); \
      /* load high 64-bits of r1 */ \
      r1 = _mm_unpacklo_epi64(r1, (XW1)); \
      r2 = (XW2); \
      \
      r0 = _mm_xor_si128(r1, r0); \
      r2 = _mm_xor_si128(r3, r2); \
      r0 = _mm_xor_si128(r2, r0); \
      /* unrotated W[t]..W[t+2] in r0 ... still need W[t+3] */ \
      \
      r2 = _mm_slli_si128(r0, 12); \
      r1 = _mm_cmplt_epi32(r0, _mm_setzero_si128()); \
      r0 = _mm_add_epi32(r0, r0); /* shift left by 1 */ \
      r0 = _mm_sub_epi32(r0, r1); /* r0 has W[t]..W[t+2] */ \
      \
      r3 = _mm_srli_epi32(r2, 30); \
      r2 = _mm_slli_epi32(r2, 2); \
      \
      r0 = _mm_xor_si128(r0, r3); \
      r0 = _mm_xor_si128(r0, r2); /* r0 now has W[t+3] */ \
      \
      (XW0) = r0; \
      (prep).u128 = _mm_add_epi32(r0, K); \
   } while(0)

/*
* SHA-160 F1 Function: the "choice" function Ch(B,C,D) = (B & C) | (~B & D),
* written in the equivalent form D ^ (B & (C ^ D))
*/
inline void F1(u32bit A, u32bit& B, u32bit C, u32bit D, u32bit& E, u32bit msg)
   {
   E += (D ^ (B & (C ^ D))) + msg + rotate_left(A, 5);
   B = rotate_left(B, 30);
   }

/*
* SHA-160 F2 Function
*/
inline void F2(u32bit A, u32bit& B, u32bit C, u32bit D, u32bit& E, u32bit msg)
   {
   E += (B ^ C ^ D) + msg + rotate_left(A, 5);
   B = rotate_left(B, 30);
   }

/*
* SHA-160 F3 Function: the majority function Maj(B,C,D)
*/
inline void F3(u32bit A, u32bit& B, u32bit C, u32bit D, u32bit& E, u32bit msg)
   {
   E += ((B & C) | ((B | C) & D)) + msg + rotate_left(A, 5);
   B = rotate_left(B, 30);
   }

/*
* SHA-160 F4 Function
*/
inline void F4(u32bit A, u32bit& B, u32bit C, u32bit D, u32bit& E, u32bit msg)
   {
   E += (B ^ C ^ D) + msg + rotate_left(A, 5);
   B = rotate_left(B, 30);
   }

}

}

/*
* SHA-160 Compression Function using SSE for message expansion
*/
void SHA_160_SSE2::compress_n(const byte input_bytes[], size_t blocks)
   {
   using namespace SHA1_SSE2_F;

   const __m128i K00_19 = _mm_set1_epi32(0x5A827999);
   const __m128i K20_39 = _mm_set1_epi32(0x6ED9EBA1);
   const __m128i K40_59 = _mm_set1_epi32(0x8F1BBCDC);
   const __m128i K60_79 = _mm_set1_epi32(0xCA62C1D6);
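
   /*
   * These are the standard SHA-1 round constants: floor(2^30 * sqrt(k))
   * for k = 2, 3, 5 and 10 respectively.
   */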

   u32bit A = digest[0],
          B = digest[1],
          C = digest[2],
          D = digest[3],
          E = digest[4];

   const __m128i* input = reinterpret_cast<const __m128i*>(input_bytes);

   for(size_t i = 0; i != blocks; ++i)
      {
      union v4si {
         u32bit u32[4];
         __m128i u128;
      };

      v4si P0, P1, P2, P3;

      __m128i W0 = _mm_loadu_si128(&input[0]);
      prep00_15(P0, W0);

      __m128i W1 = _mm_loadu_si128(&input[1]);
      prep00_15(P1, W1);

      __m128i W2 = _mm_loadu_si128(&input[2]);
      prep00_15(P2, W2);

      __m128i W3 = _mm_loadu_si128(&input[3]);
      prep00_15(P3, W3);

      /*
      Using SSE4; slower on Core2 and Nehalem
      #define GET_P_32(P, i) _mm_extract_epi32(P.u128, i)

      Much slower on all tested platforms
      #define GET_P_32(P,i) _mm_cvtsi128_si32(_mm_srli_si128(P.u128, i*4))
      */

#define GET_P_32(P, i) P.u32[i]

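      /*
      * Each F* call below is one SHA-1 round. Instead of shuffling the five
      * working variables after every round, the calls rotate their roles:
      *
      *   F1(A, B, C, D, E, ...);   // round t
      *   F1(E, A, B, C, D, ...);   // round t+1
      *   ...
      *
      * After five rounds the assignment of roles to A..E repeats, which is
      * equivalent to the textbook E=D; D=C; C=rol(B,30); B=A; A=T rotation.
      */
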
      F1(A, B, C, D, E, GET_P_32(P0, 0));
      F1(E, A, B, C, D, GET_P_32(P0, 1));
      F1(D, E, A, B, C, GET_P_32(P0, 2));
      F1(C, D, E, A, B, GET_P_32(P0, 3));
      prep(P0, W0, W1, W2, W3, K00_19);

      F1(B, C, D, E, A, GET_P_32(P1, 0));
      F1(A, B, C, D, E, GET_P_32(P1, 1));
      F1(E, A, B, C, D, GET_P_32(P1, 2));
      F1(D, E, A, B, C, GET_P_32(P1, 3));
      prep(P1, W1, W2, W3, W0, K20_39);

      F1(C, D, E, A, B, GET_P_32(P2, 0));
      F1(B, C, D, E, A, GET_P_32(P2, 1));
      F1(A, B, C, D, E, GET_P_32(P2, 2));
      F1(E, A, B, C, D, GET_P_32(P2, 3));
      prep(P2, W2, W3, W0, W1, K20_39);

      F1(D, E, A, B, C, GET_P_32(P3, 0));
      F1(C, D, E, A, B, GET_P_32(P3, 1));
      F1(B, C, D, E, A, GET_P_32(P3, 2));
      F1(A, B, C, D, E, GET_P_32(P3, 3));
      prep(P3, W3, W0, W1, W2, K20_39);

      F1(E, A, B, C, D, GET_P_32(P0, 0));
      F1(D, E, A, B, C, GET_P_32(P0, 1));
      F1(C, D, E, A, B, GET_P_32(P0, 2));
      F1(B, C, D, E, A, GET_P_32(P0, 3));
      prep(P0, W0, W1, W2, W3, K20_39);

      F2(A, B, C, D, E, GET_P_32(P1, 0));
      F2(E, A, B, C, D, GET_P_32(P1, 1));
      F2(D, E, A, B, C, GET_P_32(P1, 2));
      F2(C, D, E, A, B, GET_P_32(P1, 3));
      prep(P1, W1, W2, W3, W0, K20_39);

      F2(B, C, D, E, A, GET_P_32(P2, 0));
      F2(A, B, C, D, E, GET_P_32(P2, 1));
      F2(E, A, B, C, D, GET_P_32(P2, 2));
      F2(D, E, A, B, C, GET_P_32(P2, 3));
      prep(P2, W2, W3, W0, W1, K40_59);

      F2(C, D, E, A, B, GET_P_32(P3, 0));
      F2(B, C, D, E, A, GET_P_32(P3, 1));
      F2(A, B, C, D, E, GET_P_32(P3, 2));
      F2(E, A, B, C, D, GET_P_32(P3, 3));
      prep(P3, W3, W0, W1, W2, K40_59);

      F2(D, E, A, B, C, GET_P_32(P0, 0));
      F2(C, D, E, A, B, GET_P_32(P0, 1));
      F2(B, C, D, E, A, GET_P_32(P0, 2));
      F2(A, B, C, D, E, GET_P_32(P0, 3));
      prep(P0, W0, W1, W2, W3, K40_59);

      F2(E, A, B, C, D, GET_P_32(P1, 0));
      F2(D, E, A, B, C, GET_P_32(P1, 1));
      F2(C, D, E, A, B, GET_P_32(P1, 2));
      F2(B, C, D, E, A, GET_P_32(P1, 3));
      prep(P1, W1, W2, W3, W0, K40_59);

      F3(A, B, C, D, E, GET_P_32(P2, 0));
      F3(E, A, B, C, D, GET_P_32(P2, 1));
      F3(D, E, A, B, C, GET_P_32(P2, 2));
      F3(C, D, E, A, B, GET_P_32(P2, 3));
      prep(P2, W2, W3, W0, W1, K40_59);

      F3(B, C, D, E, A, GET_P_32(P3, 0));
      F3(A, B, C, D, E, GET_P_32(P3, 1));
      F3(E, A, B, C, D, GET_P_32(P3, 2));
      F3(D, E, A, B, C, GET_P_32(P3, 3));
      prep(P3, W3, W0, W1, W2, K60_79);

      F3(C, D, E, A, B, GET_P_32(P0, 0));
      F3(B, C, D, E, A, GET_P_32(P0, 1));
      F3(A, B, C, D, E, GET_P_32(P0, 2));
      F3(E, A, B, C, D, GET_P_32(P0, 3));
      prep(P0, W0, W1, W2, W3, K60_79);

      F3(D, E, A, B, C, GET_P_32(P1, 0));
      F3(C, D, E, A, B, GET_P_32(P1, 1));
      F3(B, C, D, E, A, GET_P_32(P1, 2));
      F3(A, B, C, D, E, GET_P_32(P1, 3));
      prep(P1, W1, W2, W3, W0, K60_79);

      F3(E, A, B, C, D, GET_P_32(P2, 0));
      F3(D, E, A, B, C, GET_P_32(P2, 1));
      F3(C, D, E, A, B, GET_P_32(P2, 2));
      F3(B, C, D, E, A, GET_P_32(P2, 3));
      prep(P2, W2, W3, W0, W1, K60_79);

      F4(A, B, C, D, E, GET_P_32(P3, 0));
      F4(E, A, B, C, D, GET_P_32(P3, 1));
      F4(D, E, A, B, C, GET_P_32(P3, 2));
      F4(C, D, E, A, B, GET_P_32(P3, 3));
      prep(P3, W3, W0, W1, W2, K60_79);

      F4(B, C, D, E, A, GET_P_32(P0, 0));
      F4(A, B, C, D, E, GET_P_32(P0, 1));
      F4(E, A, B, C, D, GET_P_32(P0, 2));
      F4(D, E, A, B, C, GET_P_32(P0, 3));

      F4(C, D, E, A, B, GET_P_32(P1, 0));
      F4(B, C, D, E, A, GET_P_32(P1, 1));
      F4(A, B, C, D, E, GET_P_32(P1, 2));
      F4(E, A, B, C, D, GET_P_32(P1, 3));

      F4(D, E, A, B, C, GET_P_32(P2, 0));
      F4(C, D, E, A, B, GET_P_32(P2, 1));
      F4(B, C, D, E, A, GET_P_32(P2, 2));
      F4(A, B, C, D, E, GET_P_32(P2, 3));

      F4(E, A, B, C, D, GET_P_32(P3, 0));
      F4(D, E, A, B, C, GET_P_32(P3, 1));
      F4(C, D, E, A, B, GET_P_32(P3, 2));
      F4(B, C, D, E, A, GET_P_32(P3, 3));

      A = (digest[0] += A);
      B = (digest[1] += B);
      C = (digest[2] += C);
      D = (digest[3] += D);
      E = (digest[4] += E);

      input += (hash_block_size() / 16);
      }

#undef GET_P_32
   }

#undef prep00_15
#undef prep

}
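
For context, a minimal usage sketch (not part of this file). It assumes the
HashFunction::create() factory and the <botan/hash.h> / <botan/hex.h> headers;
if that exact factory is not present in a given 1.11 release, the equivalent
lookup helpers play the same role. With the registration above, requesting
"SHA-160" on an SSE2-capable CPU can dispatch to this implementation.

#include <botan/hash.h>
#include <botan/hex.h>
#include <iostream>
#include <memory>

int main()
   {
   std::unique_ptr<Botan::HashFunction> sha1(Botan::HashFunction::create("SHA-160"));
   if(!sha1)
      return 1; // SHA-160 not available in this build

   sha1->update("abc"); // feed the message
   const auto digest = sha1->final();

   // The well-known SHA-1("abc") test vector is a9993e364706816aba3e25717850c26c9cd0d89d
   std::cout << Botan::hex_encode(digest.data(), digest.size()) << "\n";
   return 0;
   }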