Botan  1.11.10
sha1_sse2.cpp
Go to the documentation of this file.
1 /*
2 * SHA-1 using SSE2
3 * (C) 2009-2011 Jack Lloyd
4 *
5 * Distributed under the terms of the Botan license
6 *
7 * Based on public domain code by Dean Gaudet
8 * (http://arctic.org/~dean/crypto/sha1.html)
9 */
10 
11 #include <botan/sha1_sse2.h>
12 #include <botan/rotate.h>
13 #include <emmintrin.h>
14 
15 namespace Botan {
16 
17 namespace SHA1_SSE2_F {
18 
19 namespace {
20 
/*
* First 16 words of the schedule (W[0..15]) need only a big-endian
* load (byte swap on little-endian x86). "Preparing" means adding in
* the round constant K so the round functions get W[t] + K directly.
*
* The byteswap of each 32-bit lane is done in three steps:
* shufflehi/shufflelo swap the two 16-bit halves of each word, then
* the 8-bit shift+or swaps the bytes within each 16-bit half.
*/

#define prep00_15(P, W) \
   do { \
      W = _mm_shufflehi_epi16(W, _MM_SHUFFLE(2, 3, 0, 1)); \
      W = _mm_shufflelo_epi16(W, _MM_SHUFFLE(2, 3, 0, 1)); \
      W = _mm_or_si128(_mm_slli_epi16(W, 8), \
                       _mm_srli_epi16(W, 8)); \
      P.u128 = _mm_add_epi32(W, K00_19); \
   } while(0)
34 
/*
For each multiple of 4, t, we want to calculate this:

W[t+0] = rol(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1);
W[t+1] = rol(W[t-2] ^ W[t-7] ^ W[t-13] ^ W[t-15], 1);
W[t+2] = rol(W[t-1] ^ W[t-6] ^ W[t-12] ^ W[t-14], 1);
W[t+3] = rol(W[t] ^ W[t-5] ^ W[t-11] ^ W[t-13], 1);

W[t+3] depends on W[t+0], which is computed in the same vector, so
we'll actually calculate this (zeroing the W[t] term, then patching
the result in afterwards; note W[t+0] here is the final rotated
value, and rol distributes over xor):

W[t+0] = rol(W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16], 1);
W[t+1] = rol(W[t-2] ^ W[t-7] ^ W[t-13] ^ W[t-15], 1);
W[t+2] = rol(W[t-1] ^ W[t-6] ^ W[t-12] ^ W[t-14], 1);
W[t+3] = rol( 0 ^ W[t-5] ^ W[t-11] ^ W[t-13], 1);
W[t+3] ^= rol(W[t+0], 1);

the parameters are:

W0 = &W[t-16];
W1 = &W[t-12];
W2 = &W[t- 8];
W3 = &W[t- 4];

and on output:
prepared = W0 + K
W0 = W[t]..W[t+3]
*/

/* note that there is a step here where i want to do a rol by 1, which
* normally would look like this:
*
* r1 = psrld r0,$31
* r0 = pslld r0,$1
* r0 = por r0,r1
*
* but instead i do this:
*
* r1 = pcmpltd r0,zero   (r1 lanes become all-ones iff top bit set)
* r0 = paddd r0,r0       (shift left 1)
* r0 = psub r0,r1        (subtracting -1 sets the wrapped-around LSB)
*
* because pcmpltd and paddd are available in both MMX units on
* efficeon, pentium-m, and opteron but shifts are available in
* only one unit.
*/
#define prep(prep, XW0, XW1, XW2, XW3, K) \
   do { \
      __m128i r0, r1, r2, r3; \
      \
      /* load W[t-4] 16-byte aligned, and shift */ \
      r3 = _mm_srli_si128((XW3), 4); \
      r0 = (XW0); \
      /* get high 64-bits of XW0 into low 64-bits */ \
      r1 = _mm_shuffle_epi32((XW0), _MM_SHUFFLE(1,0,3,2)); \
      /* load high 64-bits of r1 */ \
      r1 = _mm_unpacklo_epi64(r1, (XW1)); \
      r2 = (XW2); \
      \
      r0 = _mm_xor_si128(r1, r0); \
      r2 = _mm_xor_si128(r3, r2); \
      r0 = _mm_xor_si128(r2, r0); \
      /* unrotated W[t]..W[t+2] in r0 ... still need W[t+3] */ \
      \
      /* stash unrotated W[t] in lane 3 of r2 before r0 is rotated */ \
      r2 = _mm_slli_si128(r0, 12); \
      r1 = _mm_cmplt_epi32(r0, _mm_setzero_si128()); \
      r0 = _mm_add_epi32(r0, r0);   /* shift left by 1 */ \
      r0 = _mm_sub_epi32(r0, r1);   /* r0 has W[t]..W[t+2] */ \
      \
      /* rol(unrotated W[t], 2) == rol(W[t+0], 1); xor into lane 3 */ \
      r3 = _mm_srli_epi32(r2, 30); \
      r2 = _mm_slli_epi32(r2, 2); \
      \
      r0 = _mm_xor_si128(r0, r3); \
      r0 = _mm_xor_si128(r0, r2);   /* r0 now has W[t+3] */ \
      \
      (XW0) = r0; \
      (prep).u128 = _mm_add_epi32(r0, K); \
   } while(0)
112 
113 /*
114 * SHA-160 F1 Function
115 */
116 inline void F1(u32bit A, u32bit& B, u32bit C, u32bit D, u32bit& E, u32bit msg)
117  {
118  E += (D ^ (B & (C ^ D))) + msg + rotate_left(A, 5);
119  B = rotate_left(B, 30);
120  }
121 
122 /*
123 * SHA-160 F2 Function
124 */
125 inline void F2(u32bit A, u32bit& B, u32bit C, u32bit D, u32bit& E, u32bit msg)
126  {
127  E += (B ^ C ^ D) + msg + rotate_left(A, 5);
128  B = rotate_left(B, 30);
129  }
130 
131 /*
132 * SHA-160 F3 Function
133 */
134 inline void F3(u32bit A, u32bit& B, u32bit C, u32bit D, u32bit& E, u32bit msg)
135  {
136  E += ((B & C) | ((B | C) & D)) + msg + rotate_left(A, 5);
137  B = rotate_left(B, 30);
138  }
139 
140 /*
141 * SHA-160 F4 Function
142 */
143 inline void F4(u32bit A, u32bit& B, u32bit C, u32bit D, u32bit& E, u32bit msg)
144  {
145  E += (B ^ C ^ D) + msg + rotate_left(A, 5);
146  B = rotate_left(B, 30);
147  }
148 
149 }
150 
151 }
152 
/*
* SHA-160 Compression Function using SSE for message expansion
*
* Processes 'blocks' consecutive 64-byte message blocks starting at
* input_bytes. The message schedule is expanded four words at a time
* in SSE2 registers (prep00_15 / prep) while the 80 rounds themselves
* run on scalar registers. Instead of shuffling the five working
* values, each round call rotates the A..E argument order -- the
* standard fully-unrolled SHA-1 pattern. Each prep call computes the
* schedule words used 16 rounds later, so its K constant can differ
* from the K of the rounds surrounding it.
*/
void SHA_160_SSE2::compress_n(const byte input_bytes[], size_t blocks)
   {
   using namespace SHA1_SSE2_F;

   // SHA-1 round constants, splatted across all four 32-bit lanes
   const __m128i K00_19 = _mm_set1_epi32(0x5A827999);
   const __m128i K20_39 = _mm_set1_epi32(0x6ED9EBA1);
   const __m128i K40_59 = _mm_set1_epi32(0x8F1BBCDC);
   const __m128i K60_79 = _mm_set1_epi32(0xCA62C1D6);

   u32bit A = digest[0],
          B = digest[1],
          C = digest[2],
          D = digest[3],
          E = digest[4];

   // unaligned loads below, so no alignment requirement on input_bytes
   const __m128i* input = reinterpret_cast<const __m128i*>(input_bytes);

   for(size_t i = 0; i != blocks; ++i)
      {
      // lets the prepared W[t]+K vectors be read back as scalars
      union v4si {
         u32bit u32[4];
         __m128i u128;
         };

      v4si P0, P1, P2, P3;

      // load and byteswap the 16 message words, adding K00_19
      __m128i W0 = _mm_loadu_si128(&input[0]);
      prep00_15(P0, W0);

      __m128i W1 = _mm_loadu_si128(&input[1]);
      prep00_15(P1, W1);

      __m128i W2 = _mm_loadu_si128(&input[2]);
      prep00_15(P2, W2);

      __m128i W3 = _mm_loadu_si128(&input[3]);
      prep00_15(P3, W3);

      /*
      Using SSE4; slower on Core2 and Nehalem
      #define GET_P_32(P, i) _mm_extract_epi32(P.u128, i)

      Much slower on all tested platforms
      #define GET_P_32(P,i) _mm_cvtsi128_si32(_mm_srli_si128(P.u128, i*4))
      */

#define GET_P_32(P, i) P.u32[i]

      // rounds 0-19 (F1); preps compute W[16..35]
      F1(A, B, C, D, E, GET_P_32(P0, 0));
      F1(E, A, B, C, D, GET_P_32(P0, 1));
      F1(D, E, A, B, C, GET_P_32(P0, 2));
      F1(C, D, E, A, B, GET_P_32(P0, 3));
      prep(P0, W0, W1, W2, W3, K00_19);

      F1(B, C, D, E, A, GET_P_32(P1, 0));
      F1(A, B, C, D, E, GET_P_32(P1, 1));
      F1(E, A, B, C, D, GET_P_32(P1, 2));
      F1(D, E, A, B, C, GET_P_32(P1, 3));
      prep(P1, W1, W2, W3, W0, K20_39);

      F1(C, D, E, A, B, GET_P_32(P2, 0));
      F1(B, C, D, E, A, GET_P_32(P2, 1));
      F1(A, B, C, D, E, GET_P_32(P2, 2));
      F1(E, A, B, C, D, GET_P_32(P2, 3));
      prep(P2, W2, W3, W0, W1, K20_39);

      F1(D, E, A, B, C, GET_P_32(P3, 0));
      F1(C, D, E, A, B, GET_P_32(P3, 1));
      F1(B, C, D, E, A, GET_P_32(P3, 2));
      F1(A, B, C, D, E, GET_P_32(P3, 3));
      prep(P3, W3, W0, W1, W2, K20_39);

      F1(E, A, B, C, D, GET_P_32(P0, 0));
      F1(D, E, A, B, C, GET_P_32(P0, 1));
      F1(C, D, E, A, B, GET_P_32(P0, 2));
      F1(B, C, D, E, A, GET_P_32(P0, 3));
      prep(P0, W0, W1, W2, W3, K20_39);

      // rounds 20-39 (F2); preps compute W[36..55]
      F2(A, B, C, D, E, GET_P_32(P1, 0));
      F2(E, A, B, C, D, GET_P_32(P1, 1));
      F2(D, E, A, B, C, GET_P_32(P1, 2));
      F2(C, D, E, A, B, GET_P_32(P1, 3));
      prep(P1, W1, W2, W3, W0, K20_39);

      F2(B, C, D, E, A, GET_P_32(P2, 0));
      F2(A, B, C, D, E, GET_P_32(P2, 1));
      F2(E, A, B, C, D, GET_P_32(P2, 2));
      F2(D, E, A, B, C, GET_P_32(P2, 3));
      prep(P2, W2, W3, W0, W1, K40_59);

      F2(C, D, E, A, B, GET_P_32(P3, 0));
      F2(B, C, D, E, A, GET_P_32(P3, 1));
      F2(A, B, C, D, E, GET_P_32(P3, 2));
      F2(E, A, B, C, D, GET_P_32(P3, 3));
      prep(P3, W3, W0, W1, W2, K40_59);

      F2(D, E, A, B, C, GET_P_32(P0, 0));
      F2(C, D, E, A, B, GET_P_32(P0, 1));
      F2(B, C, D, E, A, GET_P_32(P0, 2));
      F2(A, B, C, D, E, GET_P_32(P0, 3));
      prep(P0, W0, W1, W2, W3, K40_59);

      F2(E, A, B, C, D, GET_P_32(P1, 0));
      F2(D, E, A, B, C, GET_P_32(P1, 1));
      F2(C, D, E, A, B, GET_P_32(P1, 2));
      F2(B, C, D, E, A, GET_P_32(P1, 3));
      prep(P1, W1, W2, W3, W0, K40_59);

      // rounds 40-59 (F3); preps compute W[56..75]
      F3(A, B, C, D, E, GET_P_32(P2, 0));
      F3(E, A, B, C, D, GET_P_32(P2, 1));
      F3(D, E, A, B, C, GET_P_32(P2, 2));
      F3(C, D, E, A, B, GET_P_32(P2, 3));
      prep(P2, W2, W3, W0, W1, K40_59);

      F3(B, C, D, E, A, GET_P_32(P3, 0));
      F3(A, B, C, D, E, GET_P_32(P3, 1));
      F3(E, A, B, C, D, GET_P_32(P3, 2));
      F3(D, E, A, B, C, GET_P_32(P3, 3));
      prep(P3, W3, W0, W1, W2, K60_79);

      F3(C, D, E, A, B, GET_P_32(P0, 0));
      F3(B, C, D, E, A, GET_P_32(P0, 1));
      F3(A, B, C, D, E, GET_P_32(P0, 2));
      F3(E, A, B, C, D, GET_P_32(P0, 3));
      prep(P0, W0, W1, W2, W3, K60_79);

      F3(D, E, A, B, C, GET_P_32(P1, 0));
      F3(C, D, E, A, B, GET_P_32(P1, 1));
      F3(B, C, D, E, A, GET_P_32(P1, 2));
      F3(A, B, C, D, E, GET_P_32(P1, 3));
      prep(P1, W1, W2, W3, W0, K60_79);

      F3(E, A, B, C, D, GET_P_32(P2, 0));
      F3(D, E, A, B, C, GET_P_32(P2, 1));
      F3(C, D, E, A, B, GET_P_32(P2, 2));
      F3(B, C, D, E, A, GET_P_32(P2, 3));
      prep(P2, W2, W3, W0, W1, K60_79);

      // rounds 60-79 (F4); one final prep for W[76..79]
      F4(A, B, C, D, E, GET_P_32(P3, 0));
      F4(E, A, B, C, D, GET_P_32(P3, 1));
      F4(D, E, A, B, C, GET_P_32(P3, 2));
      F4(C, D, E, A, B, GET_P_32(P3, 3));
      prep(P3, W3, W0, W1, W2, K60_79);

      F4(B, C, D, E, A, GET_P_32(P0, 0));
      F4(A, B, C, D, E, GET_P_32(P0, 1));
      F4(E, A, B, C, D, GET_P_32(P0, 2));
      F4(D, E, A, B, C, GET_P_32(P0, 3));

      F4(C, D, E, A, B, GET_P_32(P1, 0));
      F4(B, C, D, E, A, GET_P_32(P1, 1));
      F4(A, B, C, D, E, GET_P_32(P1, 2));
      F4(E, A, B, C, D, GET_P_32(P1, 3));

      F4(D, E, A, B, C, GET_P_32(P2, 0));
      F4(C, D, E, A, B, GET_P_32(P2, 1));
      F4(B, C, D, E, A, GET_P_32(P2, 2));
      F4(A, B, C, D, E, GET_P_32(P2, 3));

      F4(E, A, B, C, D, GET_P_32(P3, 0));
      F4(D, E, A, B, C, GET_P_32(P3, 1));
      F4(C, D, E, A, B, GET_P_32(P3, 2));
      F4(B, C, D, E, A, GET_P_32(P3, 3));

      // Davies-Meyer feed-forward into the chaining state
      A = (digest[0] += A);
      B = (digest[1] += B);
      C = (digest[2] += C);
      D = (digest[3] += D);
      E = (digest[4] += E);

      // advance by one 64-byte block (in units of __m128i)
      input += (hash_block_size() / 16);
      }

#undef GET_P_32
   }
331 
332 #undef prep00_15
333 #undef prep
334 
335 }
size_t hash_block_size() const
Definition: mdx_hash.h:32
#define prep00_15(P, W)
Definition: sha1_sse2.cpp:26
T rotate_left(T input, size_t rot)
Definition: rotate.h:21
secure_vector< u32bit > digest
Definition: sha160.h:50
void F2(u32bit A, u32bit &B, u32bit C, u32bit D, u32bit &E, u32bit msg)
Definition: sha1_sse2.cpp:125
void F3(u32bit A, u32bit &B, u32bit C, u32bit D, u32bit &E, u32bit msg)
Definition: sha1_sse2.cpp:134
#define prep(prep, XW0, XW1, XW2, XW3, K)
Definition: sha1_sse2.cpp:80
uint32_t u32bit
Definition: types.h:32
uint8_t byte
Definition: types.h:30
Definition: buf_comp.h:15
void F4(u32bit A, u32bit &B, u32bit C, u32bit D, u32bit &E, u32bit msg)
Definition: sha1_sse2.cpp:143
#define GET_P_32(P, i)
void F1(u32bit A, u32bit &B, u32bit C, u32bit D, u32bit &E, u32bit msg)
Definition: sha1_sse2.cpp:116