########################################################################
# Implement fast SHA-512 with AVX2 instructions. (x86_64)
#
# Copyright (C) 2013 Intel Corporation.
#
# Authors:
#     James Guilford <[email protected]>
#     Kirk Yap <[email protected]>
#     David Cote <[email protected]>
#     Tim Chen <[email protected]>
#
# This software is available to you under a choice of one of two
# licenses.  You may choose to be licensed under the terms of the GNU
# General Public License (GPL) Version 2, available from the file
# COPYING in the main directory of this source tree, or the
# OpenIB.org BSD license below:
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
#  - Redistributions of source code must retain the above
#    copyright notice, this list of conditions and the following
#    disclaimer.
#
#  - Redistributions in binary form must reproduce the above
#    copyright notice, this list of conditions and the following
#    disclaimer in the documentation and/or other materials
#    provided with the distribution.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
########################################################################
#
# This code is described in an Intel White-Paper:
# "Fast SHA-512 Implementations on Intel Architecture Processors"
#
# To find it, surf to http://www.intel.com/p/en_US/embedded
# and search for that title.
#
########################################################################
# This code schedules 1 block at a time, with 4 lanes per block
########################################################################

#ifdef CONFIG_AS_AVX2
#include <linux/linkage.h>

.text

# Virtual Registers
Y_0 = %ymm4
Y_1 = %ymm5
Y_2 = %ymm6
Y_3 = %ymm7

YTMP0 = %ymm0
YTMP1 = %ymm1
YTMP2 = %ymm2
YTMP3 = %ymm3
YTMP4 = %ymm8
XFER  = YTMP0

BYTE_FLIP_MASK = %ymm9

# 1st arg is %rdi, which is saved to the stack and accessed later via %r12
CTX1 = %rdi
CTX2 = %r12
# 2nd arg
INP = %rsi
# 3rd arg
NUM_BLKS = %rdx

c = %rcx
d = %r8
e = %rdx
y3 = %rsi

TBL = %rdi # clobbers CTX1

a = %rax
b = %rbx

f = %r9
g = %r10
h = %r11
old_h = %r11

T1 = %r12 # clobbers CTX2
y0 = %r13
y1 = %r14
y2 = %r15

# Local variables (stack frame)
XFER_SIZE = 4*8
SRND_SIZE = 1*8
INP_SIZE = 1*8
INPEND_SIZE = 1*8
CTX_SIZE = 1*8
RSPSAVE_SIZE = 1*8
GPRSAVE_SIZE = 5*8

frame_XFER = 0
frame_SRND = frame_XFER + XFER_SIZE
frame_INP = frame_SRND + SRND_SIZE
frame_INPEND = frame_INP + INP_SIZE
frame_CTX = frame_INPEND + INPEND_SIZE
frame_RSPSAVE = frame_CTX + CTX_SIZE
frame_GPRSAVE = frame_RSPSAVE + RSPSAVE_SIZE
frame_size = frame_GPRSAVE + GPRSAVE_SIZE
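
# Resulting layout relative to the 32-byte-aligned %rsp (offsets in bytes):
#   [0..31]    XFER      one 32-byte vector of w[t] + K[t] values
#   [32..39]   SRND      round-group counter
#   [40..47]   INP       current input block pointer
#   [48..55]   INPEND    end-of-input pointer
#   [56..63]   CTX       saved digest pointer (%rdi)
#   [64..71]   RSPSAVE   original %rsp
#   [72..111]  GPRSAVE   callee-saved %rbx, %r12-%r15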

## assume buffers not aligned
#define VMOVDQ vmovdqu

# addm [mem], reg
# Add reg to mem using reg-mem add and store
.macro addm p1 p2
	add	\p1, \p2
	mov	\p2, \p1
.endm
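# e.g. "addm 8*0(CTX2), a" computes a += mem and stores the sum back to mem,
# which is how the working variables are folded back into the digest below.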


# COPY_YMM_AND_BSWAP ymm, [mem], byte_flip_mask
# Load ymm with mem and byte swap each quad word
.macro COPY_YMM_AND_BSWAP p1 p2 p3
	VMOVDQ	\p2, \p1
	vpshufb	\p3, \p1, \p1
.endm
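# e.g. "COPY_YMM_AND_BSWAP Y_0, (INP), BYTE_FLIP_MASK" pulls four big-endian
# 64-bit message words into Y_0 and converts them to host byte order.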
# rotate_Ys
# Rotate values of symbols Y0...Y3
.macro rotate_Ys
	Y_ = Y_0
	Y_0 = Y_1
	Y_1 = Y_2
	Y_2 = Y_3
	Y_3 = Y_
.endm

# RotateState
.macro RotateState
	# Rotate symbols a..h right
	old_h = h
	TMP_ = h
	h = g
	g = f
	f = e
	e = d
	d = c
	c = b
	b = a
	a = TMP_
.endm
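# Renaming the a..h assembler symbols instead of moving data lets every round
# be written as if it were round 0, while avoiding the eight register-to-
# register moves an explicit state rotation would cost per round.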

# macro MY_VPALIGNR	YDST, YSRC1, YSRC2, RVAL
# YDST = {YSRC1, YSRC2} >> RVAL*8
.macro MY_VPALIGNR YDST YSRC1 YSRC2 RVAL
	vperm2f128	$0x3, \YSRC2, \YSRC1, \YDST	# YDST = {YS1_LO, YS2_HI}
	vpalignr	$\RVAL, \YSRC2, \YDST, \YDST	# YDST = {YDS1, YS2} >> RVAL*8
.endm
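# e.g. with Y_3 = {w[15],w[14],w[13],w[12]} and Y_2 = {w[11],w[10],w[9],w[8]},
# "MY_VPALIGNR YTMP0, Y_3, Y_2, 8" yields YTMP0 = {w[12],w[11],w[10],w[9]},
# i.e. W[t-7] for the four rounds t = 16..19.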

.macro FOUR_ROUNDS_AND_SCHED
################################### RND N + 0 #########################################

	# Extract w[t-7]
	MY_VPALIGNR	YTMP0, Y_3, Y_2, 8		# YTMP0 = W[-7]
	# Calculate w[t-16] + w[t-7]
	vpaddq		Y_0, YTMP0, YTMP0		# YTMP0 = W[-7] + W[-16]
	# Extract w[t-15]
	MY_VPALIGNR	YTMP1, Y_1, Y_0, 8		# YTMP1 = W[-15]

	# Calculate sigma0

	# Calculate w[t-15] ror 1
	vpsrlq		$1, YTMP1, YTMP2
	vpsllq		$(64-1), YTMP1, YTMP3
	vpor		YTMP2, YTMP3, YTMP3		# YTMP3 = W[-15] ror 1
	# Calculate w[t-15] shr 7
	vpsrlq		$7, YTMP1, YTMP4		# YTMP4 = W[-15] >> 7

	mov	a, y3				# y3 = a			# MAJA
	rorx	$41, e, y0			# y0 = e >> 41			# S1A
	rorx	$18, e, y1			# y1 = e >> 18			# S1B
	add	frame_XFER(%rsp), h		# h = k + w + h			# --
	or	c, y3				# y3 = a|c			# MAJA
	mov	f, y2				# y2 = f			# CH
	rorx	$34, a, T1			# T1 = a >> 34			# S0B

	xor	y1, y0				# y0 = (e>>41) ^ (e>>18)	# S1
	xor	g, y2				# y2 = f^g			# CH
	rorx	$14, e, y1			# y1 = (e >> 14)		# S1

	and	e, y2				# y2 = (f^g)&e			# CH
	xor	y1, y0				# y0 = (e>>41) ^ (e>>18) ^ (e>>14)	# S1
	rorx	$39, a, y1			# y1 = a >> 39			# S0A
	add	h, d				# d = k + w + h + d		# --

	and	b, y3				# y3 = (a|c)&b			# MAJA
	xor	T1, y1				# y1 = (a>>39) ^ (a>>34)	# S0
	rorx	$28, a, T1			# T1 = (a >> 28)		# S0

	xor	g, y2				# y2 = CH = ((f^g)&e)^g		# CH
	xor	T1, y1				# y1 = (a>>39) ^ (a>>34) ^ (a>>28)	# S0
	mov	a, T1				# T1 = a			# MAJB
	and	c, T1				# T1 = a&c			# MAJB

	add	y0, y2				# y2 = S1 + CH			# --
	or	T1, y3				# y3 = MAJ = ((a|c)&b)|(a&c)	# MAJ
	add	y1, h				# h = k + w + h + S0		# --

	add	y2, d				# d = k + w + h + d + S1 + CH = d + t1	# --

	add	y2, h				# h = k + w + h + S0 + S1 + CH = t1 + S0	# --
	add	y3, h				# h = t1 + S0 + MAJ		# --

	RotateState

################################### RND N + 1 #########################################

	# Calculate w[t-15] ror 8
	vpsrlq		$8, YTMP1, YTMP2
	vpsllq		$(64-8), YTMP1, YTMP1
	vpor		YTMP2, YTMP1, YTMP1		# YTMP1 = W[-15] ror 8
	# XOR the three components
	vpxor		YTMP4, YTMP3, YTMP3		# YTMP3 = W[-15] ror 1 ^ W[-15] >> 7
	vpxor		YTMP1, YTMP3, YTMP1		# YTMP1 = s0


	# Add three components, w[t-16], w[t-7] and sigma0
	vpaddq		YTMP1, YTMP0, YTMP0		# YTMP0 = W[-16] + W[-7] + s0
	# Move to appropriate lanes for calculating w[16] and w[17]
	vperm2f128	$0x0, YTMP0, YTMP0, Y_0		# Y_0 = W[-16] + W[-7] + s0 {BABA}
	# Move to appropriate lanes for calculating w[18] and w[19]
	vpand		MASK_YMM_LO(%rip), YTMP0, YTMP0	# YTMP0 = W[-16] + W[-7] + s0 {DC00}

	# Calculate w[16] and w[17] in both 128 bit lanes

	# Calculate sigma1 for w[16] and w[17] on both 128 bit lanes
	vperm2f128	$0x11, Y_3, Y_3, YTMP2		# YTMP2 = W[-2] {BABA}
	vpsrlq		$6, YTMP2, YTMP4		# YTMP4 = W[-2] >> 6 {BABA}


	mov	a, y3				# y3 = a			# MAJA
	rorx	$41, e, y0			# y0 = e >> 41			# S1A
	rorx	$18, e, y1			# y1 = e >> 18			# S1B
	add	1*8+frame_XFER(%rsp), h		# h = k + w + h			# --
	or	c, y3				# y3 = a|c			# MAJA


	mov	f, y2				# y2 = f			# CH
	rorx	$34, a, T1			# T1 = a >> 34			# S0B
	xor	y1, y0				# y0 = (e>>41) ^ (e>>18)	# S1
	xor	g, y2				# y2 = f^g			# CH


	rorx	$14, e, y1			# y1 = (e >> 14)		# S1
	xor	y1, y0				# y0 = (e>>41) ^ (e>>18) ^ (e>>14)	# S1
	rorx	$39, a, y1			# y1 = a >> 39			# S0A
	and	e, y2				# y2 = (f^g)&e			# CH
	add	h, d				# d = k + w + h + d		# --

	and	b, y3				# y3 = (a|c)&b			# MAJA
	xor	T1, y1				# y1 = (a>>39) ^ (a>>34)	# S0

	rorx	$28, a, T1			# T1 = (a >> 28)		# S0
	xor	g, y2				# y2 = CH = ((f^g)&e)^g		# CH

	xor	T1, y1				# y1 = (a>>39) ^ (a>>34) ^ (a>>28)	# S0
	mov	a, T1				# T1 = a			# MAJB
	and	c, T1				# T1 = a&c			# MAJB
	add	y0, y2				# y2 = S1 + CH			# --

	or	T1, y3				# y3 = MAJ = ((a|c)&b)|(a&c)	# MAJ
	add	y1, h				# h = k + w + h + S0		# --

	add	y2, d				# d = k + w + h + d + S1 + CH = d + t1	# --
	add	y2, h				# h = k + w + h + S0 + S1 + CH = t1 + S0	# --
	add	y3, h				# h = t1 + S0 + MAJ		# --

	RotateState


################################### RND N + 2 #########################################

	vpsrlq		$19, YTMP2, YTMP3		# YTMP3 = W[-2] >> 19 {BABA}
	vpsllq		$(64-19), YTMP2, YTMP1		# YTMP1 = W[-2] << 19 {BABA}
	vpor		YTMP1, YTMP3, YTMP3		# YTMP3 = W[-2] ror 19 {BABA}
	vpxor		YTMP3, YTMP4, YTMP4		# YTMP4 = W[-2] ror 19 ^ W[-2] >> 6 {BABA}
	vpsrlq		$61, YTMP2, YTMP3		# YTMP3 = W[-2] >> 61 {BABA}
	vpsllq		$(64-61), YTMP2, YTMP1		# YTMP1 = W[-2] << 61 {BABA}
	vpor		YTMP1, YTMP3, YTMP3		# YTMP3 = W[-2] ror 61 {BABA}
	vpxor		YTMP3, YTMP4, YTMP4		# YTMP4 = s1 = (W[-2] ror 19) ^
							#  (W[-2] ror 61) ^ (W[-2] >> 6) {BABA}

	# Add sigma1 to the other components to get w[16] and w[17]
	vpaddq		YTMP4, Y_0, Y_0			# Y_0 = {W[1], W[0], W[1], W[0]}

	# Calculate sigma1 for w[18] and w[19] for upper 128 bit lane
	vpsrlq		$6, Y_0, YTMP4			# YTMP4 = W[-2] >> 6 {DC--}

	mov	a, y3				# y3 = a			# MAJA
	rorx	$41, e, y0			# y0 = e >> 41			# S1A
	add	2*8+frame_XFER(%rsp), h		# h = k + w + h			# --

	rorx	$18, e, y1			# y1 = e >> 18			# S1B
	or	c, y3				# y3 = a|c			# MAJA
	mov	f, y2				# y2 = f			# CH
	xor	g, y2				# y2 = f^g			# CH

	rorx	$34, a, T1			# T1 = a >> 34			# S0B
	xor	y1, y0				# y0 = (e>>41) ^ (e>>18)	# S1
	and	e, y2				# y2 = (f^g)&e			# CH

	rorx	$14, e, y1			# y1 = (e >> 14)		# S1
	add	h, d				# d = k + w + h + d		# --
	and	b, y3				# y3 = (a|c)&b			# MAJA

	xor	y1, y0				# y0 = (e>>41) ^ (e>>18) ^ (e>>14)	# S1
	rorx	$39, a, y1			# y1 = a >> 39			# S0A
	xor	g, y2				# y2 = CH = ((f^g)&e)^g		# CH

	xor	T1, y1				# y1 = (a>>39) ^ (a>>34)	# S0
	rorx	$28, a, T1			# T1 = (a >> 28)		# S0

	xor	T1, y1				# y1 = (a>>39) ^ (a>>34) ^ (a>>28)	# S0
	mov	a, T1				# T1 = a			# MAJB
	and	c, T1				# T1 = a&c			# MAJB
	add	y0, y2				# y2 = S1 + CH			# --

	or	T1, y3				# y3 = MAJ = ((a|c)&b)|(a&c)	# MAJ
	add	y1, h				# h = k + w + h + S0		# --
	add	y2, d				# d = k + w + h + d + S1 + CH = d + t1	# --
	add	y2, h				# h = k + w + h + S0 + S1 + CH = t1 + S0	# --

	add	y3, h				# h = t1 + S0 + MAJ		# --

	RotateState

################################### RND N + 3 #########################################

	vpsrlq		$19, Y_0, YTMP3			# YTMP3 = W[-2] >> 19 {DC--}
	vpsllq		$(64-19), Y_0, YTMP1		# YTMP1 = W[-2] << 19 {DC--}
	vpor		YTMP1, YTMP3, YTMP3		# YTMP3 = W[-2] ror 19 {DC--}
	vpxor		YTMP3, YTMP4, YTMP4		# YTMP4 = W[-2] ror 19 ^ W[-2] >> 6 {DC--}
	vpsrlq		$61, Y_0, YTMP3			# YTMP3 = W[-2] >> 61 {DC--}
	vpsllq		$(64-61), Y_0, YTMP1		# YTMP1 = W[-2] << 61 {DC--}
	vpor		YTMP1, YTMP3, YTMP3		# YTMP3 = W[-2] ror 61 {DC--}
	vpxor		YTMP3, YTMP4, YTMP4		# YTMP4 = s1 = (W[-2] ror 19) ^
							#  (W[-2] ror 61) ^ (W[-2] >> 6) {DC--}

	# Add the sigma0 + w[t-7] + w[t-16] for w[18] and w[19]
	# to newly calculated sigma1 to get w[18] and w[19]
	vpaddq		YTMP4, YTMP0, YTMP2		# YTMP2 = {W[3], W[2], --, --}

	# Form w[19], w[18], w[17], w[16]
	vpblendd	$0xF0, YTMP2, Y_0, Y_0		# Y_0 = {W[3], W[2], W[1], W[0]}

	mov	a, y3				# y3 = a			# MAJA
	rorx	$41, e, y0			# y0 = e >> 41			# S1A
	rorx	$18, e, y1			# y1 = e >> 18			# S1B
	add	3*8+frame_XFER(%rsp), h		# h = k + w + h			# --
	or	c, y3				# y3 = a|c			# MAJA


	mov	f, y2				# y2 = f			# CH
	rorx	$34, a, T1			# T1 = a >> 34			# S0B
	xor	y1, y0				# y0 = (e>>41) ^ (e>>18)	# S1
	xor	g, y2				# y2 = f^g			# CH


	rorx	$14, e, y1			# y1 = (e >> 14)		# S1
	and	e, y2				# y2 = (f^g)&e			# CH
	add	h, d				# d = k + w + h + d		# --
	and	b, y3				# y3 = (a|c)&b			# MAJA

	xor	y1, y0				# y0 = (e>>41) ^ (e>>18) ^ (e>>14)	# S1
	xor	g, y2				# y2 = CH = ((f^g)&e)^g		# CH

	rorx	$39, a, y1			# y1 = a >> 39			# S0A
	add	y0, y2				# y2 = S1 + CH			# --

	xor	T1, y1				# y1 = (a>>39) ^ (a>>34)	# S0
	add	y2, d				# d = k + w + h + d + S1 + CH = d + t1	# --

	rorx	$28, a, T1			# T1 = (a >> 28)		# S0

	xor	T1, y1				# y1 = (a>>39) ^ (a>>34) ^ (a>>28)	# S0
	mov	a, T1				# T1 = a			# MAJB
	and	c, T1				# T1 = a&c			# MAJB
	or	T1, y3				# y3 = MAJ = ((a|c)&b)|(a&c)	# MAJ

	add	y1, h				# h = k + w + h + S0		# --
	add	y2, h				# h = k + w + h + S0 + S1 + CH = t1 + S0	# --
	add	y3, h				# h = t1 + S0 + MAJ		# --

	RotateState

	rotate_Ys
.endm

.macro DO_4ROUNDS

################################### RND N + 0 #########################################

	mov	f, y2				# y2 = f			# CH
	rorx	$41, e, y0			# y0 = e >> 41			# S1A
	rorx	$18, e, y1			# y1 = e >> 18			# S1B
	xor	g, y2				# y2 = f^g			# CH

	xor	y1, y0				# y0 = (e>>41) ^ (e>>18)	# S1
	rorx	$14, e, y1			# y1 = (e >> 14)		# S1
	and	e, y2				# y2 = (f^g)&e			# CH

	xor	y1, y0				# y0 = (e>>41) ^ (e>>18) ^ (e>>14)	# S1
	rorx	$34, a, T1			# T1 = a >> 34			# S0B
	xor	g, y2				# y2 = CH = ((f^g)&e)^g		# CH
	rorx	$39, a, y1			# y1 = a >> 39			# S0A
	mov	a, y3				# y3 = a			# MAJA

	xor	T1, y1				# y1 = (a>>39) ^ (a>>34)	# S0
	rorx	$28, a, T1			# T1 = (a >> 28)		# S0
	add	frame_XFER(%rsp), h		# h = k + w + h			# --
	or	c, y3				# y3 = a|c			# MAJA

	xor	T1, y1				# y1 = (a>>39) ^ (a>>34) ^ (a>>28)	# S0
	mov	a, T1				# T1 = a			# MAJB
	and	b, y3				# y3 = (a|c)&b			# MAJA
	and	c, T1				# T1 = a&c			# MAJB
	add	y0, y2				# y2 = S1 + CH			# --

	add	h, d				# d = k + w + h + d		# --
	or	T1, y3				# y3 = MAJ = ((a|c)&b)|(a&c)	# MAJ
	add	y1, h				# h = k + w + h + S0		# --

	add	y2, d				# d = k + w + h + d + S1 + CH = d + t1	# --

	RotateState

################################### RND N + 1 #########################################

	add	y2, old_h			# h = k + w + h + S0 + S1 + CH = t1 + S0	# --
	mov	f, y2				# y2 = f			# CH
	rorx	$41, e, y0			# y0 = e >> 41			# S1A
	rorx	$18, e, y1			# y1 = e >> 18			# S1B
	xor	g, y2				# y2 = f^g			# CH

	xor	y1, y0				# y0 = (e>>41) ^ (e>>18)	# S1
	rorx	$14, e, y1			# y1 = (e >> 14)		# S1
	and	e, y2				# y2 = (f^g)&e			# CH
	add	y3, old_h			# h = t1 + S0 + MAJ		# --

	xor	y1, y0				# y0 = (e>>41) ^ (e>>18) ^ (e>>14)	# S1
	rorx	$34, a, T1			# T1 = a >> 34			# S0B
	xor	g, y2				# y2 = CH = ((f^g)&e)^g		# CH
	rorx	$39, a, y1			# y1 = a >> 39			# S0A
	mov	a, y3				# y3 = a			# MAJA

	xor	T1, y1				# y1 = (a>>39) ^ (a>>34)	# S0
	rorx	$28, a, T1			# T1 = (a >> 28)		# S0
	add	8*1+frame_XFER(%rsp), h		# h = k + w + h			# --
	or	c, y3				# y3 = a|c			# MAJA

	xor	T1, y1				# y1 = (a>>39) ^ (a>>34) ^ (a>>28)	# S0
	mov	a, T1				# T1 = a			# MAJB
	and	b, y3				# y3 = (a|c)&b			# MAJA
	and	c, T1				# T1 = a&c			# MAJB
	add	y0, y2				# y2 = S1 + CH			# --

	add	h, d				# d = k + w + h + d		# --
	or	T1, y3				# y3 = MAJ = ((a|c)&b)|(a&c)	# MAJ
	add	y1, h				# h = k + w + h + S0		# --

	add	y2, d				# d = k + w + h + d + S1 + CH = d + t1	# --

	RotateState

################################### RND N + 2 #########################################

	add	y2, old_h			# h = k + w + h + S0 + S1 + CH = t1 + S0	# --
	mov	f, y2				# y2 = f			# CH
	rorx	$41, e, y0			# y0 = e >> 41			# S1A
	rorx	$18, e, y1			# y1 = e >> 18			# S1B
	xor	g, y2				# y2 = f^g			# CH

	xor	y1, y0				# y0 = (e>>41) ^ (e>>18)	# S1
	rorx	$14, e, y1			# y1 = (e >> 14)		# S1
	and	e, y2				# y2 = (f^g)&e			# CH
	add	y3, old_h			# h = t1 + S0 + MAJ		# --

	xor	y1, y0				# y0 = (e>>41) ^ (e>>18) ^ (e>>14)	# S1
	rorx	$34, a, T1			# T1 = a >> 34			# S0B
	xor	g, y2				# y2 = CH = ((f^g)&e)^g		# CH
	rorx	$39, a, y1			# y1 = a >> 39			# S0A
	mov	a, y3				# y3 = a			# MAJA

	xor	T1, y1				# y1 = (a>>39) ^ (a>>34)	# S0
	rorx	$28, a, T1			# T1 = (a >> 28)		# S0
	add	8*2+frame_XFER(%rsp), h		# h = k + w + h			# --
	or	c, y3				# y3 = a|c			# MAJA

	xor	T1, y1				# y1 = (a>>39) ^ (a>>34) ^ (a>>28)	# S0
	mov	a, T1				# T1 = a			# MAJB
	and	b, y3				# y3 = (a|c)&b			# MAJA
	and	c, T1				# T1 = a&c			# MAJB
	add	y0, y2				# y2 = S1 + CH			# --

	add	h, d				# d = k + w + h + d		# --
	or	T1, y3				# y3 = MAJ = ((a|c)&b)|(a&c)	# MAJ
	add	y1, h				# h = k + w + h + S0		# --

	add	y2, d				# d = k + w + h + d + S1 + CH = d + t1	# --

	RotateState

################################### RND N + 3 #########################################

	add	y2, old_h			# h = k + w + h + S0 + S1 + CH = t1 + S0	# --
	mov	f, y2				# y2 = f			# CH
	rorx	$41, e, y0			# y0 = e >> 41			# S1A
	rorx	$18, e, y1			# y1 = e >> 18			# S1B
	xor	g, y2				# y2 = f^g			# CH

	xor	y1, y0				# y0 = (e>>41) ^ (e>>18)	# S1
	rorx	$14, e, y1			# y1 = (e >> 14)		# S1
	and	e, y2				# y2 = (f^g)&e			# CH
	add	y3, old_h			# h = t1 + S0 + MAJ		# --

	xor	y1, y0				# y0 = (e>>41) ^ (e>>18) ^ (e>>14)	# S1
	rorx	$34, a, T1			# T1 = a >> 34			# S0B
	xor	g, y2				# y2 = CH = ((f^g)&e)^g		# CH
	rorx	$39, a, y1			# y1 = a >> 39			# S0A
	mov	a, y3				# y3 = a			# MAJA

	xor	T1, y1				# y1 = (a>>39) ^ (a>>34)	# S0
	rorx	$28, a, T1			# T1 = (a >> 28)		# S0
	add	8*3+frame_XFER(%rsp), h		# h = k + w + h			# --
	or	c, y3				# y3 = a|c			# MAJA

	xor	T1, y1				# y1 = (a>>39) ^ (a>>34) ^ (a>>28)	# S0
	mov	a, T1				# T1 = a			# MAJB
	and	b, y3				# y3 = (a|c)&b			# MAJA
	and	c, T1				# T1 = a&c			# MAJB
	add	y0, y2				# y2 = S1 + CH			# --


	add	h, d				# d = k + w + h + d		# --
	or	T1, y3				# y3 = MAJ = ((a|c)&b)|(a&c)	# MAJ
	add	y1, h				# h = k + w + h + S0		# --

	add	y2, d				# d = k + w + h + d + S1 + CH = d + t1	# --

	add	y2, h				# h = k + w + h + S0 + S1 + CH = t1 + S0	# --

	add	y3, h				# h = t1 + S0 + MAJ		# --

	RotateState

.endm

########################################################################
# void sha512_transform_rorx(void* D, const void* M, uint64_t L)
# Purpose: Updates the SHA512 digest stored at D with the message stored in M.
# The size of the message pointed to by M must be an integer multiple of SHA512
# message blocks.
# L is the message length in SHA512 blocks
########################################################################
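# A C-side caller would look roughly like this (a sketch; the identifiers
# other than the symbol name sha512_transform_rorx are illustrative):
#
#	extern void sha512_transform_rorx(void *digest, const void *data,
#					  uint64_t blocks);
#
#	sha512_transform_rorx(digest, msg, msg_len / 128);  /* 128 B/block */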
ENTRY(sha512_transform_rorx)
	# Allocate Stack Space
	mov	%rsp, %rax
	sub	$frame_size, %rsp
	and	$~(0x20 - 1), %rsp
	mov	%rax, frame_RSPSAVE(%rsp)
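	# %rsp was just rounded down to a 32-byte boundary, so the frame_XFER
	# slot can be accessed with the aligned vmovdqa stores below.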

	# Save GPRs
	mov	%rbx, 8*0+frame_GPRSAVE(%rsp)
	mov	%r12, 8*1+frame_GPRSAVE(%rsp)
	mov	%r13, 8*2+frame_GPRSAVE(%rsp)
	mov	%r14, 8*3+frame_GPRSAVE(%rsp)
	mov	%r15, 8*4+frame_GPRSAVE(%rsp)

	shl	$7, NUM_BLKS			# convert to bytes
	jz	done_hash
	add	INP, NUM_BLKS			# pointer to end of data
	mov	NUM_BLKS, frame_INPEND(%rsp)

	## load initial digest
	mov	8*0(CTX1), a
	mov	8*1(CTX1), b
	mov	8*2(CTX1), c
	mov	8*3(CTX1), d
	mov	8*4(CTX1), e
	mov	8*5(CTX1), f
	mov	8*6(CTX1), g
	mov	8*7(CTX1), h

	# save %rdi (CTX) before it gets clobbered
	mov	%rdi, frame_CTX(%rsp)

	vmovdqa	PSHUFFLE_BYTE_FLIP_MASK(%rip), BYTE_FLIP_MASK

loop0:
	lea	K512(%rip), TBL

	## byte swap first 16 qwords
	COPY_YMM_AND_BSWAP	Y_0, (INP), BYTE_FLIP_MASK
	COPY_YMM_AND_BSWAP	Y_1, 1*32(INP), BYTE_FLIP_MASK
	COPY_YMM_AND_BSWAP	Y_2, 2*32(INP), BYTE_FLIP_MASK
	COPY_YMM_AND_BSWAP	Y_3, 3*32(INP), BYTE_FLIP_MASK

	mov	INP, frame_INP(%rsp)

	## schedule 64 input qwords, by doing 4 iterations of 16 rounds each
	movq	$4, frame_SRND(%rsp)

.align 16
loop1:
	vpaddq	(TBL), Y_0, XFER
	vmovdqa	XFER, frame_XFER(%rsp)
	FOUR_ROUNDS_AND_SCHED

	vpaddq	1*32(TBL), Y_0, XFER
	vmovdqa	XFER, frame_XFER(%rsp)
	FOUR_ROUNDS_AND_SCHED

	vpaddq	2*32(TBL), Y_0, XFER
	vmovdqa	XFER, frame_XFER(%rsp)
	FOUR_ROUNDS_AND_SCHED

	vpaddq	3*32(TBL), Y_0, XFER
	vmovdqa	XFER, frame_XFER(%rsp)
	add	$(4*32), TBL
	FOUR_ROUNDS_AND_SCHED

	subq	$1, frame_SRND(%rsp)
	jne	loop1

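	# Rounds 64..79: the message schedule is complete and Y_0..Y_3 already
	# hold w[64..79], so only plain rounds remain (8 per iteration, twice).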
	movq	$2, frame_SRND(%rsp)
loop2:
	vpaddq	(TBL), Y_0, XFER
	vmovdqa	XFER, frame_XFER(%rsp)
	DO_4ROUNDS
	vpaddq	1*32(TBL), Y_1, XFER
	vmovdqa	XFER, frame_XFER(%rsp)
	add	$(2*32), TBL
	DO_4ROUNDS

	vmovdqa	Y_2, Y_0
	vmovdqa	Y_3, Y_1

	subq	$1, frame_SRND(%rsp)
	jne	loop2

	mov	frame_CTX(%rsp), CTX2
	addm	8*0(CTX2), a
	addm	8*1(CTX2), b
	addm	8*2(CTX2), c
	addm	8*3(CTX2), d
	addm	8*4(CTX2), e
	addm	8*5(CTX2), f
	addm	8*6(CTX2), g
	addm	8*7(CTX2), h

	mov	frame_INP(%rsp), INP
	add	$128, INP
	cmp	frame_INPEND(%rsp), INP
	jne	loop0

done_hash:

	# Restore GPRs
	mov	8*0+frame_GPRSAVE(%rsp), %rbx
	mov	8*1+frame_GPRSAVE(%rsp), %r12
	mov	8*2+frame_GPRSAVE(%rsp), %r13
	mov	8*3+frame_GPRSAVE(%rsp), %r14
	mov	8*4+frame_GPRSAVE(%rsp), %r15

	# Restore Stack Pointer
	mov	frame_RSPSAVE(%rsp), %rsp
	ret
ENDPROC(sha512_transform_rorx)

########################################################################
### Binary Data

# Mergeable 640-byte rodata section. This allows the linker to merge the table
# with other, exactly the same 640-byte fragment of another rodata section
# (if such a section exists).
.section	.rodata.cst640.K512, "aM", @progbits, 640
.align 64
# K[t] used in SHA512 hashing
K512:
	.quad	0x428a2f98d728ae22,0x7137449123ef65cd
	.quad	0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
	.quad	0x3956c25bf348b538,0x59f111f1b605d019
	.quad	0x923f82a4af194f9b,0xab1c5ed5da6d8118
	.quad	0xd807aa98a3030242,0x12835b0145706fbe
	.quad	0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
	.quad	0x72be5d74f27b896f,0x80deb1fe3b1696b1
	.quad	0x9bdc06a725c71235,0xc19bf174cf692694
	.quad	0xe49b69c19ef14ad2,0xefbe4786384f25e3
	.quad	0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
	.quad	0x2de92c6f592b0275,0x4a7484aa6ea6e483
	.quad	0x5cb0a9dcbd41fbd4,0x76f988da831153b5
	.quad	0x983e5152ee66dfab,0xa831c66d2db43210
	.quad	0xb00327c898fb213f,0xbf597fc7beef0ee4
	.quad	0xc6e00bf33da88fc2,0xd5a79147930aa725
	.quad	0x06ca6351e003826f,0x142929670a0e6e70
	.quad	0x27b70a8546d22ffc,0x2e1b21385c26c926
	.quad	0x4d2c6dfc5ac42aed,0x53380d139d95b3df
	.quad	0x650a73548baf63de,0x766a0abb3c77b2a8
	.quad	0x81c2c92e47edaee6,0x92722c851482353b
	.quad	0xa2bfe8a14cf10364,0xa81a664bbc423001
	.quad	0xc24b8b70d0f89791,0xc76c51a30654be30
	.quad	0xd192e819d6ef5218,0xd69906245565a910
	.quad	0xf40e35855771202a,0x106aa07032bbd1b8
	.quad	0x19a4c116b8d2d0c8,0x1e376c085141ab53
	.quad	0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
	.quad	0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
	.quad	0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
	.quad	0x748f82ee5defb2fc,0x78a5636f43172f60
	.quad	0x84c87814a1f0ab72,0x8cc702081a6439ec
	.quad	0x90befffa23631e28,0xa4506cebde82bde9
	.quad	0xbef9a3f7b2c67915,0xc67178f2e372532b
	.quad	0xca273eceea26619c,0xd186b8c721c0c207
	.quad	0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
	.quad	0x06f067aa72176fba,0x0a637dc5a2c898a6
	.quad	0x113f9804bef90dae,0x1b710b35131c471b
	.quad	0x28db77f523047d84,0x32caab7b40c72493
	.quad	0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
	.quad	0x4cc5d4becb3e42b6,0x597f299cfc657e2a
	.quad	0x5fcb6fab3ad6faec,0x6c44198c4a475817

.section	.rodata.cst32.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 32
.align 32
# Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb.
PSHUFFLE_BYTE_FLIP_MASK:
	.octa 0x08090a0b0c0d0e0f0001020304050607
	.octa 0x18191a1b1c1d1e1f1011121314151617
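# Each mask byte is a vpshufb source index: destination byte 0 of every qword
# comes from source byte 7, byte 1 from byte 6, and so on, reversing the byte
# order of each 64-bit message word (big-endian input to host order).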

.section	.rodata.cst32.MASK_YMM_LO, "aM", @progbits, 32
.align 32
MASK_YMM_LO:
	.octa 0x00000000000000000000000000000000
	.octa 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
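# Low 128-bit lane all zeroes, high lane all ones: vpand with this mask keeps
# only the two upper qwords (the {DC00} form in the scheduling comments).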

#endif