/*
The MIT License (MIT)

Copyright (c) 2016 kste

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/

/*
Optimized Implementations for Haraka256 and Haraka512
*/
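
/*
 * Usage sketch (illustrative only, not part of the original file): it assumes
 * the prototypes from the project's haraka header are in scope and that
 * load_constants() is meant to be called once before the permutations are
 * used, since it fills the rc[] round constants and MIX_4. haraka512
 * compresses a 64-byte input to a 32-byte digest; haraka256 maps a 32-byte
 * input to a 32-byte output. The function name below is hypothetical.
 */
static void haraka_usage_example(void) {
  unsigned char in512[64] = {0}, in256[32] = {0};
  unsigned char out512[32], out256[32];

  load_constants();            /* fill rc[] and MIX_4 once at start-up */
  haraka512(out512, in512);    /* 64-byte input -> 32-byte digest */
  haraka256(out256, in256);    /* 32-byte input -> 32-byte output */
}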

void load_constants() {
  MIX_4 = _mm512_set_epi32(3,11,7,15,8,0,12,4,9,1,13,5,2,10,6,14);

  rc[0] = _mm512_set_epi32(0x0ed6eae6,0x2e7b4f08,0xbbf3bcaf,0xfd5b4f79,0x3402de2d,0x53f28498,0xcf029d60,0x9f029114,0x8b66b4e1,0x88f3a06b,0x640f6ba4,0x2f08f717,0x0684704c,0xe620c00a,0xb2c5fef0,0x75817b9d);
  rc[1] = _mm512_set_epi32(0x2924d9b0,0xafcacc07,0x675ffde2,0x1fc70b3b,0x67c28f43,0x5e2e7cd0,0xe2412761,0xda4fef1b,0x7eeacdee,0x6e9032b7,0x8d5335ed,0x2b8a057b,0xcbcfb0cb,0x4872448b,0x79eecd1c,0xbe397044);
  rc[2] = _mm512_set_epi32(0xfa0478a6,0xde6f5572,0x4aaa9ec8,0x5c9d2d8a,0xb2cc0bb9,0x941723bf,0x69028b2e,0x8df69800,0x1c30bf84,0xd4b7cd64,0x5b2a404f,0xad037e33,0xab4d63f1,0xe6867fe9,0xecdb8fca,0xb9d465ee);
  rc[3] = _mm512_set_epi32(0x21025ed8,0x9d199c4f,0x78a2c7e3,0x27e593ec,0xaf044988,0x4b050084,0x5f9600c9,0x9ca8eca6,0x1ea10344,0xf449a236,0x32d611ae,0xbb6a12ee,0xdfb49f2b,0x6b772a12,0x0efa4f2e,0x29129fd4);
  rc[4] = _mm512_set_epi32(0x9223973c,0x226b68bb,0x2caf92e8,0x36d1943a,0x5aca45c2,0x21300443,0x81c29153,0xf6fc9ac6,0x6260700d,0x6186b017,0x37f2efd9,0x10307d6b,0xbf3aaaf8,0xa759c9b7,0xb9282ecd,0x82d40173);
  rc[5] = _mm512_set_epi32(0x734bd3dc,0xe2e4d19c,0x2db91a4e,0xc72bf77d,0xbb606268,0xffeba09c,0x83e48de3,0xcb2212b1,0xdb863ce5,0xaef0c677,0x933dfddd,0x24e1128d,0xd3bf9238,0x225886eb,0x6cbab958,0xe51071b4);
  rc[6] = _mm512_set_epi32(0xcda75a17,0xd6de7d77,0x6d1be5b9,0xb88617f9,0x6df3614b,0x3c755977,0x8e5e2302,0x7eca472c,0xdba775a8,0xe707eff6,0x03b231dd,0x16eb6899,0x43bb47c3,0x61301b43,0x4b1415c4,0x2cb3924e);
  rc[7] = _mm512_set_epi32(0xf0b1a5a1,0x96e90cab,0x80bbbabc,0x63a4a350,0x2cee0c75,0x00da619c,0xe4ed0353,0x600ed0d9,0xcb1e6950,0xf957332b,0xa2531159,0x3bf327c1,0xec6b43f0,0x6ba8e9aa,0x9d6c069d,0xa946ee5d);
  rc[8] = _mm512_set_epi32(0x26f65241,0xcbe55438,0x43ce5918,0xffbaafde,0x34bb8a5b,0x5f427fd7,0xaeb6b779,0x360a16f6,0x17bb8f38,0xd554a40b,0x8814f3a8,0x2e75b442,0xae3db102,0x5e962988,0xab0dde30,0x938dca39);
  rc[9] = _mm512_set_epi32(0x756acc03,0x02288288,0x4ad6bdfd,0xe9c59da1,0xa0c1613c,0xba7ed22b,0xc173bc0f,0x48a659cf,0xae51a51a,0x1bdff7be,0x40c06e28,0x22901235,0x4ce99a54,0xb9f3026a,0xa2ca9cf7,0x839ec978);
} // Note the endianness/argument order: _mm512_set_epi32 takes its sixteen
  // arguments from the most-significant 32-bit lane down to the least-
  // significant one, so the constants above are listed in that order.
  // The reordering was generated programmatically rather than by hand.
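
/*
 * Debug sketch (not in the original file; assumes AVX-512F plus the
 * intrinsics/stdint/stdio facilities already used elsewhere in this file):
 * because _mm512_set_epi32 orders its arguments e15..e0, the last four words
 * passed to each rc[i] above land in the lowest 128 bits in memory. The
 * hypothetical helper below simply dumps the lanes so the ordering can be
 * checked.
 */
static void print_epi32_lanes(__m512i v) {
  uint32_t lanes[16];
  _mm512_storeu_si512((void *)lanes, v);  /* lanes[0] = least-significant lane */
  for (int j = 0; j < 16; j++)
    printf("lane %2d: 0x%08x\n", j, lanes[j]);
}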

void test_implementations() {
  unsigned char *in = (unsigned char *)calloc(64*8, sizeof(unsigned char));
  unsigned char *out256 = (unsigned char *)calloc(32*8, sizeof(unsigned char));
  unsigned char *out512 = (unsigned char *)calloc(32*8, sizeof(unsigned char));
  unsigned char testvector256[32] = {0x80, 0x27, 0xcc, 0xb8, 0x79, 0x49, 0x77, 0x4b,
                                     0x78, 0xd0, 0x54, 0x5f, 0xb7, 0x2b, 0xf7, 0x0c,
                                     0x69, 0x5c, 0x2a, 0x09, 0x23, 0xcb, 0xd4, 0x7b,
                                     0xba, 0x11, 0x59, 0xef, 0xbf, 0x2b, 0x2c, 0x1c};

  unsigned char testvector512[32] = {0xbe, 0x7f, 0x72, 0x3b, 0x4e, 0x80, 0xa9, 0x98,
                                     0x13, 0xb2, 0x92, 0x28, 0x7f, 0x30, 0x6f, 0x62,
                                     0x5a, 0x6d, 0x57, 0x33, 0x1c, 0xae, 0x5f, 0x34,
                                     0xdd, 0x92, 0x77, 0xb0, 0x94, 0x5b, 0xe2, 0xaa};

  // Input for testvector
  for(i = 0; i < 512; i++) {

  // haraka512_8x(out512, in);

  for(i = 0; i < 32; i++) {
    if (out512[i % 32] != testvector512[i]) {
      printf("Error: testvector incorrect.\n");

void haraka256(unsigned char *out, const unsigned char *in) {

  s[1] = LOAD(in + 16);

  AES2(s[0], s[1], 0);

  AES2(s[0], s[1], 4);

  AES2(s[0], s[1], 8);

  AES2(s[0], s[1], 12);

  AES2(s[0], s[1], 16);

  s[0] = _mm_xor_si128(s[0], LOAD(in));
  s[1] = _mm_xor_si128(s[1], LOAD(in + 16));

  STORE(out + 16, s[1]);

void haraka256_keyed(unsigned char *out, const unsigned char *in, const u128 *rc) {

  s[1] = LOAD(in + 16);

  s[0] = _mm_xor_si128(s[0], LOAD(in));
  s[1] = _mm_xor_si128(s[1], LOAD(in + 16));

  STORE(out + 16, s[1]);

void haraka256_4x(unsigned char *out, const unsigned char *in) {
  __m128i s[4][2], tmp;

  s[0][0] = LOAD(in);
  s[0][1] = LOAD(in + 16);
  s[1][0] = LOAD(in + 32);
  s[1][1] = LOAD(in + 48);
  s[2][0] = LOAD(in + 64);
  s[2][1] = LOAD(in + 80);
  s[3][0] = LOAD(in + 96);

  MIX2(s[0][0], s[0][1]);
  MIX2(s[1][0], s[1][1]);
  MIX2(s[2][0], s[2][1]);
  MIX2(s[3][0], s[3][1]);

  s[0][0] = _mm_xor_si128(s[0][0], LOAD(in));
  s[0][1] = _mm_xor_si128(s[0][1], LOAD(in + 16));
  s[1][0] = _mm_xor_si128(s[1][0], LOAD(in + 32));
  s[1][1] = _mm_xor_si128(s[1][1], LOAD(in + 48));
  s[2][0] = _mm_xor_si128(s[2][0], LOAD(in + 64));
  s[2][1] = _mm_xor_si128(s[2][1], LOAD(in + 80));
  s[3][0] = _mm_xor_si128(s[3][0], LOAD(in + 96));
  s[3][1] = _mm_xor_si128(s[3][1], LOAD(in + 112));

  STORE(out, s[0][0]);
  STORE(out + 16, s[0][1]);
  STORE(out + 32, s[1][0]);
  STORE(out + 48, s[1][1]);
  STORE(out + 64, s[2][0]);
  STORE(out + 80, s[2][1]);
  STORE(out + 96, s[3][0]);
  STORE(out + 112, s[3][1]);

void haraka256_8x(unsigned char *out, const unsigned char *in) {
  // This is faster on Skylake; the code below is faster on Haswell.
  haraka256_4x(out, in);
  haraka256_4x(out + 128, in + 128);

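  /*
   * Layout note (added for clarity; inferred from haraka256_4x above): the
   * 8-way variant expects eight independent 32-byte inputs packed back to
   * back (256 bytes) and writes eight 32-byte digests the same way, so the
   * second 4-way call above starts 128 bytes into both buffers.
   */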
  // __m128i s[8][2], tmp;

  // s[0][0] = LOAD(in);
  // s[0][1] = LOAD(in + 16);
  // s[1][0] = LOAD(in + 32);
  // s[1][1] = LOAD(in + 48);
  // s[2][0] = LOAD(in + 64);
  // s[2][1] = LOAD(in + 80);
  // s[3][0] = LOAD(in + 96);
  // s[3][1] = LOAD(in + 112);
  // s[4][0] = LOAD(in + 128);
  // s[4][1] = LOAD(in + 144);
  // s[5][0] = LOAD(in + 160);
  // s[5][1] = LOAD(in + 176);
  // s[6][0] = LOAD(in + 192);
  // s[6][1] = LOAD(in + 208);
  // s[7][0] = LOAD(in + 224);
  // s[7][1] = LOAD(in + 240);

  // AES2_8x(s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], 0);

  // MIX2(s[0][0], s[0][1]);
  // MIX2(s[1][0], s[1][1]);
  // MIX2(s[2][0], s[2][1]);
  // MIX2(s[3][0], s[3][1]);
  // MIX2(s[4][0], s[4][1]);
  // MIX2(s[5][0], s[5][1]);
  // MIX2(s[6][0], s[6][1]);
  // MIX2(s[7][0], s[7][1]);

  // AES2_8x(s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], 4);

  // MIX2(s[0][0], s[0][1]);
  // MIX2(s[1][0], s[1][1]);
  // MIX2(s[2][0], s[2][1]);
  // MIX2(s[3][0], s[3][1]);
  // MIX2(s[4][0], s[4][1]);
  // MIX2(s[5][0], s[5][1]);
  // MIX2(s[6][0], s[6][1]);
  // MIX2(s[7][0], s[7][1]);

  // AES2_8x(s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], 8);

  // MIX2(s[0][0], s[0][1]);
  // MIX2(s[1][0], s[1][1]);
  // MIX2(s[2][0], s[2][1]);
  // MIX2(s[3][0], s[3][1]);
  // MIX2(s[4][0], s[4][1]);
  // MIX2(s[5][0], s[5][1]);
  // MIX2(s[6][0], s[6][1]);
  // MIX2(s[7][0], s[7][1]);

  // AES2_8x(s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], 12);

  // MIX2(s[0][0], s[0][1]);
  // MIX2(s[1][0], s[1][1]);
  // MIX2(s[2][0], s[2][1]);
  // MIX2(s[3][0], s[3][1]);
  // MIX2(s[4][0], s[4][1]);
  // MIX2(s[5][0], s[5][1]);
  // MIX2(s[6][0], s[6][1]);
  // MIX2(s[7][0], s[7][1]);

  // AES2_8x(s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], 16);

  // MIX2(s[0][0], s[0][1]);
  // MIX2(s[1][0], s[1][1]);
  // MIX2(s[2][0], s[2][1]);
  // MIX2(s[3][0], s[3][1]);
  // MIX2(s[4][0], s[4][1]);
  // MIX2(s[5][0], s[5][1]);
  // MIX2(s[6][0], s[6][1]);
  // MIX2(s[7][0], s[7][1]);

  // s[0][0] = _mm_xor_si128(s[0][0], LOAD(in));
  // s[0][1] = _mm_xor_si128(s[0][1], LOAD(in + 16));
  // s[1][0] = _mm_xor_si128(s[1][0], LOAD(in + 32));
  // s[1][1] = _mm_xor_si128(s[1][1], LOAD(in + 48));
  // s[2][0] = _mm_xor_si128(s[2][0], LOAD(in + 64));
  // s[2][1] = _mm_xor_si128(s[2][1], LOAD(in + 80));
  // s[3][0] = _mm_xor_si128(s[3][0], LOAD(in + 96));
  // s[3][1] = _mm_xor_si128(s[3][1], LOAD(in + 112));
  // s[4][0] = _mm_xor_si128(s[4][0], LOAD(in + 128));
  // s[4][1] = _mm_xor_si128(s[4][1], LOAD(in + 144));
  // s[5][0] = _mm_xor_si128(s[5][0], LOAD(in + 160));
  // s[5][1] = _mm_xor_si128(s[5][1], LOAD(in + 176));
  // s[6][0] = _mm_xor_si128(s[6][0], LOAD(in + 192));
  // s[6][1] = _mm_xor_si128(s[6][1], LOAD(in + 208));
  // s[7][0] = _mm_xor_si128(s[7][0], LOAD(in + 224));
  // s[7][1] = _mm_xor_si128(s[7][1], LOAD(in + 240));

  // STORE(out, s[0][0]);
  // STORE(out + 16, s[0][1]);
  // STORE(out + 32, s[1][0]);
  // STORE(out + 48, s[1][1]);
  // STORE(out + 64, s[2][0]);
  // STORE(out + 80, s[2][1]);
  // STORE(out + 96, s[3][0]);
  // STORE(out + 112, s[3][1]);
  // STORE(out + 128, s[4][0]);
  // STORE(out + 144, s[4][1]);
  // STORE(out + 160, s[5][0]);
  // STORE(out + 176, s[5][1]);
  // STORE(out + 192, s[6][0]);
  // STORE(out + 208, s[6][1]);
  // STORE(out + 224, s[7][0]);
  // STORE(out + 240, s[7][1]);

static void phex(uint8_t* str)

  for (i = 0; i < len; ++i)
    printf("%02x, ", str[i]);

void haraka512(unsigned char *out, const unsigned char *in) {

  s = _mm512_xor_si512(s, i);

  TRUNCSTORE(out, s);

void haraka512_zero(unsigned char *out, const unsigned char *in) {

  s = _mm512_xor_si512(s, i);

  TRUNCSTORE(out, s);

void haraka512_keyed(unsigned char *out, const unsigned char *in, const u128 *rc) {

  s[1] = LOAD(in + 16);
  s[2] = LOAD(in + 32);
  s[3] = LOAD(in + 48);

  AES4(s[0], s[1], s[2], s[3], 0);
  MIX4(s[0], s[1], s[2], s[3]);

  AES4(s[0], s[1], s[2], s[3], 8);
  MIX4(s[0], s[1], s[2], s[3]);

  AES4(s[0], s[1], s[2], s[3], 16);
  MIX4(s[0], s[1], s[2], s[3]);

  AES4(s[0], s[1], s[2], s[3], 24);
  MIX4_LAST(s[0], s[1], s[2], s[3]);

  AES4_LAST(s[0], s[1], s[2], s[3], 32);

  // s[0] = _mm_xor_si128(s[0], LOAD(in));
  // s[1] = _mm_xor_si128(s[1], LOAD(in + 16));
  // s[2] = _mm_xor_si128(s[2], LOAD(in + 32));
  // s[3] = _mm_xor_si128(s[0], LOAD(in + 48));
  ((uint32_t*)&out[0])[7] = ((uint32_t*)&s[0])[10] ^ ((uint32_t*)&in[52])[0];

  // TRUNCSTORE(out, s[0], s[1], s[2], s[3]);

void haraka512_4x(unsigned char *out, const unsigned char *in) {

  s[0][0] = LOAD(in);
  s[0][1] = LOAD(in + 16);
  s[0][2] = LOAD(in + 32);
  s[0][3] = LOAD(in + 48);
  s[1][0] = LOAD(in + 64);
  s[1][1] = LOAD(in + 80);
  s[1][2] = LOAD(in + 96);
  s[1][3] = LOAD(in + 112);
  s[2][0] = LOAD(in + 128);
  s[2][1] = LOAD(in + 144);
  s[2][2] = LOAD(in + 160);
  s[2][3] = LOAD(in + 176);
  s[3][0] = LOAD(in + 192);
  s[3][1] = LOAD(in + 208);
  s[3][2] = LOAD(in + 224);
  s[3][3] = LOAD(in + 240);

  AES4_4x(s[0], s[1], s[2], s[3], 0);
  MIX4(s[0][0], s[0][1], s[0][2], s[0][3]);
  MIX4(s[1][0], s[1][1], s[1][2], s[1][3]);
  MIX4(s[2][0], s[2][1], s[2][2], s[2][3]);
  MIX4(s[3][0], s[3][1], s[3][2], s[3][3]);

  AES4_4x(s[0], s[1], s[2], s[3], 8);
  MIX4(s[0][0], s[0][1], s[0][2], s[0][3]);
  MIX4(s[1][0], s[1][1], s[1][2], s[1][3]);
  MIX4(s[2][0], s[2][1], s[2][2], s[2][3]);
  MIX4(s[3][0], s[3][1], s[3][2], s[3][3]);

  AES4_4x(s[0], s[1], s[2], s[3], 16);
  MIX4(s[0][0], s[0][1], s[0][2], s[0][3]);
  MIX4(s[1][0], s[1][1], s[1][2], s[1][3]);
  MIX4(s[2][0], s[2][1], s[2][2], s[2][3]);
  MIX4(s[3][0], s[3][1], s[3][2], s[3][3]);

  AES4_4x(s[0], s[1], s[2], s[3], 24);
  MIX4(s[0][0], s[0][1], s[0][2], s[0][3]);
  MIX4(s[1][0], s[1][1], s[1][2], s[1][3]);
  MIX4(s[2][0], s[2][1], s[2][2], s[2][3]);
  MIX4(s[3][0], s[3][1], s[3][2], s[3][3]);

  AES4_4x(s[0], s[1], s[2], s[3], 32);
  MIX4(s[0][0], s[0][1], s[0][2], s[0][3]);
  MIX4(s[1][0], s[1][1], s[1][2], s[1][3]);
  MIX4(s[2][0], s[2][1], s[2][2], s[2][3]);
  MIX4(s[3][0], s[3][1], s[3][2], s[3][3]);

  s[0][0] = _mm_xor_si128(s[0][0], LOAD(in));
  s[0][1] = _mm_xor_si128(s[0][1], LOAD(in + 16));
  s[0][2] = _mm_xor_si128(s[0][2], LOAD(in + 32));
  s[0][3] = _mm_xor_si128(s[0][3], LOAD(in + 48));
  s[1][0] = _mm_xor_si128(s[1][0], LOAD(in + 64));
  s[1][1] = _mm_xor_si128(s[1][1], LOAD(in + 80));
  s[1][2] = _mm_xor_si128(s[1][2], LOAD(in + 96));
  s[1][3] = _mm_xor_si128(s[1][3], LOAD(in + 112));
  s[2][0] = _mm_xor_si128(s[2][0], LOAD(in + 128));
  s[2][1] = _mm_xor_si128(s[2][1], LOAD(in + 144));
  s[2][2] = _mm_xor_si128(s[2][2], LOAD(in + 160));
  s[2][3] = _mm_xor_si128(s[2][3], LOAD(in + 176));
  s[3][0] = _mm_xor_si128(s[3][0], LOAD(in + 192));
  s[3][1] = _mm_xor_si128(s[3][1], LOAD(in + 208));
  s[3][2] = _mm_xor_si128(s[3][2], LOAD(in + 224));
  s[3][3] = _mm_xor_si128(s[3][3], LOAD(in + 240));

  TRUNCSTORE(out, s[0][0], s[0][1], s[0][2], s[0][3]);
  TRUNCSTORE(out + 32, s[1][0], s[1][1], s[1][2], s[1][3]);
  TRUNCSTORE(out + 64, s[2][0], s[2][1], s[2][2], s[2][3]);
  TRUNCSTORE(out + 96, s[3][0], s[3][1], s[3][2], s[3][3]);

void haraka512_8x(unsigned char *out, const unsigned char *in) {
  // This is faster on Skylake; the code below is faster on Haswell.
  haraka512_4x(out, in);
  haraka512_4x(out + 128, in + 256);

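  /*
   * Layout note (added for clarity; inferred from haraka512_4x above): the
   * 8-way variant takes eight independent 64-byte inputs packed back to back
   * (512 bytes) and writes eight truncated 32-byte digests, so the second
   * 4-way call above advances 256 bytes in the input and 128 bytes in the
   * output.
   */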
  // u128 s[8][4], tmp;

  // s[0][0] = LOAD(in);
  // s[0][1] = LOAD(in + 16);
  // s[0][2] = LOAD(in + 32);
  // s[0][3] = LOAD(in + 48);
  // s[1][0] = LOAD(in + 64);
  // s[1][1] = LOAD(in + 80);
  // s[1][2] = LOAD(in + 96);
  // s[1][3] = LOAD(in + 112);
  // s[2][0] = LOAD(in + 128);
  // s[2][1] = LOAD(in + 144);
  // s[2][2] = LOAD(in + 160);
  // s[2][3] = LOAD(in + 176);
  // s[3][0] = LOAD(in + 192);
  // s[3][1] = LOAD(in + 208);
  // s[3][2] = LOAD(in + 224);
  // s[3][3] = LOAD(in + 240);
  // s[4][0] = LOAD(in + 256);
  // s[4][1] = LOAD(in + 272);
  // s[4][2] = LOAD(in + 288);
  // s[4][3] = LOAD(in + 304);
  // s[5][0] = LOAD(in + 320);
  // s[5][1] = LOAD(in + 336);
  // s[5][2] = LOAD(in + 352);
  // s[5][3] = LOAD(in + 368);
  // s[6][0] = LOAD(in + 384);
  // s[6][1] = LOAD(in + 400);
  // s[6][2] = LOAD(in + 416);
  // s[6][3] = LOAD(in + 432);
  // s[7][0] = LOAD(in + 448);
  // s[7][1] = LOAD(in + 464);
  // s[7][2] = LOAD(in + 480);
  // s[7][3] = LOAD(in + 496);

  // AES4_8x(s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], 0);
  // MIX4(s[0][0], s[0][1], s[0][2], s[0][3]);
  // MIX4(s[1][0], s[1][1], s[1][2], s[1][3]);
  // MIX4(s[2][0], s[2][1], s[2][2], s[2][3]);
  // MIX4(s[3][0], s[3][1], s[3][2], s[3][3]);
  // MIX4(s[4][0], s[4][1], s[4][2], s[4][3]);
  // MIX4(s[5][0], s[5][1], s[5][2], s[5][3]);
  // MIX4(s[6][0], s[6][1], s[6][2], s[6][3]);
  // MIX4(s[7][0], s[7][1], s[7][2], s[7][3]);

  // AES4_8x(s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], 8);
  // MIX4(s[0][0], s[0][1], s[0][2], s[0][3]);
  // MIX4(s[1][0], s[1][1], s[1][2], s[1][3]);
  // MIX4(s[2][0], s[2][1], s[2][2], s[2][3]);
  // MIX4(s[3][0], s[3][1], s[3][2], s[3][3]);
  // MIX4(s[4][0], s[4][1], s[4][2], s[4][3]);
  // MIX4(s[5][0], s[5][1], s[5][2], s[5][3]);
  // MIX4(s[6][0], s[6][1], s[6][2], s[6][3]);
  // MIX4(s[7][0], s[7][1], s[7][2], s[7][3]);

  // AES4_8x(s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], 16);
  // MIX4(s[0][0], s[0][1], s[0][2], s[0][3]);
  // MIX4(s[1][0], s[1][1], s[1][2], s[1][3]);
  // MIX4(s[2][0], s[2][1], s[2][2], s[2][3]);
  // MIX4(s[3][0], s[3][1], s[3][2], s[3][3]);
  // MIX4(s[4][0], s[4][1], s[4][2], s[4][3]);
  // MIX4(s[5][0], s[5][1], s[5][2], s[5][3]);
  // MIX4(s[6][0], s[6][1], s[6][2], s[6][3]);
  // MIX4(s[7][0], s[7][1], s[7][2], s[7][3]);

  // AES4_8x(s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], 24);
  // MIX4(s[0][0], s[0][1], s[0][2], s[0][3]);
  // MIX4(s[1][0], s[1][1], s[1][2], s[1][3]);
  // MIX4(s[2][0], s[2][1], s[2][2], s[2][3]);
  // MIX4(s[3][0], s[3][1], s[3][2], s[3][3]);
  // MIX4(s[4][0], s[4][1], s[4][2], s[4][3]);
  // MIX4(s[5][0], s[5][1], s[5][2], s[5][3]);
  // MIX4(s[6][0], s[6][1], s[6][2], s[6][3]);
  // MIX4(s[7][0], s[7][1], s[7][2], s[7][3]);

  // AES4_8x(s[0], s[1], s[2], s[3], s[4], s[5], s[6], s[7], 32);
  // MIX4(s[0][0], s[0][1], s[0][2], s[0][3]);
  // MIX4(s[1][0], s[1][1], s[1][2], s[1][3]);
  // MIX4(s[2][0], s[2][1], s[2][2], s[2][3]);
  // MIX4(s[3][0], s[3][1], s[3][2], s[3][3]);
  // MIX4(s[4][0], s[4][1], s[4][2], s[4][3]);
  // MIX4(s[5][0], s[5][1], s[5][2], s[5][3]);
  // MIX4(s[6][0], s[6][1], s[6][2], s[6][3]);
  // MIX4(s[7][0], s[7][1], s[7][2], s[7][3]);

  // s[0][0] = _mm_xor_si128(s[0][0], LOAD(in));
  // s[0][1] = _mm_xor_si128(s[0][1], LOAD(in + 16));
  // s[0][2] = _mm_xor_si128(s[0][2], LOAD(in + 32));
  // s[0][3] = _mm_xor_si128(s[0][3], LOAD(in + 48));
  // s[1][0] = _mm_xor_si128(s[1][0], LOAD(in + 64));
  // s[1][1] = _mm_xor_si128(s[1][1], LOAD(in + 80));
  // s[1][2] = _mm_xor_si128(s[1][2], LOAD(in + 96));
  // s[1][3] = _mm_xor_si128(s[1][3], LOAD(in + 112));
  // s[2][0] = _mm_xor_si128(s[2][0], LOAD(in + 128));
  // s[2][1] = _mm_xor_si128(s[2][1], LOAD(in + 144));
  // s[2][2] = _mm_xor_si128(s[2][2], LOAD(in + 160));
  // s[2][3] = _mm_xor_si128(s[2][3], LOAD(in + 176));
  // s[3][0] = _mm_xor_si128(s[3][0], LOAD(in + 192));
  // s[3][1] = _mm_xor_si128(s[3][1], LOAD(in + 208));
  // s[3][2] = _mm_xor_si128(s[3][2], LOAD(in + 224));
  // s[3][3] = _mm_xor_si128(s[3][3], LOAD(in + 240));
  // s[4][0] = _mm_xor_si128(s[4][0], LOAD(in + 256));
  // s[4][1] = _mm_xor_si128(s[4][1], LOAD(in + 272));
  // s[4][2] = _mm_xor_si128(s[4][2], LOAD(in + 288));
  // s[4][3] = _mm_xor_si128(s[4][3], LOAD(in + 304));
  // s[5][0] = _mm_xor_si128(s[5][0], LOAD(in + 320));
  // s[5][1] = _mm_xor_si128(s[5][1], LOAD(in + 336));
  // s[5][2] = _mm_xor_si128(s[5][2], LOAD(in + 352));
  // s[5][3] = _mm_xor_si128(s[5][3], LOAD(in + 368));
  // s[6][0] = _mm_xor_si128(s[6][0], LOAD(in + 384));
  // s[6][1] = _mm_xor_si128(s[6][1], LOAD(in + 400));
  // s[6][2] = _mm_xor_si128(s[6][2], LOAD(in + 416));
  // s[6][3] = _mm_xor_si128(s[6][3], LOAD(in + 432));
  // s[7][0] = _mm_xor_si128(s[7][0], LOAD(in + 448));
  // s[7][1] = _mm_xor_si128(s[7][1], LOAD(in + 464));
  // s[7][2] = _mm_xor_si128(s[7][2], LOAD(in + 480));
  // s[7][3] = _mm_xor_si128(s[7][3], LOAD(in + 496));

  // TRUNCSTORE(out, s[0][0], s[0][1], s[0][2], s[0][3]);
  // TRUNCSTORE(out + 32, s[1][0], s[1][1], s[1][2], s[1][3]);
  // TRUNCSTORE(out + 64, s[2][0], s[2][1], s[2][2], s[2][3]);
  // TRUNCSTORE(out + 96, s[3][0], s[3][1], s[3][2], s[3][3]);
  // TRUNCSTORE(out + 128, s[4][0], s[4][1], s[4][2], s[4][3]);
  // TRUNCSTORE(out + 160, s[5][0], s[5][1], s[5][2], s[5][3]);
  // TRUNCSTORE(out + 192, s[6][0], s[6][1], s[6][2], s[6][3]);
  // TRUNCSTORE(out + 224, s[7][0], s[7][1], s[7][2], s[7][3]);