arch/x86/crypto/aesni-intel_glue.c
1 /*
2  * Support for Intel AES-NI instructions. This file contains glue
3  * code; the real AES implementation is in aesni-intel_asm.S.
4  *
5  * Copyright (C) 2008, Intel Corp.
6  *    Author: Huang Ying <[email protected]>
7  *
8  * Added RFC4106 AES-GCM support for 128-bit keys under the AEAD
9  * interface for 64-bit kernels.
10  *    Authors: Adrian Hoban <[email protected]>
11  *             Gabriele Paoloni <[email protected]>
12  *             Tadeusz Struk ([email protected])
13  *             Aidan O'Mahony ([email protected])
14  *    Copyright (c) 2010, Intel Corporation.
15  *
16  * This program is free software; you can redistribute it and/or modify
17  * it under the terms of the GNU General Public License as published by
18  * the Free Software Foundation; either version 2 of the License, or
19  * (at your option) any later version.
20  */
21
22 #include <linux/hardirq.h>
23 #include <linux/types.h>
24 #include <linux/module.h>
25 #include <linux/err.h>
26 #include <crypto/algapi.h>
27 #include <crypto/aes.h>
28 #include <crypto/cryptd.h>
29 #include <crypto/ctr.h>
30 #include <crypto/b128ops.h>
31 #include <crypto/gcm.h>
32 #include <crypto/xts.h>
33 #include <asm/cpu_device_id.h>
34 #include <asm/fpu/api.h>
35 #include <asm/crypto/aes.h>
36 #include <crypto/scatterwalk.h>
37 #include <crypto/internal/aead.h>
38 #include <crypto/internal/simd.h>
39 #include <crypto/internal/skcipher.h>
40 #include <linux/workqueue.h>
41 #include <linux/spinlock.h>
42 #ifdef CONFIG_X86_64
43 #include <asm/crypto/glue_helper.h>
44 #endif
45
46
47 #define AESNI_ALIGN     16
48 #define AESNI_ALIGN_ATTR __attribute__ ((__aligned__(AESNI_ALIGN)))
49 #define AES_BLOCK_MASK  (~(AES_BLOCK_SIZE - 1))
50 #define RFC4106_HASH_SUBKEY_SIZE 16
51 #define AESNI_ALIGN_EXTRA ((AESNI_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
52 #define CRYPTO_AES_CTX_SIZE (sizeof(struct crypto_aes_ctx) + AESNI_ALIGN_EXTRA)
53 #define XTS_AES_CTX_SIZE (sizeof(struct aesni_xts_ctx) + AESNI_ALIGN_EXTRA)
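/*
 * AESNI_ALIGN_EXTRA reserves enough slack beyond the crypto API's
 * CRYPTO_MINALIGN-aligned context so that aes_ctx() below can round the
 * context pointer up to the 16-byte boundary the AES-NI routines require.
 */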
54
55 /* This data is stored at the end of the crypto_tfm struct.
56  * It is per-"session" data storage for the transform.
57  * This needs to be 16-byte aligned.
58  */
59 struct aesni_rfc4106_gcm_ctx {
60         u8 hash_subkey[16] AESNI_ALIGN_ATTR;
61         struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
62         u8 nonce[4];
63 };
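/*
 * For rfc4106(gcm(aes)) the last four bytes of the key are not AES key
 * material: common_rfc4106_set_key() copies them into ->nonce, and the
 * encrypt/decrypt helpers place them in the first four bytes of the IV.
 */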
64
65 struct generic_gcmaes_ctx {
66         u8 hash_subkey[16] AESNI_ALIGN_ATTR;
67         struct crypto_aes_ctx aes_key_expanded AESNI_ALIGN_ATTR;
68 };
69
70 struct aesni_xts_ctx {
71         u8 raw_tweak_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
72         u8 raw_crypt_ctx[sizeof(struct crypto_aes_ctx)] AESNI_ALIGN_ATTR;
73 };
74
75 #define GCM_BLOCK_LEN 16
76
77 struct gcm_context_data {
78         /* init, update and finalize context data */
79         u8 aad_hash[GCM_BLOCK_LEN];
80         u64 aad_length;
81         u64 in_length;
82         u8 partial_block_enc_key[GCM_BLOCK_LEN];
83         u8 orig_IV[GCM_BLOCK_LEN];
84         u8 current_counter[GCM_BLOCK_LEN];
85         u64 partial_block_len;
86         u64 unused;
87         u8 hash_keys[GCM_BLOCK_LEN * 8];
88 };
89
90 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
91                              unsigned int key_len);
92 asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
93                           const u8 *in);
94 asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
95                           const u8 *in);
96 asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
97                               const u8 *in, unsigned int len);
98 asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
99                               const u8 *in, unsigned int len);
100 asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
101                               const u8 *in, unsigned int len, u8 *iv);
102 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
103                               const u8 *in, unsigned int len, u8 *iv);
104
105 #define AVX_GEN2_OPTSIZE 640
106 #define AVX_GEN4_OPTSIZE 4096
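/*
 * GCM dispatch thresholds used by the wrappers below: requests shorter than
 * AVX_GEN2_OPTSIZE (or with a non-128-bit key) use the plain SSE
 * aesni_gcm_enc()/aesni_gcm_dec() routines, requests below AVX_GEN4_OPTSIZE
 * use the AVX (gen2) routines, and larger requests use the AVX2 (gen4)
 * routines, when the toolchain provides them.
 */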
107
108 #ifdef CONFIG_X86_64
109
110 static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
111                               const u8 *in, unsigned int len, u8 *iv);
112 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
113                               const u8 *in, unsigned int len, u8 *iv);
114
115 asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
116                                  const u8 *in, bool enc, u8 *iv);
117
118 /* asmlinkage void aesni_gcm_enc()
119  * void *ctx, AES key schedule. Starts on a 16-byte boundary.
120  * struct gcm_context_data *gdata, context data. May be uninitialized.
121  * u8 *out, Ciphertext output. Encrypt in-place is allowed.
122  * const u8 *in, Plaintext input.
123  * unsigned long plaintext_len, Length of data in bytes for encryption.
124  * u8 *iv, Pre-counter block j0: 12-byte IV concatenated with 0x00000001.
125  *         16-byte aligned pointer.
126  * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
127  * const u8 *aad, Additional Authentication Data (AAD).
128  * unsigned long aad_len, Length of AAD in bytes.
129  * u8 *auth_tag, Authenticated Tag output.
130  * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
131  *          Valid values are 16 (most likely), 12 or 8.
132  */
133 asmlinkage void aesni_gcm_enc(void *ctx,
134                         struct gcm_context_data *gdata, u8 *out,
135                         const u8 *in, unsigned long plaintext_len, u8 *iv,
136                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
137                         u8 *auth_tag, unsigned long auth_tag_len);
138
139 /* asmlinkage void aesni_gcm_dec()
140  * void *ctx, AES key schedule. Starts on a 16-byte boundary.
141  * struct gcm_context_data *gdata, context data. May be uninitialized.
142  * u8 *out, Plaintext output. Decrypt in-place is allowed.
143  * const u8 *in, Ciphertext input.
144  * unsigned long ciphertext_len, Length of data in bytes for decryption.
145  * u8 *iv, Pre-counter block j0: 12-byte IV concatenated with 0x00000001.
146  *         16-byte aligned pointer.
147  * u8 *hash_subkey, the Hash sub key input. Data starts on a 16-byte boundary.
148  * const u8 *aad, Additional Authentication Data (AAD).
149  * unsigned long aad_len, Length of AAD in bytes. With RFC4106 this is going
150  * to be 8 or 12 bytes.
151  * u8 *auth_tag, Authenticated Tag output.
152  * unsigned long auth_tag_len, Authenticated Tag Length in bytes.
153  * Valid values are 16 (most likely), 12 or 8.
154  */
155 asmlinkage void aesni_gcm_dec(void *ctx,
156                         struct gcm_context_data *gdata, u8 *out,
157                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
158                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
159                         u8 *auth_tag, unsigned long auth_tag_len);
160
161 /* Scatter / Gather routines, with args similar to above */
162 asmlinkage void aesni_gcm_init(void *ctx,
163                                struct gcm_context_data *gdata,
164                                u8 *iv,
165                                u8 *hash_subkey, const u8 *aad,
166                                unsigned long aad_len);
167 asmlinkage void aesni_gcm_enc_update(void *ctx,
168                                      struct gcm_context_data *gdata, u8 *out,
169                                      const u8 *in, unsigned long plaintext_len);
170 asmlinkage void aesni_gcm_dec_update(void *ctx,
171                                      struct gcm_context_data *gdata, u8 *out,
172                                      const u8 *in,
173                                      unsigned long ciphertext_len);
174 asmlinkage void aesni_gcm_finalize(void *ctx,
175                                    struct gcm_context_data *gdata,
176                                    u8 *auth_tag, unsigned long auth_tag_len);
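/*
 * Illustrative call sequence for the scatter/gather interface (a sketch of
 * what gcmaes_crypt_by_sg() below does, shown here only as documentation):
 *
 *	kernel_fpu_begin();
 *	aesni_gcm_init(aes_ctx, &data, iv, hash_subkey, assoc, assoclen);
 *	while (bytes remain)
 *		aesni_gcm_enc_update(aes_ctx, &data, dst, src, len);
 *	aesni_gcm_finalize(aes_ctx, &data, auth_tag, auth_tag_len);
 *	kernel_fpu_end();
 */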
177
178 #ifdef CONFIG_AS_AVX
179 asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
180                 void *keys, u8 *out, unsigned int num_bytes);
181 asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
182                 void *keys, u8 *out, unsigned int num_bytes);
183 asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
184                 void *keys, u8 *out, unsigned int num_bytes);
185 /*
186  * asmlinkage void aesni_gcm_precomp_avx_gen2()
187  * gcm_data *my_ctx_data, context data
188  * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
189  */
190 asmlinkage void aesni_gcm_precomp_avx_gen2(void *my_ctx_data, u8 *hash_subkey);
191
192 asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
193                         const u8 *in, unsigned long plaintext_len, u8 *iv,
194                         const u8 *aad, unsigned long aad_len,
195                         u8 *auth_tag, unsigned long auth_tag_len);
196
197 asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
198                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
199                         const u8 *aad, unsigned long aad_len,
200                         u8 *auth_tag, unsigned long auth_tag_len);
201
202 static void aesni_gcm_enc_avx(void *ctx,
203                         struct gcm_context_data *data, u8 *out,
204                         const u8 *in, unsigned long plaintext_len, u8 *iv,
205                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
206                         u8 *auth_tag, unsigned long auth_tag_len)
207 {
208         struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;
209         if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
210                 aesni_gcm_enc(ctx, data, out, in,
211                         plaintext_len, iv, hash_subkey, aad,
212                         aad_len, auth_tag, auth_tag_len);
213         } else {
214                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
215                 aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
216                                         aad_len, auth_tag, auth_tag_len);
217         }
218 }
219
220 static void aesni_gcm_dec_avx(void *ctx,
221                         struct gcm_context_data *data, u8 *out,
222                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
223                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
224                         u8 *auth_tag, unsigned long auth_tag_len)
225 {
226         struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;
227         if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
228                 aesni_gcm_dec(ctx, data, out, in,
229                         ciphertext_len, iv, hash_subkey, aad,
230                         aad_len, auth_tag, auth_tag_len);
231         } else {
232                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
233                 aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
234                                         aad_len, auth_tag, auth_tag_len);
235         }
236 }
237 #endif
238
239 #ifdef CONFIG_AS_AVX2
240 /*
241  * asmlinkage void aesni_gcm_precomp_avx_gen4()
242  * gcm_data *my_ctx_data, context data
243  * u8 *hash_subkey,  the Hash sub key input. Data starts on a 16-byte boundary.
244  */
245 asmlinkage void aesni_gcm_precomp_avx_gen4(void *my_ctx_data, u8 *hash_subkey);
246
247 asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
248                         const u8 *in, unsigned long plaintext_len, u8 *iv,
249                         const u8 *aad, unsigned long aad_len,
250                         u8 *auth_tag, unsigned long auth_tag_len);
251
252 asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
253                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
254                         const u8 *aad, unsigned long aad_len,
255                         u8 *auth_tag, unsigned long auth_tag_len);
256
257 static void aesni_gcm_enc_avx2(void *ctx,
258                         struct gcm_context_data *data, u8 *out,
259                         const u8 *in, unsigned long plaintext_len, u8 *iv,
260                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
261                         u8 *auth_tag, unsigned long auth_tag_len)
262 {
263         struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;
264         if ((plaintext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
265                 aesni_gcm_enc(ctx, data, out, in,
266                               plaintext_len, iv, hash_subkey, aad,
267                               aad_len, auth_tag, auth_tag_len);
268         } else if (plaintext_len < AVX_GEN4_OPTSIZE) {
269                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
270                 aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad,
271                                         aad_len, auth_tag, auth_tag_len);
272         } else {
273                 aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
274                 aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad,
275                                         aad_len, auth_tag, auth_tag_len);
276         }
277 }
278
279 static void aesni_gcm_dec_avx2(void *ctx,
280         struct gcm_context_data *data, u8 *out,
281                         const u8 *in, unsigned long ciphertext_len, u8 *iv,
282                         u8 *hash_subkey, const u8 *aad, unsigned long aad_len,
283                         u8 *auth_tag, unsigned long auth_tag_len)
284 {
285         struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx *)ctx;
286         if ((ciphertext_len < AVX_GEN2_OPTSIZE) || (aes_ctx->key_length != AES_KEYSIZE_128)) {
287                 aesni_gcm_dec(ctx, data, out, in,
288                               ciphertext_len, iv, hash_subkey,
289                               aad, aad_len, auth_tag, auth_tag_len);
290         } else if (ciphertext_len < AVX_GEN4_OPTSIZE) {
291                 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey);
292                 aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad,
293                                         aad_len, auth_tag, auth_tag_len);
294         } else {
295                 aesni_gcm_precomp_avx_gen4(ctx, hash_subkey);
296                 aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad,
297                                         aad_len, auth_tag, auth_tag_len);
298         }
299 }
300 #endif
301
302 static void (*aesni_gcm_enc_tfm)(void *ctx,
303                                  struct gcm_context_data *data, u8 *out,
304                                  const u8 *in, unsigned long plaintext_len,
305                                  u8 *iv, u8 *hash_subkey, const u8 *aad,
306                                  unsigned long aad_len, u8 *auth_tag,
307                                  unsigned long auth_tag_len);
308
309 static void (*aesni_gcm_dec_tfm)(void *ctx,
310                                  struct gcm_context_data *data, u8 *out,
311                                  const u8 *in, unsigned long ciphertext_len,
312                                  u8 *iv, u8 *hash_subkey, const u8 *aad,
313                                  unsigned long aad_len, u8 *auth_tag,
314                                  unsigned long auth_tag_len);
315
316 static inline struct
317 aesni_rfc4106_gcm_ctx *aesni_rfc4106_gcm_ctx_get(struct crypto_aead *tfm)
318 {
319         unsigned long align = AESNI_ALIGN;
320
321         if (align <= crypto_tfm_ctx_alignment())
322                 align = 1;
323         return PTR_ALIGN(crypto_aead_ctx(tfm), align);
324 }
325
326 static inline struct
327 generic_gcmaes_ctx *generic_gcmaes_ctx_get(struct crypto_aead *tfm)
328 {
329         unsigned long align = AESNI_ALIGN;
330
331         if (align <= crypto_tfm_ctx_alignment())
332                 align = 1;
333         return PTR_ALIGN(crypto_aead_ctx(tfm), align);
334 }
335 #endif
336
337 static inline struct crypto_aes_ctx *aes_ctx(void *raw_ctx)
338 {
339         unsigned long addr = (unsigned long)raw_ctx;
340         unsigned long align = AESNI_ALIGN;
341
342         if (align <= crypto_tfm_ctx_alignment())
343                 align = 1;
344         return (struct crypto_aes_ctx *)ALIGN(addr, align);
345 }
346
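/*
 * Key setup and the single-block encrypt/decrypt helpers below share a
 * common pattern: if the FPU cannot be used in the current context
 * (irq_fpu_usable() is false), fall back to the generic x86 C
 * implementation; otherwise wrap the AES-NI instructions in
 * kernel_fpu_begin()/kernel_fpu_end().
 */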
347 static int aes_set_key_common(struct crypto_tfm *tfm, void *raw_ctx,
348                               const u8 *in_key, unsigned int key_len)
349 {
350         struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx);
351         u32 *flags = &tfm->crt_flags;
352         int err;
353
354         if (key_len != AES_KEYSIZE_128 && key_len != AES_KEYSIZE_192 &&
355             key_len != AES_KEYSIZE_256) {
356                 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
357                 return -EINVAL;
358         }
359
360         if (!irq_fpu_usable())
361                 err = crypto_aes_expand_key(ctx, in_key, key_len);
362         else {
363                 kernel_fpu_begin();
364                 err = aesni_set_key(ctx, in_key, key_len);
365                 kernel_fpu_end();
366         }
367
368         return err;
369 }
370
371 static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
372                        unsigned int key_len)
373 {
374         return aes_set_key_common(tfm, crypto_tfm_ctx(tfm), in_key, key_len);
375 }
376
377 static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
378 {
379         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
380
381         if (!irq_fpu_usable())
382                 crypto_aes_encrypt_x86(ctx, dst, src);
383         else {
384                 kernel_fpu_begin();
385                 aesni_enc(ctx, dst, src);
386                 kernel_fpu_end();
387         }
388 }
389
390 static void aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
391 {
392         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
393
394         if (!irq_fpu_usable())
395                 crypto_aes_decrypt_x86(ctx, dst, src);
396         else {
397                 kernel_fpu_begin();
398                 aesni_dec(ctx, dst, src);
399                 kernel_fpu_end();
400         }
401 }
402
403 static void __aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
404 {
405         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
406
407         aesni_enc(ctx, dst, src);
408 }
409
410 static void __aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
411 {
412         struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm));
413
414         aesni_dec(ctx, dst, src);
415 }
416
417 static int aesni_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
418                                  unsigned int len)
419 {
420         return aes_set_key_common(crypto_skcipher_tfm(tfm),
421                                   crypto_skcipher_ctx(tfm), key, len);
422 }
423
424 static int ecb_encrypt(struct skcipher_request *req)
425 {
426         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
427         struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
428         struct skcipher_walk walk;
429         unsigned int nbytes;
430         int err;
431
432         err = skcipher_walk_virt(&walk, req, true);
433
434         kernel_fpu_begin();
435         while ((nbytes = walk.nbytes)) {
436                 aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
437                               nbytes & AES_BLOCK_MASK);
438                 nbytes &= AES_BLOCK_SIZE - 1;
439                 err = skcipher_walk_done(&walk, nbytes);
440         }
441         kernel_fpu_end();
442
443         return err;
444 }
445
446 static int ecb_decrypt(struct skcipher_request *req)
447 {
448         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
449         struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
450         struct skcipher_walk walk;
451         unsigned int nbytes;
452         int err;
453
454         err = skcipher_walk_virt(&walk, req, true);
455
456         kernel_fpu_begin();
457         while ((nbytes = walk.nbytes)) {
458                 aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
459                               nbytes & AES_BLOCK_MASK);
460                 nbytes &= AES_BLOCK_SIZE - 1;
461                 err = skcipher_walk_done(&walk, nbytes);
462         }
463         kernel_fpu_end();
464
465         return err;
466 }
467
468 static int cbc_encrypt(struct skcipher_request *req)
469 {
470         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
471         struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
472         struct skcipher_walk walk;
473         unsigned int nbytes;
474         int err;
475
476         err = skcipher_walk_virt(&walk, req, true);
477
478         kernel_fpu_begin();
479         while ((nbytes = walk.nbytes)) {
480                 aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
481                               nbytes & AES_BLOCK_MASK, walk.iv);
482                 nbytes &= AES_BLOCK_SIZE - 1;
483                 err = skcipher_walk_done(&walk, nbytes);
484         }
485         kernel_fpu_end();
486
487         return err;
488 }
489
490 static int cbc_decrypt(struct skcipher_request *req)
491 {
492         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
493         struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
494         struct skcipher_walk walk;
495         unsigned int nbytes;
496         int err;
497
498         err = skcipher_walk_virt(&walk, req, true);
499
500         kernel_fpu_begin();
501         while ((nbytes = walk.nbytes)) {
502                 aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
503                               nbytes & AES_BLOCK_MASK, walk.iv);
504                 nbytes &= AES_BLOCK_SIZE - 1;
505                 err = skcipher_walk_done(&walk, nbytes);
506         }
507         kernel_fpu_end();
508
509         return err;
510 }
511
512 #ifdef CONFIG_X86_64
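/*
 * ctr_crypt_final() handles the trailing partial block of a CTR request:
 * it encrypts the current counter block to obtain a keystream block and
 * XORs only the remaining nbytes of it into the output.
 */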
513 static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
514                             struct skcipher_walk *walk)
515 {
516         u8 *ctrblk = walk->iv;
517         u8 keystream[AES_BLOCK_SIZE];
518         u8 *src = walk->src.virt.addr;
519         u8 *dst = walk->dst.virt.addr;
520         unsigned int nbytes = walk->nbytes;
521
522         aesni_enc(ctx, keystream, ctrblk);
523         crypto_xor_cpy(dst, keystream, src, nbytes);
524
525         crypto_inc(ctrblk, AES_BLOCK_SIZE);
526 }
527
528 #ifdef CONFIG_AS_AVX
529 static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
530                               const u8 *in, unsigned int len, u8 *iv)
531 {
532         /*
533          * Based on the key length, dispatch to the matching "by8" AVX
534          * implementation of CTR mode for improved performance.
535          * aes_set_key_common() ensures that the key length is one of
536          * {128, 192, 256}.
537          */
538         if (ctx->key_length == AES_KEYSIZE_128)
539                 aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
540         else if (ctx->key_length == AES_KEYSIZE_192)
541                 aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
542         else
543                 aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
544 }
545 #endif
546
547 static int ctr_crypt(struct skcipher_request *req)
548 {
549         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
550         struct crypto_aes_ctx *ctx = aes_ctx(crypto_skcipher_ctx(tfm));
551         struct skcipher_walk walk;
552         unsigned int nbytes;
553         int err;
554
555         err = skcipher_walk_virt(&walk, req, true);
556
557         kernel_fpu_begin();
558         while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
559                 aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
560                                       nbytes & AES_BLOCK_MASK, walk.iv);
561                 nbytes &= AES_BLOCK_SIZE - 1;
562                 err = skcipher_walk_done(&walk, nbytes);
563         }
564         if (walk.nbytes) {
565                 ctr_crypt_final(ctx, &walk);
566                 err = skcipher_walk_done(&walk, 0);
567         }
568         kernel_fpu_end();
569
570         return err;
571 }
572
573 static int xts_aesni_setkey(struct crypto_skcipher *tfm, const u8 *key,
574                             unsigned int keylen)
575 {
576         struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
577         int err;
578
579         err = xts_verify_key(tfm, key, keylen);
580         if (err)
581                 return err;
582
583         keylen /= 2;
584
585         /* first half of xts-key is for crypt */
586         err = aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_crypt_ctx,
587                                  key, keylen);
588         if (err)
589                 return err;
590
591         /* second half of xts-key is for tweak */
592         return aes_set_key_common(crypto_skcipher_tfm(tfm), ctx->raw_tweak_ctx,
593                                   key + keylen, keylen);
594 }
595
596
597 static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in)
598 {
599         aesni_enc(ctx, out, in);
600 }
601
602 static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv)
603 {
604         glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc));
605 }
606
607 static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv)
608 {
609         glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec));
610 }
611
612 static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
613 {
614         aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv);
615 }
616
617 static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv)
618 {
619         aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv);
620 }
621
622 static const struct common_glue_ctx aesni_enc_xts = {
623         .num_funcs = 2,
624         .fpu_blocks_limit = 1,
625
626         .funcs = { {
627                 .num_blocks = 8,
628                 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc8) }
629         }, {
630                 .num_blocks = 1,
631                 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_enc) }
632         } }
633 };
634
635 static const struct common_glue_ctx aesni_dec_xts = {
636         .num_funcs = 2,
637         .fpu_blocks_limit = 1,
638
639         .funcs = { {
640                 .num_blocks = 8,
641                 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec8) }
642         }, {
643                 .num_blocks = 1,
644                 .fn_u = { .xts = GLUE_XTS_FUNC_CAST(aesni_xts_dec) }
645         } }
646 };
647
648 static int xts_encrypt(struct skcipher_request *req)
649 {
650         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
651         struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
652
653         return glue_xts_req_128bit(&aesni_enc_xts, req,
654                                    XTS_TWEAK_CAST(aesni_xts_tweak),
655                                    aes_ctx(ctx->raw_tweak_ctx),
656                                    aes_ctx(ctx->raw_crypt_ctx));
657 }
658
659 static int xts_decrypt(struct skcipher_request *req)
660 {
661         struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
662         struct aesni_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
663
664         return glue_xts_req_128bit(&aesni_dec_xts, req,
665                                    XTS_TWEAK_CAST(aesni_xts_tweak),
666                                    aes_ctx(ctx->raw_tweak_ctx),
667                                    aes_ctx(ctx->raw_crypt_ctx));
668 }
669
670 static int rfc4106_init(struct crypto_aead *aead)
671 {
672         struct cryptd_aead *cryptd_tfm;
673         struct cryptd_aead **ctx = crypto_aead_ctx(aead);
674
675         cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-aesni",
676                                        CRYPTO_ALG_INTERNAL,
677                                        CRYPTO_ALG_INTERNAL);
678         if (IS_ERR(cryptd_tfm))
679                 return PTR_ERR(cryptd_tfm);
680
681         *ctx = cryptd_tfm;
682         crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
683         return 0;
684 }
685
686 static void rfc4106_exit(struct crypto_aead *aead)
687 {
688         struct cryptd_aead **ctx = crypto_aead_ctx(aead);
689
690         cryptd_free_aead(*ctx);
691 }
692
693 static int
694 rfc4106_set_hash_subkey(u8 *hash_subkey, const u8 *key, unsigned int key_len)
695 {
696         struct crypto_cipher *tfm;
697         int ret;
698
699         tfm = crypto_alloc_cipher("aes", 0, 0);
700         if (IS_ERR(tfm))
701                 return PTR_ERR(tfm);
702
703         ret = crypto_cipher_setkey(tfm, key, key_len);
704         if (ret)
705                 goto out_free_cipher;
706
707         /* Clear the hash sub key container to zero: the hash sub key
708          * is the encryption of the all-zero block. */
709         memset(hash_subkey, 0, RFC4106_HASH_SUBKEY_SIZE);
710
711         crypto_cipher_encrypt_one(tfm, hash_subkey, hash_subkey);
712
713 out_free_cipher:
714         crypto_free_cipher(tfm);
715         return ret;
716 }
717
718 static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
719                                   unsigned int key_len)
720 {
721         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead);
722
723         if (key_len < 4) {
724                 crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
725                 return -EINVAL;
726         }
727         /* Account for the 4-byte nonce at the end. */
728         key_len -= 4;
729
730         memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce));
731
732         return aes_set_key_common(crypto_aead_tfm(aead),
733                                   &ctx->aes_key_expanded, key, key_len) ?:
734                rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
735 }
736
737 static int gcmaes_wrapper_set_key(struct crypto_aead *parent, const u8 *key,
738                                   unsigned int key_len)
739 {
740         struct cryptd_aead **ctx = crypto_aead_ctx(parent);
741         struct cryptd_aead *cryptd_tfm = *ctx;
742
743         return crypto_aead_setkey(&cryptd_tfm->base, key, key_len);
744 }
745
746 static int common_rfc4106_set_authsize(struct crypto_aead *aead,
747                                        unsigned int authsize)
748 {
749         switch (authsize) {
750         case 8:
751         case 12:
752         case 16:
753                 break;
754         default:
755                 return -EINVAL;
756         }
757
758         return 0;
759 }
760
761 /* This is the Integrity Check Value (aka the authentication tag) length and
762  * can be 8, 12 or 16 bytes long. */
763 static int gcmaes_wrapper_set_authsize(struct crypto_aead *parent,
764                                        unsigned int authsize)
765 {
766         struct cryptd_aead **ctx = crypto_aead_ctx(parent);
767         struct cryptd_aead *cryptd_tfm = *ctx;
768
769         return crypto_aead_setauthsize(&cryptd_tfm->base, authsize);
770 }
771
772 static int generic_gcmaes_set_authsize(struct crypto_aead *tfm,
773                                        unsigned int authsize)
774 {
775         switch (authsize) {
776         case 4:
777         case 8:
778         case 12:
779         case 13:
780         case 14:
781         case 15:
782         case 16:
783                 break;
784         default:
785                 return -EINVAL;
786         }
787
788         return 0;
789 }
790
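/*
 * gcmaes_crypt_by_sg() drives the init/update/finalize interface directly
 * over the request's scatterlists, so the plaintext/ciphertext never needs
 * to be linearized; only the associated data is copied to a bounce buffer
 * when it is not already contiguous in lowmem.
 */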
791 static int gcmaes_crypt_by_sg(bool enc, struct aead_request *req,
792                               unsigned int assoclen, u8 *hash_subkey,
793                               u8 *iv, void *aes_ctx)
794 {
795         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
796         unsigned long auth_tag_len = crypto_aead_authsize(tfm);
797         struct gcm_context_data data AESNI_ALIGN_ATTR;
798         struct scatter_walk dst_sg_walk = {};
799         unsigned long left = req->cryptlen;
800         unsigned long len, srclen, dstlen;
801         struct scatter_walk assoc_sg_walk;
802         struct scatter_walk src_sg_walk;
803         struct scatterlist src_start[2];
804         struct scatterlist dst_start[2];
805         struct scatterlist *src_sg;
806         struct scatterlist *dst_sg;
807         u8 *src, *dst, *assoc;
808         u8 *assocmem = NULL;
809         u8 authTag[16];
810
811         if (!enc)
812                 left -= auth_tag_len;
813
814         /* Linearize assoc, if not already linear */
815         if (req->src->length >= assoclen && req->src->length &&
816                 (!PageHighMem(sg_page(req->src)) ||
817                         req->src->offset + req->src->length <= PAGE_SIZE)) {
818                 scatterwalk_start(&assoc_sg_walk, req->src);
819                 assoc = scatterwalk_map(&assoc_sg_walk);
820         } else {
821                 /* assoc can be any length, so must be on heap */
822                 assocmem = kmalloc(assoclen, GFP_ATOMIC);
823                 if (unlikely(!assocmem))
824                         return -ENOMEM;
825                 assoc = assocmem;
826
827                 scatterwalk_map_and_copy(assoc, req->src, 0, assoclen, 0);
828         }
829
830         src_sg = scatterwalk_ffwd(src_start, req->src, req->assoclen);
831         scatterwalk_start(&src_sg_walk, src_sg);
832         if (req->src != req->dst) {
833                 dst_sg = scatterwalk_ffwd(dst_start, req->dst, req->assoclen);
834                 scatterwalk_start(&dst_sg_walk, dst_sg);
835         }
836
837         kernel_fpu_begin();
838         aesni_gcm_init(aes_ctx, &data, iv,
839                 hash_subkey, assoc, assoclen);
840         if (req->src != req->dst) {
841                 while (left) {
842                         src = scatterwalk_map(&src_sg_walk);
843                         dst = scatterwalk_map(&dst_sg_walk);
844                         srclen = scatterwalk_clamp(&src_sg_walk, left);
845                         dstlen = scatterwalk_clamp(&dst_sg_walk, left);
846                         len = min(srclen, dstlen);
847                         if (len) {
848                                 if (enc)
849                                         aesni_gcm_enc_update(aes_ctx, &data,
850                                                              dst, src, len);
851                                 else
852                                         aesni_gcm_dec_update(aes_ctx, &data,
853                                                              dst, src, len);
854                         }
855                         left -= len;
856
857                         scatterwalk_unmap(src);
858                         scatterwalk_unmap(dst);
859                         scatterwalk_advance(&src_sg_walk, len);
860                         scatterwalk_advance(&dst_sg_walk, len);
861                         scatterwalk_done(&src_sg_walk, 0, left);
862                         scatterwalk_done(&dst_sg_walk, 1, left);
863                 }
864         } else {
865                 while (left) {
866                         dst = src = scatterwalk_map(&src_sg_walk);
867                         len = scatterwalk_clamp(&src_sg_walk, left);
868                         if (len) {
869                                 if (enc)
870                                         aesni_gcm_enc_update(aes_ctx, &data,
871                                                              src, src, len);
872                                 else
873                                         aesni_gcm_dec_update(aes_ctx, &data,
874                                                              src, src, len);
875                         }
876                         left -= len;
877                         scatterwalk_unmap(src);
878                         scatterwalk_advance(&src_sg_walk, len);
879                         scatterwalk_done(&src_sg_walk, 1, left);
880                 }
881         }
882         aesni_gcm_finalize(aes_ctx, &data, authTag, auth_tag_len);
883         kernel_fpu_end();
884
885         if (!assocmem)
886                 scatterwalk_unmap(assoc);
887         else
888                 kfree(assocmem);
889
890         if (!enc) {
891                 u8 authTagMsg[16];
892
893                 /* Copy out original authTag */
894                 scatterwalk_map_and_copy(authTagMsg, req->src,
895                                          req->assoclen + req->cryptlen -
896                                          auth_tag_len,
897                                          auth_tag_len, 0);
898
899                 /* Compare generated tag with passed in tag. */
900                 return crypto_memneq(authTagMsg, authTag, auth_tag_len) ?
901                         -EBADMSG : 0;
902         }
903
904         /* Copy in the authTag */
905         scatterwalk_map_and_copy(authTag, req->dst,
906                                  req->assoclen + req->cryptlen,
907                                  auth_tag_len, 1);
908
909         return 0;
910 }
911
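/*
 * gcmaes_encrypt()/gcmaes_decrypt() are the one-shot paths for the AVX
 * routines, which have no scatter/gather interface: requests that are
 * small, use a non-128-bit key, or run without AVX are redirected to
 * gcmaes_crypt_by_sg(); everything else is linearized (mapped or copied
 * into a contiguous buffer) and handed to the selected aesni_gcm_*_tfm
 * routine.
 */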
912 static int gcmaes_encrypt(struct aead_request *req, unsigned int assoclen,
913                           u8 *hash_subkey, u8 *iv, void *aes_ctx)
914 {
915         u8 one_entry_in_sg = 0;
916         u8 *src, *dst, *assoc;
917         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
918         unsigned long auth_tag_len = crypto_aead_authsize(tfm);
919         struct scatter_walk src_sg_walk;
920         struct scatter_walk dst_sg_walk = {};
921         struct gcm_context_data data AESNI_ALIGN_ATTR;
922
923         if (((struct crypto_aes_ctx *)aes_ctx)->key_length != AES_KEYSIZE_128 ||
924                 aesni_gcm_enc_tfm == aesni_gcm_enc ||
925                 req->cryptlen < AVX_GEN2_OPTSIZE) {
926                 return gcmaes_crypt_by_sg(true, req, assoclen, hash_subkey, iv,
927                                           aes_ctx);
928         }
929         if (sg_is_last(req->src) &&
930             (!PageHighMem(sg_page(req->src)) ||
931             req->src->offset + req->src->length <= PAGE_SIZE) &&
932             sg_is_last(req->dst) &&
933             (!PageHighMem(sg_page(req->dst)) ||
934             req->dst->offset + req->dst->length <= PAGE_SIZE)) {
935                 one_entry_in_sg = 1;
936                 scatterwalk_start(&src_sg_walk, req->src);
937                 assoc = scatterwalk_map(&src_sg_walk);
938                 src = assoc + req->assoclen;
939                 dst = src;
940                 if (unlikely(req->src != req->dst)) {
941                         scatterwalk_start(&dst_sg_walk, req->dst);
942                         dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
943                 }
944         } else {
945                 /* Allocate memory for src, dst, assoc */
946                 assoc = kmalloc(req->cryptlen + auth_tag_len + req->assoclen,
947                         GFP_ATOMIC);
948                 if (unlikely(!assoc))
949                         return -ENOMEM;
950                 scatterwalk_map_and_copy(assoc, req->src, 0,
951                                          req->assoclen + req->cryptlen, 0);
952                 src = assoc + req->assoclen;
953                 dst = src;
954         }
955
956         kernel_fpu_begin();
957         aesni_gcm_enc_tfm(aes_ctx, &data, dst, src, req->cryptlen, iv,
958                           hash_subkey, assoc, assoclen,
959                           dst + req->cryptlen, auth_tag_len);
960         kernel_fpu_end();
961
962         /* The authTag (aka the Integrity Check Value) needs to be written
963          * back to the packet. */
964         if (one_entry_in_sg) {
965                 if (unlikely(req->src != req->dst)) {
966                         scatterwalk_unmap(dst - req->assoclen);
967                         scatterwalk_advance(&dst_sg_walk, req->dst->length);
968                         scatterwalk_done(&dst_sg_walk, 1, 0);
969                 }
970                 scatterwalk_unmap(assoc);
971                 scatterwalk_advance(&src_sg_walk, req->src->length);
972                 scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
973         } else {
974                 scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
975                                          req->cryptlen + auth_tag_len, 1);
976                 kfree(assoc);
977         }
978         return 0;
979 }
980
981 static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
982                           u8 *hash_subkey, u8 *iv, void *aes_ctx)
983 {
984         u8 one_entry_in_sg = 0;
985         u8 *src, *dst, *assoc;
986         unsigned long tempCipherLen = 0;
987         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
988         unsigned long auth_tag_len = crypto_aead_authsize(tfm);
989         u8 authTag[16];
990         struct scatter_walk src_sg_walk;
991         struct scatter_walk dst_sg_walk = {};
992         struct gcm_context_data data AESNI_ALIGN_ATTR;
993         int retval = 0;
994
995         if (((struct crypto_aes_ctx *)aes_ctx)->key_length != AES_KEYSIZE_128 ||
996                 aesni_gcm_enc_tfm == aesni_gcm_enc ||
997                 req->cryptlen < AVX_GEN2_OPTSIZE) {
998                 return gcmaes_crypt_by_sg(false, req, assoclen, hash_subkey, iv,
999                                           aes_ctx);
1000         }
1001         tempCipherLen = (unsigned long)(req->cryptlen - auth_tag_len);
1002
1003         if (sg_is_last(req->src) &&
1004             (!PageHighMem(sg_page(req->src)) ||
1005             req->src->offset + req->src->length <= PAGE_SIZE) &&
1006             sg_is_last(req->dst) && req->dst->length &&
1007             (!PageHighMem(sg_page(req->dst)) ||
1008             req->dst->offset + req->dst->length <= PAGE_SIZE)) {
1009                 one_entry_in_sg = 1;
1010                 scatterwalk_start(&src_sg_walk, req->src);
1011                 assoc = scatterwalk_map(&src_sg_walk);
1012                 src = assoc + req->assoclen;
1013                 dst = src;
1014                 if (unlikely(req->src != req->dst)) {
1015                         scatterwalk_start(&dst_sg_walk, req->dst);
1016                         dst = scatterwalk_map(&dst_sg_walk) + req->assoclen;
1017                 }
1018         } else {
1019                 /* Allocate memory for src, dst, assoc */
1020                 assoc = kmalloc(req->cryptlen + req->assoclen, GFP_ATOMIC);
1021                 if (!assoc)
1022                         return -ENOMEM;
1023                 scatterwalk_map_and_copy(assoc, req->src, 0,
1024                                          req->assoclen + req->cryptlen, 0);
1025                 src = assoc + req->assoclen;
1026                 dst = src;
1027         }
1028
1029
1030         kernel_fpu_begin();
1031         aesni_gcm_dec_tfm(aes_ctx, &data, dst, src, tempCipherLen, iv,
1032                           hash_subkey, assoc, assoclen,
1033                           authTag, auth_tag_len);
1034         kernel_fpu_end();
1035
1036         /* Compare generated tag with passed in tag. */
1037         retval = crypto_memneq(src + tempCipherLen, authTag, auth_tag_len) ?
1038                 -EBADMSG : 0;
1039
1040         if (one_entry_in_sg) {
1041                 if (unlikely(req->src != req->dst)) {
1042                         scatterwalk_unmap(dst - req->assoclen);
1043                         scatterwalk_advance(&dst_sg_walk, req->dst->length);
1044                         scatterwalk_done(&dst_sg_walk, 1, 0);
1045                 }
1046                 scatterwalk_unmap(assoc);
1047                 scatterwalk_advance(&src_sg_walk, req->src->length);
1048                 scatterwalk_done(&src_sg_walk, req->src == req->dst, 0);
1049         } else {
1050                 scatterwalk_map_and_copy(dst, req->dst, req->assoclen,
1051                                          tempCipherLen, 1);
1052                 kfree(assoc);
1053         }
1054         return retval;
1055
1056 }
1057
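/*
 * For RFC4106 the request's associated data includes the 8-byte explicit
 * IV, giving an assoclen of 16 or 20 bytes; only the leading 8 or 12 bytes
 * are authenticated as AAD, hence the "req->assoclen - 8" passed to
 * gcmaes_encrypt()/gcmaes_decrypt() below.
 */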
1058 static int helper_rfc4106_encrypt(struct aead_request *req)
1059 {
1060         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1061         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1062         void *aes_ctx = &(ctx->aes_key_expanded);
1063         u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
1064         unsigned int i;
1065         __be32 counter = cpu_to_be32(1);
1066
1067         /* Assuming we are supporting rfc4106 64-bit extended
1068          * sequence numbers, the AAD length must be
1069          * 16 or 20 bytes. */
1070         if (unlikely(req->assoclen != 16 && req->assoclen != 20))
1071                 return -EINVAL;
1072
1073         /* Build the IV: 4-byte nonce (salt), 8-byte explicit IV, then a 32-bit counter of 1. */
1074         for (i = 0; i < 4; i++)
1075                 *(iv+i) = ctx->nonce[i];
1076         for (i = 0; i < 8; i++)
1077                 *(iv+4+i) = req->iv[i];
1078         *((__be32 *)(iv+12)) = counter;
1079
1080         return gcmaes_encrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
1081                               aes_ctx);
1082 }
1083
1084 static int helper_rfc4106_decrypt(struct aead_request *req)
1085 {
1086         __be32 counter = cpu_to_be32(1);
1087         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1088         struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
1089         void *aes_ctx = &(ctx->aes_key_expanded);
1090         u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
1091         unsigned int i;
1092
1093         if (unlikely(req->assoclen != 16 && req->assoclen != 20))
1094                 return -EINVAL;
1095
1096         /* Assuming we are supporting rfc4106 64-bit extended
1097          * sequence numbers, the AAD length must be
1098          * 16 or 20 bytes. */
1099
1100         /* Build the IV: 4-byte nonce (salt), 8-byte explicit IV, then a 32-bit counter of 1. */
1101         for (i = 0; i < 4; i++)
1102                 *(iv+i) = ctx->nonce[i];
1103         for (i = 0; i < 8; i++)
1104                 *(iv+4+i) = req->iv[i];
1105         *((__be32 *)(iv+12)) = counter;
1106
1107         return gcmaes_decrypt(req, req->assoclen - 8, ctx->hash_subkey, iv,
1108                               aes_ctx);
1109 }
1110
1111 static int gcmaes_wrapper_encrypt(struct aead_request *req)
1112 {
1113         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1114         struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
1115         struct cryptd_aead *cryptd_tfm = *ctx;
1116
1117         tfm = &cryptd_tfm->base;
1118         if (irq_fpu_usable() && (!in_atomic() ||
1119                                  !cryptd_aead_queued(cryptd_tfm)))
1120                 tfm = cryptd_aead_child(cryptd_tfm);
1121
1122         aead_request_set_tfm(req, tfm);
1123
1124         return crypto_aead_encrypt(req);
1125 }
1126
1127 static int gcmaes_wrapper_decrypt(struct aead_request *req)
1128 {
1129         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1130         struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
1131         struct cryptd_aead *cryptd_tfm = *ctx;
1132
1133         tfm = &cryptd_tfm->base;
1134         if (irq_fpu_usable() && (!in_atomic() ||
1135                                  !cryptd_aead_queued(cryptd_tfm)))
1136                 tfm = cryptd_aead_child(cryptd_tfm);
1137
1138         aead_request_set_tfm(req, tfm);
1139
1140         return crypto_aead_decrypt(req);
1141 }
1142 #endif
1143
1144 static struct crypto_alg aesni_algs[] = { {
1145         .cra_name               = "aes",
1146         .cra_driver_name        = "aes-aesni",
1147         .cra_priority           = 300,
1148         .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
1149         .cra_blocksize          = AES_BLOCK_SIZE,
1150         .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
1151         .cra_module             = THIS_MODULE,
1152         .cra_u  = {
1153                 .cipher = {
1154                         .cia_min_keysize        = AES_MIN_KEY_SIZE,
1155                         .cia_max_keysize        = AES_MAX_KEY_SIZE,
1156                         .cia_setkey             = aes_set_key,
1157                         .cia_encrypt            = aes_encrypt,
1158                         .cia_decrypt            = aes_decrypt
1159                 }
1160         }
1161 }, {
1162         .cra_name               = "__aes",
1163         .cra_driver_name        = "__aes-aesni",
1164         .cra_priority           = 300,
1165         .cra_flags              = CRYPTO_ALG_TYPE_CIPHER | CRYPTO_ALG_INTERNAL,
1166         .cra_blocksize          = AES_BLOCK_SIZE,
1167         .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
1168         .cra_module             = THIS_MODULE,
1169         .cra_u  = {
1170                 .cipher = {
1171                         .cia_min_keysize        = AES_MIN_KEY_SIZE,
1172                         .cia_max_keysize        = AES_MAX_KEY_SIZE,
1173                         .cia_setkey             = aes_set_key,
1174                         .cia_encrypt            = __aes_encrypt,
1175                         .cia_decrypt            = __aes_decrypt
1176                 }
1177         }
1178 } };
1179
1180 static struct skcipher_alg aesni_skciphers[] = {
1181         {
1182                 .base = {
1183                         .cra_name               = "__ecb(aes)",
1184                         .cra_driver_name        = "__ecb-aes-aesni",
1185                         .cra_priority           = 400,
1186                         .cra_flags              = CRYPTO_ALG_INTERNAL,
1187                         .cra_blocksize          = AES_BLOCK_SIZE,
1188                         .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
1189                         .cra_module             = THIS_MODULE,
1190                 },
1191                 .min_keysize    = AES_MIN_KEY_SIZE,
1192                 .max_keysize    = AES_MAX_KEY_SIZE,
1193                 .setkey         = aesni_skcipher_setkey,
1194                 .encrypt        = ecb_encrypt,
1195                 .decrypt        = ecb_decrypt,
1196         }, {
1197                 .base = {
1198                         .cra_name               = "__cbc(aes)",
1199                         .cra_driver_name        = "__cbc-aes-aesni",
1200                         .cra_priority           = 400,
1201                         .cra_flags              = CRYPTO_ALG_INTERNAL,
1202                         .cra_blocksize          = AES_BLOCK_SIZE,
1203                         .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
1204                         .cra_module             = THIS_MODULE,
1205                 },
1206                 .min_keysize    = AES_MIN_KEY_SIZE,
1207                 .max_keysize    = AES_MAX_KEY_SIZE,
1208                 .ivsize         = AES_BLOCK_SIZE,
1209                 .setkey         = aesni_skcipher_setkey,
1210                 .encrypt        = cbc_encrypt,
1211                 .decrypt        = cbc_decrypt,
1212 #ifdef CONFIG_X86_64
1213         }, {
1214                 .base = {
1215                         .cra_name               = "__ctr(aes)",
1216                         .cra_driver_name        = "__ctr-aes-aesni",
1217                         .cra_priority           = 400,
1218                         .cra_flags              = CRYPTO_ALG_INTERNAL,
1219                         .cra_blocksize          = 1,
1220                         .cra_ctxsize            = CRYPTO_AES_CTX_SIZE,
1221                         .cra_module             = THIS_MODULE,
1222                 },
1223                 .min_keysize    = AES_MIN_KEY_SIZE,
1224                 .max_keysize    = AES_MAX_KEY_SIZE,
1225                 .ivsize         = AES_BLOCK_SIZE,
1226                 .chunksize      = AES_BLOCK_SIZE,
1227                 .setkey         = aesni_skcipher_setkey,
1228                 .encrypt        = ctr_crypt,
1229                 .decrypt        = ctr_crypt,
1230         }, {
1231                 .base = {
1232                         .cra_name               = "__xts(aes)",
1233                         .cra_driver_name        = "__xts-aes-aesni",
1234                         .cra_priority           = 401,
1235                         .cra_flags              = CRYPTO_ALG_INTERNAL,
1236                         .cra_blocksize          = AES_BLOCK_SIZE,
1237                         .cra_ctxsize            = XTS_AES_CTX_SIZE,
1238                         .cra_module             = THIS_MODULE,
1239                 },
1240                 .min_keysize    = 2 * AES_MIN_KEY_SIZE,
1241                 .max_keysize    = 2 * AES_MAX_KEY_SIZE,
1242                 .ivsize         = AES_BLOCK_SIZE,
1243                 .setkey         = xts_aesni_setkey,
1244                 .encrypt        = xts_encrypt,
1245                 .decrypt        = xts_decrypt,
1246 #endif
1247         }
1248 };
1249
1250 static
1251 struct simd_skcipher_alg *aesni_simd_skciphers[ARRAY_SIZE(aesni_skciphers)];
1252
1253 #ifdef CONFIG_X86_64
1254 static int generic_gcmaes_set_key(struct crypto_aead *aead, const u8 *key,
1255                                   unsigned int key_len)
1256 {
1257         struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(aead);
1258
1259         return aes_set_key_common(crypto_aead_tfm(aead),
1260                                   &ctx->aes_key_expanded, key, key_len) ?:
1261                rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
1262 }
1263
1264 static int generic_gcmaes_encrypt(struct aead_request *req)
1265 {
1266         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1267         struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
1268         void *aes_ctx = &(ctx->aes_key_expanded);
1269         u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
1270         __be32 counter = cpu_to_be32(1);
1271
1272         memcpy(iv, req->iv, 12);
1273         *((__be32 *)(iv+12)) = counter;
1274
1275         return gcmaes_encrypt(req, req->assoclen, ctx->hash_subkey, iv,
1276                               aes_ctx);
1277 }
1278
1279 static int generic_gcmaes_decrypt(struct aead_request *req)
1280 {
1281         __be32 counter = cpu_to_be32(1);
1282         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
1283         struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
1284         void *aes_ctx = &(ctx->aes_key_expanded);
1285         u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));
1286
1287         memcpy(iv, req->iv, 12);
1288         *((__be32 *)(iv+12)) = counter;
1289
1290         return gcmaes_decrypt(req, req->assoclen, ctx->hash_subkey, iv,
1291                               aes_ctx);
1292 }
1293
1294 static int generic_gcmaes_init(struct crypto_aead *aead)
1295 {
1296         struct cryptd_aead *cryptd_tfm;
1297         struct cryptd_aead **ctx = crypto_aead_ctx(aead);
1298
1299         cryptd_tfm = cryptd_alloc_aead("__driver-generic-gcm-aes-aesni",
1300                                        CRYPTO_ALG_INTERNAL,
1301                                        CRYPTO_ALG_INTERNAL);
1302         if (IS_ERR(cryptd_tfm))
1303                 return PTR_ERR(cryptd_tfm);
1304
1305         *ctx = cryptd_tfm;
1306         crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
1307
1308         return 0;
1309 }
1310
1311 static void generic_gcmaes_exit(struct crypto_aead *aead)
1312 {
1313         struct cryptd_aead **ctx = crypto_aead_ctx(aead);
1314
1315         cryptd_free_aead(*ctx);
1316 }
1317
static struct aead_alg aesni_aead_algs[] = { {
        .setkey                 = common_rfc4106_set_key,
        .setauthsize            = common_rfc4106_set_authsize,
        .encrypt                = helper_rfc4106_encrypt,
        .decrypt                = helper_rfc4106_decrypt,
        .ivsize                 = GCM_RFC4106_IV_SIZE,
        .maxauthsize            = 16,
        .base = {
                .cra_name               = "__gcm-aes-aesni",
                .cra_driver_name        = "__driver-gcm-aes-aesni",
                .cra_flags              = CRYPTO_ALG_INTERNAL,
                .cra_blocksize          = 1,
                .cra_ctxsize            = sizeof(struct aesni_rfc4106_gcm_ctx),
                .cra_alignmask          = AESNI_ALIGN - 1,
                .cra_module             = THIS_MODULE,
        },
}, {
        .init                   = rfc4106_init,
        .exit                   = rfc4106_exit,
        .setkey                 = gcmaes_wrapper_set_key,
        .setauthsize            = gcmaes_wrapper_set_authsize,
        .encrypt                = gcmaes_wrapper_encrypt,
        .decrypt                = gcmaes_wrapper_decrypt,
        .ivsize                 = GCM_RFC4106_IV_SIZE,
        .maxauthsize            = 16,
        .base = {
                .cra_name               = "rfc4106(gcm(aes))",
                .cra_driver_name        = "rfc4106-gcm-aesni",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_ASYNC,
                .cra_blocksize          = 1,
                .cra_ctxsize            = sizeof(struct cryptd_aead *),
                .cra_module             = THIS_MODULE,
        },
}, {
        .setkey                 = generic_gcmaes_set_key,
        .setauthsize            = generic_gcmaes_set_authsize,
        .encrypt                = generic_gcmaes_encrypt,
        .decrypt                = generic_gcmaes_decrypt,
        .ivsize                 = GCM_AES_IV_SIZE,
        .maxauthsize            = 16,
        .base = {
                .cra_name               = "__generic-gcm-aes-aesni",
                .cra_driver_name        = "__driver-generic-gcm-aes-aesni",
                .cra_priority           = 0,
                .cra_flags              = CRYPTO_ALG_INTERNAL,
                .cra_blocksize          = 1,
                .cra_ctxsize            = sizeof(struct generic_gcmaes_ctx),
                .cra_alignmask          = AESNI_ALIGN - 1,
                .cra_module             = THIS_MODULE,
        },
}, {
        .init                   = generic_gcmaes_init,
        .exit                   = generic_gcmaes_exit,
        .setkey                 = gcmaes_wrapper_set_key,
        .setauthsize            = gcmaes_wrapper_set_authsize,
        .encrypt                = gcmaes_wrapper_encrypt,
        .decrypt                = gcmaes_wrapper_decrypt,
        .ivsize                 = GCM_AES_IV_SIZE,
        .maxauthsize            = 16,
        .base = {
                .cra_name               = "gcm(aes)",
                .cra_driver_name        = "generic-gcm-aesni",
                .cra_priority           = 400,
                .cra_flags              = CRYPTO_ALG_ASYNC,
                .cra_blocksize          = 1,
                .cra_ctxsize            = sizeof(struct cryptd_aead *),
                .cra_module             = THIS_MODULE,
        },
} };
#else
static struct aead_alg aesni_aead_algs[0];
#endif


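/* Auto-load this module on CPUs that advertise the AES feature flag. */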
static const struct x86_cpu_id aesni_cpu_id[] = {
        X86_FEATURE_MATCH(X86_FEATURE_AES),
        {}
};
MODULE_DEVICE_TABLE(x86cpu, aesni_cpu_id);

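/*
 * Free the SIMD skcipher wrappers created in aesni_init(); the loop stops
 * at the first unset slot so a partially populated array is handled too.
 */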
static void aesni_free_simds(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers) &&
                    aesni_simd_skciphers[i]; i++)
                simd_skcipher_free(aesni_simd_skciphers[i]);
}

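/*
 * Module init: select the best available GCM implementation (AVX2, then
 * AVX, then SSE) and the by8 AVX CTR variant where possible, then register
 * the cipher, skcipher and AEAD algorithms and create SIMD wrappers for
 * the internal skciphers.
 */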
static int __init aesni_init(void)
{
        struct simd_skcipher_alg *simd;
        const char *basename;
        const char *algname;
        const char *drvname;
        int err;
        int i;

        if (!x86_match_cpu(aesni_cpu_id))
                return -ENODEV;
#ifdef CONFIG_X86_64
#ifdef CONFIG_AS_AVX2
        if (boot_cpu_has(X86_FEATURE_AVX2)) {
                pr_info("AVX2 version of gcm_enc/dec engaged.\n");
                aesni_gcm_enc_tfm = aesni_gcm_enc_avx2;
                aesni_gcm_dec_tfm = aesni_gcm_dec_avx2;
        } else
#endif
#ifdef CONFIG_AS_AVX
        if (boot_cpu_has(X86_FEATURE_AVX)) {
                pr_info("AVX version of gcm_enc/dec engaged.\n");
                aesni_gcm_enc_tfm = aesni_gcm_enc_avx;
                aesni_gcm_dec_tfm = aesni_gcm_dec_avx;
        } else
#endif
        {
                pr_info("SSE version of gcm_enc/dec engaged.\n");
                aesni_gcm_enc_tfm = aesni_gcm_enc;
                aesni_gcm_dec_tfm = aesni_gcm_dec;
        }
        aesni_ctr_enc_tfm = aesni_ctr_enc;
#ifdef CONFIG_AS_AVX
        if (boot_cpu_has(X86_FEATURE_AVX)) {
                /* optimize performance of ctr mode encryption transform */
                aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
                pr_info("AES CTR mode by8 optimization enabled\n");
        }
#endif
#endif

        err = crypto_register_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
        if (err)
                return err;

        err = crypto_register_skciphers(aesni_skciphers,
                                        ARRAY_SIZE(aesni_skciphers));
        if (err)
                goto unregister_algs;

        err = crypto_register_aeads(aesni_aead_algs,
                                    ARRAY_SIZE(aesni_aead_algs));
        if (err)
                goto unregister_skciphers;

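        /*
         * The internal skciphers are named "__xxx"; skip the "__" prefix to
         * form the user-visible names and create SIMD wrappers that fall
         * back to cryptd when SIMD registers cannot be used.
         */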
        for (i = 0; i < ARRAY_SIZE(aesni_skciphers); i++) {
                algname = aesni_skciphers[i].base.cra_name + 2;
                drvname = aesni_skciphers[i].base.cra_driver_name + 2;
                basename = aesni_skciphers[i].base.cra_driver_name;
                simd = simd_skcipher_create_compat(algname, drvname, basename);
                err = PTR_ERR(simd);
                if (IS_ERR(simd))
                        goto unregister_simds;

                aesni_simd_skciphers[i] = simd;
        }

        return 0;

unregister_simds:
        aesni_free_simds();
        crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
unregister_skciphers:
        crypto_unregister_skciphers(aesni_skciphers,
                                    ARRAY_SIZE(aesni_skciphers));
unregister_algs:
        crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
        return err;
}

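/* Tear everything down in the reverse order of registration. */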
static void __exit aesni_exit(void)
{
        aesni_free_simds();
        crypto_unregister_aeads(aesni_aead_algs, ARRAY_SIZE(aesni_aead_algs));
        crypto_unregister_skciphers(aesni_skciphers,
                                    ARRAY_SIZE(aesni_skciphers));
        crypto_unregister_algs(aesni_algs, ARRAY_SIZE(aesni_algs));
}

late_initcall(aesni_init);
module_exit(aesni_exit);

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("aes");