/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *      Manoj Malviya ([email protected])
 *      Atul Gupta ([email protected])
 *      Jitendra Lulla ([email protected])
 *      Yeshaswi M R Gowda ([email protected])
 *      Harsh Jain ([email protected])
 */

#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "t4fw_api.h"
#include "t4_msg.h"
#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

#define IV AES_BLOCK_SIZE

static unsigned int sgl_ent_len[] = {
        0, 0, 16, 24, 40, 48, 64, 72, 88,
        96, 112, 120, 136, 144, 160, 168, 184,
        192, 208, 216, 232, 240, 256, 264, 280,
        288, 304, 312, 328, 336, 352, 360, 376
};

static unsigned int dsgl_ent_len[] = {
        0, 32, 32, 48, 48, 64, 64, 80, 80,
        112, 112, 128, 128, 144, 144, 160, 160,
        192, 192, 208, 208, 224, 224, 240, 240,
        272, 272, 288, 288, 304, 304, 320, 320
};
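
/*
 * Note (assumption, inferred from chcr_sg_ent_in_wr() below): these
 * tables give the work-request space, in bytes, taken by a source ULPTX
 * SGL (sgl_ent_len) or a destination PHYS DSGL (dsgl_ent_len) holding
 * the indexed number of entries, so the send path can budget space with
 * "space > sgl_ent_len[n] + dsgl_ent_len[m]" instead of recomputing the
 * SGL geometry on every iteration.
 */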

static u32 round_constant[11] = {
        0x01000000, 0x02000000, 0x04000000, 0x08000000,
        0x10000000, 0x20000000, 0x40000000, 0x80000000,
        0x1B000000, 0x36000000, 0x6C000000
};

static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
                                   unsigned char *input, int err);

static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
        return ctx->crypto_ctx->aeadctx;
}

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
        return ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
        return ctx->crypto_ctx->hmacctx;
}

static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
{
        return gctx->ctx->gcm;
}

static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
{
        return gctx->ctx->authenc;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
        return ctx->dev->u_ctx;
}

static inline int is_ofld_imm(const struct sk_buff *skb)
{
        return (skb->len <= SGE_MAX_WR_LEN);
}

static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
                         unsigned int entlen,
                         unsigned int skip)
{
        int nents = 0;
        unsigned int less;
        unsigned int skip_len = 0;

        while (sg && skip) {
                if (sg_dma_len(sg) <= skip) {
                        skip -= sg_dma_len(sg);
                        skip_len = 0;
                        sg = sg_next(sg);
                } else {
                        skip_len = skip;
                        skip = 0;
                }
        }

        while (sg && reqlen) {
                less = min(reqlen, sg_dma_len(sg) - skip_len);
                nents += DIV_ROUND_UP(less, entlen);
                reqlen -= less;
                skip_len = 0;
                sg = sg_next(sg);
        }
        return nents;
}
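
/*
 * Illustrative example (not driver code): sg_nents_xlen() counts hardware
 * SGL entries rather than scatterlist entries, since one DMA segment may
 * exceed the per-entry limit.  With entlen = 2048, a single 5000-byte
 * segment contributes DIV_ROUND_UP(5000, 2048) = 3 entries, and a leading
 * `skip` only shortens the first segment it lands in.
 */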

static inline void chcr_handle_ahash_resp(struct ahash_request *req,
                                          unsigned char *input,
                                          int err)
{
        struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
        int digestsize, updated_digestsize;
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));

        if (input == NULL)
                goto out;
        digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
        if (reqctx->is_sg_map)
                chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
        if (reqctx->dma_addr)
                dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->dma_addr,
                                 reqctx->dma_len, DMA_TO_DEVICE);
        reqctx->dma_addr = 0;
        updated_digestsize = digestsize;
        if (digestsize == SHA224_DIGEST_SIZE)
                updated_digestsize = SHA256_DIGEST_SIZE;
        else if (digestsize == SHA384_DIGEST_SIZE)
                updated_digestsize = SHA512_DIGEST_SIZE;
        if (reqctx->result == 1) {
                reqctx->result = 0;
                memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
                       digestsize);
        } else {
                memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
                       updated_digestsize);
        }
out:
        req->base.complete(&req->base, err);
}

static inline int get_aead_subtype(struct crypto_aead *aead)
{
        struct aead_alg *alg = crypto_aead_alg(aead);
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(alg, struct chcr_alg_template, alg.aead);
        return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
        u8 temp[SHA512_DIGEST_SIZE];
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        int authsize = crypto_aead_authsize(tfm);
        struct cpl_fw6_pld *fw6_pld;
        int cmp = 0;

        fw6_pld = (struct cpl_fw6_pld *)input;
        if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
            (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
                cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
        } else {
                sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
                                   authsize, req->assoclen +
                                   req->cryptlen - authsize);
                cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
        }
        if (cmp)
                *err = -EBADMSG;
        else
                *err = 0;
}
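
/*
 * Note (inferred from the code above): for GCM-type requests both tag
 * values arrive with the completion, so the driver compares
 * fw6_pld->data[2] against the bytes immediately following the CPL; for
 * other AEAD modes the reference tag is copied from the tail of req->src
 * first.  crypto_memneq() is used rather than memcmp() so the comparison
 * runs in constant time and does not leak how many tag bytes matched.
 */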

static inline void chcr_handle_aead_resp(struct aead_request *req,
                                         unsigned char *input,
                                         int err)
{
        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));

        chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
        if (reqctx->b0_dma)
                dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma,
                                 reqctx->b0_len, DMA_BIDIRECTIONAL);
        if (reqctx->verify == VERIFY_SW) {
                chcr_verify_tag(req, input, &err);
                reqctx->verify = VERIFY_HW;
        }
        req->base.complete(&req->base, err);
}

/*
 *      chcr_handle_resp - Unmap the DMA buffers associated with the request
 *      @req: crypto request
 */
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
                     int err)
{
        struct crypto_tfm *tfm = req->tfm;
        struct chcr_context *ctx = crypto_tfm_ctx(tfm);
        struct adapter *adap = padap(ctx->dev);

        switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_AEAD:
                chcr_handle_aead_resp(aead_request_cast(req), input, err);
                break;

        case CRYPTO_ALG_TYPE_ABLKCIPHER:
                err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
                                              input, err);
                break;

        case CRYPTO_ALG_TYPE_AHASH:
                chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
        }
        atomic_inc(&adap->chcr_stats.complete);
        return err;
}

static void get_aes_decrypt_key(unsigned char *dec_key,
                                const unsigned char *key,
                                unsigned int keylength)
{
        u32 temp;
        u32 w_ring[MAX_NK];
        int i, j, k;
        u8  nr, nk;

        switch (keylength) {
        case AES_KEYLENGTH_128BIT:
                nk = KEYLENGTH_4BYTES;
                nr = NUMBER_OF_ROUNDS_10;
                break;
        case AES_KEYLENGTH_192BIT:
                nk = KEYLENGTH_6BYTES;
                nr = NUMBER_OF_ROUNDS_12;
                break;
        case AES_KEYLENGTH_256BIT:
                nk = KEYLENGTH_8BYTES;
                nr = NUMBER_OF_ROUNDS_14;
                break;
        default:
                return;
        }
        for (i = 0; i < nk; i++)
                w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);

        i = 0;
        temp = w_ring[nk - 1];
        while (i + nk < (nr + 1) * 4) {
                if (!(i % nk)) {
                        /* RotWord(temp) */
                        temp = (temp << 8) | (temp >> 24);
                        temp = aes_ks_subword(temp);
                        temp ^= round_constant[i / nk];
                } else if (nk == 8 && (i % 4 == 0)) {
                        temp = aes_ks_subword(temp);
                }
                w_ring[i % nk] ^= temp;
                temp = w_ring[i % nk];
                i++;
        }
        i--;
        for (k = 0, j = i % nk; k < nk; k++) {
                *((u32 *)dec_key + k) = htonl(w_ring[j]);
                j--;
                if (j < 0)
                        j += nk;
        }
}
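
/*
 * Worked example (informational): the loop above is the FIPS-197 key
 * schedule computed in an nk-word ring.  For AES-128 (nk = 4, nr = 10) it
 * generates 4 * (nr + 1) = 44 expanded words and, on exit, the ring holds
 * the last four, i.e. the final round key.  The copy-out loop then emits
 * the last nk words newest-first, producing the "reverse round key" that
 * generate_copy_rrkey() later copies into the hardware key context for
 * decryption.
 */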

static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
        struct crypto_shash *base_hash = ERR_PTR(-EINVAL);

        switch (ds) {
        case SHA1_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha1", 0, 0);
                break;
        case SHA224_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha224", 0, 0);
                break;
        case SHA256_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha256", 0, 0);
                break;
        case SHA384_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha384", 0, 0);
                break;
        case SHA512_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha512", 0, 0);
                break;
        }

        return base_hash;
}

static int chcr_compute_partial_hash(struct shash_desc *desc,
                                     char *iopad, char *result_hash,
                                     int digest_size)
{
        struct sha1_state sha1_st;
        struct sha256_state sha256_st;
        struct sha512_state sha512_st;
        int error;

        if (digest_size == SHA1_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha1_st);
                memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
        } else if (digest_size == SHA224_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha256_st);
                memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
        } else if (digest_size == SHA256_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha256_st);
                memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
        } else if (digest_size == SHA384_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha512_st);
                memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
        } else if (digest_size == SHA512_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha512_st);
                memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
        } else {
                error = -EINVAL;
                pr_err("Unknown digest size %d\n", digest_size);
        }
        return error;
}

static void chcr_change_order(char *buf, int ds)
{
        int i;

        if (ds == SHA512_DIGEST_SIZE) {
                for (i = 0; i < (ds / sizeof(u64)); i++)
                        *((__be64 *)buf + i) =
                                cpu_to_be64(*((u64 *)buf + i));
        } else {
                for (i = 0; i < (ds / sizeof(u32)); i++)
                        *((__be32 *)buf + i) =
                                cpu_to_be32(*((u32 *)buf + i));
        }
}

static inline int is_hmac(struct crypto_tfm *tfm)
{
        struct crypto_alg *alg = tfm->__crt_alg;
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
                             alg.hash);

        if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
                return 1;
        return 0;
}

static inline void dsgl_walk_init(struct dsgl_walk *walk,
                                  struct cpl_rx_phys_dsgl *dsgl)
{
        walk->dsgl = dsgl;
        walk->nents = 0;
        walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}

static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
{
        struct cpl_rx_phys_dsgl *phys_cpl;

        phys_cpl = walk->dsgl;

        phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
                                    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
        phys_cpl->pcirlxorder_to_noofsgentr =
                htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
                      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
                      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
                      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
                      CPL_RX_PHYS_DSGL_DCAID_V(0) |
                      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
        phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
        phys_cpl->rss_hdr_int.qid = htons(qid);
        phys_cpl->rss_hdr_int.hash_val = 0;
}

static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
                                      size_t size,
                                      dma_addr_t *addr)
{
        int j;

        if (!size)
                return;
        j = walk->nents;
        walk->to->len[j % 8] = htons(size);
        walk->to->addr[j % 8] = cpu_to_be64(*addr);
        j++;
        if ((j % 8) == 0)
                walk->to++;
        walk->nents = j;
}

static void dsgl_walk_add_sg(struct dsgl_walk *walk,
                             struct scatterlist *sg,
                             unsigned int slen,
                             unsigned int skip)
{
        int skip_len = 0;
        unsigned int left_size = slen, len = 0;
        unsigned int j = walk->nents;
        int offset, ent_len;

        if (!slen)
                return;
        while (sg && skip) {
                if (sg_dma_len(sg) <= skip) {
                        skip -= sg_dma_len(sg);
                        skip_len = 0;
                        sg = sg_next(sg);
                } else {
                        skip_len = skip;
                        skip = 0;
                }
        }

        while (left_size && sg) {
                len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
                offset = 0;
                while (len) {
                        ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
                        walk->to->len[j % 8] = htons(ent_len);
                        walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
                                                            offset + skip_len);
                        offset += ent_len;
                        len -= ent_len;
                        j++;
                        if ((j % 8) == 0)
                                walk->to++;
                }
                walk->last_sg = sg;
                walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
                                          skip_len) + skip_len;
                left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
                skip_len = 0;
                sg = sg_next(sg);
        }
        walk->nents = j;
}

static inline void ulptx_walk_init(struct ulptx_walk *walk,
                                   struct ulptx_sgl *ulp)
{
        walk->sgl = ulp;
        walk->nents = 0;
        walk->pair_idx = 0;
        walk->pair = ulp->sge;
        walk->last_sg = NULL;
        walk->last_sg_len = 0;
}

static inline void ulptx_walk_end(struct ulptx_walk *walk)
{
        walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
                                    ULPTX_NSGE_V(walk->nents));
}

static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
                                       size_t size,
                                       dma_addr_t *addr)
{
        if (!size)
                return;

        if (walk->nents == 0) {
                walk->sgl->len0 = cpu_to_be32(size);
                walk->sgl->addr0 = cpu_to_be64(*addr);
        } else {
                walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr);
                walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
                walk->pair_idx = !walk->pair_idx;
                if (!walk->pair_idx)
                        walk->pair++;
        }
        walk->nents++;
}

static void ulptx_walk_add_sg(struct ulptx_walk *walk,
                              struct scatterlist *sg,
                              unsigned int len,
                              unsigned int skip)
{
        int small;
        int skip_len = 0;
        unsigned int sgmin;

        if (!len)
                return;

        while (sg && skip) {
                if (sg_dma_len(sg) <= skip) {
                        skip -= sg_dma_len(sg);
                        skip_len = 0;
                        sg = sg_next(sg);
                } else {
                        skip_len = skip;
                        skip = 0;
                }
        }
        WARN(!sg, "SG should not be null here\n");
        if (sg && (walk->nents == 0)) {
                small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
                sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
                walk->sgl->len0 = cpu_to_be32(sgmin);
                walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
                walk->nents++;
                len -= sgmin;
                walk->last_sg = sg;
                walk->last_sg_len = sgmin + skip_len;
                skip_len += sgmin;
                if (sg_dma_len(sg) == skip_len) {
                        sg = sg_next(sg);
                        skip_len = 0;
                }
        }

        while (sg && len) {
                small = min(sg_dma_len(sg) - skip_len, len);
                sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
                walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
                walk->pair->addr[walk->pair_idx] =
                        cpu_to_be64(sg_dma_address(sg) + skip_len);
                walk->pair_idx = !walk->pair_idx;
                walk->nents++;
                if (!walk->pair_idx)
                        walk->pair++;
                len -= sgmin;
                skip_len += sgmin;
                walk->last_sg = sg;
                walk->last_sg_len = skip_len;
                if (sg_dma_len(sg) == skip_len) {
                        sg = sg_next(sg);
                        skip_len = 0;
                }
        }
}
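
/*
 * Layout note (inferred from ulptx_walk_add_page() above): a ULPTX SGL
 * carries its first buffer inline in the header (len0/addr0); every
 * subsequent buffer lands in an ulptx_sge_pair, with pair_idx toggling
 * between the two slots of the current pair and the pair pointer
 * advancing once both slots are filled.  ulptx_walk_end() then patches
 * the final entry count into cmd_nsge.
 */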

static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
{
        struct crypto_alg *alg = tfm->__crt_alg;
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(alg, struct chcr_alg_template, alg.crypto);

        return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
        struct adapter *adap = netdev2adap(dev);
        struct sge_uld_txq_info *txq_info =
                adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
        struct sge_uld_txq *txq;
        int ret = 0;

        local_bh_disable();
        txq = &txq_info->uldtxq[idx];
        spin_lock(&txq->sendq.lock);
        if (txq->full)
                ret = -1;
        spin_unlock(&txq->sendq.lock);
        local_bh_enable();
        return ret;
}

static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
                               struct _key_ctx *key_ctx)
{
        if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
                memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
        } else {
                memcpy(key_ctx->key,
                       ablkctx->key + (ablkctx->enckey_len >> 1),
                       ablkctx->enckey_len >> 1);
                memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
                       ablkctx->rrkey, ablkctx->enckey_len >> 1);
        }
        return 0;
}

static int chcr_sg_ent_in_wr(struct scatterlist *src,
                             struct scatterlist *dst,
                             unsigned int minsg,
                             unsigned int space,
                             unsigned int srcskip,
                             unsigned int dstskip)
{
        int srclen = 0, dstlen = 0;
        int srcsg = minsg, dstsg = minsg;
        int offset = 0, less;

        if (sg_dma_len(src) == srcskip) {
                src = sg_next(src);
                srcskip = 0;
        }

        if (sg_dma_len(dst) == dstskip) {
                dst = sg_next(dst);
                dstskip = 0;
        }

        while (src && dst &&
               space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
                srclen += (sg_dma_len(src) - srcskip);
                srcsg++;
                offset = 0;
                while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
                       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
                        if (srclen <= dstlen)
                                break;
                        less = min_t(unsigned int, sg_dma_len(dst) - offset -
                                     dstskip, CHCR_DST_SG_SIZE);
                        dstlen += less;
                        offset += less;
                        if (offset == sg_dma_len(dst)) {
                                dst = sg_next(dst);
                                offset = 0;
                        }
                        dstsg++;
                        dstskip = 0;
                }
                src = sg_next(src);
                srcskip = 0;
        }
        return min(srclen, dstlen);
}
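
/*
 * Summary (informational): chcr_sg_ent_in_wr() answers "how many payload
 * bytes fit in one work request?" by growing the source and destination
 * entry counts in lockstep and charging each candidate entry against
 * `space` through the sgl_ent_len[]/dsgl_ent_len[] tables above.  Callers
 * treat the returned min(srclen, dstlen) as an upper bound and round a
 * partial transfer down to a 16-byte multiple (ROUND_16) before building
 * the WR.
 */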

static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
                                u32 flags,
                                struct scatterlist *src,
                                struct scatterlist *dst,
                                unsigned int nbytes,
                                u8 *iv,
                                unsigned short op_type)
{
        int err;

        SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
        skcipher_request_set_tfm(subreq, cipher);
        skcipher_request_set_callback(subreq, flags, NULL, NULL);
        skcipher_request_set_crypt(subreq, src, dst,
                                   nbytes, iv);

        err = op_type ? crypto_skcipher_decrypt(subreq) :
                crypto_skcipher_encrypt(subreq);
        skcipher_request_zero(subreq);

        return err;
}
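
/*
 * Usage note (inferred): this fallback runs the request synchronously on
 * the software skcipher allocated in chcr_cra_init(), e.g. when no bytes
 * at all would fit in a hardware work request.  The on-stack request is
 * zeroed afterwards (skcipher_request_zero) so no key- or IV-derived
 * state lingers on the stack.
 */
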
static inline void create_wreq(struct chcr_context *ctx,
                               struct chcr_wr *chcr_req,
                               struct crypto_async_request *req,
                               unsigned int imm,
                               int hash_sz,
                               unsigned int len16,
                               unsigned int sc_len,
                               unsigned int lcb)
{
        struct uld_ctx *u_ctx = ULD_CTX(ctx);
        int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];

        chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
        chcr_req->wreq.pld_size_hash_size =
                htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
        chcr_req->wreq.len16_pkd =
                htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
        chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
        chcr_req->wreq.rx_chid_to_rx_q_id =
                FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
                                !!lcb, ctx->tx_qidx);

        chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
                                                       qid);
        chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
                                     ((sizeof(chcr_req->wreq)) >> 4)));

        chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
        chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
                                           sizeof(chcr_req->key_ctx) + sc_len);
}

/**
 *      create_cipher_wr - form the WR for cipher operations
 *      @wrparam: cipher work request parameters: the ablkcipher request,
 *                the number of bytes to process, and the ingress qid where
 *                the response of this WR should be received.
 */
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct sk_buff *skb = NULL;
        struct chcr_wr *chcr_req;
        struct cpl_rx_phys_dsgl *phys_cpl;
        struct ulptx_sgl *ulptx;
        struct chcr_blkcipher_req_ctx *reqctx =
                ablkcipher_request_ctx(wrparam->req);
        unsigned int temp = 0, transhdr_len, dst_size;
        int error;
        int nents;
        unsigned int kctx_len;
        gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                        GFP_KERNEL : GFP_ATOMIC;
        struct adapter *adap = padap(c_ctx(tfm)->dev);

        nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
                              reqctx->dst_ofst);
        dst_size = get_space_for_phys_dsgl(nents + 1);
        kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
        transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
        nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
                              CHCR_SRC_SG_SIZE, reqctx->src_ofst);
        temp = reqctx->imm ? (DIV_ROUND_UP((IV + wrparam->req->nbytes), 16)
                              * 16) : (sgl_len(nents + MIN_CIPHER_SG) * 8);
        transhdr_len += temp;
        transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
        skb = alloc_skb(SGE_MAX_WR_LEN, flags);
        if (!skb) {
                error = -ENOMEM;
                goto err;
        }
        chcr_req = __skb_put_zero(skb, transhdr_len);
        chcr_req->sec_cpl.op_ivinsrtofst =
                FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1);

        chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
        chcr_req->sec_cpl.aadstart_cipherstop_hi =
                        FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);

        chcr_req->sec_cpl.cipherstop_lo_authinsert =
                        FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
        chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
                                                         ablkctx->ciph_mode,
                                                         0, 0, IV >> 1);
        chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
                                                          0, 0, dst_size);

        chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
        if ((reqctx->op == CHCR_DECRYPT_OP) &&
            (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
               CRYPTO_ALG_SUB_TYPE_CTR)) &&
            (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
               CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
                generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
        } else {
                if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
                    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
                        memcpy(chcr_req->key_ctx.key, ablkctx->key,
                               ablkctx->enckey_len);
                } else {
                        memcpy(chcr_req->key_ctx.key, ablkctx->key +
                               (ablkctx->enckey_len >> 1),
                               ablkctx->enckey_len >> 1);
                        memcpy(chcr_req->key_ctx.key +
                               (ablkctx->enckey_len >> 1),
                               ablkctx->key,
                               ablkctx->enckey_len >> 1);
                }
        }
        phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
        ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
        chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
        chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);

        atomic_inc(&adap->chcr_stats.cipher_rqst);
        temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len +
               (reqctx->imm ? (IV + wrparam->bytes) : 0);
        create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
                    transhdr_len, temp,
                    ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
        reqctx->skb = skb;
        return skb;
err:
        return ERR_PTR(error);
}

static inline int chcr_keyctx_ck_size(unsigned int keylen)
{
        int ck_size = 0;

        if (keylen == AES_KEYSIZE_128)
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
        else if (keylen == AES_KEYSIZE_192)
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
        else if (keylen == AES_KEYSIZE_256)
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
        else
                ck_size = 0;

        return ck_size;
}

static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
                                       const u8 *key,
                                       unsigned int keylen)
{
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        int err = 0;

        crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags &
                                  CRYPTO_TFM_REQ_MASK);
        err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
        tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
        tfm->crt_flags |=
                crypto_skcipher_get_flags(ablkctx->sw_cipher) &
                CRYPTO_TFM_RES_MASK;
        return err;
}

static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
                               const u8 *key,
                               unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned int ck_size, context_size;
        u16 alignment = 0;
        int err;

        err = chcr_cipher_fallback_setkey(cipher, key, keylen);
        if (err)
                goto badkey_err;

        ck_size = chcr_keyctx_ck_size(keylen);
        alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
        memcpy(ablkctx->key, key, keylen);
        ablkctx->enckey_len = keylen;
        get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;

        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
                                                0, 0, context_size);
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
        return 0;
badkey_err:
        crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
        ablkctx->enckey_len = 0;

        return err;
}

static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
                               const u8 *key,
                               unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned int ck_size, context_size;
        u16 alignment = 0;
        int err;

        err = chcr_cipher_fallback_setkey(cipher, key, keylen);
        if (err)
                goto badkey_err;
        ck_size = chcr_keyctx_ck_size(keylen);
        alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
        memcpy(ablkctx->key, key, keylen);
        ablkctx->enckey_len = keylen;
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;

        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
                                                0, 0, context_size);
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

        return 0;
badkey_err:
        crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
        ablkctx->enckey_len = 0;

        return err;
}

static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
                                   const u8 *key,
                                   unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned int ck_size, context_size;
        u16 alignment = 0;
        int err;

        if (keylen < CTR_RFC3686_NONCE_SIZE)
                return -EINVAL;
        memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
               CTR_RFC3686_NONCE_SIZE);

        keylen -= CTR_RFC3686_NONCE_SIZE;
        err = chcr_cipher_fallback_setkey(cipher, key, keylen);
        if (err)
                goto badkey_err;

        ck_size = chcr_keyctx_ck_size(keylen);
        alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
        memcpy(ablkctx->key, key, keylen);
        ablkctx->enckey_len = keylen;
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;

        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
                                                0, 0, context_size);
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

        return 0;
badkey_err:
        crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
        ablkctx->enckey_len = 0;

        return err;
}

static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
{
        unsigned int size = AES_BLOCK_SIZE;
        __be32 *b = (__be32 *)(dstiv + size);
        u32 c, prev;

        memcpy(dstiv, srciv, AES_BLOCK_SIZE);
        for (; size >= 4; size -= 4) {
                prev = be32_to_cpu(*--b);
                c = prev + add;
                *b = cpu_to_be32(c);
                if (prev < c)
                        break;
                add = 1;
        }
}
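
/*
 * Worked example (informational): ctr_add_iv() adds `add` to the 128-bit
 * big-endian counter, least-significant 32-bit word first, carrying a 1
 * into the next word only when the addition wraps (prev >= c).  E.g. for
 * srciv ending in 00 00 00 00 ff ff ff ff and add = 2:
 *
 *      low word:  0xffffffff + 2 = 0x00000001  (wrapped, carry)
 *      next word: 0x00000000 + 1 = 0x00000001  (no wrap, stop)
 *
 * giving a counter ending in 00 00 00 01 00 00 00 01.
 */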

static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
{
        __be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
        u64 c;
        u32 temp = be32_to_cpu(*--b);

        temp = ~temp;
        /* number of blocks that can be processed without counter overflow */
        c = (u64)temp + 1;
        if ((bytes / AES_BLOCK_SIZE) > c)
                bytes = c * AES_BLOCK_SIZE;
        return bytes;
}
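
/*
 * Example (informational): ~ctr + 1 equals 2^32 - ctr, i.e. the number of
 * AES blocks left before the low 32-bit counter word wraps.  If the last
 * IV word is 0xfffffffe, only two blocks (32 bytes) may be issued in this
 * work request; the remainder is sent after the IV has been recomputed by
 * chcr_update_cipher_iv() on completion.
 */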

static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
                             u32 isfinal)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        struct crypto_cipher *cipher;
        int ret, i;
        u8 *key;
        unsigned int keylen;
        int round = reqctx->last_req_len / AES_BLOCK_SIZE;
        int round8 = round / 8;

        cipher = ablkctx->aes_generic;
        memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);

        keylen = ablkctx->enckey_len / 2;
        key = ablkctx->key + keylen;
        ret = crypto_cipher_setkey(cipher, key, keylen);
        if (ret)
                goto out;
        /* H/W sends the encrypted IV in dsgl when AADIVDROP bit is 0 */
        for (i = 0; i < round8; i++)
                gf128mul_x8_ble((le128 *)iv, (le128 *)iv);

        for (i = 0; i < (round % 8); i++)
                gf128mul_x_ble((le128 *)iv, (le128 *)iv);

        if (!isfinal)
                crypto_cipher_decrypt_one(cipher, iv, iv);
out:
        return ret;
}
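
/*
 * Background (informational): in XTS the tweak for block i is
 * E_K2(IV) * x^i in GF(2^128), so moving the tweak past `round` blocks is
 * a multiplication by x^round; gf128mul_x8_ble() performs eight doublings
 * per call and gf128mul_x_ble() handles the remainder.  The final
 * crypto_cipher_decrypt_one() with the second key half appears to map the
 * hardware-returned encrypted IV back to the plain tweak expected as
 * input for the next chunk (skipped when this is the final chunk).
 */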

static int chcr_update_cipher_iv(struct ablkcipher_request *req,
                                 struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
        int ret = 0;

        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
                ctr_add_iv(iv, req->info, (reqctx->processed /
                           AES_BLOCK_SIZE));
        else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
                *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
                        CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
                                                AES_BLOCK_SIZE) + 1);
        else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
                ret = chcr_update_tweak(req, iv, 0);
        else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
                if (reqctx->op)
                        sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
                                           16,
                                           reqctx->processed - AES_BLOCK_SIZE);
                else
                        memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
        }

        return ret;
}

/* We need a separate function for the final IV because in RFC 3686 the
 * initial counter value is 1 and the IV buffer is only 8 bytes, which
 * stays constant across subsequent update requests.
 */
static int chcr_final_cipher_iv(struct ablkcipher_request *req,
                                struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
        int ret = 0;

        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
                ctr_add_iv(iv, req->info, (reqctx->processed /
                           AES_BLOCK_SIZE));
        else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
                ret = chcr_update_tweak(req, iv, 1);
        else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
                if (reqctx->op)
                        sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
                                           16,
                                           reqctx->processed - AES_BLOCK_SIZE);
                else
                        memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
        }
        return ret;
}

static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
                                   unsigned char *input, int err)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct sk_buff *skb;
        struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        struct cipher_wr_param wrparam;
        int bytes;

        if (err)
                goto unmap;
        if (req->nbytes == reqctx->processed) {
                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                      req);
                err = chcr_final_cipher_iv(req, fw6_pld, req->info);
                goto complete;
        }

        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
                                            c_ctx(tfm)->tx_qidx))) {
                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
                        err = -EBUSY;
                        goto unmap;
                }
        }
        if (!reqctx->imm) {
                bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1,
                                          SPACE_LEFT(ablkctx->enckey_len),
                                          reqctx->src_ofst, reqctx->dst_ofst);
                if ((bytes + reqctx->processed) >= req->nbytes)
                        bytes = req->nbytes - reqctx->processed;
                else
                        bytes = ROUND_16(bytes);
        } else {
                /* CTR mode counter overflow */
                bytes = req->nbytes - reqctx->processed;
        }
        dma_sync_single_for_cpu(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
        err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
        dma_sync_single_for_device(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                   reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
        if (err)
                goto unmap;

        if (unlikely(bytes == 0)) {
                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                      req);
                err = chcr_cipher_fallback(ablkctx->sw_cipher,
                                           req->base.flags,
                                           req->src,
                                           req->dst,
                                           req->nbytes,
                                           req->info,
                                           reqctx->op);
                goto complete;
        }

        if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
            CRYPTO_ALG_SUB_TYPE_CTR)
                bytes = adjust_ctr_overflow(reqctx->iv, bytes);
        wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
        wrparam.req = req;
        wrparam.bytes = bytes;
        skb = create_cipher_wr(&wrparam);
        if (IS_ERR(skb)) {
                pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
                err = PTR_ERR(skb);
                goto unmap;
        }
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
        chcr_send_wr(skb);
        reqctx->last_req_len = bytes;
        reqctx->processed += bytes;
        return 0;
unmap:
        chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
        req->base.complete(&req->base, err);
        return err;
}

static int process_cipher(struct ablkcipher_request *req,
                          unsigned short qid,
                          struct sk_buff **skb,
                          unsigned short op_type)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct cipher_wr_param wrparam;
        int bytes, err = -EINVAL;

        reqctx->processed = 0;
        if (!req->info)
                goto error;
        if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
            (req->nbytes == 0) ||
            (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
                pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
                       ablkctx->enckey_len, req->nbytes, ivsize);
                goto error;
        }
        chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
        if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
                                             AES_MIN_KEY_SIZE +
                                             sizeof(struct cpl_rx_phys_dsgl) +
                                             /* Min dsgl size */
                                             32))) {
                /* Can be sent as Imm */
                unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;

                dnents = sg_nents_xlen(req->dst, req->nbytes,
                                       CHCR_DST_SG_SIZE, 0);
                dnents += 1; // IV
                phys_dsgl = get_space_for_phys_dsgl(dnents);
                kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
                transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
                reqctx->imm = (transhdr_len + IV + req->nbytes) <=
                        SGE_MAX_WR_LEN;
                bytes = IV + req->nbytes;
        } else {
                reqctx->imm = 0;
        }

        if (!reqctx->imm) {
                bytes = chcr_sg_ent_in_wr(req->src, req->dst,
                                          MIN_CIPHER_SG,
                                          SPACE_LEFT(ablkctx->enckey_len),
                                          0, 0);
                if ((bytes + reqctx->processed) >= req->nbytes)
                        bytes = req->nbytes - reqctx->processed;
                else
                        bytes = ROUND_16(bytes);
        } else {
                bytes = req->nbytes;
        }
        if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
            CRYPTO_ALG_SUB_TYPE_CTR) {
                bytes = adjust_ctr_overflow(req->info, bytes);
        }
        if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
            CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
                memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
                memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
                       CTR_RFC3686_IV_SIZE);

                /* initialize counter portion of counter block */
                *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
                        CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
        } else {
                memcpy(reqctx->iv, req->info, IV);
        }
        if (unlikely(bytes == 0)) {
                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                      req);
                err = chcr_cipher_fallback(ablkctx->sw_cipher,
                                           req->base.flags,
                                           req->src,
                                           req->dst,
                                           req->nbytes,
                                           req->info,
                                           op_type);
                goto error;
        }
        reqctx->op = op_type;
        reqctx->srcsg = req->src;
        reqctx->dstsg = req->dst;
        reqctx->src_ofst = 0;
        reqctx->dst_ofst = 0;
        wrparam.qid = qid;
        wrparam.req = req;
        wrparam.bytes = bytes;
        *skb = create_cipher_wr(&wrparam);
        if (IS_ERR(*skb)) {
                err = PTR_ERR(*skb);
                goto unmap;
        }
        reqctx->processed = bytes;
        reqctx->last_req_len = bytes;

        return 0;
unmap:
        chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
error:
        return err;
}
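
/*
 * Flow note (informational): process_cipher() first checks whether the
 * payload is small enough to travel as immediate data inside the work
 * request (reqctx->imm, bounded by SGE_MAX_WR_LEN); otherwise it asks
 * chcr_sg_ent_in_wr() how many bytes fit behind DMA gather/scatter
 * lists.  When nothing fits in a single WR the request is handed to the
 * software fallback, and CTR requests are additionally clamped so the
 * 32-bit counter cannot wrap mid-WR.
 */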

static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct sk_buff *skb = NULL;
        int err;
        struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));

        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
                                            c_ctx(tfm)->tx_qidx))) {
                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
                        return -EBUSY;
        }

        err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
                             &skb, CHCR_ENCRYPT_OP);
        if (err || !skb)
                return err;
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
        chcr_send_wr(skb);
        return -EINPROGRESS;
}

static int chcr_aes_decrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
        struct sk_buff *skb = NULL;
        int err;

        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
                                            c_ctx(tfm)->tx_qidx))) {
                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
                        return -EBUSY;
        }

        err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
                             &skb, CHCR_DECRYPT_OP);
        if (err || !skb)
                return err;
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
        chcr_send_wr(skb);
        return -EINPROGRESS;
}
1357
1358 static int chcr_device_init(struct chcr_context *ctx)
1359 {
1360         struct uld_ctx *u_ctx = NULL;
1361         struct adapter *adap;
1362         unsigned int id;
1363         int txq_perchan, txq_idx, ntxq;
1364         int err = 0, rxq_perchan, rxq_idx;
1365
1366         id = smp_processor_id();
1367         if (!ctx->dev) {
1368                 u_ctx = assign_chcr_device();
1369                 if (!u_ctx) {
                        err = -ENXIO;
1370                         pr_err("chcr device assignment fails\n");
1371                         goto out;
1372                 }
1373                 ctx->dev = u_ctx->dev;
1374                 adap = padap(ctx->dev);
1375                 ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
1376                                     adap->vres.ncrypto_fc);
1377                 rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
1378                 txq_perchan = ntxq / u_ctx->lldi.nchan;
1379                 rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
1380                 rxq_idx += id % rxq_perchan;
1381                 txq_idx = ctx->dev->tx_channel_id * txq_perchan;
1382                 txq_idx += id % txq_perchan;
1383                 spin_lock(&ctx->dev->lock_chcr_dev);
1384                 ctx->rx_qidx = rxq_idx;
1385                 ctx->tx_qidx = txq_idx;
1386                 ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
1387                 ctx->dev->rx_channel_id = 0;
1388                 spin_unlock(&ctx->dev->lock_chcr_dev);
1389         }
1390 out:
1391         return err;
1392 }
1393
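/*
 * chcr_cra_init - tfm init for the ablkcipher algorithms. A software
 * skcipher with the same algorithm name is kept as a fallback for
 * requests the hardware path cannot service, and XTS additionally keeps
 * an aes-generic cipher for computing the tweak.
 */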
1394 static int chcr_cra_init(struct crypto_tfm *tfm)
1395 {
1396         struct crypto_alg *alg = tfm->__crt_alg;
1397         struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1398         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1399
1400         ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0,
1401                                 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
1402         if (IS_ERR(ablkctx->sw_cipher)) {
1403                 pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1404                 return PTR_ERR(ablkctx->sw_cipher);
1405         }
1406
1407         if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
1408                 /* To update the tweak */
1409                 ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
1410                 if (IS_ERR(ablkctx->aes_generic)) {
1411                         pr_err("failed to allocate aes cipher for tweak\n");
                        crypto_free_skcipher(ablkctx->sw_cipher);
1412                         return PTR_ERR(ablkctx->aes_generic);
1413                 }
1414         } else
1415                 ablkctx->aes_generic = NULL;
1416
1417         tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
1418         return chcr_device_init(crypto_tfm_ctx(tfm));
1419 }
1420
1421 static int chcr_rfc3686_init(struct crypto_tfm *tfm)
1422 {
1423         struct crypto_alg *alg = tfm->__crt_alg;
1424         struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1425         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1426
1427         /* RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
1428          * cannot be used as the fallback in chcr_handle_cipher_response.
1429          */
1430         ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
1431                                 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
1432         if (IS_ERR(ablkctx->sw_cipher)) {
1433                 pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1434                 return PTR_ERR(ablkctx->sw_cipher);
1435         }
1436         tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
1437         return chcr_device_init(crypto_tfm_ctx(tfm));
1438 }
1439
1441 static void chcr_cra_exit(struct crypto_tfm *tfm)
1442 {
1443         struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1444         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1445
1446         crypto_free_skcipher(ablkctx->sw_cipher);
1447         if (ablkctx->aes_generic)
1448                 crypto_free_cipher(ablkctx->aes_generic);
1449 }
1450
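/*
 * get_alg_config - map a digest size onto the hardware key-context
 * parameters (MAC key size, auth mode, result size). SHA-224 and SHA-384
 * report the SHA-256 and SHA-512 result sizes because they share those
 * algorithms' internal state, which is what is carried as the partial
 * hash.
 */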
1451 static int get_alg_config(struct algo_param *params,
1452                           unsigned int auth_size)
1453 {
1454         switch (auth_size) {
1455         case SHA1_DIGEST_SIZE:
1456                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
1457                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
1458                 params->result_size = SHA1_DIGEST_SIZE;
1459                 break;
1460         case SHA224_DIGEST_SIZE:
1461                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1462                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
1463                 params->result_size = SHA256_DIGEST_SIZE;
1464                 break;
1465         case SHA256_DIGEST_SIZE:
1466                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1467                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
1468                 params->result_size = SHA256_DIGEST_SIZE;
1469                 break;
1470         case SHA384_DIGEST_SIZE:
1471                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1472                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
1473                 params->result_size = SHA512_DIGEST_SIZE;
1474                 break;
1475         case SHA512_DIGEST_SIZE:
1476                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1477                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
1478                 params->result_size = SHA512_DIGEST_SIZE;
1479                 break;
1480         default:
1481                 pr_err("chcr : ERROR, unsupported digest size\n");
1482                 return -EINVAL;
1483         }
1484         return 0;
1485 }
1486
1487 static inline void chcr_free_shash(struct crypto_shash *base_hash)
1488 {
1489         crypto_free_shash(base_hash);
1490 }
1491
1492 /**
1493  *      create_hash_wr - Create hash work request
1494  *      @req: hash request
 *      @param: hash work request parameters
1495  */
1496 static struct sk_buff *create_hash_wr(struct ahash_request *req,
1497                                       struct hash_wr_param *param)
1498 {
1499         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1500         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1501         struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
1502         struct sk_buff *skb = NULL;
1503         struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
1504         struct chcr_wr *chcr_req;
1505         struct ulptx_sgl *ulptx;
1506         unsigned int nents = 0, transhdr_len, iopad_alignment = 0;
1507         unsigned int digestsize = crypto_ahash_digestsize(tfm);
1508         unsigned int kctx_len = 0, temp = 0;
1509         u8 hash_size_in_response = 0;
1510         gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1511                 GFP_ATOMIC;
1512         struct adapter *adap = padap(h_ctx(tfm)->dev);
1513         int error = 0;
1514
1515         iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
1516         kctx_len = param->alg_prm.result_size + iopad_alignment;
1517         if (param->opad_needed)
1518                 kctx_len += param->alg_prm.result_size + iopad_alignment;
1519
1520         if (req_ctx->result)
1521                 hash_size_in_response = digestsize;
1522         else
1523                 hash_size_in_response = param->alg_prm.result_size;
1524         transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
1525         req_ctx->imm = (transhdr_len + param->bfr_len + param->sg_len) <=
1526                 SGE_MAX_WR_LEN;
1527         nents = sg_nents_xlen(req->src, param->sg_len, CHCR_SRC_SG_SIZE, 0);
1528         nents += param->bfr_len ? 1 : 0;
1529         transhdr_len += req_ctx->imm ? (DIV_ROUND_UP((param->bfr_len +
1530                         param->sg_len), 16) * 16) :
1531                         (sgl_len(nents) * 8);
1532         transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
1533
1534         skb = alloc_skb(SGE_MAX_WR_LEN, flags);
1535         if (!skb)
1536                 return ERR_PTR(-ENOMEM);
1537         chcr_req = __skb_put_zero(skb, transhdr_len);
1538
1539         chcr_req->sec_cpl.op_ivinsrtofst =
1540                 FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0);
1541         chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
1542
1543         chcr_req->sec_cpl.aadstart_cipherstop_hi =
1544                 FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
1545         chcr_req->sec_cpl.cipherstop_lo_authinsert =
1546                 FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
1547         chcr_req->sec_cpl.seqno_numivs =
1548                 FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
1549                                          param->opad_needed, 0);
1550
1551         chcr_req->sec_cpl.ivgen_hdrlen =
1552                 FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
1553
1554         memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
1555                param->alg_prm.result_size);
1556
1557         if (param->opad_needed)
1558                 memcpy(chcr_req->key_ctx.key +
1559                        ((param->alg_prm.result_size <= 32) ? 32 :
1560                         CHCR_HASH_MAX_DIGEST_SIZE),
1561                        hmacctx->opad, param->alg_prm.result_size);
1562
1563         chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
1564                                             param->alg_prm.mk_size, 0,
1565                                             param->opad_needed,
1566                                             ((kctx_len +
1567                                              sizeof(chcr_req->key_ctx)) >> 4));
1568         chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
1569         ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + kctx_len +
1570                                      DUMMY_BYTES);
1571         if (param->bfr_len != 0) {
1572                 req_ctx->dma_addr = dma_map_single(&u_ctx->lldi.pdev->dev,
1573                                           req_ctx->reqbfr, param->bfr_len,
1574                                           DMA_TO_DEVICE);
1575                 if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
1576                                        req_ctx->dma_addr)) {
1577                         error = -ENOMEM;
1578                         goto err;
1579                 }
1580                 req_ctx->dma_len = param->bfr_len;
1581         } else {
1582                 req_ctx->dma_addr = 0;
1583         }
1584         chcr_add_hash_src_ent(req, ulptx, param);
1585         /* Request up to the max WR size */
1586         temp = kctx_len + DUMMY_BYTES + (req_ctx->imm ? (param->sg_len
1587                                         + param->bfr_len) : 0);
1588         atomic_inc(&adap->chcr_stats.digest_rqst);
1589         create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->imm,
1590                     hash_size_in_response, transhdr_len,
1591                     temp, 0);
1592         req_ctx->skb = skb;
1593         return skb;
1594 err:
1595         kfree_skb(skb);
1596         return ERR_PTR(error);
1597 }
1598
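/*
 * chcr_ahash_update - hash update entry point. Only whole blocks are sent
 * to the hardware; the remainder is stashed in reqbfr and prepended to the
 * next update. As a rough illustration, with a 64-byte block size an
 * update of 100 bytes submits 64 bytes and buffers the remaining 36.
 */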
1599 static int chcr_ahash_update(struct ahash_request *req)
1600 {
1601         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1602         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1603         struct uld_ctx *u_ctx = NULL;
1604         struct sk_buff *skb;
1605         u8 remainder = 0, bs;
1606         unsigned int nbytes = req->nbytes;
1607         struct hash_wr_param params;
1608         int error;
1609
1610         bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1611
1612         u_ctx = ULD_CTX(h_ctx(rtfm));
1613         if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1614                                             h_ctx(rtfm)->tx_qidx))) {
1615                 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1616                         return -EBUSY;
1617         }
1618
1619         if (nbytes + req_ctx->reqlen >= bs) {
1620                 remainder = (nbytes + req_ctx->reqlen) % bs;
1621                 nbytes = nbytes + req_ctx->reqlen - remainder;
1622         } else {
1623                 sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
1624                                    + req_ctx->reqlen, nbytes, 0);
1625                 req_ctx->reqlen += nbytes;
1626                 return 0;
1627         }
1628         error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1629         if (error)
1630                 return -ENOMEM;
1631         params.opad_needed = 0;
1632         params.more = 1;
1633         params.last = 0;
1634         params.sg_len = nbytes - req_ctx->reqlen;
1635         params.bfr_len = req_ctx->reqlen;
1636         params.scmd1 = 0;
1637         get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1638         req_ctx->result = 0;
1639         req_ctx->data_len += params.sg_len + params.bfr_len;
1640         skb = create_hash_wr(req, &params);
1641         if (IS_ERR(skb)) {
1642                 error = PTR_ERR(skb);
1643                 goto unmap;
1644         }
1645
1646         if (remainder) {
1647                 /* Swap buffers */
1648                 swap(req_ctx->reqbfr, req_ctx->skbfr);
1649                 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
1650                                    req_ctx->reqbfr, remainder, req->nbytes -
1651                                    remainder);
1652         }
1653         req_ctx->reqlen = remainder;
1654         skb->dev = u_ctx->lldi.ports[0];
1655         set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1656         chcr_send_wr(skb);
1657
1658         return -EINPROGRESS;
1659 unmap:
1660         chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1661         return error;
1662 }
1663
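/*
 * create_last_hash_block - build a final Merkle-Damgard padding block in
 * software: a 0x80 byte, a zero fill, and the total message length in bits
 * (scmd1 << 3) stored big-endian in the last eight bytes of the block.
 */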
1664 static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1665 {
1666         memset(bfr_ptr, 0, bs);
1667         *bfr_ptr = 0x80;
1668         if (bs == 64)
1669                 *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
1670         else
1671                 *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
1672 }
1673
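/*
 * chcr_ahash_final - finish a hash. With nothing buffered, a padding block
 * from create_last_hash_block() is submitted as one more intermediate
 * block; otherwise the buffered bytes go out as the last block with scmd1
 * carrying the total length so the hardware can pad.
 */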
1674 static int chcr_ahash_final(struct ahash_request *req)
1675 {
1676         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1677         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1678         struct hash_wr_param params;
1679         struct sk_buff *skb;
1680         struct uld_ctx *u_ctx = NULL;
1681         u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1682
1683         u_ctx = ULD_CTX(h_ctx(rtfm));
1684         if (is_hmac(crypto_ahash_tfm(rtfm)))
1685                 params.opad_needed = 1;
1686         else
1687                 params.opad_needed = 0;
1688         params.sg_len = 0;
1689         get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1690         req_ctx->result = 1;
1691         params.bfr_len = req_ctx->reqlen;
1692         req_ctx->data_len += params.bfr_len + params.sg_len;
1693         if (req_ctx->reqlen == 0) {
1694                 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1695                 params.last = 0;
1696                 params.more = 1;
1697                 params.scmd1 = 0;
1698                 params.bfr_len = bs;
1699
1700         } else {
1701                 params.scmd1 = req_ctx->data_len;
1702                 params.last = 1;
1703                 params.more = 0;
1704         }
1705         skb = create_hash_wr(req, &params);
1706         if (IS_ERR(skb))
1707                 return PTR_ERR(skb);
1708
1709         skb->dev = u_ctx->lldi.ports[0];
1710         set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1711         chcr_send_wr(skb);
1712         return -EINPROGRESS;
1713 }
1714
1715 static int chcr_ahash_finup(struct ahash_request *req)
1716 {
1717         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1718         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1719         struct uld_ctx *u_ctx = NULL;
1720         struct sk_buff *skb;
1721         struct hash_wr_param params;
1722         u8  bs;
1723         int error;
1724
1725         bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1726         u_ctx = ULD_CTX(h_ctx(rtfm));
1727
1728         if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1729                                             h_ctx(rtfm)->tx_qidx))) {
1730                 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1731                         return -EBUSY;
1732         }
1733
1734         if (is_hmac(crypto_ahash_tfm(rtfm)))
1735                 params.opad_needed = 1;
1736         else
1737                 params.opad_needed = 0;
1738
1739         params.sg_len = req->nbytes;
1740         params.bfr_len = req_ctx->reqlen;
1741         get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1742         req_ctx->data_len += params.bfr_len + params.sg_len;
1743         req_ctx->result = 1;
1744         if ((req_ctx->reqlen + req->nbytes) == 0) {
1745                 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1746                 params.last = 0;
1747                 params.more = 1;
1748                 params.scmd1 = 0;
1749                 params.bfr_len = bs;
1750         } else {
1751                 params.scmd1 = req_ctx->data_len;
1752                 params.last = 1;
1753                 params.more = 0;
1754         }
1755         error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1756         if (error)
1757                 return -ENOMEM;
1758
1759         skb = create_hash_wr(req, &params);
1760         if (IS_ERR(skb)) {
1761                 error = PTR_ERR(skb);
1762                 goto unmap;
1763         }
1764         skb->dev = u_ctx->lldi.ports[0];
1765         set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1766         chcr_send_wr(skb);
1767
1768         return -EINPROGRESS;
1769 unmap:
1770         chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1771         return error;
1772 }
1773
1774 static int chcr_ahash_digest(struct ahash_request *req)
1775 {
1776         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1777         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1778         struct uld_ctx *u_ctx = NULL;
1779         struct sk_buff *skb;
1780         struct hash_wr_param params;
1781         u8  bs;
1782         int error;
1783
1784         rtfm->init(req);
1785         bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1786
1787         u_ctx = ULD_CTX(h_ctx(rtfm));
1788         if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1789                                             h_ctx(rtfm)->tx_qidx))) {
1790                 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1791                         return -EBUSY;
1792         }
1793
1794         if (is_hmac(crypto_ahash_tfm(rtfm)))
1795                 params.opad_needed = 1;
1796         else
1797                 params.opad_needed = 0;
1798         error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1799         if (error)
1800                 return -ENOMEM;
1801
1802         params.last = 0;
1803         params.more = 0;
1804         params.sg_len = req->nbytes;
1805         params.bfr_len = 0;
1806         params.scmd1 = 0;
1807         get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1808         req_ctx->result = 1;
1809         req_ctx->data_len += params.bfr_len + params.sg_len;
1810
1811         if (req->nbytes == 0) {
1812                 create_last_hash_block(req_ctx->reqbfr, bs, 0);
1813                 params.more = 1;
1814                 params.bfr_len = bs;
1815         }
1816
1817         skb = create_hash_wr(req, &params);
1818         if (IS_ERR(skb)) {
1819                 error = PTR_ERR(skb);
1820                 goto unmap;
1821         }
1822         skb->dev = u_ctx->lldi.ports[0];
1823         set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1824         chcr_send_wr(skb);
1825         return -EINPROGRESS;
1826 unmap:
1827         chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1828         return error;
1829 }
1830
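/*
 * chcr_ahash_export/chcr_ahash_import - save and restore the in-progress
 * hash state: the buffered partial block, the running byte count and the
 * intermediate digest.
 */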
1831 static int chcr_ahash_export(struct ahash_request *areq, void *out)
1832 {
1833         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1834         struct chcr_ahash_req_ctx *state = out;
1835
1836         state->reqlen = req_ctx->reqlen;
1837         state->data_len = req_ctx->data_len;
1838         state->is_sg_map = 0;
1839         state->result = 0;
1840         memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
1841         memcpy(state->partial_hash, req_ctx->partial_hash,
1842                CHCR_HASH_MAX_DIGEST_SIZE);
1843         return 0;
1844 }
1845
1846 static int chcr_ahash_import(struct ahash_request *areq, const void *in)
1847 {
1848         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1849         struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
1850
1851         req_ctx->reqlen = state->reqlen;
1852         req_ctx->data_len = state->data_len;
1853         req_ctx->reqbfr = req_ctx->bfr1;
1854         req_ctx->skbfr = req_ctx->bfr2;
1855         req_ctx->is_sg_map = 0;
1856         req_ctx->result = 0;
1857         memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
1858         memcpy(req_ctx->partial_hash, state->partial_hash,
1859                CHCR_HASH_MAX_DIGEST_SIZE);
1860         return 0;
1861 }
1862
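/*
 * chcr_ahash_setkey - precompute the HMAC pads as in RFC 2104. The key
 * (digested first if longer than a block) is XORed with the ipad/opad
 * constants and run through one compression round each, roughly:
 *
 *      ipad_state = Compress(init_state, key ^ 0x36..36)
 *      opad_state = Compress(init_state, key ^ 0x5c..5c)
 *
 * so the hardware only has to continue from these partial states.
 */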
1863 static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
1864                              unsigned int keylen)
1865 {
1866         struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
1867         unsigned int digestsize = crypto_ahash_digestsize(tfm);
1868         unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1869         unsigned int i, err = 0, updated_digestsize;
1870
1871         SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
1872
1873         /* Use the key to calculate the ipad and opad. ipad will be sent with
1874          * the first request's data and opad with the final hash result;
1875          * ipad is kept in hmacctx->ipad and opad in hmacctx->opad.
1876          */
1877         shash->tfm = hmacctx->base_hash;
1878         shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
1879         if (keylen > bs) {
1880                 err = crypto_shash_digest(shash, key, keylen,
1881                                           hmacctx->ipad);
1882                 if (err)
1883                         goto out;
1884                 keylen = digestsize;
1885         } else {
1886                 memcpy(hmacctx->ipad, key, keylen);
1887         }
1888         memset(hmacctx->ipad + keylen, 0, bs - keylen);
1889         memcpy(hmacctx->opad, hmacctx->ipad, bs);
1890
1891         for (i = 0; i < bs / sizeof(int); i++) {
1892                 *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
1893                 *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
1894         }
1895
1896         updated_digestsize = digestsize;
1897         if (digestsize == SHA224_DIGEST_SIZE)
1898                 updated_digestsize = SHA256_DIGEST_SIZE;
1899         else if (digestsize == SHA384_DIGEST_SIZE)
1900                 updated_digestsize = SHA512_DIGEST_SIZE;
1901         err = chcr_compute_partial_hash(shash, hmacctx->ipad,
1902                                         hmacctx->ipad, digestsize);
1903         if (err)
1904                 goto out;
1905         chcr_change_order(hmacctx->ipad, updated_digestsize);
1906
1907         err = chcr_compute_partial_hash(shash, hmacctx->opad,
1908                                         hmacctx->opad, digestsize);
1909         if (err)
1910                 goto out;
1911         chcr_change_order(hmacctx->opad, updated_digestsize);
1912 out:
1913         return err;
1914 }
1915
1916 static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1917                                unsigned int key_len)
1918 {
1919         struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
1920         unsigned short context_size = 0;
1921         int err;
1922
1923         err = chcr_cipher_fallback_setkey(cipher, key, key_len);
1924         if (err)
1925                 goto badkey_err;
1926
1927         memcpy(ablkctx->key, key, key_len);
1928         ablkctx->enckey_len = key_len;
1929         get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
1930         context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
1931         ablkctx->key_ctx_hdr =
1932                 FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
1933                                  CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
1934                                  CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
1935                                  CHCR_KEYCTX_NO_KEY, 1,
1936                                  0, context_size);
1937         ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
1938         return 0;
1939 badkey_err:
1940         crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1941         ablkctx->enckey_len = 0;
1942
1943         return err;
1944 }
1945
1946 static int chcr_sha_init(struct ahash_request *areq)
1947 {
1948         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1949         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1950         int digestsize =  crypto_ahash_digestsize(tfm);
1951
1952         req_ctx->data_len = 0;
1953         req_ctx->reqlen = 0;
1954         req_ctx->reqbfr = req_ctx->bfr1;
1955         req_ctx->skbfr = req_ctx->bfr2;
1956         req_ctx->skb = NULL;
1957         req_ctx->result = 0;
1958         req_ctx->is_sg_map = 0;
1959         copy_hash_init_values(req_ctx->partial_hash, digestsize);
1960         return 0;
1961 }
1962
1963 static int chcr_sha_cra_init(struct crypto_tfm *tfm)
1964 {
1965         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1966                                  sizeof(struct chcr_ahash_req_ctx));
1967         return chcr_device_init(crypto_tfm_ctx(tfm));
1968 }
1969
1970 static int chcr_hmac_init(struct ahash_request *areq)
1971 {
1972         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1973         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
1974         struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
1975         unsigned int digestsize = crypto_ahash_digestsize(rtfm);
1976         unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1977
1978         chcr_sha_init(areq);
1979         req_ctx->data_len = bs;
1980         if (is_hmac(crypto_ahash_tfm(rtfm))) {
1981                 if (digestsize == SHA224_DIGEST_SIZE)
1982                         memcpy(req_ctx->partial_hash, hmacctx->ipad,
1983                                SHA256_DIGEST_SIZE);
1984                 else if (digestsize == SHA384_DIGEST_SIZE)
1985                         memcpy(req_ctx->partial_hash, hmacctx->ipad,
1986                                SHA512_DIGEST_SIZE);
1987                 else
1988                         memcpy(req_ctx->partial_hash, hmacctx->ipad,
1989                                digestsize);
1990         }
1991         return 0;
1992 }
1993
1994 static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
1995 {
1996         struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1997         struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1998         unsigned int digestsize =
1999                 crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
2000
2001         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2002                                  sizeof(struct chcr_ahash_req_ctx));
2003         hmacctx->base_hash = chcr_alloc_shash(digestsize);
2004         if (IS_ERR(hmacctx->base_hash))
2005                 return PTR_ERR(hmacctx->base_hash);
2006         return chcr_device_init(crypto_tfm_ctx(tfm));
2007 }
2008
2009 static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2010 {
2011         struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2012         struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2013
2014         if (hmacctx->base_hash) {
2015                 chcr_free_shash(hmacctx->base_hash);
2016                 hmacctx->base_hash = NULL;
2017         }
2018 }
2019
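/*
 * chcr_aead_common_init - shared AEAD request setup: reject the request
 * when no valid key is set or a decrypt payload is shorter than the tag,
 * DMA-map the request, and count the source SG entries covering the AAD
 * and the payload.
 */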
2020 static int chcr_aead_common_init(struct aead_request *req,
2021                                  unsigned short op_type)
2022 {
2023         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2024         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2025         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2026         int error = -EINVAL;
2027         unsigned int authsize = crypto_aead_authsize(tfm);
2028
2029         /* validate key size */
2030         if (aeadctx->enckey_len == 0)
2031                 goto err;
2032         if (op_type && req->cryptlen < authsize)
2033                 goto err;
2034         error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2035                                   op_type);
2036         if (error) {
2037                 error = -ENOMEM;
2038                 goto err;
2039         }
2040         reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen,
2041                                           CHCR_SRC_SG_SIZE, 0);
2042         reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen,
2043                                           CHCR_SRC_SG_SIZE, req->assoclen);
2044         return 0;
2045 err:
2046         return error;
2047 }
2048
2049 static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
2050                                    int aadmax, int wrlen,
2051                                    unsigned short op_type)
2052 {
2053         unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2054
2055         if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2056             dst_nents > MAX_DSGL_ENT ||
2057             (req->assoclen > aadmax) ||
2058             (wrlen > SGE_MAX_WR_LEN))
2059                 return 1;
2060         return 0;
2061 }
2062
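/*
 * chcr_aead_fallback - replay the request on the software AEAD kept in
 * aeadctx->sw_cipher. The subrequest lives inside this driver's request
 * context (aead_request_ctx()), so no extra allocation is needed here.
 */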
2063 static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2064 {
2065         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2066         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2067         struct aead_request *subreq = aead_request_ctx(req);
2068
2069         aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2070         aead_request_set_callback(subreq, req->base.flags,
2071                                   req->base.complete, req->base.data);
2072         aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2073                                req->iv);
2074         aead_request_set_ad(subreq, req->assoclen);
2075         return op_type ? crypto_aead_decrypt(subreq) :
2076                 crypto_aead_encrypt(subreq);
2077 }
2078
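/*
 * create_authenc_wr - build the work request for the authenc (cipher plus
 * hash) AEAD modes. Requests that exceed the hardware AAD, DSGL or WR
 * limits are redirected to chcr_aead_fallback().
 */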
2079 static struct sk_buff *create_authenc_wr(struct aead_request *req,
2080                                          unsigned short qid,
2081                                          int size,
2082                                          unsigned short op_type)
2083 {
2084         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2085         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2086         struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2087         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2088         struct sk_buff *skb = NULL;
2089         struct chcr_wr *chcr_req;
2090         struct cpl_rx_phys_dsgl *phys_cpl;
2091         struct ulptx_sgl *ulptx;
2092         unsigned int transhdr_len;
2093         unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2094         unsigned int   kctx_len = 0, dnents;
2095         unsigned int  assoclen = req->assoclen;
2096         unsigned int  authsize = crypto_aead_authsize(tfm);
2097         int error = -EINVAL;
2098         int null = 0;
2099         gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2100                 GFP_ATOMIC;
2101         struct adapter *adap = padap(a_ctx(tfm)->dev);
2102
2103         if (req->cryptlen == 0)
2104                 return NULL;
2105
2106         reqctx->b0_dma = 0;
2107         if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
2108             subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2109                 null = 1;
2110                 assoclen = 0;
2111         }
2112         error = chcr_aead_common_init(req, op_type);
2113         if (error)
2114                 return ERR_PTR(error);
2115         dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
2116         dnents += sg_nents_xlen(req->dst, req->cryptlen +
2117                 (op_type ? -authsize : authsize), CHCR_DST_SG_SIZE,
2118                 req->assoclen);
2119         dnents += MIN_AUTH_SG; // For IV
2120
2121         dst_size = get_space_for_phys_dsgl(dnents);
2122         kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
2123                 - sizeof(chcr_req->key_ctx);
2124         transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2125         reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <
2126                         SGE_MAX_WR_LEN;
2127         temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV + req->cryptlen), 16)
2128                         * 16) : (sgl_len(reqctx->src_nents + reqctx->aad_nents
2129                         + MIN_GCM_SG) * 8);
2130         transhdr_len += temp;
2131         transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
2132
2133         if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2134                                     transhdr_len, op_type)) {
2135                 atomic_inc(&adap->chcr_stats.fallback);
2136                 chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2137                                     op_type);
2138                 return ERR_PTR(chcr_aead_fallback(req, op_type));
2139         }
2140         skb = alloc_skb(SGE_MAX_WR_LEN, flags);
2141         if (!skb) {
2142                 error = -ENOMEM;
2143                 goto err;
2144         }
2145
2146         chcr_req = __skb_put_zero(skb, transhdr_len);
2147
2148         temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
2149
2150         /*
2151          * Input order is AAD, IV and payload, where the IV should be
2152          * included as part of the authdata. All other fields should be
2153          * filled according to the hardware spec.
2154          */
2155         chcr_req->sec_cpl.op_ivinsrtofst =
2156                 FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2,
2157                                        assoclen + 1);
2158         chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen);
2159         chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2160                                         assoclen ? 1 : 0, assoclen,
2161                                         assoclen + IV + 1,
2162                                         (temp & 0x1F0) >> 4);
2163         chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2164                                         temp & 0xF,
2165                                         null ? 0 : assoclen + IV + 1,
2166                                         temp, temp);
2167         if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2168             subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2169                 temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2170         else
2171                 temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
2172         chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2173                                         (op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
2174                                         temp,
2175                                         actx->auth_mode, aeadctx->hmac_ctrl,
2176                                         IV >> 1);
2177         chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2178                                          0, 0, dst_size);
2179
2180         chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2181         if (op_type == CHCR_ENCRYPT_OP ||
2182                 subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2183                 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2184                 memcpy(chcr_req->key_ctx.key, aeadctx->key,
2185                        aeadctx->enckey_len);
2186         else
2187                 memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2188                        aeadctx->enckey_len);
2189
2190         memcpy(chcr_req->key_ctx.key +
2191                (DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4), actx->h_iopad,
2192                kctx_len - (DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
2193         if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2194             subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2195                 memcpy(reqctx->iv, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2196                 memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->iv,
2197                                 CTR_RFC3686_IV_SIZE);
2198                 *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
2199                         CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2200         } else {
2201                 memcpy(reqctx->iv, req->iv, IV);
2202         }
2203         phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2204         ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
2205         chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
2206         chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
2207         atomic_inc(&adap->chcr_stats.cipher_rqst);
2208         temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
2209                 kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
2210         create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2211                    transhdr_len, temp, 0);
2212         reqctx->skb = skb;
2213         reqctx->op = op_type;
2214
2215         return skb;
2216 err:
2217         chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2218                             op_type);
2219
2220         return ERR_PTR(error);
2221 }
2222
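/*
 * chcr_aead_dma_map - map the IV and the src/dst scatterlists for DMA.
 * In-place requests (src == dst) get a single bidirectional mapping;
 * otherwise src is mapped to-device and dst from-device, and the src
 * mapping is unwound if the dst mapping fails.
 */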
2223 int chcr_aead_dma_map(struct device *dev,
2224                       struct aead_request *req,
2225                       unsigned short op_type)
2226 {
2227         int error;
2228         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2229         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2230         unsigned int authsize = crypto_aead_authsize(tfm);
2231         int dst_size;
2232
2233         dst_size = req->assoclen + req->cryptlen + (op_type ?
2234                                 -authsize : authsize);
2235         if (!req->cryptlen || !dst_size)
2236                 return 0;
2237         reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV,
2238                                         DMA_BIDIRECTIONAL);
2239         if (dma_mapping_error(dev, reqctx->iv_dma))
2240                 return -ENOMEM;
2241
2242         if (req->src == req->dst) {
2243                 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2244                                    DMA_BIDIRECTIONAL);
2245                 if (!error)
2246                         goto err;
2247         } else {
2248                 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2249                                    DMA_TO_DEVICE);
2250                 if (!error)
2251                         goto err;
2252                 error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2253                                    DMA_FROM_DEVICE);
2254                 if (!error) {
2255                         dma_unmap_sg(dev, req->src, sg_nents(req->src),
2256                                    DMA_TO_DEVICE);
2257                         goto err;
2258                 }
2259         }
2260
2261         return 0;
2262 err:
2263         dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
2264         return -ENOMEM;
2265 }
2266
2267 void chcr_aead_dma_unmap(struct device *dev,
2268                          struct aead_request *req,
2269                          unsigned short op_type)
2270 {
2271         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2272         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2273         unsigned int authsize = crypto_aead_authsize(tfm);
2274         int dst_size;
2275
2276         dst_size = req->assoclen + req->cryptlen + (op_type ?
2277                                         -authsize : authsize);
2278         if (!req->cryptlen || !dst_size)
2279                 return;
2280
2281         dma_unmap_single(dev, reqctx->iv_dma, IV,
2282                                         DMA_BIDIRECTIONAL);
2283         if (req->src == req->dst) {
2284                 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2285                                    DMA_BIDIRECTIONAL);
2286         } else {
2287                 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2288                                    DMA_TO_DEVICE);
2289                 dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2290                                    DMA_FROM_DEVICE);
2291         }
2292 }
2293
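/*
 * chcr_add_aead_src_ent - lay out the work-request input. In immediate
 * mode (reqctx->imm) B0 (for CCM), the AAD, the IV and the payload are
 * copied inline after the headers; otherwise a ULPTX SGL referencing the
 * DMA-mapped buffers is built in the same order.
 */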
2294 void chcr_add_aead_src_ent(struct aead_request *req,
2295                            struct ulptx_sgl *ulptx,
2296                            unsigned int assoclen,
2297                            unsigned short op_type)
2298 {
2299         struct ulptx_walk ulp_walk;
2300         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2301
2302         if (reqctx->imm) {
2303                 u8 *buf = (u8 *)ulptx;
2304
2305                 if (reqctx->b0_dma) {
2306                         memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2307                         buf += reqctx->b0_len;
2308                 }
2309                 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2310                                    buf, assoclen, 0);
2311                 buf += assoclen;
2312                 memcpy(buf, reqctx->iv, IV);
2313                 buf += IV;
2314                 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2315                                    buf, req->cryptlen, req->assoclen);
2316         } else {
2317                 ulptx_walk_init(&ulp_walk, ulptx);
2318                 if (reqctx->b0_dma)
2319                         ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2320                                             &reqctx->b0_dma);
2321                 ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0);
2322                 ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
2323                 ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen,
2324                                   req->assoclen);
2325                 ulptx_walk_end(&ulp_walk);
2326         }
2327 }
2328
2329 void chcr_add_aead_dst_ent(struct aead_request *req,
2330                            struct cpl_rx_phys_dsgl *phys_cpl,
2331                            unsigned int assoclen,
2332                            unsigned short op_type,
2333                            unsigned short qid)
2334 {
2335         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2336         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2337         struct dsgl_walk dsgl_walk;
2338         unsigned int authsize = crypto_aead_authsize(tfm);
2339         u32 temp;
2340
2341         dsgl_walk_init(&dsgl_walk, phys_cpl);
2342         if (reqctx->b0_dma)
2343                 dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma);
2344         dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0);
2345         dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
2346         temp = req->cryptlen + (op_type ? -authsize : authsize);
2347         dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
2348         dsgl_walk_end(&dsgl_walk, qid);
2349 }
2350
2351 void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
2352                              struct ulptx_sgl *ulptx,
2353                              struct  cipher_wr_param *wrparam)
2354 {
2355         struct ulptx_walk ulp_walk;
2356         struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2357
2358         if (reqctx->imm) {
2359                 u8 *buf = (u8 *)ulptx;
2360
2361                 memcpy(buf, reqctx->iv, IV);
2362                 buf += IV;
2363                 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2364                                    buf, wrparam->bytes, reqctx->processed);
2365         } else {
2366                 ulptx_walk_init(&ulp_walk, ulptx);
2367                 ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
2368                 ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2369                                   reqctx->src_ofst);
2370                 reqctx->srcsg = ulp_walk.last_sg;
2371                 reqctx->src_ofst = ulp_walk.last_sg_len;
2372                 ulptx_walk_end(&ulp_walk);
2373         }
2374 }
2375
2376 void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
2377                              struct cpl_rx_phys_dsgl *phys_cpl,
2378                              struct  cipher_wr_param *wrparam,
2379                              unsigned short qid)
2380 {
2381         struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2382         struct dsgl_walk dsgl_walk;
2383
2384         dsgl_walk_init(&dsgl_walk, phys_cpl);
2385         dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
2386         dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2387                          reqctx->dst_ofst);
2388         reqctx->dstsg = dsgl_walk.last_sg;
2389         reqctx->dst_ofst = dsgl_walk.last_sg_len;
2390
2391         dsgl_walk_end(&dsgl_walk, qid);
2392 }
2393
2394 void chcr_add_hash_src_ent(struct ahash_request *req,
2395                            struct ulptx_sgl *ulptx,
2396                            struct hash_wr_param *param)
2397 {
2398         struct ulptx_walk ulp_walk;
2399         struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2400
2401         if (reqctx->imm) {
2402                 u8 *buf = (u8 *)ulptx;
2403
2404                 if (param->bfr_len) {
2405                         memcpy(buf, reqctx->reqbfr, param->bfr_len);
2406                         buf += param->bfr_len;
2407                 }
2408                 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2409                                    buf, param->sg_len, 0);
2410         } else {
2411                 ulptx_walk_init(&ulp_walk, ulptx);
2412                 if (param->bfr_len)
2413                         ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2414                                             &reqctx->dma_addr);
2415                 ulptx_walk_add_sg(&ulp_walk, req->src, param->sg_len, 0);
2417                 ulptx_walk_end(&ulp_walk);
2418         }
2419 }
2420
2421 int chcr_hash_dma_map(struct device *dev,
2422                       struct ahash_request *req)
2423 {
2424         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2425         int error = 0;
2426
2427         if (!req->nbytes)
2428                 return 0;
2429         error = dma_map_sg(dev, req->src, sg_nents(req->src),
2430                            DMA_TO_DEVICE);
2431         if (!error)
2432                 return -ENOMEM;
2433         req_ctx->is_sg_map = 1;
2434         return 0;
2435 }
2436
2437 void chcr_hash_dma_unmap(struct device *dev,
2438                          struct ahash_request *req)
2439 {
2440         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2441
2442         if (!req->nbytes)
2443                 return;
2444
2445         dma_unmap_sg(dev, req->src, sg_nents(req->src),
2446                            DMA_TO_DEVICE);
2447         req_ctx->is_sg_map = 0;
2449 }
2450
2451 int chcr_cipher_dma_map(struct device *dev,
2452                         struct ablkcipher_request *req)
2453 {
2454         int error;
2455         struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2456
2457         reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV,
2458                                         DMA_BIDIRECTIONAL);
2459         if (dma_mapping_error(dev, reqctx->iv_dma))
2460                 return -ENOMEM;
2461
2462         if (req->src == req->dst) {
2463                 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2464                                    DMA_BIDIRECTIONAL);
2465                 if (!error)
2466                         goto err;
2467         } else {
2468                 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2469                                    DMA_TO_DEVICE);
2470                 if (!error)
2471                         goto err;
2472                 error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2473                                    DMA_FROM_DEVICE);
2474                 if (!error) {
2475                         dma_unmap_sg(dev, req->src, sg_nents(req->src),
2476                                    DMA_TO_DEVICE);
2477                         goto err;
2478                 }
2479         }
2480
2481         return 0;
2482 err:
2483         dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
2484         return -ENOMEM;
2485 }
2486
2487 void chcr_cipher_dma_unmap(struct device *dev,
2488                            struct ablkcipher_request *req)
2489 {
2490         struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2491
2492         dma_unmap_single(dev, reqctx->iv_dma, IV,
2493                                         DMA_BIDIRECTIONAL);
2494         if (req->src == req->dst) {
2495                 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2496                                    DMA_BIDIRECTIONAL);
2497         } else {
2498                 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2499                                    DMA_TO_DEVICE);
2500                 dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2501                                    DMA_FROM_DEVICE);
2502         }
2503 }
2504
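/*
 * set_msg_len - encode the CCM message length into the last csize bytes
 * of the B0 block, as crypto/ccm.c does. Illustratively, csize = 3 and
 * msglen = 0x012345 store the bytes 01 23 45; lengths that do not fit in
 * csize bytes return -EOVERFLOW.
 */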
2505 static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2506 {
2507         __be32 data;
2508
2509         memset(block, 0, csize);
2510         block += csize;
2511
2512         if (csize >= 4)
2513                 csize = 4;
2514         else if (msglen > (unsigned int)(1 << (8 * csize)))
2515                 return -EOVERFLOW;
2516
2517         data = cpu_to_be32(msglen);
2518         memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2519
2520         return 0;
2521 }
2522
2523 static int generate_b0(struct aead_request *req,
2524                         struct chcr_aead_ctx *aeadctx,
2525                         unsigned short op_type)
2526 {
2527         unsigned int l, lp, m;
2528         int rc;
2529         struct crypto_aead *aead = crypto_aead_reqtfm(req);
2530         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2531         u8 *b0 = reqctx->scratch_pad;
2532
2533         m = crypto_aead_authsize(aead);
2534
2535         memcpy(b0, reqctx->iv, 16);
2536
2537         lp = b0[0];
2538         l = lp + 1;
2539
2540         /* set m, bits 3-5 */
2541         *b0 |= (8 * ((m - 2) / 2));
2542
2543         /* set adata, bit 6, if associated data is used */
2544         if (req->assoclen)
2545                 *b0 |= 64;
2546         rc = set_msg_len(b0 + 16 - l,
2547                          (op_type == CHCR_DECRYPT_OP) ?
2548                          req->cryptlen - m : req->cryptlen, l);
        return rc;
2549 }
2550
2551 static inline int crypto_ccm_check_iv(const u8 *iv)
2552 {
2553         /* 2 <= L <= 8, so 1 <= L' <= 7. */
2554         if (iv[0] < 1 || iv[0] > 7)
2555                 return -EINVAL;
2556
2557         return 0;
2558 }
2559
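/*
 * ccm_format_packet - build the CCM counter block in reqctx->iv and stage
 * the encoded AAD length just past B0 (scratch_pad + 16). For RFC 4309
 * the nonce is the 3-byte salt plus the 8-byte per-request IV and
 * iv[0] = 3 selects a 4-byte length field; the 8 IV bytes are excluded
 * from the advertised AAD length.
 */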
2560 static int ccm_format_packet(struct aead_request *req,
2561                              struct chcr_aead_ctx *aeadctx,
2562                              unsigned int sub_type,
2563                              unsigned short op_type)
2564 {
2565         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2566         int rc = 0;
2567
2568         if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2569                 reqctx->iv[0] = 3;
2570                 memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
2571                 memcpy(reqctx->iv + 4, req->iv, 8);
2572                 memset(reqctx->iv + 12, 0, 4);
2573                 *((unsigned short *)(reqctx->scratch_pad + 16)) =
2574                         htons(req->assoclen - 8);
2575         } else {
2576                 memcpy(reqctx->iv, req->iv, 16);
2577                 *((unsigned short *)(reqctx->scratch_pad + 16)) =
2578                         htons(req->assoclen);
2579         }
2580         rc = generate_b0(req, aeadctx, op_type);
2581         /* zero the ctr value */
2582         memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
2583         return rc;
2584 }
2585
2586 static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2587                                   unsigned int dst_size,
2588                                   struct aead_request *req,
2589                                   unsigned short op_type)
2590 {
2591         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2592         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2593         unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2594         unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2595         unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id;
2596         unsigned int ccm_xtra;
2597         unsigned char tag_offset = 0, auth_offset = 0;
2598         unsigned int assoclen;
2599
2600         if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2601                 assoclen = req->assoclen - 8;
2602         else
2603                 assoclen = req->assoclen;
2604         ccm_xtra = CCM_B0_SIZE +
2605                 ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2606
2607         auth_offset = req->cryptlen ?
2608                 (assoclen + IV + 1 + ccm_xtra) : 0;
2609         if (op_type == CHCR_DECRYPT_OP) {
2610                 if (crypto_aead_authsize(tfm) != req->cryptlen)
2611                         tag_offset = crypto_aead_authsize(tfm);
2612                 else
2613                         auth_offset = 0;
2614         }
2615
2617         sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
2618                                          2, assoclen + 1 + ccm_xtra);
2619         sec_cpl->pldlen =
2620                 htonl(assoclen + IV + req->cryptlen + ccm_xtra);
2621         /* For CCM there will always be B0, so AAD start is always 1 */
2622         sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2623                                         1, assoclen + ccm_xtra, assoclen
2624                                         + IV + 1 + ccm_xtra, 0);
2625
2626         sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
2627                                         auth_offset, tag_offset,
2628                                         (op_type == CHCR_ENCRYPT_OP) ? 0 :
2629                                         crypto_aead_authsize(tfm));
2630         sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2631                                         (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
2632                                         cipher_mode, mac_mode,
2633                                         aeadctx->hmac_ctrl, IV >> 1);
2634
2635         sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
2636                                         0, dst_size);
2637 }
2638
2639 int aead_ccm_validate_input(unsigned short op_type,
2640                             struct aead_request *req,
2641                             struct chcr_aead_ctx *aeadctx,
2642                             unsigned int sub_type)
2643 {
2644         if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2645                 if (crypto_ccm_check_iv(req->iv)) {
2646                         pr_err("CCM: IV check fails\n");
2647                         return -EINVAL;
2648                 }
2649         } else {
2650                 if (req->assoclen != 16 && req->assoclen != 20) {
2651                         pr_err("RFC4309: Invalid AAD length %d\n",
2652                                req->assoclen);
2653                         return -EINVAL;
2654                 }
2655         }
2656         return 0;
2657 }
2658
2659 static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
2660                                           unsigned short qid,
2661                                           int size,
2662                                           unsigned short op_type)
2663 {
2664         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2665         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2666         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2667         struct sk_buff *skb = NULL;
2668         struct chcr_wr *chcr_req;
2669         struct cpl_rx_phys_dsgl *phys_cpl;
2670         struct ulptx_sgl *ulptx;
2671         unsigned int transhdr_len;
2672         unsigned int dst_size = 0, kctx_len, dnents, temp;
2673         unsigned int sub_type, assoclen = req->assoclen;
2674         unsigned int authsize = crypto_aead_authsize(tfm);
2675         int error = -EINVAL;
2676         gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2677                 GFP_ATOMIC;
2678         struct adapter *adap = padap(a_ctx(tfm)->dev);
2679
2680         reqctx->b0_dma = 0;
2681         sub_type = get_aead_subtype(tfm);
2682         if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2683                 assoclen -= 8;
2684         error = chcr_aead_common_init(req, op_type);
2685         if (error)
2686                 return ERR_PTR(error);
2687
2689         reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
2690         error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type);
2691         if (error)
2692                 goto err;
2693         dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
2694         dnents += sg_nents_xlen(req->dst, req->cryptlen
2695                         + (op_type ? -authsize : authsize),
2696                         CHCR_DST_SG_SIZE, req->assoclen);
2697         dnents += MIN_CCM_SG; // For IV and B0
2698         dst_size = get_space_for_phys_dsgl(dnents);
2699         kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
2700         transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2701         reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
2702                        reqctx->b0_len) <= SGE_MAX_WR_LEN;
2703         temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV + req->cryptlen +
2704                                 reqctx->b0_len), 16) * 16) :
2705                 (sgl_len(reqctx->src_nents + reqctx->aad_nents +
2706                                     MIN_CCM_SG) *  8);
2707         transhdr_len += temp;
2708         transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
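        /*
         * Illustrative sizing example: with assoclen = 16, IV = 16,
         * cryptlen = 64 and b0_len = 18, the immediate case appends
         * DIV_ROUND_UP(114, 16) * 16 = 128 bytes of payload to the
         * header; otherwise the source is described by 8-byte ULPTX
         * flits counted by sgl_len().
         */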
2709
2710         if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
2711                                     reqctx->b0_len, transhdr_len, op_type)) {
2712                 atomic_inc(&adap->chcr_stats.fallback);
2713                 chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2714                                     op_type);
2715                 return ERR_PTR(chcr_aead_fallback(req, op_type));
2716         }
2717         skb = alloc_skb(SGE_MAX_WR_LEN,  flags);
2718
2719         if (!skb) {
2720                 error = -ENOMEM;
2721                 goto err;
2722         }
2723
2724         chcr_req = __skb_put_zero(skb, transhdr_len);
2725
2726         fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type);
2727
2728         chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2729         memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
2730         memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
2731                                         16), aeadctx->key, aeadctx->enckey_len);
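        /*
         * CCM pushes the AES key through both the CBC-MAC and the CTR
         * datapaths, so the key context carries two copies of the key -
         * the reason kctx_len above is twice the padded key length.
         */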
2732
2733         phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2734         ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
2735         error = ccm_format_packet(req, aeadctx, sub_type, op_type);
2736         if (error)
2737                 goto dstmap_fail;
2738
2739         reqctx->b0_dma = dma_map_single(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev,
2740                                         &reqctx->scratch_pad, reqctx->b0_len,
2741                                         DMA_BIDIRECTIONAL);
2742         if (dma_mapping_error(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev,
2743                               reqctx->b0_dma)) {
2744                 error = -ENOMEM;
2745                 goto dstmap_fail;
2746         }
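        /*
         * The mapping above hands the B0 block (built into
         * reqctx->scratch_pad by ccm_format_packet(), as far as the
         * call sequence here suggests) to the hardware for DMA
         * alongside the AAD.
         */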
2747
2748         chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
2749         chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
2750
2751         atomic_inc(&adap->chcr_stats.aead_rqst);
2752         temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
2753                 kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen +
2754                 reqctx->b0_len) : 0);
2755         create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
2756                     transhdr_len, temp, 0);
2757         reqctx->skb = skb;
2758         reqctx->op = op_type;
2759
2760         return skb;
2761 dstmap_fail:
2762         kfree_skb(skb);
2763 err:
2764         chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type);
2765         return ERR_PTR(error);
2766 }
2767
2768 static struct sk_buff *create_gcm_wr(struct aead_request *req,
2769                                      unsigned short qid,
2770                                      int size,
2771                                      unsigned short op_type)
2772 {
2773         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2774         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2775         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2776         struct sk_buff *skb = NULL;
2777         struct chcr_wr *chcr_req;
2778         struct cpl_rx_phys_dsgl *phys_cpl;
2779         struct ulptx_sgl *ulptx;
2780         unsigned int transhdr_len, dnents = 0;
2781         unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
2782         unsigned int authsize = crypto_aead_authsize(tfm);
2783         int error = -EINVAL;
2784         gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2785                 GFP_ATOMIC;
2786         struct adapter *adap = padap(a_ctx(tfm)->dev);
2787
2788         if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
2789                 assoclen = req->assoclen - 8;
2790
2791         reqctx->b0_dma = 0;
2792         error = chcr_aead_common_init(req, op_type);
2793         if (error)
2794                 return ERR_PTR(error);
2795         dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
2796         dnents += sg_nents_xlen(req->dst, req->cryptlen +
2797                                 (op_type ? -authsize : authsize),
2798                                 CHCR_DST_SG_SIZE, req->assoclen);
2799         dnents += MIN_GCM_SG; // For IV
2800         dst_size = get_space_for_phys_dsgl(dnents);
2801         kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
2802                 AEAD_H_SIZE;
2803         transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2804         reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
2805                         SGE_MAX_WR_LEN;
2806         temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV +
2807         req->cryptlen), 16) * 16) : (sgl_len(reqctx->src_nents +
2808                                 reqctx->aad_nents + MIN_GCM_SG) * 8);
2809         transhdr_len += temp;
2810         transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
2811         if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2812                             transhdr_len, op_type)) {
2813                 atomic_inc(&adap->chcr_stats.fallback);
2814                 chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2815                                     op_type);
2816                 return ERR_PTR(chcr_aead_fallback(req, op_type));
2817         }
2818         skb = alloc_skb(SGE_MAX_WR_LEN, flags);
2819         if (!skb) {
2820                 error = -ENOMEM;
2821                 goto err;
2822         }
2823
2824         chcr_req = __skb_put_zero(skb, transhdr_len);
2825
2826         // Offset of the tag from the end of the payload
2827         temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
2828         chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
2829                                         a_ctx(tfm)->dev->rx_channel_id, 2,
2830                                         (assoclen + 1));
2831         chcr_req->sec_cpl.pldlen =
2832                 htonl(assoclen + IV + req->cryptlen);
2833         chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2834                                         assoclen ? 1 : 0, assoclen,
2835                                         assoclen + IV + 1, 0);
2836         chcr_req->sec_cpl.cipherstop_lo_authinsert =
2837                         FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
2838                                                 temp, temp);
2839         chcr_req->sec_cpl.seqno_numivs =
2840                         FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
2841                                         CHCR_ENCRYPT_OP) ? 1 : 0,
2842                                         CHCR_SCMD_CIPHER_MODE_AES_GCM,
2843                                         CHCR_SCMD_AUTH_MODE_GHASH,
2844                                         aeadctx->hmac_ctrl, IV >> 1);
2845         chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2846                                         0, 0, dst_size);
2847         chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2848         memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
2849         memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
2850                                 16), GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
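        /*
         * Key-context layout for GCM, as assembled just above: the AES
         * key padded to a 16-byte boundary, followed by the precomputed
         * GHASH subkey H = AES_K(0^128) from chcr_gcm_setkey().
         */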
2851
2852         /* Prepare a 16-byte IV. For RFC4106: SALT(4B) | IV(8B) | 0x00000001; */
2853         /* for plain GCM: IV(12B) | 0x00000001 (the initial counter block)   */
2854         if (get_aead_subtype(tfm) ==
2855             CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
2856                 memcpy(reqctx->iv, aeadctx->salt, 4);
2857                 memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE);
2858         } else {
2859                 memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE);
2860         }
2861         *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
2862
2863         phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2864         ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
2865
2866         chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
2867         chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
2868         atomic_inc(&adap->chcr_stats.aead_rqst);
2869         temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
2870                 kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
2871         create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2872                     transhdr_len, temp, reqctx->verify);
2873         reqctx->skb = skb;
2874         reqctx->op = op_type;
2875         return skb;
2876
2877 err:
2878         chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type);
2879         return ERR_PTR(error);
2880 }
2881
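/*
 * Allocate a software fallback tfm for the same algorithm so requests
 * the hardware cannot take (see chcr_aead_need_fallback()) can be
 * redirected to it; size the request context for whichever is larger,
 * the driver's own reqctx or a nested request for the fallback cipher.
 */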
2884 static int chcr_aead_cra_init(struct crypto_aead *tfm)
2885 {
2886         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2887         struct aead_alg *alg = crypto_aead_alg(tfm);
2888
2889         aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
2890                                                CRYPTO_ALG_NEED_FALLBACK |
2891                                                CRYPTO_ALG_ASYNC);
2892         if  (IS_ERR(aeadctx->sw_cipher))
2893                 return PTR_ERR(aeadctx->sw_cipher);
2894         crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
2895                                  sizeof(struct aead_request) +
2896                                  crypto_aead_reqsize(aeadctx->sw_cipher)));
2897         return chcr_device_init(a_ctx(tfm));
2898 }
2899
2900 static void chcr_aead_cra_exit(struct crypto_aead *tfm)
2901 {
2902         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2903
2904         crypto_free_aead(aeadctx->sw_cipher);
2905 }
2906
2907 static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
2908                                         unsigned int authsize)
2909 {
2910         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2911
2912         aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
2913         aeadctx->mayverify = VERIFY_HW;
2914         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2915 }
2916 static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
2917                                     unsigned int authsize)
2918 {
2919         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2920         u32 maxauth = crypto_aead_maxauthsize(tfm);
2921
2922         /* The SHA1 authsize in IPsec is 12, not 10, i.e. maxauthsize / 2
2923          * does not hold for SHA1. Hence the authsize == 12 check must
2924          * come before the authsize == (maxauth >> 1) check.
2925          */
2926         if (authsize == ICV_4) {
2927                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
2928                 aeadctx->mayverify = VERIFY_HW;
2929         } else if (authsize == ICV_6) {
2930                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
2931                 aeadctx->mayverify = VERIFY_HW;
2932         } else if (authsize == ICV_10) {
2933                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
2934                 aeadctx->mayverify = VERIFY_HW;
2935         } else if (authsize == ICV_12) {
2936                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2937                 aeadctx->mayverify = VERIFY_HW;
2938         } else if (authsize == ICV_14) {
2939                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
2940                 aeadctx->mayverify = VERIFY_HW;
2941         } else if (authsize == (maxauth >> 1)) {
2942                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2943                 aeadctx->mayverify = VERIFY_HW;
2944         } else if (authsize == maxauth) {
2945                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2946                 aeadctx->mayverify = VERIFY_HW;
2947         } else {
2948                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2949                 aeadctx->mayverify = VERIFY_SW;
2950         }
2951         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2952 }
2953
2955 static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
2956 {
2957         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2958
2959         switch (authsize) {
2960         case ICV_4:
2961                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
2962                 aeadctx->mayverify = VERIFY_HW;
2963                 break;
2964         case ICV_8:
2965                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2966                 aeadctx->mayverify = VERIFY_HW;
2967                 break;
2968         case ICV_12:
2969                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2970                 aeadctx->mayverify = VERIFY_HW;
2971                 break;
2972         case ICV_14:
2973                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
2974                 aeadctx->mayverify = VERIFY_HW;
2975                 break;
2976         case ICV_16:
2977                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2978                 aeadctx->mayverify = VERIFY_HW;
2979                 break;
2980         case ICV_13:
2981         case ICV_15:
2982                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2983                 aeadctx->mayverify = VERIFY_SW;
2984                 break;
2985         default:
2986                 crypto_tfm_set_flags((struct crypto_tfm *)tfm,
2987                                      CRYPTO_TFM_RES_BAD_KEY_LEN);
2988                 return -EINVAL;
2990         }
2991         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2992 }
2993
2994 static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
2995                                           unsigned int authsize)
2996 {
2997         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2998
2999         switch (authsize) {
3000         case ICV_8:
3001                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3002                 aeadctx->mayverify = VERIFY_HW;
3003                 break;
3004         case ICV_12:
3005                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3006                 aeadctx->mayverify = VERIFY_HW;
3007                 break;
3008         case ICV_16:
3009                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3010                 aeadctx->mayverify = VERIFY_HW;
3011                 break;
3012         default:
3013                 crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3014                                      CRYPTO_TFM_RES_BAD_KEY_LEN);
3015                 return -EINVAL;
3016         }
3017         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3018 }
3019
3020 static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3021                                 unsigned int authsize)
3022 {
3023         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3024
3025         switch (authsize) {
3026         case ICV_4:
3027                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3028                 aeadctx->mayverify = VERIFY_HW;
3029                 break;
3030         case ICV_6:
3031                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3032                 aeadctx->mayverify = VERIFY_HW;
3033                 break;
3034         case ICV_8:
3035                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3036                 aeadctx->mayverify = VERIFY_HW;
3037                 break;
3038         case ICV_10:
3039                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3040                 aeadctx->mayverify = VERIFY_HW;
3041                 break;
3042         case ICV_12:
3043                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3044                 aeadctx->mayverify = VERIFY_HW;
3045                 break;
3046         case ICV_14:
3047                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3048                 aeadctx->mayverify = VERIFY_HW;
3049                 break;
3050         case ICV_16:
3051                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3052                 aeadctx->mayverify = VERIFY_HW;
3053                 break;
3054         default:
3055                 crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3056                                      CRYPTO_TFM_RES_BAD_KEY_LEN);
3057                 return -EINVAL;
3058         }
3059         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3060 }
3061
3062 static int chcr_ccm_common_setkey(struct crypto_aead *aead,
3063                                 const u8 *key,
3064                                 unsigned int keylen)
3065 {
3066         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3067         unsigned char ck_size, mk_size;
3068         int key_ctx_size = 0;
3069
3070         key_ctx_size = sizeof(struct _key_ctx) +
3071                 ((DIV_ROUND_UP(keylen, 16)) << 4)  * 2;
3072         if (keylen == AES_KEYSIZE_128) {
3073                 mk_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3074                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3075         } else if (keylen == AES_KEYSIZE_192) {
3076                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3077                 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3078         } else if (keylen == AES_KEYSIZE_256) {
3079                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3080                 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3081         } else {
3082                 crypto_tfm_set_flags((struct crypto_tfm *)aead,
3083                                      CRYPTO_TFM_RES_BAD_KEY_LEN);
3084                 aeadctx->enckey_len = 0;
3085                 return  -EINVAL;
3086         }
3087         aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3088                                                 key_ctx_size >> 4);
3089         memcpy(aeadctx->key, key, keylen);
3090         aeadctx->enckey_len = keylen;
3091
3092         return 0;
3093 }
3094
3095 static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3096                                 const u8 *key,
3097                                 unsigned int keylen)
3098 {
3099         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3100         int error;
3101
3102         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3103         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3104                               CRYPTO_TFM_REQ_MASK);
3105         error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3106         crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3107         crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3108                               CRYPTO_TFM_RES_MASK);
3109         if (error)
3110                 return error;
3111         return chcr_ccm_common_setkey(aead, key, keylen);
3112 }
3113
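/*
 * RFC 4309 key blobs carry a 3-byte salt appended to the AES key, e.g.
 * a 19-byte blob = 16-byte AES-128 key + 3-byte salt. The salt is
 * peeled off and saved; only the AES key reaches the key context.
 */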
3114 static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3115                                     unsigned int keylen)
3116 {
3117         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3118         int error;
3119
3120         if (keylen < 3) {
3121                 crypto_tfm_set_flags((struct crypto_tfm *)aead,
3122                                      CRYPTO_TFM_RES_BAD_KEY_LEN);
3123                 aeadctx->enckey_len = 0;
3124                 return  -EINVAL;
3125         }
3126         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3127         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3128                               CRYPTO_TFM_REQ_MASK);
3129         error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3130         crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3131         crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3132                               CRYPTO_TFM_RES_MASK);
3133         if (error)
3134                 return error;
3135         keylen -= 3;
3136         memcpy(aeadctx->salt, key + keylen, 3);
3137         return chcr_ccm_common_setkey(aead, key, keylen);
3138 }
3139
3140 static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3141                            unsigned int keylen)
3142 {
3143         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3144         struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3145         struct crypto_cipher *cipher;
3146         unsigned int ck_size;
3147         int ret = 0, key_ctx_size = 0;
3148
3149         aeadctx->enckey_len = 0;
3150         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3151         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3152                               & CRYPTO_TFM_REQ_MASK);
3153         ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3154         crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3155         crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3156                               CRYPTO_TFM_RES_MASK);
3157         if (ret)
3158                 goto out;
3159
3160         if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3161             keylen > 3) {
3162                 keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
3163                 memcpy(aeadctx->salt, key + keylen, 4);
3164         }
3165         if (keylen == AES_KEYSIZE_128) {
3166                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3167         } else if (keylen == AES_KEYSIZE_192) {
3168                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3169         } else if (keylen == AES_KEYSIZE_256) {
3170                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3171         } else {
3172                 crypto_tfm_set_flags((struct crypto_tfm *)aead,
3173                                      CRYPTO_TFM_RES_BAD_KEY_LEN);
3174                 pr_err("GCM: Invalid key length %u\n", keylen);
3175                 ret = -EINVAL;
3176                 goto out;
3177         }
3178
3179         memcpy(aeadctx->key, key, keylen);
3180         aeadctx->enckey_len = keylen;
3181         key_ctx_size = sizeof(struct _key_ctx) +
3182                 ((DIV_ROUND_UP(keylen, 16)) << 4) +
3183                 AEAD_H_SIZE;
3184         aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3185                                                 CHCR_KEYCTX_MAC_KEY_SIZE_128,
3186                                                 0, 0,
3187                                                 key_ctx_size >> 4);
3188         /* Calculate H = CIPH(K, 0 repeated 16 times); it goes into the
3189          * key context right after the cipher key.
3190          */
3191         cipher = crypto_alloc_cipher("aes-generic", 0, 0);
3192         if (IS_ERR(cipher)) {
3193                 aeadctx->enckey_len = 0;
3194                 ret = -ENOMEM;
3195                 goto out;
3196         }
3197
3198         ret = crypto_cipher_setkey(cipher, key, keylen);
3199         if (ret) {
3200                 aeadctx->enckey_len = 0;
3201                 goto out1;
3202         }
3203         memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3204         crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);
3205
3206 out1:
3207         crypto_free_cipher(cipher);
3208 out:
3209         return ret;
3210 }
3211
3212 static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3213                                    unsigned int keylen)
3214 {
3215         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3216         struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3217         /* the key blob contains both the auth and cipher keys */
3218         struct crypto_authenc_keys keys;
3219         unsigned int bs, subtype;
3220         unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3221         int err = 0, i, key_ctx_len = 0;
3222         unsigned char ck_size = 0;
3223         unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
3224         struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
3225         struct algo_param param;
3226         int align;
3227         u8 *o_ptr = NULL;
3228
3229         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3230         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3231                               & CRYPTO_TFM_REQ_MASK);
3232         err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3233         crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3234         crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3235                               & CRYPTO_TFM_RES_MASK);
3236         if (err)
3237                 goto out;
3238
3239         if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3240                 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3241                 goto out;
3242         }
3243
3244         if (get_alg_config(&param, max_authsize)) {
3245                 pr_err("chcr : Unsupported digest size\n");
3246                 goto out;
3247         }
3248         subtype = get_aead_subtype(authenc);
3249         if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3250                 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3251                 if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3252                         goto out;
3253                 memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3254                 - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3255                 keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3256         }
3257         if (keys.enckeylen == AES_KEYSIZE_128) {
3258                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3259         } else if (keys.enckeylen == AES_KEYSIZE_192) {
3260                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3261         } else if (keys.enckeylen == AES_KEYSIZE_256) {
3262                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3263         } else {
3264                 pr_err("chcr : Unsupported cipher key\n");
3265                 goto out;
3266         }
3267
3268         /* Copy only the encryption key. The authkey is used just to
3269          * generate h(ipad) and h(opad), so it is not needed again.
3270          * authkeylen equals the hash digest size.
3271          */
3272         memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3273         aeadctx->enckey_len = keys.enckeylen;
3274         if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3275                 subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3276
3277                 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3278                             aeadctx->enckey_len << 3);
3279         }
3280         base_hash  = chcr_alloc_shash(max_authsize);
3281         if (IS_ERR(base_hash)) {
3282                 pr_err("chcr : Base driver cannot be loaded\n");
3283                 aeadctx->enckey_len = 0;
3284                 return -EINVAL;
3285         }
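        /*
         * Precompute the HMAC partial hashes (RFC 2104): hash one block
         * of (key XOR ipad) and one of (key XOR opad) and keep the
         * intermediate states, so the hardware only continues the hash
         * over the actual message. IPAD_DATA/OPAD_DATA are taken here
         * to be the word-replicated 0x36/0x5c HMAC pad bytes.
         */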
3286         {
3287                 SHASH_DESC_ON_STACK(shash, base_hash);
3288                 shash->tfm = base_hash;
3289                 shash->flags = crypto_shash_get_flags(base_hash);
3290                 bs = crypto_shash_blocksize(base_hash);
3291                 align = KEYCTX_ALIGN_PAD(max_authsize);
3292                 o_ptr =  actx->h_iopad + param.result_size + align;
3293
3294                 if (keys.authkeylen > bs) {
3295                         err = crypto_shash_digest(shash, keys.authkey,
3296                                                   keys.authkeylen,
3297                                                   o_ptr);
3298                         if (err) {
3299                                 pr_err("chcr : Hashing of authkey failed\n");
3300                                 goto out;
3301                         }
3302                         keys.authkeylen = max_authsize;
3303                 } else
3304                         memcpy(o_ptr, keys.authkey, keys.authkeylen);
3305
3306                 /* Compute the ipad-digest*/
3307                 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3308                 memcpy(pad, o_ptr, keys.authkeylen);
3309                 for (i = 0; i < bs >> 2; i++)
3310                         *((unsigned int *)pad + i) ^= IPAD_DATA;
3311
3312                 if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3313                                               max_authsize))
3314                         goto out;
3315                 /* Compute the opad-digest */
3316                 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3317                 memcpy(pad, o_ptr, keys.authkeylen);
3318                 for (i = 0; i < bs >> 2; i++)
3319                         *((unsigned int *)pad + i) ^= OPAD_DATA;
3320
3321                 if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3322                         goto out;
3323
3324                 /* convert the ipad and opad digest to network order */
3325                 chcr_change_order(actx->h_iopad, param.result_size);
3326                 chcr_change_order(o_ptr, param.result_size);
3327                 key_ctx_len = sizeof(struct _key_ctx) +
3328                         ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4) +
3329                         (param.result_size + align) * 2;
3330                 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3331                                                 0, 1, key_ctx_len >> 4);
3332                 actx->auth_mode = param.auth_mode;
3333                 chcr_free_shash(base_hash);
3334
3335                 return 0;
3336         }
3337 out:
3338         aeadctx->enckey_len = 0;
3339         if (!IS_ERR(base_hash))
3340                 chcr_free_shash(base_hash);
3341         return -EINVAL;
3342 }
3343
3344 static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3345                                         const u8 *key, unsigned int keylen)
3346 {
3347         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3348         struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3349         /* the key blob contains both the auth and cipher keys */
3350         struct crypto_authenc_keys keys;
3351         int err;
3352         unsigned int subtype;
3353         int key_ctx_len = 0;
3354         unsigned char ck_size = 0;
3355
3356         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3357         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3358                               & CRYPTO_TFM_REQ_MASK);
3359         err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3360         crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3361         crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3362                               & CRYPTO_TFM_RES_MASK);
3363         if (err)
3364                 goto out;
3365
3366         if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3367                 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3368                 goto out;
3369         }
3370         subtype = get_aead_subtype(authenc);
3371         if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3372             subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3373                 if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3374                         goto out;
3375                 memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3376                         - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3377                 keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3378         }
3379         if (keys.enckeylen == AES_KEYSIZE_128) {
3380                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3381         } else if (keys.enckeylen == AES_KEYSIZE_192) {
3382                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3383         } else if (keys.enckeylen == AES_KEYSIZE_256) {
3384                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3385         } else {
3386                 pr_err("chcr : Unsupported cipher key %u\n", keys.enckeylen);
3387                 goto out;
3388         }
3389         memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3390         aeadctx->enckey_len = keys.enckeylen;
3391         if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3392             subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3393                 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3394                                 aeadctx->enckey_len << 3);
3395         }
3396         key_ctx_len =  sizeof(struct _key_ctx)
3397                 + ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4);
3398
3399         aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3400                                                 0, key_ctx_len >> 4);
3401         actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3402         return 0;
3403 out:
3404         aeadctx->enckey_len = 0;
3405         return -EINVAL;
3406 }
3407
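/*
 * Common AEAD submission path: check that a device is bound, respect
 * queue backpressure (honouring CRYPTO_TFM_REQ_MAY_BACKLOG), build the
 * work request with the supplied constructor and send it to hardware.
 * Completion is asynchronous, so -EINPROGRESS is the success return.
 */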
3408 static int chcr_aead_op(struct aead_request *req,
3409                         unsigned short op_type,
3410                         int size,
3411                         create_wr_t create_wr_fn)
3412 {
3413         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3414         struct uld_ctx *u_ctx;
3415         struct sk_buff *skb;
3416
3417         if (!a_ctx(tfm)->dev) {
3418                 pr_err("chcr : %s : No crypto device.\n", __func__);
3419                 return -ENXIO;
3420         }
3421         u_ctx = ULD_CTX(a_ctx(tfm));
3422         if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
3423                                    a_ctx(tfm)->tx_qidx)) {
3424                 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3425                         return -EBUSY;
3426         }
3427
3428         /* Form a WR from req */
3429         skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size,
3430                            op_type);
3431
3432         if (IS_ERR_OR_NULL(skb))
3433                 return skb ? PTR_ERR(skb) : -ENOMEM;
3434
3435         skb->dev = u_ctx->lldi.ports[0];
3436         set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
3437         chcr_send_wr(skb);
3438         return -EINPROGRESS;
3439 }
3440
3441 static int chcr_aead_encrypt(struct aead_request *req)
3442 {
3443         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3444         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3445
3446         reqctx->verify = VERIFY_HW;
3447
3448         switch (get_aead_subtype(tfm)) {
3449         case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3450         case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3451         case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3452         case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3453                 return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
3454                                     create_authenc_wr);
3455         case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3456         case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3457                 return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
3458                                     create_aead_ccm_wr);
3459         default:
3460                 return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
3461                                     create_gcm_wr);
3462         }
3463 }
3464
3465 static int chcr_aead_decrypt(struct aead_request *req)
3466 {
3467         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3468         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3469         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3470         int size;
3471
3472         if (aeadctx->mayverify == VERIFY_SW) {
3473                 size = crypto_aead_maxauthsize(tfm);
3474                 reqctx->verify = VERIFY_SW;
3475         } else {
3476                 size = 0;
3477                 reqctx->verify = VERIFY_HW;
3478         }
3479
3480         switch (get_aead_subtype(tfm)) {
3481         case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3482         case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3483         case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3484         case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3485                 return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
3486                                     create_authenc_wr);
3487         case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3488         case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3489                 return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
3490                                     create_aead_ccm_wr);
3491         default:
3492                 return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
3493                                     create_gcm_wr);
3494         }
3495 }
3496
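/*
 * Template table mapping crypto-API algorithm names to the chcr
 * implementations above; cra_priority lets these entries take
 * precedence over the generic software algorithms once registered,
 * and is_registered tracks state for clean teardown.
 */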
3497 static struct chcr_alg_template driver_algs[] = {
3498         /* AES-CBC */
3499         {
3500                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3501                 .is_registered = 0,
3502                 .alg.crypto = {
3503                         .cra_name               = "cbc(aes)",
3504                         .cra_driver_name        = "cbc-aes-chcr",
3505                         .cra_blocksize          = AES_BLOCK_SIZE,
3506                         .cra_init               = chcr_cra_init,
3507                         .cra_exit               = chcr_cra_exit,
3508                         .cra_u.ablkcipher       = {
3509                                 .min_keysize    = AES_MIN_KEY_SIZE,
3510                                 .max_keysize    = AES_MAX_KEY_SIZE,
3511                                 .ivsize         = AES_BLOCK_SIZE,
3512                                 .setkey         = chcr_aes_cbc_setkey,
3513                                 .encrypt        = chcr_aes_encrypt,
3514                                 .decrypt        = chcr_aes_decrypt,
3515                         }
3516                 }
3517         },
3518         {
3519                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3520                 .is_registered = 0,
3521                 .alg.crypto =   {
3522                         .cra_name               = "xts(aes)",
3523                         .cra_driver_name        = "xts-aes-chcr",
3524                         .cra_blocksize          = AES_BLOCK_SIZE,
3525                         .cra_init               = chcr_cra_init,
3526                         .cra_exit               = NULL,
3527                         .cra_u.ablkcipher       = {
3528                                 .min_keysize    = 2 * AES_MIN_KEY_SIZE,
3529                                 .max_keysize    = 2 * AES_MAX_KEY_SIZE,
3530                                 .ivsize         = AES_BLOCK_SIZE,
3531                                 .setkey         = chcr_aes_xts_setkey,
3532                                 .encrypt        = chcr_aes_encrypt,
3533                                 .decrypt        = chcr_aes_decrypt,
3534                         }
3535                 }
3536         },
3537         {
3538                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3539                 .is_registered = 0,
3540                 .alg.crypto = {
3541                         .cra_name               = "ctr(aes)",
3542                         .cra_driver_name        = "ctr-aes-chcr",
3543                         .cra_blocksize          = 1,
3544                         .cra_init               = chcr_cra_init,
3545                         .cra_exit               = chcr_cra_exit,
3546                         .cra_u.ablkcipher       = {
3547                                 .min_keysize    = AES_MIN_KEY_SIZE,
3548                                 .max_keysize    = AES_MAX_KEY_SIZE,
3549                                 .ivsize         = AES_BLOCK_SIZE,
3550                                 .setkey         = chcr_aes_ctr_setkey,
3551                                 .encrypt        = chcr_aes_encrypt,
3552                                 .decrypt        = chcr_aes_decrypt,
3553                         }
3554                 }
3555         },
3556         {
3557                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER |
3558                         CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
3559                 .is_registered = 0,
3560                 .alg.crypto = {
3561                         .cra_name               = "rfc3686(ctr(aes))",
3562                         .cra_driver_name        = "rfc3686-ctr-aes-chcr",
3563                         .cra_blocksize          = 1,
3564                         .cra_init               = chcr_rfc3686_init,
3565                         .cra_exit               = chcr_cra_exit,
3566                         .cra_u.ablkcipher       = {
3567                                 .min_keysize    = AES_MIN_KEY_SIZE +
3568                                         CTR_RFC3686_NONCE_SIZE,
3569                                 .max_keysize    = AES_MAX_KEY_SIZE +
3570                                         CTR_RFC3686_NONCE_SIZE,
3571                                 .ivsize         = CTR_RFC3686_IV_SIZE,
3572                                 .setkey         = chcr_aes_rfc3686_setkey,
3573                                 .encrypt        = chcr_aes_encrypt,
3574                                 .decrypt        = chcr_aes_decrypt,
3575                                 .geniv          = "seqiv",
3576                         }
3577                 }
3578         },
3579         /* SHA */
3580         {
3581                 .type = CRYPTO_ALG_TYPE_AHASH,
3582                 .is_registered = 0,
3583                 .alg.hash = {
3584                         .halg.digestsize = SHA1_DIGEST_SIZE,
3585                         .halg.base = {
3586                                 .cra_name = "sha1",
3587                                 .cra_driver_name = "sha1-chcr",
3588                                 .cra_blocksize = SHA1_BLOCK_SIZE,
3589                         }
3590                 }
3591         },
3592         {
3593                 .type = CRYPTO_ALG_TYPE_AHASH,
3594                 .is_registered = 0,
3595                 .alg.hash = {
3596                         .halg.digestsize = SHA256_DIGEST_SIZE,
3597                         .halg.base = {
3598                                 .cra_name = "sha256",
3599                                 .cra_driver_name = "sha256-chcr",
3600                                 .cra_blocksize = SHA256_BLOCK_SIZE,
3601                         }
3602                 }
3603         },
3604         {
3605                 .type = CRYPTO_ALG_TYPE_AHASH,
3606                 .is_registered = 0,
3607                 .alg.hash = {
3608                         .halg.digestsize = SHA224_DIGEST_SIZE,
3609                         .halg.base = {
3610                                 .cra_name = "sha224",
3611                                 .cra_driver_name = "sha224-chcr",
3612                                 .cra_blocksize = SHA224_BLOCK_SIZE,
3613                         }
3614                 }
3615         },
3616         {
3617                 .type = CRYPTO_ALG_TYPE_AHASH,
3618                 .is_registered = 0,
3619                 .alg.hash = {
3620                         .halg.digestsize = SHA384_DIGEST_SIZE,
3621                         .halg.base = {
3622                                 .cra_name = "sha384",
3623                                 .cra_driver_name = "sha384-chcr",
3624                                 .cra_blocksize = SHA384_BLOCK_SIZE,
3625                         }
3626                 }
3627         },
3628         {
3629                 .type = CRYPTO_ALG_TYPE_AHASH,
3630                 .is_registered = 0,
3631                 .alg.hash = {
3632                         .halg.digestsize = SHA512_DIGEST_SIZE,
3633                         .halg.base = {
3634                                 .cra_name = "sha512",
3635                                 .cra_driver_name = "sha512-chcr",
3636                                 .cra_blocksize = SHA512_BLOCK_SIZE,
3637                         }
3638                 }
3639         },
3640         /* HMAC */
3641         {
3642                 .type = CRYPTO_ALG_TYPE_HMAC,
3643                 .is_registered = 0,
3644                 .alg.hash = {
3645                         .halg.digestsize = SHA1_DIGEST_SIZE,
3646                         .halg.base = {
3647                                 .cra_name = "hmac(sha1)",
3648                                 .cra_driver_name = "hmac-sha1-chcr",
3649                                 .cra_blocksize = SHA1_BLOCK_SIZE,
3650                         }
3651                 }
3652         },
3653         {
3654                 .type = CRYPTO_ALG_TYPE_HMAC,
3655                 .is_registered = 0,
3656                 .alg.hash = {
3657                         .halg.digestsize = SHA224_DIGEST_SIZE,
3658                         .halg.base = {
3659                                 .cra_name = "hmac(sha224)",
3660                                 .cra_driver_name = "hmac-sha224-chcr",
3661                                 .cra_blocksize = SHA224_BLOCK_SIZE,
3662                         }
3663                 }
3664         },
3665         {
3666                 .type = CRYPTO_ALG_TYPE_HMAC,
3667                 .is_registered = 0,
3668                 .alg.hash = {
3669                         .halg.digestsize = SHA256_DIGEST_SIZE,
3670                         .halg.base = {
3671                                 .cra_name = "hmac(sha256)",
3672                                 .cra_driver_name = "hmac-sha256-chcr",
3673                                 .cra_blocksize = SHA256_BLOCK_SIZE,
3674                         }
3675                 }
3676         },
3677         {
3678                 .type = CRYPTO_ALG_TYPE_HMAC,
3679                 .is_registered = 0,
3680                 .alg.hash = {
3681                         .halg.digestsize = SHA384_DIGEST_SIZE,
3682                         .halg.base = {
3683                                 .cra_name = "hmac(sha384)",
3684                                 .cra_driver_name = "hmac-sha384-chcr",
3685                                 .cra_blocksize = SHA384_BLOCK_SIZE,
3686                         }
3687                 }
3688         },
3689         {
3690                 .type = CRYPTO_ALG_TYPE_HMAC,
3691                 .is_registered = 0,
3692                 .alg.hash = {
3693                         .halg.digestsize = SHA512_DIGEST_SIZE,
3694                         .halg.base = {
3695                                 .cra_name = "hmac(sha512)",
3696                                 .cra_driver_name = "hmac-sha512-chcr",
3697                                 .cra_blocksize = SHA512_BLOCK_SIZE,
3698                         }
3699                 }
3700         },
3701         /* Add AEAD Algorithms */
3702         {
3703                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
3704                 .is_registered = 0,
3705                 .alg.aead = {
3706                         .base = {
3707                                 .cra_name = "gcm(aes)",
3708                                 .cra_driver_name = "gcm-aes-chcr",
3709                                 .cra_blocksize  = 1,
3710                                 .cra_priority = CHCR_AEAD_PRIORITY,
3711                                 .cra_ctxsize =  sizeof(struct chcr_context) +
3712                                                 sizeof(struct chcr_aead_ctx) +
3713                                                 sizeof(struct chcr_gcm_ctx),
3714                         },
3715                         .ivsize = GCM_AES_IV_SIZE,
3716                         .maxauthsize = GHASH_DIGEST_SIZE,
3717                         .setkey = chcr_gcm_setkey,
3718                         .setauthsize = chcr_gcm_setauthsize,
3719                 }
3720         },
3721         {
3722                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
3723                 .is_registered = 0,
3724                 .alg.aead = {
3725                         .base = {
3726                                 .cra_name = "rfc4106(gcm(aes))",
3727                                 .cra_driver_name = "rfc4106-gcm-aes-chcr",
3728                                 .cra_blocksize   = 1,
3729                                 .cra_priority = CHCR_AEAD_PRIORITY + 1,
3730                                 .cra_ctxsize =  sizeof(struct chcr_context) +
3731                                                 sizeof(struct chcr_aead_ctx) +
3732                                                 sizeof(struct chcr_gcm_ctx),
3733
3734                         },
3735                         .ivsize = GCM_RFC4106_IV_SIZE,
3736                         .maxauthsize    = GHASH_DIGEST_SIZE,
3737                         .setkey = chcr_gcm_setkey,
3738                         .setauthsize    = chcr_4106_4309_setauthsize,
3739                 }
3740         },
3741         {
3742                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
3743                 .is_registered = 0,
3744                 .alg.aead = {
3745                         .base = {
3746                                 .cra_name = "ccm(aes)",
3747                                 .cra_driver_name = "ccm-aes-chcr",
3748                                 .cra_blocksize   = 1,
3749                                 .cra_priority = CHCR_AEAD_PRIORITY,
3750                                 .cra_ctxsize =  sizeof(struct chcr_context) +
3751                                                 sizeof(struct chcr_aead_ctx),
3752
3753                         },
3754                         .ivsize = AES_BLOCK_SIZE,
3755                         .maxauthsize    = GHASH_DIGEST_SIZE,
3756                         .setkey = chcr_aead_ccm_setkey,
3757                         .setauthsize    = chcr_ccm_setauthsize,
3758                 }
3759         },
3760         {
3761                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
3762                 .is_registered = 0,
3763                 .alg.aead = {
3764                         .base = {
3765                                 .cra_name = "rfc4309(ccm(aes))",
3766                                 .cra_driver_name = "rfc4309-ccm-aes-chcr",
3767                                 .cra_blocksize   = 1,
3768                                 .cra_priority = CHCR_AEAD_PRIORITY + 1,
3769                                 .cra_ctxsize =  sizeof(struct chcr_context) +
3770                                                 sizeof(struct chcr_aead_ctx),
3771
3772                         },
3773                         .ivsize = 8,
3774                         .maxauthsize    = GHASH_DIGEST_SIZE,
3775                         .setkey = chcr_aead_rfc4309_setkey,
3776                         .setauthsize = chcr_4106_4309_setauthsize,
3777                 }
3778         },
3779         {
3780                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
3781                 .is_registered = 0,
3782                 .alg.aead = {
3783                         .base = {
3784                                 .cra_name = "authenc(hmac(sha1),cbc(aes))",
3785                                 .cra_driver_name =
3786                                         "authenc-hmac-sha1-cbc-aes-chcr",
3787                                 .cra_blocksize   = AES_BLOCK_SIZE,
3788                                 .cra_priority = CHCR_AEAD_PRIORITY,
3789                                 .cra_ctxsize =  sizeof(struct chcr_context) +
3790                                                 sizeof(struct chcr_aead_ctx) +
3791                                                 sizeof(struct chcr_authenc_ctx),
3792
3793                         },
3794                         .ivsize = AES_BLOCK_SIZE,
3795                         .maxauthsize = SHA1_DIGEST_SIZE,
3796                         .setkey = chcr_authenc_setkey,
3797                         .setauthsize = chcr_authenc_setauthsize,
3798                 }
3799         },
3800         {
3801                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
3802                 .is_registered = 0,
3803                 .alg.aead = {
3804                         .base = {
3805
3806                                 .cra_name = "authenc(hmac(sha256),cbc(aes))",
3807                                 .cra_driver_name =
3808                                         "authenc-hmac-sha256-cbc-aes-chcr",
3809                                 .cra_blocksize   = AES_BLOCK_SIZE,
3810                                 .cra_priority = CHCR_AEAD_PRIORITY,
3811                                 .cra_ctxsize =  sizeof(struct chcr_context) +
3812                                                 sizeof(struct chcr_aead_ctx) +
3813                                                 sizeof(struct chcr_authenc_ctx),
3814
3815                         },
3816                         .ivsize = AES_BLOCK_SIZE,
3817                         .maxauthsize    = SHA256_DIGEST_SIZE,
3818                         .setkey = chcr_authenc_setkey,
3819                         .setauthsize = chcr_authenc_setauthsize,
3820                 }
3821         },
3822         {
3823                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
3824                 .is_registered = 0,
3825                 .alg.aead = {
3826                         .base = {
3827                                 .cra_name = "authenc(hmac(sha224),cbc(aes))",
3828                                 .cra_driver_name =
3829                                         "authenc-hmac-sha224-cbc-aes-chcr",
3830                                 .cra_blocksize   = AES_BLOCK_SIZE,
3831                                 .cra_priority = CHCR_AEAD_PRIORITY,
3832                                 .cra_ctxsize =  sizeof(struct chcr_context) +
3833                                                 sizeof(struct chcr_aead_ctx) +
3834                                                 sizeof(struct chcr_authenc_ctx),
3835                         },
3836                         .ivsize = AES_BLOCK_SIZE,
3837                         .maxauthsize = SHA224_DIGEST_SIZE,
3838                         .setkey = chcr_authenc_setkey,
3839                         .setauthsize = chcr_authenc_setauthsize,
3840                 }
3841         },
3842         {
3843                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
3844                 .is_registered = 0,
3845                 .alg.aead = {
3846                         .base = {
3847                                 .cra_name = "authenc(hmac(sha384),cbc(aes))",
3848                                 .cra_driver_name =
3849                                         "authenc-hmac-sha384-cbc-aes-chcr",
3850                                 .cra_blocksize   = AES_BLOCK_SIZE,
3851                                 .cra_priority = CHCR_AEAD_PRIORITY,
3852                                 .cra_ctxsize =  sizeof(struct chcr_context) +
3853                                                 sizeof(struct chcr_aead_ctx) +
3854                                                 sizeof(struct chcr_authenc_ctx),
3855
3856                         },
3857                         .ivsize = AES_BLOCK_SIZE,
3858                         .maxauthsize = SHA384_DIGEST_SIZE,
3859                         .setkey = chcr_authenc_setkey,
3860                         .setauthsize = chcr_authenc_setauthsize,
3861                 }
3862         },
3863         {
3864                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
3865                 .is_registered = 0,
3866                 .alg.aead = {
3867                         .base = {
3868                                 .cra_name = "authenc(hmac(sha512),cbc(aes))",
3869                                 .cra_driver_name =
3870                                         "authenc-hmac-sha512-cbc-aes-chcr",
3871                                 .cra_blocksize   = AES_BLOCK_SIZE,
3872                                 .cra_priority = CHCR_AEAD_PRIORITY,
3873                                 .cra_ctxsize =  sizeof(struct chcr_context) +
3874                                                 sizeof(struct chcr_aead_ctx) +
3875                                                 sizeof(struct chcr_authenc_ctx),
3876
3877                         },
3878                         .ivsize = AES_BLOCK_SIZE,
3879                         .maxauthsize = SHA512_DIGEST_SIZE,
3880                         .setkey = chcr_authenc_setkey,
3881                         .setauthsize = chcr_authenc_setauthsize,
3882                 }
3883         },
3884         {
3885                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
3886                 .is_registered = 0,
3887                 .alg.aead = {
3888                         .base = {
3889                                 .cra_name = "authenc(digest_null,cbc(aes))",
3890                                 .cra_driver_name =
3891                                         "authenc-digest_null-cbc-aes-chcr",
3892                                 .cra_blocksize   = AES_BLOCK_SIZE,
3893                                 .cra_priority = CHCR_AEAD_PRIORITY,
3894                                 .cra_ctxsize =  sizeof(struct chcr_context) +
3895                                                 sizeof(struct chcr_aead_ctx) +
3896                                                 sizeof(struct chcr_authenc_ctx),
3897
3898                         },
3899                         .ivsize  = AES_BLOCK_SIZE,
3900                         .maxauthsize = 0,
3901                         .setkey  = chcr_aead_digest_null_setkey,
3902                         .setauthsize = chcr_authenc_null_setauthsize,
3903                 }
3904         },
3905         {
3906                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
3907                 .is_registered = 0,
3908                 .alg.aead = {
3909                         .base = {
3910                                 .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
3911                                 .cra_driver_name =
3912                                 "authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
3913                                 .cra_blocksize   = 1,
3914                                 .cra_priority = CHCR_AEAD_PRIORITY,
3915                                 .cra_ctxsize =  sizeof(struct chcr_context) +
3916                                                 sizeof(struct chcr_aead_ctx) +
3917                                                 sizeof(struct chcr_authenc_ctx),
3918
3919                         },
3920                         .ivsize = CTR_RFC3686_IV_SIZE,
3921                         .maxauthsize = SHA1_DIGEST_SIZE,
3922                         .setkey = chcr_authenc_setkey,
3923                         .setauthsize = chcr_authenc_setauthsize,
3924                 }
3925         },
3926         {
3927                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
3928                 .is_registered = 0,
3929                 .alg.aead = {
3930                         .base = {
3931
3932                                 .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
3933                                 .cra_driver_name =
3934                                 "authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
3935                                 .cra_blocksize   = 1,
3936                                 .cra_priority = CHCR_AEAD_PRIORITY,
3937                                 .cra_ctxsize =  sizeof(struct chcr_context) +
3938                                                 sizeof(struct chcr_aead_ctx) +
3939                                                 sizeof(struct chcr_authenc_ctx),
3940
3941                         },
3942                         .ivsize = CTR_RFC3686_IV_SIZE,
3943                         .maxauthsize    = SHA256_DIGEST_SIZE,
3944                         .setkey = chcr_authenc_setkey,
3945                         .setauthsize = chcr_authenc_setauthsize,
3946                 }
3947         },
3948         {
3949                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
3950                 .is_registered = 0,
3951                 .alg.aead = {
3952                         .base = {
3953                                 .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
3954                                 .cra_driver_name =
3955                                 "authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
3956                                 .cra_blocksize   = 1,
3957                                 .cra_priority = CHCR_AEAD_PRIORITY,
3958                                 .cra_ctxsize =  sizeof(struct chcr_context) +
3959                                                 sizeof(struct chcr_aead_ctx) +
3960                                                 sizeof(struct chcr_authenc_ctx),
3961                         },
3962                         .ivsize = CTR_RFC3686_IV_SIZE,
3963                         .maxauthsize = SHA224_DIGEST_SIZE,
3964                         .setkey = chcr_authenc_setkey,
3965                         .setauthsize = chcr_authenc_setauthsize,
3966                 }
3967         },
3968         {
3969                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
3970                 .is_registered = 0,
3971                 .alg.aead = {
3972                         .base = {
3973                                 .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
3974                                 .cra_driver_name =
3975                                 "authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
3976                                 .cra_blocksize   = 1,
3977                                 .cra_priority = CHCR_AEAD_PRIORITY,
3978                                 .cra_ctxsize =  sizeof(struct chcr_context) +
3979                                                 sizeof(struct chcr_aead_ctx) +
3980                                                 sizeof(struct chcr_authenc_ctx),
3981
3982                         },
3983                         .ivsize = CTR_RFC3686_IV_SIZE,
3984                         .maxauthsize = SHA384_DIGEST_SIZE,
3985                         .setkey = chcr_authenc_setkey,
3986                         .setauthsize = chcr_authenc_setauthsize,
3987                 }
3988         },
3989         {
3990                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
3991                 .is_registered = 0,
3992                 .alg.aead = {
3993                         .base = {
3994                                 .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
3995                                 .cra_driver_name =
3996                                 "authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
3997                                 .cra_blocksize   = 1,
3998                                 .cra_priority = CHCR_AEAD_PRIORITY,
3999                                 .cra_ctxsize =  sizeof(struct chcr_context) +
4000                                                 sizeof(struct chcr_aead_ctx) +
4001                                                 sizeof(struct chcr_authenc_ctx),
4002
4003                         },
4004                         .ivsize = CTR_RFC3686_IV_SIZE,
4005                         .maxauthsize = SHA512_DIGEST_SIZE,
4006                         .setkey = chcr_authenc_setkey,
4007                         .setauthsize = chcr_authenc_setauthsize,
4008                 }
4009         },
4010         {
4011                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
4012                 .is_registered = 0,
4013                 .alg.aead = {
4014                         .base = {
4015                                 .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
4016                                 .cra_driver_name =
4017                                 "authenc-digest_null-rfc3686-ctr-aes-chcr",
4018                                 .cra_blocksize   = 1,
4019                                 .cra_priority = CHCR_AEAD_PRIORITY,
4020                                 .cra_ctxsize =  sizeof(struct chcr_context) +
4021                                                 sizeof(struct chcr_aead_ctx) +
4022                                                 sizeof(struct chcr_authenc_ctx),
4023
4024                         },
4025                         .ivsize  = CTR_RFC3686_IV_SIZE,
4026                         .maxauthsize = 0,
4027                         .setkey  = chcr_aead_digest_null_setkey,
4028                         .setauthsize = chcr_authenc_null_setauthsize,
4029                 }
4030         },
4031
4032 };
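/*
 * Usage note (illustrative only, not part of the driver): consumers
 * never call chcr directly. They request an algorithm from the crypto
 * API by cra_name, and the highest-priority registered implementation
 * (CHCR_AEAD_PRIORITY for the AEAD entries above) is selected. A
 * minimal sketch, assuming the chcr module is loaded:
 *
 *      #include <crypto/aead.h>
 *
 *      struct crypto_aead *tfm;
 *
 *      tfm = crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *      if (IS_ERR(tfm))
 *              return PTR_ERR(tfm);
 *      crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
 *      ...
 *      crypto_free_aead(tfm);
 *
 * Note that authenc keys are not raw key bytes: setkey expects an
 * rtattr-encoded blob carrying the encryption key length plus both
 * keys (see crypto_authenc_extractkeys(), which chcr_authenc_setkey
 * relies on to split them).
 */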
4033
4034 /*
4035  *      chcr_unregister_alg - Deregister crypto algorithms with the
4036  *      kernel framework.
4037  */
4038 static int chcr_unregister_alg(void)
4039 {
4040         int i;
4041
4042         for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4043                 switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4044                 case CRYPTO_ALG_TYPE_ABLKCIPHER:
4045                         if (driver_algs[i].is_registered)
4046                                 crypto_unregister_alg(
4047                                                 &driver_algs[i].alg.crypto);
4048                         break;
4049                 case CRYPTO_ALG_TYPE_AEAD:
4050                         if (driver_algs[i].is_registered)
4051                                 crypto_unregister_aead(
4052                                                 &driver_algs[i].alg.aead);
4053                         break;
4054                 case CRYPTO_ALG_TYPE_AHASH:
4055                         if (driver_algs[i].is_registered)
4056                                 crypto_unregister_ahash(
4057                                                 &driver_algs[i].alg.hash);
4058                         break;
4059                 }
4060                 driver_algs[i].is_registered = 0;
4061         }
4062         return 0;
4063 }
4064
4065 #define SZ_AHASH_CTX sizeof(struct chcr_context)
4066 #define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
4067 #define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
4068 #define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC)
4069
4070 /*
4071  *      chcr_register_alg - Register crypto algorithms with kernel framework.
4072  */
4073 static int chcr_register_alg(void)
4074 {
4075         struct crypto_alg ai;
4076         struct ahash_alg *a_hash;
4077         int err = 0, i;
4078         char *name = NULL;
4079
4080         for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4081                 if (driver_algs[i].is_registered)
4082                         continue;
4083                 switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4084                 case CRYPTO_ALG_TYPE_ABLKCIPHER:
4085                         driver_algs[i].alg.crypto.cra_priority =
4086                                 CHCR_CRA_PRIORITY;
4087                         driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
4088                         driver_algs[i].alg.crypto.cra_flags =
4089                                 CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
4090                                 CRYPTO_ALG_NEED_FALLBACK;
4091                         driver_algs[i].alg.crypto.cra_ctxsize =
4092                                 sizeof(struct chcr_context) +
4093                                 sizeof(struct ablk_ctx);
4094                         driver_algs[i].alg.crypto.cra_alignmask = 0;
4095                         driver_algs[i].alg.crypto.cra_type =
4096                                 &crypto_ablkcipher_type;
4097                         err = crypto_register_alg(&driver_algs[i].alg.crypto);
4098                         name = driver_algs[i].alg.crypto.cra_driver_name;
4099                         break;
4100                 case CRYPTO_ALG_TYPE_AEAD:
4101                         driver_algs[i].alg.aead.base.cra_flags =
4102                                 CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
4103                                 CRYPTO_ALG_NEED_FALLBACK;
4104                         driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
4105                         driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
4106                         driver_algs[i].alg.aead.init = chcr_aead_cra_init;
4107                         driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
4108                         driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
4109                         err = crypto_register_aead(&driver_algs[i].alg.aead);
4110                         name = driver_algs[i].alg.aead.base.cra_driver_name;
4111                         break;
4112                 case CRYPTO_ALG_TYPE_AHASH:
4113                         a_hash = &driver_algs[i].alg.hash;
4114                         a_hash->update = chcr_ahash_update;
4115                         a_hash->final = chcr_ahash_final;
4116                         a_hash->finup = chcr_ahash_finup;
4117                         a_hash->digest = chcr_ahash_digest;
4118                         a_hash->export = chcr_ahash_export;
4119                         a_hash->import = chcr_ahash_import;
4120                         a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
4121                         a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
4122                         a_hash->halg.base.cra_module = THIS_MODULE;
4123                         a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS;
4124                         a_hash->halg.base.cra_alignmask = 0;
4125                         a_hash->halg.base.cra_exit = NULL;
4126                         a_hash->halg.base.cra_type = &crypto_ahash_type;
4127
4128                         if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
4129                                 a_hash->halg.base.cra_init = chcr_hmac_cra_init;
4130                                 a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
4131                                 a_hash->init = chcr_hmac_init;
4132                                 a_hash->setkey = chcr_ahash_setkey;
4133                                 a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
4134                         } else {
4135                                 a_hash->init = chcr_sha_init;
4136                                 a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
4137                                 a_hash->halg.base.cra_init = chcr_sha_cra_init;
4138                         }
4139                         err = crypto_register_ahash(&driver_algs[i].alg.hash);
4140                         ai = driver_algs[i].alg.hash.halg.base;
4141                         name = ai.cra_driver_name;
4142                         break;
4143                 }
4144                 if (err) {
4145                         pr_err("%s : Algorithm registration failed\n",
4146                                name);
4147                         goto register_err;
4148                 } else {
4149                         driver_algs[i].is_registered = 1;
4150                 }
4151         }
4152         return 0;
4153
4154 register_err:
4155         chcr_unregister_alg();
4156         return err;
4157 }
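/*
 * Verification sketch (illustrative only): once chcr_register_alg()
 * returns 0, every cra_name in driver_algs resolves through the
 * generic crypto API. A hypothetical probe from another module:
 *
 *      #include <linux/crypto.h>
 *
 *      if (crypto_has_alg("authenc(hmac(sha1),cbc(aes))", 0, 0))
 *              pr_info("chcr-backed aead is available\n");
 *
 * From userspace, the registered driver names and priorities can be
 * inspected via /proc/crypto.
 */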
4158
4159 /*
4160  *      start_crypto - Register the crypto algorithms.
4161  *      This should be called once when the first device comes up. After
4162  *      this, the kernel will start calling driver APIs for crypto operations.
4163  */
4164 int start_crypto(void)
4165 {
4166         return chcr_register_alg();
4167 }
4168
4169 /*
4170  *      stop_crypto - Deregister all the crypto algorithms with the kernel.
4171  *      This should be called once when the last device goes down. After
4172  *      this, the kernel will not call the driver APIs for crypto operations.
4173  */
4174 int stop_crypto(void)
4175 {
4176         chcr_unregister_alg();
4177         return 0;
4178 }
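/*
 * Call-site sketch (hedged; the real bookkeeping lives in chcr_core.c
 * and may differ): the core is expected to bracket registration around
 * the first device coming up and the last one going down, e.g. with a
 * simple reference count. Names below are hypothetical:
 *
 *      static atomic_t chcr_dev_count = ATOMIC_INIT(0);
 *
 *      static void chcr_dev_up_sketch(void)
 *      {
 *              if (atomic_inc_return(&chcr_dev_count) == 1)
 *                      start_crypto();
 *      }
 *
 *      static void chcr_dev_down_sketch(void)
 *      {
 *              if (atomic_dec_and_test(&chcr_dev_count))
 *                      stop_crypto();
 *      }
 */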