/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 *      Manoj Malviya ([email protected])
 *      Atul Gupta ([email protected])
 *      Jitendra Lulla ([email protected])
 *      Yeshaswi M R Gowda ([email protected])
 *      Harsh Jain ([email protected])
 */

#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "t4fw_api.h"
#include "t4_msg.h"
#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

#define IV AES_BLOCK_SIZE

static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
        return ctx->crypto_ctx->aeadctx;
}

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
        return ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
        return ctx->crypto_ctx->hmacctx;
}

static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
{
        return gctx->ctx->gcm;
}

static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
{
        return gctx->ctx->authenc;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
        return ctx->dev->u_ctx;
}

static inline int is_ofld_imm(const struct sk_buff *skb)
{
        return (skb->len <= SGE_MAX_WR_LEN);
}

/*
 *      sgl_len - calculates the size of an SGL of the given capacity
 *      @n: the number of SGL entries
 *      Calculates the number of flits needed for a scatter/gather list that
 *      can hold the given number of entries.
 */
static inline unsigned int sgl_len(unsigned int n)
{
        n--;
        return (3 * n) / 2 + (n & 1) + 2;
}
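
/*
 * Worked example (arithmetic only, not a hardware constraint): for
 * n = 8 entries, sgl_len(8) = (3 * 7) / 2 + (7 & 1) + 2 = 13 flits,
 * and at 8 bytes per flit the SGL occupies 104 bytes of the WR.
 */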

static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
                         unsigned int entlen,
                         unsigned int skip)
{
        int nents = 0;
        unsigned int less;
        unsigned int skip_len = 0;

        while (sg && skip) {
                if (sg_dma_len(sg) <= skip) {
                        skip -= sg_dma_len(sg);
                        skip_len = 0;
                        sg = sg_next(sg);
                } else {
                        skip_len = skip;
                        skip = 0;
                }
        }

        while (sg && reqlen) {
                less = min(reqlen, sg_dma_len(sg) - skip_len);
                nents += DIV_ROUND_UP(less, entlen);
                reqlen -= less;
                skip_len = 0;
                sg = sg_next(sg);
        }
        return nents;
}
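
/*
 * sg_nents_xlen() counts the hardware SG entries needed to cover
 * @reqlen bytes of @sg after stepping over @skip bytes, with each
 * entry capped at @entlen. Illustrative example: a single 5KB DMA
 * segment with entlen = 2KB costs DIV_ROUND_UP(5K, 2K) = 3 entries.
 */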

static inline void chcr_handle_ahash_resp(struct ahash_request *req,
                                          unsigned char *input,
                                          int err)
{
        struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
        int digestsize, updated_digestsize;
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));

        if (input == NULL)
                goto out;
        reqctx = ahash_request_ctx(req);
        digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
        if (reqctx->is_sg_map)
                chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
        if (reqctx->dma_addr)
                dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->dma_addr,
                                 reqctx->dma_len, DMA_TO_DEVICE);
        reqctx->dma_addr = 0;
        updated_digestsize = digestsize;
        if (digestsize == SHA224_DIGEST_SIZE)
                updated_digestsize = SHA256_DIGEST_SIZE;
        else if (digestsize == SHA384_DIGEST_SIZE)
                updated_digestsize = SHA512_DIGEST_SIZE;
        if (reqctx->result == 1) {
                reqctx->result = 0;
                memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
                       digestsize);
        } else {
                memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
                       updated_digestsize);
        }
out:
        req->base.complete(&req->base, err);
}

static inline void chcr_handle_aead_resp(struct aead_request *req,
                                         unsigned char *input,
                                         int err)
{
        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));

        chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
        if (reqctx->b0_dma)
                dma_unmap_single(&u_ctx->lldi.pdev->dev, reqctx->b0_dma,
                                 reqctx->b0_len, DMA_BIDIRECTIONAL);
        if (reqctx->verify == VERIFY_SW) {
                chcr_verify_tag(req, input, &err);
                reqctx->verify = VERIFY_HW;
        }
        req->base.complete(&req->base, err);
}

static void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
        u8 temp[SHA512_DIGEST_SIZE];
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        int authsize = crypto_aead_authsize(tfm);
        struct cpl_fw6_pld *fw6_pld;
        int cmp = 0;

        fw6_pld = (struct cpl_fw6_pld *)input;
        if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
            (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
                cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
        } else {
                sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
                                   authsize, req->assoclen +
                                   req->cryptlen - authsize);
                cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
        }
        if (cmp)
                *err = -EBADMSG;
        else
                *err = 0;
}

/*
 *      chcr_handle_resp - Unmap the DMA buffers associated with the request
 *      @req: crypto request
 */
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
                     int err)
{
        struct crypto_tfm *tfm = req->tfm;
        struct chcr_context *ctx = crypto_tfm_ctx(tfm);
        struct adapter *adap = padap(ctx->dev);

        switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_AEAD:
                chcr_handle_aead_resp(aead_request_cast(req), input, err);
                break;

        case CRYPTO_ALG_TYPE_ABLKCIPHER:
                err = chcr_handle_cipher_resp(ablkcipher_request_cast(req),
                                              input, err);
                break;

        case CRYPTO_ALG_TYPE_AHASH:
                chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
        }
        atomic_inc(&adap->chcr_stats.complete);
        return err;
}

static void get_aes_decrypt_key(unsigned char *dec_key,
                                const unsigned char *key,
                                unsigned int keylength)
{
        u32 temp;
        u32 w_ring[MAX_NK];
        int i, j, k;
        u8 nr, nk;

        switch (keylength) {
        case AES_KEYLENGTH_128BIT:
                nk = KEYLENGTH_4BYTES;
                nr = NUMBER_OF_ROUNDS_10;
                break;
        case AES_KEYLENGTH_192BIT:
                nk = KEYLENGTH_6BYTES;
                nr = NUMBER_OF_ROUNDS_12;
                break;
        case AES_KEYLENGTH_256BIT:
                nk = KEYLENGTH_8BYTES;
                nr = NUMBER_OF_ROUNDS_14;
                break;
        default:
                return;
        }
        for (i = 0; i < nk; i++)
                w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);

        i = 0;
        temp = w_ring[nk - 1];
        while (i + nk < (nr + 1) * 4) {
                if (!(i % nk)) {
                        /* RotWord(temp) */
                        temp = (temp << 8) | (temp >> 24);
                        temp = aes_ks_subword(temp);
                        temp ^= round_constant[i / nk];
                } else if (nk == 8 && (i % 4 == 0)) {
                        temp = aes_ks_subword(temp);
                }
                w_ring[i % nk] ^= temp;
                temp = w_ring[i % nk];
                i++;
        }
        i--;
        for (k = 0, j = i % nk; k < nk; k++) {
                *((u32 *)dec_key + k) = htonl(w_ring[j]);
                j--;
                if (j < 0)
                        j += nk;
        }
}
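
/*
 * The loop above is the standard AES key expansion (FIPS-197): RotWord,
 * SubWord via aes_ks_subword() and the Rcon values in round_constant[]
 * (both assumed to come from chcr_algo.h). Only the last nk words of
 * the schedule are kept in w_ring[], and they are written out in
 * reverse so the hardware can start the decryption key schedule from
 * the final round key directly.
 */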

static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
        struct crypto_shash *base_hash = ERR_PTR(-EINVAL);

        switch (ds) {
        case SHA1_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha1", 0, 0);
                break;
        case SHA224_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha224", 0, 0);
                break;
        case SHA256_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha256", 0, 0);
                break;
        case SHA384_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha384", 0, 0);
                break;
        case SHA512_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha512", 0, 0);
                break;
        }

        return base_hash;
}

static int chcr_compute_partial_hash(struct shash_desc *desc,
                                     char *iopad, char *result_hash,
                                     int digest_size)
{
        struct sha1_state sha1_st;
        struct sha256_state sha256_st;
        struct sha512_state sha512_st;
        int error;

        if (digest_size == SHA1_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha1_st);
                memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
        } else if (digest_size == SHA224_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha256_st);
                memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
        } else if (digest_size == SHA256_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha256_st);
                memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);
        } else if (digest_size == SHA384_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha512_st);
                memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
        } else if (digest_size == SHA512_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha512_st);
                memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
        } else {
                error = -EINVAL;
                pr_err("Unknown digest size %d\n", digest_size);
        }
        return error;
}
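
/*
 * chcr_compute_partial_hash() is the usual HMAC precomputation trick:
 * hash exactly one block of ipad/opad and export the raw internal
 * state rather than a finished digest. SHA-224 and SHA-384 reuse the
 * SHA-256 and SHA-512 state structures, which is why the truncated
 * variants copy out the full-width state here and why
 * chcr_handle_ahash_resp() widens digestsize for partial results.
 */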

static void chcr_change_order(char *buf, int ds)
{
        int i;

        if (ds == SHA512_DIGEST_SIZE) {
                for (i = 0; i < (ds / sizeof(u64)); i++)
                        *((__be64 *)buf + i) =
                                cpu_to_be64(*((u64 *)buf + i));
        } else {
                for (i = 0; i < (ds / sizeof(u32)); i++)
                        *((__be32 *)buf + i) =
                                cpu_to_be32(*((u32 *)buf + i));
        }
}

static inline int is_hmac(struct crypto_tfm *tfm)
{
        struct crypto_alg *alg = tfm->__crt_alg;
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
                             alg.hash);

        if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
                return 1;
        return 0;
}

static inline void dsgl_walk_init(struct dsgl_walk *walk,
                                  struct cpl_rx_phys_dsgl *dsgl)
{
        walk->dsgl = dsgl;
        walk->nents = 0;
        walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}

static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
{
        struct cpl_rx_phys_dsgl *phys_cpl;

        phys_cpl = walk->dsgl;

        phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
                                    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
        phys_cpl->pcirlxorder_to_noofsgentr =
                htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
                      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
                      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
                      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
                      CPL_RX_PHYS_DSGL_DCAID_V(0) |
                      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
        phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
        phys_cpl->rss_hdr_int.qid = htons(qid);
        phys_cpl->rss_hdr_int.hash_val = 0;
}

static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
                                      size_t size,
                                      dma_addr_t *addr)
{
        int j;

        if (!size)
                return;
        j = walk->nents;
        walk->to->len[j % 8] = htons(size);
        walk->to->addr[j % 8] = cpu_to_be64(*addr);
        j++;
        if ((j % 8) == 0)
                walk->to++;
        walk->nents = j;
}
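
/*
 * Each phys_sge_pairs descriptor packs eight {len, addr} pairs; the
 * j % 8 indexing and the walk->to++ bump express exactly that: once a
 * descriptor is full, the walk moves to the next one laid out
 * immediately after it.
 */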

static void dsgl_walk_add_sg(struct dsgl_walk *walk,
                             struct scatterlist *sg,
                             unsigned int slen,
                             unsigned int skip)
{
        int skip_len = 0;
        unsigned int left_size = slen, len = 0;
        unsigned int j = walk->nents;
        int offset, ent_len;

        if (!slen)
                return;
        while (sg && skip) {
                if (sg_dma_len(sg) <= skip) {
                        skip -= sg_dma_len(sg);
                        skip_len = 0;
                        sg = sg_next(sg);
                } else {
                        skip_len = skip;
                        skip = 0;
                }
        }

        while (left_size && sg) {
                len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
                offset = 0;
                while (len) {
                        ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
                        walk->to->len[j % 8] = htons(ent_len);
                        walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
                                                            offset + skip_len);
                        offset += ent_len;
                        len -= ent_len;
                        j++;
                        if ((j % 8) == 0)
                                walk->to++;
                }
                walk->last_sg = sg;
                walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
                                          skip_len) + skip_len;
                left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
                skip_len = 0;
                sg = sg_next(sg);
        }
        walk->nents = j;
}

static inline void ulptx_walk_init(struct ulptx_walk *walk,
                                   struct ulptx_sgl *ulp)
{
        walk->sgl = ulp;
        walk->nents = 0;
        walk->pair_idx = 0;
        walk->pair = ulp->sge;
        walk->last_sg = NULL;
        walk->last_sg_len = 0;
}

static inline void ulptx_walk_end(struct ulptx_walk *walk)
{
        walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
                                    ULPTX_NSGE_V(walk->nents));
}

static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
                                       size_t size,
                                       dma_addr_t *addr)
{
        if (!size)
                return;

        if (walk->nents == 0) {
                walk->sgl->len0 = cpu_to_be32(size);
                walk->sgl->addr0 = cpu_to_be64(*addr);
        } else {
                walk->pair->addr[walk->pair_idx] = cpu_to_be64(*addr);
                walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
                walk->pair_idx = !walk->pair_idx;
                if (!walk->pair_idx)
                        walk->pair++;
        }
        walk->nents++;
}

static void ulptx_walk_add_sg(struct ulptx_walk *walk,
                              struct scatterlist *sg,
                              unsigned int len,
                              unsigned int skip)
{
        int small;
        int skip_len = 0;
        unsigned int sgmin;

        if (!len)
                return;

        while (sg && skip) {
                if (sg_dma_len(sg) <= skip) {
                        skip -= sg_dma_len(sg);
                        skip_len = 0;
                        sg = sg_next(sg);
                } else {
                        skip_len = skip;
                        skip = 0;
                }
        }
        if (walk->nents == 0) {
                small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
                sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
                walk->sgl->len0 = cpu_to_be32(sgmin);
                walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
                walk->nents++;
                len -= sgmin;
                walk->last_sg = sg;
                walk->last_sg_len = sgmin + skip_len;
                skip_len += sgmin;
                if (sg_dma_len(sg) == skip_len) {
                        sg = sg_next(sg);
                        skip_len = 0;
                }
        }

        while (sg && len) {
                small = min(sg_dma_len(sg) - skip_len, len);
                sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
                walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
                walk->pair->addr[walk->pair_idx] =
                        cpu_to_be64(sg_dma_address(sg) + skip_len);
                walk->pair_idx = !walk->pair_idx;
                walk->nents++;
                if (!walk->pair_idx)
                        walk->pair++;
                len -= sgmin;
                skip_len += sgmin;
                walk->last_sg = sg;
                walk->last_sg_len = skip_len;
                if (sg_dma_len(sg) == skip_len) {
                        sg = sg_next(sg);
                        skip_len = 0;
                }
        }
}
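
/*
 * The ULPTX SGL layout mirrors the walk: the first entry lives in the
 * inline len0/addr0 slot, every later entry goes into alternating
 * len/addr pair slots (pair_idx flips between 0 and 1), and each entry
 * is clamped to CHCR_SRC_SG_SIZE so a long DMA segment is split across
 * several hardware entries.
 */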

static inline int get_aead_subtype(struct crypto_aead *aead)
{
        struct aead_alg *alg = crypto_aead_alg(aead);
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(alg, struct chcr_alg_template, alg.aead);

        return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
{
        struct crypto_alg *alg = tfm->__crt_alg;
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(alg, struct chcr_alg_template, alg.crypto);

        return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}

static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
        struct adapter *adap = netdev2adap(dev);
        struct sge_uld_txq_info *txq_info =
                adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
        struct sge_uld_txq *txq;
        int ret = 0;

        local_bh_disable();
        txq = &txq_info->uldtxq[idx];
        spin_lock(&txq->sendq.lock);
        if (txq->full)
                ret = -1;
        spin_unlock(&txq->sendq.lock);
        local_bh_enable();
        return ret;
}

static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
                               struct _key_ctx *key_ctx)
{
        if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
                memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
        } else {
                memcpy(key_ctx->key,
                       ablkctx->key + (ablkctx->enckey_len >> 1),
                       ablkctx->enckey_len >> 1);
                memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
                       ablkctx->rrkey, ablkctx->enckey_len >> 1);
        }
        return 0;
}
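
/*
 * For CBC decryption the key context carries just the reverse round
 * key computed by get_aes_decrypt_key(). For the two-half key modes
 * (XTS-style, where ablkctx->key holds both halves) the second half of
 * the raw key goes first, followed by the reverse round key, which
 * appears to be the ordering the firmware key context expects (see
 * the key-context definitions in t4fw_api.h).
 */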

static int chcr_sg_ent_in_wr(struct scatterlist *src,
                             struct scatterlist *dst,
                             unsigned int minsg,
                             unsigned int space,
                             unsigned int srcskip,
                             unsigned int dstskip)
{
        int srclen = 0, dstlen = 0;
        int srcsg = minsg, dstsg = minsg;
        int offset = 0, less;

        if (sg_dma_len(src) == srcskip) {
                src = sg_next(src);
                srcskip = 0;
        }

        if (sg_dma_len(dst) == dstskip) {
                dst = sg_next(dst);
                dstskip = 0;
        }

        while (src && dst &&
               space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
                srclen += (sg_dma_len(src) - srcskip);
                srcsg++;
                offset = 0;
                while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
                       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
                        if (srclen <= dstlen)
                                break;
                        less = min_t(unsigned int, sg_dma_len(dst) - offset -
                                     dstskip, CHCR_DST_SG_SIZE);
                        dstlen += less;
                        offset += less;
                        if (offset == sg_dma_len(dst)) {
                                dst = sg_next(dst);
                                offset = 0;
                        }
                        dstsg++;
                        dstskip = 0;
                }
                src = sg_next(src);
                srcskip = 0;
        }
        return min(srclen, dstlen);
}
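
/*
 * chcr_sg_ent_in_wr() answers "how many payload bytes of this src/dst
 * pair fit in @space once per-entry SGL/DSGL overheads are charged?"
 * It advances both lists in step, topping up the dst byte count until
 * it catches src, and returns the smaller of the two totals so the
 * caller can size a work request both sides can describe.
 */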

static int chcr_cipher_fallback(struct crypto_skcipher *cipher,
                                u32 flags,
                                struct scatterlist *src,
                                struct scatterlist *dst,
                                unsigned int nbytes,
                                u8 *iv,
                                unsigned short op_type)
{
        int err;

        SKCIPHER_REQUEST_ON_STACK(subreq, cipher);
        skcipher_request_set_tfm(subreq, cipher);
        skcipher_request_set_callback(subreq, flags, NULL, NULL);
        skcipher_request_set_crypt(subreq, src, dst,
                                   nbytes, iv);

        err = op_type ? crypto_skcipher_decrypt(subreq) :
                crypto_skcipher_encrypt(subreq);
        skcipher_request_zero(subreq);

        return err;
}
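
/*
 * This is the standard fallback pattern for a hardware skcipher: build
 * a throwaway sub-request on the stack against the software cipher
 * allocated in chcr_cra_init(), run it synchronously, and wipe it with
 * skcipher_request_zero() so no request state lingers on the stack.
 * It is used whenever a request cannot be carved into work requests,
 * e.g. the bytes == 0 paths in process_cipher() and
 * chcr_handle_cipher_resp().
 */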

static inline void create_wreq(struct chcr_context *ctx,
                               struct chcr_wr *chcr_req,
                               struct crypto_async_request *req,
                               unsigned int imm,
                               int hash_sz,
                               unsigned int len16,
                               unsigned int sc_len,
                               unsigned int lcb)
{
        struct uld_ctx *u_ctx = ULD_CTX(ctx);
        int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];

        chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
        chcr_req->wreq.pld_size_hash_size =
                htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
        chcr_req->wreq.len16_pkd =
                htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
        chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
        chcr_req->wreq.rx_chid_to_rx_q_id =
                FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
                                !!lcb, ctx->tx_qidx);

        chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
                                                       qid);
        chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
                                     ((sizeof(chcr_req->wreq)) >> 4)));

        chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
        chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
                                           sizeof(chcr_req->key_ctx) + sc_len);
}

/**
 *      create_cipher_wr - form the WR for cipher operations
 *      @wrparam: WR parameters: the cipher request, the ingress qid where
 *                the response to this WR should be received, and the number
 *                of bytes covered by this WR.
 */
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct sk_buff *skb = NULL;
        struct chcr_wr *chcr_req;
        struct cpl_rx_phys_dsgl *phys_cpl;
        struct ulptx_sgl *ulptx;
        struct chcr_blkcipher_req_ctx *reqctx =
                ablkcipher_request_ctx(wrparam->req);
        unsigned int temp = 0, transhdr_len, dst_size;
        int error;
        int nents;
        unsigned int kctx_len;
        gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                        GFP_KERNEL : GFP_ATOMIC;
        struct adapter *adap = padap(c_ctx(tfm)->dev);

        nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
                              reqctx->dst_ofst);
        dst_size = get_space_for_phys_dsgl(nents + 1);
        kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
        transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
        nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
                              CHCR_SRC_SG_SIZE, reqctx->src_ofst);
        temp = reqctx->imm ? (DIV_ROUND_UP((IV + wrparam->req->nbytes), 16)
                              * 16) : (sgl_len(nents + MIN_CIPHER_SG) * 8);
        transhdr_len += temp;
        transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
        skb = alloc_skb(SGE_MAX_WR_LEN, flags);
        if (!skb) {
                error = -ENOMEM;
                goto err;
        }
        chcr_req = __skb_put_zero(skb, transhdr_len);
        chcr_req->sec_cpl.op_ivinsrtofst =
                FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->dev->rx_channel_id, 2, 1);

        chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
        chcr_req->sec_cpl.aadstart_cipherstop_hi =
                        FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);

        chcr_req->sec_cpl.cipherstop_lo_authinsert =
                        FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
        chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
                                                         ablkctx->ciph_mode,
                                                         0, 0, IV >> 1);
        chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
                                                          0, 0, dst_size);

        chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
        if ((reqctx->op == CHCR_DECRYPT_OP) &&
            (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
               CRYPTO_ALG_SUB_TYPE_CTR)) &&
            (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
               CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
                generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
        } else {
                if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
                    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
                        memcpy(chcr_req->key_ctx.key, ablkctx->key,
                               ablkctx->enckey_len);
                } else {
                        memcpy(chcr_req->key_ctx.key, ablkctx->key +
                               (ablkctx->enckey_len >> 1),
                               ablkctx->enckey_len >> 1);
                        memcpy(chcr_req->key_ctx.key +
                               (ablkctx->enckey_len >> 1),
                               ablkctx->key,
                               ablkctx->enckey_len >> 1);
                }
        }
        phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
        ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
        chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
        chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);

        atomic_inc(&adap->chcr_stats.cipher_rqst);
        temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len
                + (reqctx->imm ? (IV + wrparam->bytes) : 0);
        create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
                    transhdr_len, temp,
                    ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
        reqctx->skb = skb;
        return skb;
err:
        return ERR_PTR(error);
}

static inline int chcr_keyctx_ck_size(unsigned int keylen)
{
        int ck_size = 0;

        if (keylen == AES_KEYSIZE_128)
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
        else if (keylen == AES_KEYSIZE_192)
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
        else if (keylen == AES_KEYSIZE_256)
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
        else
                ck_size = 0;

        return ck_size;
}

static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
                                       const u8 *key,
                                       unsigned int keylen)
{
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        int err = 0;

        crypto_skcipher_clear_flags(ablkctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(ablkctx->sw_cipher, cipher->base.crt_flags &
                                  CRYPTO_TFM_REQ_MASK);
        err = crypto_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
        tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
        tfm->crt_flags |=
                crypto_skcipher_get_flags(ablkctx->sw_cipher) &
                CRYPTO_TFM_RES_MASK;
        return err;
}

static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
                               const u8 *key,
                               unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned int ck_size, context_size;
        u16 alignment = 0;
        int err;

        err = chcr_cipher_fallback_setkey(cipher, key, keylen);
        if (err)
                goto badkey_err;

        ck_size = chcr_keyctx_ck_size(keylen);
        alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
        memcpy(ablkctx->key, key, keylen);
        ablkctx->enckey_len = keylen;
        get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;

        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
                                                0, 0, context_size);
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
        return 0;
badkey_err:
        crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
        ablkctx->enckey_len = 0;
        return err;
}

static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
                               const u8 *key,
                               unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned int ck_size, context_size;
        u16 alignment = 0;
        int err;

        err = chcr_cipher_fallback_setkey(cipher, key, keylen);
        if (err)
                goto badkey_err;
        ck_size = chcr_keyctx_ck_size(keylen);
        alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
        memcpy(ablkctx->key, key, keylen);
        ablkctx->enckey_len = keylen;
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;

        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
                                                0, 0, context_size);
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

        return 0;
badkey_err:
        crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
        ablkctx->enckey_len = 0;
        return err;
}

static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
                                   const u8 *key,
                                   unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned int ck_size, context_size;
        u16 alignment = 0;
        int err;

        if (keylen < CTR_RFC3686_NONCE_SIZE)
                return -EINVAL;
        memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
               CTR_RFC3686_NONCE_SIZE);

        keylen -= CTR_RFC3686_NONCE_SIZE;
        err = chcr_cipher_fallback_setkey(cipher, key, keylen);
        if (err)
                goto badkey_err;

        ck_size = chcr_keyctx_ck_size(keylen);
        alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
        memcpy(ablkctx->key, key, keylen);
        ablkctx->enckey_len = keylen;
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;

        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
                                                0, 0, context_size);
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;

        return 0;
badkey_err:
        crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
        ablkctx->enckey_len = 0;
        return err;
}

static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
{
        unsigned int size = AES_BLOCK_SIZE;
        __be32 *b = (__be32 *)(dstiv + size);
        u32 c, prev;

        memcpy(dstiv, srciv, AES_BLOCK_SIZE);
        for (; size >= 4; size -= 4) {
                prev = be32_to_cpu(*--b);
                c = prev + add;
                *b = cpu_to_be32(c);
                if (prev < c)
                        break;
                add = 1;
        }
}
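
/*
 * ctr_add_iv() is a 128-bit big-endian add: @add goes into the least
 * significant 32-bit word, and the loop only keeps walking toward the
 * more significant words while the previous word wrapped (prev >= c
 * after the add). Example: adding 1 to a counter block ending in
 * ...00 ff ff ff ff gives ...01 00 00 00 00, the carry expressed by
 * add = 1 on the next iteration.
 */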

static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
{
        __be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
        u64 c;
        u32 temp = be32_to_cpu(*--b);

        temp = ~temp;
        c = (u64)temp + 1; /* number of blocks processable without overflow */
        if ((bytes / AES_BLOCK_SIZE) > c)
                bytes = c * AES_BLOCK_SIZE;
        return bytes;
}
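
/*
 * Only the low 32-bit word of the counter block is considered here:
 * the request is trimmed so that word cannot wrap within a single work
 * request. The remainder of the data is picked up again from
 * chcr_handle_cipher_resp() once chcr_update_cipher_iv() has advanced
 * the IV with full 128-bit carry handling.
 */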

static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        struct crypto_cipher *cipher;
        int ret, i;
        u8 *key;
        unsigned int keylen;
        int round = reqctx->last_req_len / AES_BLOCK_SIZE;
        int round8 = round / 8;

        cipher = ablkctx->aes_generic;
        memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);

        keylen = ablkctx->enckey_len / 2;
        key = ablkctx->key + keylen;
        ret = crypto_cipher_setkey(cipher, key, keylen);
        if (ret)
                goto out;
        /* H/W sends the encrypted IV in dsgl when AADIVDROP bit is 0 */
        for (i = 0; i < round8; i++)
                gf128mul_x8_ble((le128 *)iv, (le128 *)iv);

        for (i = 0; i < (round % 8); i++)
                gf128mul_x_ble((le128 *)iv, (le128 *)iv);

        crypto_cipher_decrypt_one(cipher, iv, iv);
out:
        return ret;
}
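
/*
 * XTS defines the tweak for block n as T * alpha^n over GF(2^128), so
 * catching the tweak up after @round processed blocks is repeated
 * doubling: gf128mul_x8_ble() advances eight doublings at a time and
 * gf128mul_x_ble() covers the remainder. Per the comment above, the
 * hardware hands the IV back in its AES-encrypted form, hence the
 * final crypto_cipher_decrypt_one() with the second half of the key.
 */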

static int chcr_update_cipher_iv(struct ablkcipher_request *req,
                                 struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
        int ret = 0;

        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
                ctr_add_iv(iv, req->info, (reqctx->processed /
                           AES_BLOCK_SIZE));
        else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
                *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
                        CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
                                                AES_BLOCK_SIZE) + 1);
        else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
                ret = chcr_update_tweak(req, iv);
        else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
                if (reqctx->op)
                        sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
                                           16,
                                           reqctx->processed - AES_BLOCK_SIZE);
                else
                        memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
        }

        return ret;
}

/* We need a separate function for the final IV because in RFC3686 the
 * initial counter starts from 1 and the IV buffer is only 8 bytes, which
 * remains constant across the subsequent update requests.
 */
static int chcr_final_cipher_iv(struct ablkcipher_request *req,
                                struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
        int ret = 0;

        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
                ctr_add_iv(iv, req->info, (reqctx->processed /
                           AES_BLOCK_SIZE));
        else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
                ret = chcr_update_tweak(req, iv);
        else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
                if (reqctx->op)
                        sg_pcopy_to_buffer(req->src, sg_nents(req->src), iv,
                                           16,
                                           reqctx->processed - AES_BLOCK_SIZE);
                else
                        memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
        }
        return ret;
}

static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
                                   unsigned char *input, int err)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct sk_buff *skb;
        struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        struct cipher_wr_param wrparam;
        int bytes;

        if (err)
                goto unmap;
        if (req->nbytes == reqctx->processed) {
                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                      req);
                err = chcr_final_cipher_iv(req, fw6_pld, req->info);
                goto complete;
        }

        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
                                            c_ctx(tfm)->tx_qidx))) {
                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
                        err = -EBUSY;
                        goto unmap;
                }
        }
        if (!reqctx->imm) {
                bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 1,
                                          SPACE_LEFT(ablkctx->enckey_len),
                                          reqctx->src_ofst, reqctx->dst_ofst);
                if ((bytes + reqctx->processed) >= req->nbytes)
                        bytes = req->nbytes - reqctx->processed;
                else
                        bytes = ROUND_16(bytes);
        } else {
                /* CTR mode counter overflow */
                bytes = req->nbytes - reqctx->processed;
        }
        dma_sync_single_for_cpu(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
        err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
        dma_sync_single_for_device(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                   reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
        if (err)
                goto unmap;

        if (unlikely(bytes == 0)) {
                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                      req);
                err = chcr_cipher_fallback(ablkctx->sw_cipher,
                                           req->base.flags,
                                           req->src,
                                           req->dst,
                                           req->nbytes,
                                           req->info,
                                           reqctx->op);
                goto complete;
        }

        if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
            CRYPTO_ALG_SUB_TYPE_CTR)
                bytes = adjust_ctr_overflow(reqctx->iv, bytes);
        wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
        wrparam.req = req;
        wrparam.bytes = bytes;
        skb = create_cipher_wr(&wrparam);
        if (IS_ERR(skb)) {
                pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
                err = PTR_ERR(skb);
                goto unmap;
        }
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
        chcr_send_wr(skb);
        reqctx->last_req_len = bytes;
        reqctx->processed += bytes;
        return 0;
unmap:
        chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
        req->base.complete(&req->base, err);
        return err;
}

static int process_cipher(struct ablkcipher_request *req,
                          unsigned short qid,
                          struct sk_buff **skb,
                          unsigned short op_type)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct cipher_wr_param wrparam;
        int bytes, err = -EINVAL;

        reqctx->processed = 0;
        if (!req->info)
                goto error;
        if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
            (req->nbytes == 0) ||
            (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
                pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
                       ablkctx->enckey_len, req->nbytes, ivsize);
                goto error;
        }
        chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
        if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
                                             AES_MIN_KEY_SIZE +
                                             sizeof(struct cpl_rx_phys_dsgl) +
                                             /* Min dsgl size */
                                             32))) {
                /* Can be sent as Imm */
                unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;

                dnents = sg_nents_xlen(req->dst, req->nbytes,
                                       CHCR_DST_SG_SIZE, 0);
                dnents += 1; /* IV */
                phys_dsgl = get_space_for_phys_dsgl(dnents);
                kctx_len = (DIV_ROUND_UP(ablkctx->enckey_len, 16) * 16);
                transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
                reqctx->imm = (transhdr_len + IV + req->nbytes) <=
                        SGE_MAX_WR_LEN;
                bytes = IV + req->nbytes;
        } else {
                reqctx->imm = 0;
        }

        if (!reqctx->imm) {
                bytes = chcr_sg_ent_in_wr(req->src, req->dst,
                                          MIN_CIPHER_SG,
                                          SPACE_LEFT(ablkctx->enckey_len),
                                          0, 0);
                if ((bytes + reqctx->processed) >= req->nbytes)
                        bytes = req->nbytes - reqctx->processed;
                else
                        bytes = ROUND_16(bytes);
        } else {
                bytes = req->nbytes;
        }
        if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
                                  CRYPTO_ALG_SUB_TYPE_CTR) {
                bytes = adjust_ctr_overflow(req->info, bytes);
        }
        if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
            CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
                memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
                memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
                       CTR_RFC3686_IV_SIZE);

                /* initialize counter portion of counter block */
                *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
                        CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
        } else {
                memcpy(reqctx->iv, req->info, IV);
        }
        if (unlikely(bytes == 0)) {
                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                      req);
                err = chcr_cipher_fallback(ablkctx->sw_cipher,
                                           req->base.flags,
                                           req->src,
                                           req->dst,
                                           req->nbytes,
                                           req->info,
                                           op_type);
                goto error;
        }
        reqctx->op = op_type;
        reqctx->srcsg = req->src;
        reqctx->dstsg = req->dst;
        reqctx->src_ofst = 0;
        reqctx->dst_ofst = 0;
        wrparam.qid = qid;
        wrparam.req = req;
        wrparam.bytes = bytes;
        *skb = create_cipher_wr(&wrparam);
        if (IS_ERR(*skb)) {
                err = PTR_ERR(*skb);
                goto unmap;
        }
        reqctx->processed = bytes;
        reqctx->last_req_len = bytes;

        return 0;
unmap:
        chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
error:
        return err;
}
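
/*
 * process_cipher() picks one of three paths: (1) the whole payload
 * rides in the work request itself (reqctx->imm) when it fits under
 * SGE_MAX_WR_LEN, (2) a partial-length WR covering as many
 * 16-byte-rounded bytes as the src/dst SGLs allow, with the remainder
 * continued from chcr_handle_cipher_resp(), or (3) the software
 * fallback when not even one block fits (bytes == 0). RFC3686 requests
 * also get their nonce/IV/counter block assembled here before the WR
 * is formed.
 */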
1301
1302 static int chcr_aes_encrypt(struct ablkcipher_request *req)
1303 {
1304         struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1305         struct sk_buff *skb = NULL;
1306         int err;
1307         struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1308
1309         if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1310                                             c_ctx(tfm)->tx_qidx))) {
1311                 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1312                         return -EBUSY;
1313         }
1314
1315         err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
1316                              &skb, CHCR_ENCRYPT_OP);
1317         if (err || !skb)
1318                 return  err;
1319         skb->dev = u_ctx->lldi.ports[0];
1320         set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
1321         chcr_send_wr(skb);
1322         return -EINPROGRESS;
1323 }
1324
1325 static int chcr_aes_decrypt(struct ablkcipher_request *req)
1326 {
1327         struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1328         struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1329         struct sk_buff *skb = NULL;
1330         int err;
1331
1332         if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1333                                             c_ctx(tfm)->tx_qidx))) {
1334                 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1335                         return -EBUSY;
1336         }
1337
1338          err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
1339                               &skb, CHCR_DECRYPT_OP);
1340         if (err || !skb)
1341                 return err;
1342         skb->dev = u_ctx->lldi.ports[0];
1343         set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
1344         chcr_send_wr(skb);
1345         return -EINPROGRESS;
1346 }
1347
1348 static int chcr_device_init(struct chcr_context *ctx)
1349 {
1350         struct uld_ctx *u_ctx = NULL;
1351         struct adapter *adap;
1352         unsigned int id;
1353         int txq_perchan, txq_idx, ntxq;
1354         int err = 0, rxq_perchan, rxq_idx;
1355
1356         id = smp_processor_id();
1357         if (!ctx->dev) {
1358                 u_ctx = assign_chcr_device();
1359                 if (!u_ctx) {
1360                         pr_err("chcr device assignment fails\n");
1361                         goto out;
1362                 }
1363                 ctx->dev = u_ctx->dev;
1364                 adap = padap(ctx->dev);
1365                 ntxq = min_not_zero((unsigned int)u_ctx->lldi.nrxq,
1366                                     adap->vres.ncrypto_fc);
1367                 rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
1368                 txq_perchan = ntxq / u_ctx->lldi.nchan;
1369                 rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
1370                 rxq_idx += id % rxq_perchan;
1371                 txq_idx = ctx->dev->tx_channel_id * txq_perchan;
1372                 txq_idx += id % txq_perchan;
1373                 spin_lock(&ctx->dev->lock_chcr_dev);
1374                 ctx->rx_qidx = rxq_idx;
1375                 ctx->tx_qidx = txq_idx;
1376                 ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
1377                 ctx->dev->rx_channel_id = 0;
1378                 spin_unlock(&ctx->dev->lock_chcr_dev);
1379         }
1380 out:
1381         return err;
1382 }
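
/*
 * Worked example of the queue spreading above, assuming nchan = 2 and
 * nrxq = ntxq = 8: rxq_perchan = txq_perchan = 4, so with
 * tx_channel_id = 1 and a caller running on CPU 5 both indices become
 * 1 * 4 + (5 % 4) = 5, and the channel id then flips so the next tfm
 * to initialise lands on channel 0.
 */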
1383
1384 static int chcr_cra_init(struct crypto_tfm *tfm)
1385 {
1386         struct crypto_alg *alg = tfm->__crt_alg;
1387         struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1388         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1389
1390         ablkctx->sw_cipher = crypto_alloc_skcipher(alg->cra_name, 0,
1391                                 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
1392         if (IS_ERR(ablkctx->sw_cipher)) {
1393                 pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1394                 return PTR_ERR(ablkctx->sw_cipher);
1395         }
1396
1397         if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) {
1398                 /* To update tweak */
1399                 ablkctx->aes_generic = crypto_alloc_cipher("aes-generic", 0, 0);
1400                 if (IS_ERR(ablkctx->aes_generic)) {
1401                         pr_err("failed to allocate aes cipher for tweak\n");
1402                         return PTR_ERR(ablkctx->aes_generic);
1403                 }
1404         } else {
1405                 ablkctx->aes_generic = NULL;
1406         }
1406
1407         tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
1408         return chcr_device_init(crypto_tfm_ctx(tfm));
1409 }
1410
1411 static int chcr_rfc3686_init(struct crypto_tfm *tfm)
1412 {
1413         struct crypto_alg *alg = tfm->__crt_alg;
1414         struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1415         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1416
1417         /* RFC 3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
1418          * cannot be used as the fallback in chcr_handle_cipher_response.
1419          */
1420         ablkctx->sw_cipher = crypto_alloc_skcipher("ctr(aes)", 0,
1421                                 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
1422         if (IS_ERR(ablkctx->sw_cipher)) {
1423                 pr_err("failed to allocate fallback for %s\n", alg->cra_name);
1424                 return PTR_ERR(ablkctx->sw_cipher);
1425         }
1426         tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
1427         return chcr_device_init(crypto_tfm_ctx(tfm));
1428 }
1429
1431 static void chcr_cra_exit(struct crypto_tfm *tfm)
1432 {
1433         struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1434         struct ablk_ctx *ablkctx = ABLK_CTX(ctx);
1435
1436         crypto_free_skcipher(ablkctx->sw_cipher);
1437         if (ablkctx->aes_generic)
1438                 crypto_free_cipher(ablkctx->aes_generic);
1439 }
1440
1441 static int get_alg_config(struct algo_param *params,
1442                           unsigned int auth_size)
1443 {
1444         switch (auth_size) {
1445         case SHA1_DIGEST_SIZE:
1446                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
1447                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
1448                 params->result_size = SHA1_DIGEST_SIZE;
1449                 break;
1450         case SHA224_DIGEST_SIZE:
1451                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1452                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
1453                 params->result_size = SHA256_DIGEST_SIZE;
1454                 break;
1455         case SHA256_DIGEST_SIZE:
1456                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
1457                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
1458                 params->result_size = SHA256_DIGEST_SIZE;
1459                 break;
1460         case SHA384_DIGEST_SIZE:
1461                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1462                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
1463                 params->result_size = SHA512_DIGEST_SIZE;
1464                 break;
1465         case SHA512_DIGEST_SIZE:
1466                 params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
1467                 params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
1468                 params->result_size = SHA512_DIGEST_SIZE;
1469                 break;
1470         default:
1471                 pr_err("ERROR, unsupported digest size\n");
1472                 return -EINVAL;
1473         }
1474         return 0;
1475 }
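
/*
 * Note: for the truncated digests the result_size deliberately stays at
 * the parent algorithm's state size (SHA-224 -> 32 bytes, SHA-384 -> 64
 * bytes); the 32/64-byte internal state is carried between partial
 * requests and only the final response is truncated to the nominal
 * digest size. Sketch:
 *
 *      struct algo_param p;
 *
 *      get_alg_config(&p, SHA224_DIGEST_SIZE);
 *      // now p.result_size == SHA256_DIGEST_SIZE (32), not 28
 */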
1476
1477 static inline void chcr_free_shash(struct crypto_shash *base_hash)
1478 {
1479         crypto_free_shash(base_hash);
1480 }
1481
1482 /**
1483  *      create_hash_wr - Create hash work request
1484  *      @req: ahash request
1485  */
1486 static struct sk_buff *create_hash_wr(struct ahash_request *req,
1487                                       struct hash_wr_param *param)
1488 {
1489         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1490         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1491         struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
1492         struct sk_buff *skb = NULL;
1493         struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
1494         struct chcr_wr *chcr_req;
1495         struct ulptx_sgl *ulptx;
1496         unsigned int nents = 0, transhdr_len, iopad_alignment = 0;
1497         unsigned int digestsize = crypto_ahash_digestsize(tfm);
1498         unsigned int kctx_len = 0, temp = 0;
1499         u8 hash_size_in_response = 0;
1500         gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1501                 GFP_ATOMIC;
1502         struct adapter *adap = padap(h_ctx(tfm)->dev);
1503         int error = 0;
1504
1505         iopad_alignment = KEYCTX_ALIGN_PAD(digestsize);
1506         kctx_len = param->alg_prm.result_size + iopad_alignment;
1507         if (param->opad_needed)
1508                 kctx_len += param->alg_prm.result_size + iopad_alignment;
1509
1510         if (req_ctx->result)
1511                 hash_size_in_response = digestsize;
1512         else
1513                 hash_size_in_response = param->alg_prm.result_size;
1514         transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
1515         req_ctx->imm = (transhdr_len + param->bfr_len + param->sg_len) <=
1516                 SGE_MAX_WR_LEN;
1517         nents = sg_nents_xlen(req->src, param->sg_len, CHCR_SRC_SG_SIZE, 0);
1518         nents += param->bfr_len ? 1 : 0;
1519         transhdr_len += req_ctx->imm ? (DIV_ROUND_UP((param->bfr_len +
1520                         param->sg_len), 16) * 16) :
1521                         (sgl_len(nents) * 8);
1522         transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
1523
1524         skb = alloc_skb(SGE_MAX_WR_LEN, flags);
1525         if (!skb)
1526                 return ERR_PTR(-ENOMEM);
1527         chcr_req = __skb_put_zero(skb, transhdr_len);
1528
1529         chcr_req->sec_cpl.op_ivinsrtofst =
1530                 FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->dev->rx_channel_id, 2, 0);
1531         chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);
1532
1533         chcr_req->sec_cpl.aadstart_cipherstop_hi =
1534                 FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
1535         chcr_req->sec_cpl.cipherstop_lo_authinsert =
1536                 FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
1537         chcr_req->sec_cpl.seqno_numivs =
1538                 FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
1539                                          param->opad_needed, 0);
1540
1541         chcr_req->sec_cpl.ivgen_hdrlen =
1542                 FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);
1543
1544         memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
1545                param->alg_prm.result_size);
1546
1547         if (param->opad_needed)
1548                 memcpy(chcr_req->key_ctx.key +
1549                        ((param->alg_prm.result_size <= 32) ? 32 :
1550                         CHCR_HASH_MAX_DIGEST_SIZE),
1551                        hmacctx->opad, param->alg_prm.result_size);
1552
1553         chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
1554                                             param->alg_prm.mk_size, 0,
1555                                             param->opad_needed,
1556                                             ((kctx_len +
1557                                              sizeof(chcr_req->key_ctx)) >> 4));
1558         chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
1559         ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + kctx_len +
1560                                      DUMMY_BYTES);
1561         if (param->bfr_len != 0) {
1562                 req_ctx->dma_addr = dma_map_single(&u_ctx->lldi.pdev->dev,
1563                                           req_ctx->reqbfr, param->bfr_len,
1564                                           DMA_TO_DEVICE);
1565                 if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
1566                                        req_ctx->dma_addr)) {
1567                         error = -ENOMEM;
1568                         goto err;
1569                 }
1570                 req_ctx->dma_len = param->bfr_len;
1571         } else {
1572                 req_ctx->dma_addr = 0;
1573         }
1574         chcr_add_hash_src_ent(req, ulptx, param);
1575         /* Request up to max WR size */
1576         temp = kctx_len + DUMMY_BYTES + (req_ctx->imm ? (param->sg_len
1577                                         + param->bfr_len) : 0);
1578         atomic_inc(&adap->chcr_stats.digest_rqst);
1579         create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->imm,
1580                     hash_size_in_response, transhdr_len,
1581                     temp, 0);
1582         req_ctx->skb = skb;
1583         return skb;
1584 err:
1585         kfree_skb(skb);
1586         return ERR_PTR(error);
1587 }
1588
1589 static int chcr_ahash_update(struct ahash_request *req)
1590 {
1591         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1592         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1593         struct uld_ctx *u_ctx = NULL;
1594         struct sk_buff *skb;
1595         u8 remainder = 0, bs;
1596         unsigned int nbytes = req->nbytes;
1597         struct hash_wr_param params;
1598         int error;
1599
1600         bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1601
1602         u_ctx = ULD_CTX(h_ctx(rtfm));
1603         if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1604                                             h_ctx(rtfm)->tx_qidx))) {
1605                 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1606                         return -EBUSY;
1607         }
1608
1609         if (nbytes + req_ctx->reqlen >= bs) {
1610                 remainder = (nbytes + req_ctx->reqlen) % bs;
1611                 nbytes = nbytes + req_ctx->reqlen - remainder;
1612         } else {
1613                 sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
1614                                    + req_ctx->reqlen, nbytes, 0);
1615                 req_ctx->reqlen += nbytes;
1616                 return 0;
1617         }
1618         error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1619         if (error)
1620                 return -ENOMEM;
1621         params.opad_needed = 0;
1622         params.more = 1;
1623         params.last = 0;
1624         params.sg_len = nbytes - req_ctx->reqlen;
1625         params.bfr_len = req_ctx->reqlen;
1626         params.scmd1 = 0;
1627         get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1628         req_ctx->result = 0;
1629         req_ctx->data_len += params.sg_len + params.bfr_len;
1630         skb = create_hash_wr(req, &params);
1631         if (IS_ERR(skb)) {
1632                 error = PTR_ERR(skb);
1633                 goto unmap;
1634         }
1635
1636         if (remainder) {
1637                 /* Swap buffers */
1638                 swap(req_ctx->reqbfr, req_ctx->skbfr);
1639                 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
1640                                    req_ctx->reqbfr, remainder, req->nbytes -
1641                                    remainder);
1642         }
1643         req_ctx->reqlen = remainder;
1644         skb->dev = u_ctx->lldi.ports[0];
1645         set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1646         chcr_send_wr(skb);
1647
1648         return -EINPROGRESS;
1649 unmap:
1650         chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1651         return error;
1652 }
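
/*
 * Worked example for the buffering above: with bs = 64, 10 bytes already
 * in reqbfr and an update of nbytes = 100, remainder = (100 + 10) % 64
 * = 46, so one 64-byte block goes to the hardware and the trailing 46
 * bytes are copied into the swapped-in buffer to seed the next
 * update/final.
 */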
1653
1654 static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
1655 {
1656         memset(bfr_ptr, 0, bs);
1657         *bfr_ptr = 0x80;
1658         if (bs == 64)
1659                 *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
1660         else
1661                 *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
1662 }
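
/*
 * Example: for bs = 64 and scmd1 = 3 bytes hashed in total, the
 * manufactured block is 0x80 followed by zeroes, with the big-endian bit
 * count 3 << 3 = 24 in bytes 56-63, i.e. the standard MD/SHA length
 * padding for a final block that carries no message bytes.
 */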
1663
1664 static int chcr_ahash_final(struct ahash_request *req)
1665 {
1666         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1667         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1668         struct hash_wr_param params;
1669         struct sk_buff *skb;
1670         struct uld_ctx *u_ctx = NULL;
1671         u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1672
1673         u_ctx = ULD_CTX(h_ctx(rtfm));
1674         if (is_hmac(crypto_ahash_tfm(rtfm)))
1675                 params.opad_needed = 1;
1676         else
1677                 params.opad_needed = 0;
1678         params.sg_len = 0;
1679         get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1680         req_ctx->result = 1;
1681         params.bfr_len = req_ctx->reqlen;
1682         req_ctx->data_len += params.bfr_len + params.sg_len;
1683         if (req_ctx->reqlen == 0) {
1684                 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1685                 params.last = 0;
1686                 params.more = 1;
1687                 params.scmd1 = 0;
1688                 params.bfr_len = bs;
1689
1690         } else {
1691                 params.scmd1 = req_ctx->data_len;
1692                 params.last = 1;
1693                 params.more = 0;
1694         }
1695         skb = create_hash_wr(req, &params);
1696         if (IS_ERR(skb))
1697                 return PTR_ERR(skb);
1698
1699         skb->dev = u_ctx->lldi.ports[0];
1700         set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1701         chcr_send_wr(skb);
1702         return -EINPROGRESS;
1703 }
1704
1705 static int chcr_ahash_finup(struct ahash_request *req)
1706 {
1707         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1708         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1709         struct uld_ctx *u_ctx = NULL;
1710         struct sk_buff *skb;
1711         struct hash_wr_param params;
1712         u8  bs;
1713         int error;
1714
1715         bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1716         u_ctx = ULD_CTX(h_ctx(rtfm));
1717
1718         if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1719                                             h_ctx(rtfm)->tx_qidx))) {
1720                 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1721                         return -EBUSY;
1722         }
1723
1724         if (is_hmac(crypto_ahash_tfm(rtfm)))
1725                 params.opad_needed = 1;
1726         else
1727                 params.opad_needed = 0;
1728
1729         params.sg_len = req->nbytes;
1730         params.bfr_len = req_ctx->reqlen;
1731         get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1732         req_ctx->data_len += params.bfr_len + params.sg_len;
1733         req_ctx->result = 1;
1734         if ((req_ctx->reqlen + req->nbytes) == 0) {
1735                 create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
1736                 params.last = 0;
1737                 params.more = 1;
1738                 params.scmd1 = 0;
1739                 params.bfr_len = bs;
1740         } else {
1741                 params.scmd1 = req_ctx->data_len;
1742                 params.last = 1;
1743                 params.more = 0;
1744         }
1745         error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1746         if (error)
1747                 return -ENOMEM;
1748
1749         skb = create_hash_wr(req, &params);
1750         if (IS_ERR(skb)) {
1751                 error = PTR_ERR(skb);
1752                 goto unmap;
1753         }
1754         skb->dev = u_ctx->lldi.ports[0];
1755         set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1756         chcr_send_wr(skb);
1757
1758         return -EINPROGRESS;
1759 unmap:
1760         chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1761         return error;
1762 }
1763
1764 static int chcr_ahash_digest(struct ahash_request *req)
1765 {
1766         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
1767         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1768         struct uld_ctx *u_ctx = NULL;
1769         struct sk_buff *skb;
1770         struct hash_wr_param params;
1771         u8  bs;
1772         int error;
1773
1774         rtfm->init(req);
1775         bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1776
1777         u_ctx = ULD_CTX(h_ctx(rtfm));
1778         if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1779                                             h_ctx(rtfm)->tx_qidx))) {
1780                 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1781                         return -EBUSY;
1782         }
1783
1784         if (is_hmac(crypto_ahash_tfm(rtfm)))
1785                 params.opad_needed = 1;
1786         else
1787                 params.opad_needed = 0;
1788         error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1789         if (error)
1790                 return -ENOMEM;
1791
1792         params.last = 0;
1793         params.more = 0;
1794         params.sg_len = req->nbytes;
1795         params.bfr_len = 0;
1796         params.scmd1 = 0;
1797         get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
1798         req_ctx->result = 1;
1799         req_ctx->data_len += params.bfr_len + params.sg_len;
1800
1801         if (req->nbytes == 0) {
1802                 create_last_hash_block(req_ctx->reqbfr, bs, 0);
1803                 params.more = 1;
1804                 params.bfr_len = bs;
1805         }
1806
1807         skb = create_hash_wr(req, &params);
1808         if (IS_ERR(skb)) {
1809                 error = PTR_ERR(skb);
1810                 goto unmap;
1811         }
1812         skb->dev = u_ctx->lldi.ports[0];
1813         set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
1814         chcr_send_wr(skb);
1815         return -EINPROGRESS;
1816 unmap:
1817         chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1818         return error;
1819 }
1820
1821 static int chcr_ahash_export(struct ahash_request *areq, void *out)
1822 {
1823         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1824         struct chcr_ahash_req_ctx *state = out;
1825
1826         state->reqlen = req_ctx->reqlen;
1827         state->data_len = req_ctx->data_len;
1828         state->is_sg_map = 0;
1829         state->result = 0;
1830         memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
1831         memcpy(state->partial_hash, req_ctx->partial_hash,
1832                CHCR_HASH_MAX_DIGEST_SIZE);
1833         return 0;
1834 }
1835
1836 static int chcr_ahash_import(struct ahash_request *areq, const void *in)
1837 {
1838         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1839         struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;
1840
1841         req_ctx->reqlen = state->reqlen;
1842         req_ctx->data_len = state->data_len;
1843         req_ctx->reqbfr = req_ctx->bfr1;
1844         req_ctx->skbfr = req_ctx->bfr2;
1845         req_ctx->is_sg_map = 0;
1846         req_ctx->result = 0;
1847         memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
1848         memcpy(req_ctx->partial_hash, state->partial_hash,
1849                CHCR_HASH_MAX_DIGEST_SIZE);
1850         return 0;
1851 }
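
/*
 * Usage sketch: export/import let a caller freeze a partial hash and
 * resume it later, here assuming the advertised statesize is
 * sizeof(struct chcr_ahash_req_ctx):
 *
 *      u8 state[sizeof(struct chcr_ahash_req_ctx)];
 *
 *      err = crypto_ahash_export(req, state);
 *      ...
 *      err = crypto_ahash_import(req2, state);
 *
 * where req2 is a request on a tfm of the same algorithm.
 */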
1852
1853 static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
1854                              unsigned int keylen)
1855 {
1856         struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
1857         unsigned int digestsize = crypto_ahash_digestsize(tfm);
1858         unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1859         unsigned int i, err = 0, updated_digestsize;
1860
1861         SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);
1862
1863         /* Use the key to calculate the ipad and opad. The ipad will be sent
1864          * with the first request's data; the opad will be sent with the
1865          * final hash result. They live in hmacctx->ipad and hmacctx->opad.
1866          */
1867         shash->tfm = hmacctx->base_hash;
1868         shash->flags = crypto_shash_get_flags(hmacctx->base_hash);
1869         if (keylen > bs) {
1870                 err = crypto_shash_digest(shash, key, keylen,
1871                                           hmacctx->ipad);
1872                 if (err)
1873                         goto out;
1874                 keylen = digestsize;
1875         } else {
1876                 memcpy(hmacctx->ipad, key, keylen);
1877         }
1878         memset(hmacctx->ipad + keylen, 0, bs - keylen);
1879         memcpy(hmacctx->opad, hmacctx->ipad, bs);
1880
1881         for (i = 0; i < bs / sizeof(int); i++) {
1882                 *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
1883                 *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
1884         }
1885
1886         updated_digestsize = digestsize;
1887         if (digestsize == SHA224_DIGEST_SIZE)
1888                 updated_digestsize = SHA256_DIGEST_SIZE;
1889         else if (digestsize == SHA384_DIGEST_SIZE)
1890                 updated_digestsize = SHA512_DIGEST_SIZE;
1891         err = chcr_compute_partial_hash(shash, hmacctx->ipad,
1892                                         hmacctx->ipad, digestsize);
1893         if (err)
1894                 goto out;
1895         chcr_change_order(hmacctx->ipad, updated_digestsize);
1896
1897         err = chcr_compute_partial_hash(shash, hmacctx->opad,
1898                                         hmacctx->opad, digestsize);
1899         if (err)
1900                 goto out;
1901         chcr_change_order(hmacctx->opad, updated_digestsize);
1902 out:
1903         return err;
1904 }
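
/*
 * Note: the setkey above implements the standard HMAC split,
 * HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)); the single-block
 * partial hashes of K ^ ipad and K ^ opad are precomputed (and byte-
 * swapped into hardware order via chcr_change_order()) so that each
 * request starts from these midstates instead of rehashing the key.
 */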
1905
1906 static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
1907                                unsigned int key_len)
1908 {
1909         struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
1910         unsigned short context_size = 0;
1911         int err;
1912
1913         err = chcr_cipher_fallback_setkey(cipher, key, key_len);
1914         if (err)
1915                 goto badkey_err;
1916
1917         memcpy(ablkctx->key, key, key_len);
1918         ablkctx->enckey_len = key_len;
1919         get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
1920         context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
1921         ablkctx->key_ctx_hdr =
1922                 FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
1923                                  CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
1924                                  CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
1925                                  CHCR_KEYCTX_NO_KEY, 1,
1926                                  0, context_size);
1927         ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
1928         return 0;
1929 badkey_err:
1930         crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1931         ablkctx->enckey_len = 0;
1932
1933         return err;
1934 }
1935
1936 static int chcr_sha_init(struct ahash_request *areq)
1937 {
1938         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1939         struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1940         int digestsize =  crypto_ahash_digestsize(tfm);
1941
1942         req_ctx->data_len = 0;
1943         req_ctx->reqlen = 0;
1944         req_ctx->reqbfr = req_ctx->bfr1;
1945         req_ctx->skbfr = req_ctx->bfr2;
1946         req_ctx->skb = NULL;
1947         req_ctx->result = 0;
1948         req_ctx->is_sg_map = 0;
1949         copy_hash_init_values(req_ctx->partial_hash, digestsize);
1950         return 0;
1951 }
1952
1953 static int chcr_sha_cra_init(struct crypto_tfm *tfm)
1954 {
1955         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1956                                  sizeof(struct chcr_ahash_req_ctx));
1957         return chcr_device_init(crypto_tfm_ctx(tfm));
1958 }
1959
1960 static int chcr_hmac_init(struct ahash_request *areq)
1961 {
1962         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1963         struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
1964         struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
1965         unsigned int digestsize = crypto_ahash_digestsize(rtfm);
1966         unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1967
1968         chcr_sha_init(areq);
1969         req_ctx->data_len = bs;
1970         if (is_hmac(crypto_ahash_tfm(rtfm))) {
1971                 if (digestsize == SHA224_DIGEST_SIZE)
1972                         memcpy(req_ctx->partial_hash, hmacctx->ipad,
1973                                SHA256_DIGEST_SIZE);
1974                 else if (digestsize == SHA384_DIGEST_SIZE)
1975                         memcpy(req_ctx->partial_hash, hmacctx->ipad,
1976                                SHA512_DIGEST_SIZE);
1977                 else
1978                         memcpy(req_ctx->partial_hash, hmacctx->ipad,
1979                                digestsize);
1980         }
1981         return 0;
1982 }
1983
1984 static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
1985 {
1986         struct chcr_context *ctx = crypto_tfm_ctx(tfm);
1987         struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
1988         unsigned int digestsize =
1989                 crypto_ahash_digestsize(__crypto_ahash_cast(tfm));
1990
1991         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1992                                  sizeof(struct chcr_ahash_req_ctx));
1993         hmacctx->base_hash = chcr_alloc_shash(digestsize);
1994         if (IS_ERR(hmacctx->base_hash))
1995                 return PTR_ERR(hmacctx->base_hash);
1996         return chcr_device_init(crypto_tfm_ctx(tfm));
1997 }
1998
1999 static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2000 {
2001         struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2002         struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2003
2004         if (hmacctx->base_hash) {
2005                 chcr_free_shash(hmacctx->base_hash);
2006                 hmacctx->base_hash = NULL;
2007         }
2008 }
2009
2010 static int chcr_aead_common_init(struct aead_request *req,
2011                                  unsigned short op_type)
2012 {
2013         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2014         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2015         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2016         int error = -EINVAL;
2017         unsigned int dst_size;
2018         unsigned int authsize = crypto_aead_authsize(tfm);
2019
2020         dst_size = req->assoclen + req->cryptlen + (op_type ?
2021                                         -authsize : authsize);
2022         /* validate key size */
2023         if (aeadctx->enckey_len == 0)
2024                 goto err;
2025         if (op_type && req->cryptlen < authsize)
2026                 goto err;
2027         error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2028                                   op_type);
2029         if (error) {
2030                 error = -ENOMEM;
2031                 goto err;
2032         }
2033         reqctx->aad_nents = sg_nents_xlen(req->src, req->assoclen,
2034                                           CHCR_SRC_SG_SIZE, 0);
2035         reqctx->src_nents = sg_nents_xlen(req->src, req->cryptlen,
2036                                           CHCR_SRC_SG_SIZE, req->assoclen);
2037         return 0;
2038 err:
2039         return error;
2040 }
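
/*
 * Example of the dst_size arithmetic above: an encrypt with
 * cryptlen = 32 and authsize = 16 needs assoclen + 48 bytes of output
 * (tag appended), while a decrypt's cryptlen already includes the tag,
 * so its output shrinks to assoclen + cryptlen - 16.
 */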
2041
2042 static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
2043                                    int aadmax, int wrlen,
2044                                    unsigned short op_type)
2045 {
2046         unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2047
2048         if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2049             dst_nents > MAX_DSGL_ENT ||
2050             (req->assoclen > aadmax) ||
2051             (wrlen > SGE_MAX_WR_LEN))
2052                 return 1;
2053         return 0;
2054 }
2055
2056 static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2057 {
2058         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2059         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2060         struct aead_request *subreq = aead_request_ctx(req);
2061
2062         aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2063         aead_request_set_callback(subreq, req->base.flags,
2064                                   req->base.complete, req->base.data);
2065         aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2066                                req->iv);
2067         aead_request_set_ad(subreq, req->assoclen);
2068         return op_type ? crypto_aead_decrypt(subreq) :
2069                 crypto_aead_encrypt(subreq);
2070 }
2071
2072 static struct sk_buff *create_authenc_wr(struct aead_request *req,
2073                                          unsigned short qid,
2074                                          int size,
2075                                          unsigned short op_type)
2076 {
2077         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2078         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2079         struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2080         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2081         struct sk_buff *skb = NULL;
2082         struct chcr_wr *chcr_req;
2083         struct cpl_rx_phys_dsgl *phys_cpl;
2084         struct ulptx_sgl *ulptx;
2085         unsigned int transhdr_len;
2086         unsigned int dst_size = 0, temp;
2087         unsigned int   kctx_len = 0, dnents;
2088         unsigned int  assoclen = req->assoclen;
2089         unsigned int  authsize = crypto_aead_authsize(tfm);
2090         int error = -EINVAL;
2091         int null = 0;
2092         gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2093                 GFP_ATOMIC;
2094         struct adapter *adap = padap(a_ctx(tfm)->dev);
2095
2096         if (req->cryptlen == 0)
2097                 return NULL;
2098
2099         reqctx->b0_dma = 0;
2100         if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_NULL) {
2101                 null = 1;
2102                 assoclen = 0;
2103         }
2104         dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
2105                                                     authsize);
2106         error = chcr_aead_common_init(req, op_type);
2107         if (error)
2108                 return ERR_PTR(error);
2109         if (dst_size) {
2110                 dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
2111                 dnents += sg_nents_xlen(req->dst, req->cryptlen +
2112                         (op_type ? -authsize : authsize), CHCR_DST_SG_SIZE,
2113                         req->assoclen);
2114                 dnents += MIN_AUTH_SG; /* for IV */
2115         } else {
2116                 dnents = 0;
2117         }
2118
2119         dst_size = get_space_for_phys_dsgl(dnents);
2120         kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
2121                 - sizeof(chcr_req->key_ctx);
2122         transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2123         reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <
2124                         SGE_MAX_WR_LEN;
2125         temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV + req->cryptlen), 16)
2126                         * 16) : (sgl_len(reqctx->src_nents + reqctx->aad_nents
2127                         + MIN_GCM_SG) * 8);
2128         transhdr_len += temp;
2129         transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
2130
2131         if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2132                                     transhdr_len, op_type)) {
2133                 atomic_inc(&adap->chcr_stats.fallback);
2134                 chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2135                                     op_type);
2136                 return ERR_PTR(chcr_aead_fallback(req, op_type));
2137         }
2138         skb = alloc_skb(SGE_MAX_WR_LEN, flags);
2139         if (!skb) {
2140                 error = -ENOMEM;
2141                 goto err;
2142         }
2143
2144         chcr_req = __skb_put_zero(skb, transhdr_len);
2145
2146         temp  = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
2147
2148         /*
2149          * Input order is AAD, IV and payload, where the IV is included as
2150          * part of the authdata. All other fields are filled according to
2151          * the hardware spec.
2152          */
2153         chcr_req->sec_cpl.op_ivinsrtofst =
2154                 FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->dev->rx_channel_id, 2,
2155                                        assoclen + 1);
2156         chcr_req->sec_cpl.pldlen = htonl(assoclen + IV + req->cryptlen);
2157         chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2158                                         assoclen ? 1 : 0, assoclen,
2159                                         assoclen + IV + 1,
2160                                         (temp & 0x1F0) >> 4);
2161         chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2162                                         temp & 0xF,
2163                                         null ? 0 : assoclen + IV + 1,
2164                                         temp, temp);
2165         chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2166                                         (op_type == CHCR_ENCRYPT_OP) ? 1 : 0,
2167                                         CHCR_SCMD_CIPHER_MODE_AES_CBC,
2168                                         actx->auth_mode, aeadctx->hmac_ctrl,
2169                                         IV >> 1);
2170         chcr_req->sec_cpl.ivgen_hdrlen =  FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2171                                          0, 0, dst_size);
2172
2173         chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2174         if (op_type == CHCR_ENCRYPT_OP)
2175                 memcpy(chcr_req->key_ctx.key, aeadctx->key,
2176                        aeadctx->enckey_len);
2177         else
2178                 memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2179                        aeadctx->enckey_len);
2180
2181         memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) <<
2182                                         4), actx->h_iopad, kctx_len -
2183                                 (DIV_ROUND_UP(aeadctx->enckey_len, 16) << 4));
2184         memcpy(reqctx->iv, req->iv, IV);
2185         phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2186         ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
2187         chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
2188         chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
2189         atomic_inc(&adap->chcr_stats.cipher_rqst);
2190         temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
2191                 kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
2192         create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2193                    transhdr_len, temp, 0);
2194         reqctx->skb = skb;
2195         reqctx->op = op_type;
2196
2197         return skb;
2198 err:
2199         chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2200                             op_type);
2201
2202         return ERR_PTR(error);
2203 }
2204
2205 static int chcr_aead_dma_map(struct device *dev,
2206                              struct aead_request *req,
2207                              unsigned short op_type)
2208 {
2209         int error;
2210         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2211         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2212         unsigned int authsize = crypto_aead_authsize(tfm);
2213         int dst_size;
2214
2215         dst_size = req->assoclen + req->cryptlen + (op_type ?
2216                                 -authsize : authsize);
2217         if (!req->cryptlen || !dst_size)
2218                 return 0;
2219         reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV,
2220                                         DMA_BIDIRECTIONAL);
2221         if (dma_mapping_error(dev, reqctx->iv_dma))
2222                 return -ENOMEM;
2223
2224         if (req->src == req->dst) {
2225                 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2226                                    DMA_BIDIRECTIONAL);
2227                 if (!error)
2228                         goto err;
2229         } else {
2230                 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2231                                    DMA_TO_DEVICE);
2232                 if (!error)
2233                         goto err;
2234                 error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2235                                    DMA_FROM_DEVICE);
2236                 if (!error) {
2237                         dma_unmap_sg(dev, req->src, sg_nents(req->src),
2238                                    DMA_TO_DEVICE);
2239                         goto err;
2240                 }
2241         }
2242
2243         return 0;
2244 err:
2245         dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
2246         return -ENOMEM;
2247 }
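
/*
 * Note: in-place requests (req->src == req->dst) are mapped once as
 * DMA_BIDIRECTIONAL, while split requests map src TO_DEVICE and dst
 * FROM_DEVICE, unwinding the already-mapped src if the dst mapping
 * fails; chcr_cipher_dma_map() below follows the same pattern.
 */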
2248
2249 static void chcr_aead_dma_unmap(struct device *dev,
2250                              struct aead_request *req,
2251                              unsigned short op_type)
2252 {
2253         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2254         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2255         unsigned int authsize = crypto_aead_authsize(tfm);
2256         int dst_size;
2257
2258         dst_size = req->assoclen + req->cryptlen + (op_type ?
2259                                         -authsize : authsize);
2260         if (!req->cryptlen || !dst_size)
2261                 return;
2262
2263         dma_unmap_single(dev, reqctx->iv_dma, IV,
2264                                         DMA_BIDIRECTIONAL);
2265         if (req->src == req->dst) {
2266                 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2267                                    DMA_BIDIRECTIONAL);
2268         } else {
2269                 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2270                                    DMA_TO_DEVICE);
2271                 dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2272                                    DMA_FROM_DEVICE);
2273         }
2274 }
2275
2276 static inline void chcr_add_aead_src_ent(struct aead_request *req,
2277                                struct ulptx_sgl *ulptx,
2278                                unsigned int assoclen,
2279                                unsigned short op_type)
2280 {
2281         struct ulptx_walk ulp_walk;
2282         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2283
2284         if (reqctx->imm) {
2285                 u8 *buf = (u8 *)ulptx;
2286
2287                 if (reqctx->b0_dma) {
2288                         memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2289                         buf += reqctx->b0_len;
2290                 }
2291                 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2292                                    buf, assoclen, 0);
2293                 buf += assoclen;
2294                 memcpy(buf, reqctx->iv, IV);
2295                 buf += IV;
2296                 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2297                                    buf, req->cryptlen, req->assoclen);
2298         } else {
2299                 ulptx_walk_init(&ulp_walk, ulptx);
2300                 if (reqctx->b0_dma)
2301                         ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2302                                             &reqctx->b0_dma);
2303                 ulptx_walk_add_sg(&ulp_walk, req->src, assoclen, 0);
2304                 ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
2305                 ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen,
2306                                   req->assoclen);
2307                 ulptx_walk_end(&ulp_walk);
2308         }
2309 }
2310
2311 static inline void chcr_add_aead_dst_ent(struct aead_request *req,
2312                                struct cpl_rx_phys_dsgl *phys_cpl,
2313                                unsigned int assoclen,
2314                                unsigned short op_type,
2315                                unsigned short qid)
2316 {
2317         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2318         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2319         struct dsgl_walk dsgl_walk;
2320         unsigned int authsize = crypto_aead_authsize(tfm);
2321         u32 temp;
2322
2323         dsgl_walk_init(&dsgl_walk, phys_cpl);
2324         if (reqctx->b0_dma)
2325                 dsgl_walk_add_page(&dsgl_walk, reqctx->b0_len, &reqctx->b0_dma);
2326         dsgl_walk_add_sg(&dsgl_walk, req->dst, assoclen, 0);
2327         dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
2328         temp = req->cryptlen + (op_type ? -authsize : authsize);
2329         dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
2330         dsgl_walk_end(&dsgl_walk, qid);
2331 }
2332
2333 static inline void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
2334                                            struct ulptx_sgl *ulptx,
2335                                            struct  cipher_wr_param *wrparam)
2336 {
2337         struct ulptx_walk ulp_walk;
2338         struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2339
2340         if (reqctx->imm) {
2341                 u8 *buf = (u8 *)ulptx;
2342
2343                 memcpy(buf, reqctx->iv, IV);
2344                 buf += IV;
2345                 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2346                                    buf, wrparam->bytes, reqctx->processed);
2347         } else {
2348                 ulptx_walk_init(&ulp_walk, ulptx);
2349                 ulptx_walk_add_page(&ulp_walk, IV, &reqctx->iv_dma);
2350                 ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2351                                   reqctx->src_ofst);
2352                 reqctx->srcsg = ulp_walk.last_sg;
2353                 reqctx->src_ofst = ulp_walk.last_sg_len;
2354                 ulptx_walk_end(&ulp_walk);
2355         }
2356 }
2357
2358 static inline void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
2359                                            struct cpl_rx_phys_dsgl *phys_cpl,
2360                                            struct  cipher_wr_param *wrparam,
2361                                            unsigned short qid)
2362 {
2363         struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2364         struct dsgl_walk dsgl_walk;
2365
2366         dsgl_walk_init(&dsgl_walk, phys_cpl);
2367         dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
2368         dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2369                          reqctx->dst_ofst);
2370         reqctx->dstsg = dsgl_walk.last_sg;
2371         reqctx->dst_ofst = dsgl_walk.last_sg_len;
2372
2373         dsgl_walk_end(&dsgl_walk, qid);
2374 }
2375
2376 static inline void chcr_add_hash_src_ent(struct ahash_request *req,
2377                                            struct ulptx_sgl *ulptx,
2378                                            struct hash_wr_param *param)
2379 {
2380         struct ulptx_walk ulp_walk;
2381         struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2382
2383         if (reqctx->imm) {
2384                 u8 *buf = (u8 *)ulptx;
2385
2386                 if (param->bfr_len) {
2387                         memcpy(buf, reqctx->reqbfr, param->bfr_len);
2388                         buf += param->bfr_len;
2389                 }
2390                 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2391                                    buf, param->sg_len, 0);
2392         } else {
2393                 ulptx_walk_init(&ulp_walk, ulptx);
2394                 if (param->bfr_len)
2395                         ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2396                                             &reqctx->dma_addr);
2397                 ulptx_walk_add_sg(&ulp_walk, req->src, param->sg_len, 0);
2398                 ulptx_walk_end(&ulp_walk);
2402         }
2403 }
2404
2406 static inline int chcr_hash_dma_map(struct device *dev,
2407                              struct ahash_request *req)
2408 {
2409         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2410         int error = 0;
2411
2412         if (!req->nbytes)
2413                 return 0;
2414         error = dma_map_sg(dev, req->src, sg_nents(req->src),
2415                            DMA_TO_DEVICE);
2416         if (!error)
2417                 return -ENOMEM;
2418         req_ctx->is_sg_map = 1;
2419         return 0;
2420 }
2421
2422 static inline void chcr_hash_dma_unmap(struct device *dev,
2423                              struct ahash_request *req)
2424 {
2425         struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2426
2427         if (!req->nbytes)
2428                 return;
2429
2430         dma_unmap_sg(dev, req->src, sg_nents(req->src),
2431                            DMA_TO_DEVICE);
2432         req_ctx->is_sg_map = 0;
2434 }
2435
2437 static int chcr_cipher_dma_map(struct device *dev,
2438                              struct ablkcipher_request *req)
2439 {
2440         int error;
2441         struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2442
2443         reqctx->iv_dma = dma_map_single(dev, reqctx->iv, IV,
2444                                         DMA_BIDIRECTIONAL);
2445         if (dma_mapping_error(dev, reqctx->iv_dma))
2446                 return -ENOMEM;
2447
2448         if (req->src == req->dst) {
2449                 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2450                                    DMA_BIDIRECTIONAL);
2451                 if (!error)
2452                         goto err;
2453         } else {
2454                 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2455                                    DMA_TO_DEVICE);
2456                 if (!error)
2457                         goto err;
2458                 error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2459                                    DMA_FROM_DEVICE);
2460                 if (!error) {
2461                         dma_unmap_sg(dev, req->src, sg_nents(req->src),
2462                                    DMA_TO_DEVICE);
2463                         goto err;
2464                 }
2465         }
2466
2467         return 0;
2468 err:
2469         dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
2470         return -ENOMEM;
2471 }
2472 static void chcr_cipher_dma_unmap(struct device *dev,
2473                                   struct ablkcipher_request *req)
2474 {
2475         struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2476
2477         dma_unmap_single(dev, reqctx->iv_dma, IV,
2478                                         DMA_BIDIRECTIONAL);
2479         if (req->src == req->dst) {
2480                 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2481                                    DMA_BIDIRECTIONAL);
2482         } else {
2483                 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2484                                    DMA_TO_DEVICE);
2485                 dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2486                                    DMA_FROM_DEVICE);
2487         }
2488 }
2489
2490 static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2491 {
2492         __be32 data;
2493
2494         memset(block, 0, csize);
2495         block += csize;
2496
2497         if (csize >= 4)
2498                 csize = 4;
2499         else if (msglen > (unsigned int)(1 << (8 * csize)))
2500                 return -EOVERFLOW;
2501
2502         data = cpu_to_be32(msglen);
2503         memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2504
2505         return 0;
2506 }
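
/*
 * Worked example: with csize = 3 and msglen = 0x012345 the three length
 * octets written are 01 23 45; for csize >= 4 only the low four bytes of
 * the big-endian length are stored, so any 32-bit cryptlen fits without
 * triggering -EOVERFLOW.
 */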
2507
2508 static int generate_b0(struct aead_request *req,
2509                        struct chcr_aead_ctx *aeadctx,
2510                        unsigned short op_type)
2511 {
2512         unsigned int l, lp, m;
2513         int rc;
2514         struct crypto_aead *aead = crypto_aead_reqtfm(req);
2515         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2516         u8 *b0 = reqctx->scratch_pad;
2517
2518         m = crypto_aead_authsize(aead);
2519
2520         memcpy(b0, reqctx->iv, 16);
2521
2522         lp = b0[0];
2523         l = lp + 1;
2524
2525         /* set m, bits 3-5 */
2526         *b0 |= (8 * ((m - 2) / 2));
2527
2528         /* set adata, bit 6, if associated data is used */
2529         if (req->assoclen)
2530                 *b0 |= 64;
2531         rc = set_msg_len(b0 + 16 - l,
2532                          (op_type == CHCR_DECRYPT_OP) ?
2533                          req->cryptlen - m : req->cryptlen, l);
2534         return rc;
2535 }
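
/*
 * Worked example: for iv[0] = 3 (so L = 4), a 16-byte tag and non-empty
 * AAD, the flags octet becomes 0x03 | 0x40 | 8 * ((16 - 2) / 2) = 0x7b,
 * and the message length lands in the last four bytes of B0, per the B0
 * layout in RFC 3610, section 2.2.
 */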
2535
2536 static inline int crypto_ccm_check_iv(const u8 *iv)
2537 {
2538         /* 2 <= L <= 8, so 1 <= L' <= 7. */
2539         if (iv[0] < 1 || iv[0] > 7)
2540                 return -EINVAL;
2541
2542         return 0;
2543 }
2544
2545 static int ccm_format_packet(struct aead_request *req,
2546                              struct chcr_aead_ctx *aeadctx,
2547                              unsigned int sub_type,
2548                              unsigned short op_type)
2549 {
2550         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2551         int rc = 0;
2552
2553         if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2554                 reqctx->iv[0] = 3;
2555                 memcpy(reqctx->iv + 1, &aeadctx->salt[0], 3);
2556                 memcpy(reqctx->iv + 4, req->iv, 8);
2557                 memset(reqctx->iv + 12, 0, 4);
2558                 *((unsigned short *)(reqctx->scratch_pad + 16)) =
2559                         htons(req->assoclen - 8);
2560         } else {
2561                 memcpy(reqctx->iv, req->iv, 16);
2562                 *((unsigned short *)(reqctx->scratch_pad + 16)) =
2563                         htons(req->assoclen);
2564         }
2565         rc = generate_b0(req, aeadctx, op_type);
2566         /* zero the ctr value */
2567         memset(reqctx->iv + 15 - reqctx->iv[0], 0, reqctx->iv[0] + 1);
2568         return rc;
2569 }
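
/*
 * Note: the RFC 4309 branch above rebuilds the counter block as
 * flags(1) | salt(3) | per-packet IV(8) | counter(4), i.e. the 11-byte
 * CCM nonce (L = 4) that RFC 4309, section 4 derives from the keyed salt
 * and the IV carried in the request.
 */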
2570
2571 static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2572                                   unsigned int dst_size,
2573                                   struct aead_request *req,
2574                                   unsigned short op_type)
2575 {
2576         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2577         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2578         unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2579         unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2580         unsigned int c_id = a_ctx(tfm)->dev->rx_channel_id;
2581         unsigned int ccm_xtra;
2582         unsigned char tag_offset = 0, auth_offset = 0;
2583         unsigned int assoclen;
2584
2585         if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2586                 assoclen = req->assoclen - 8;
2587         else
2588                 assoclen = req->assoclen;
2589         ccm_xtra = CCM_B0_SIZE +
2590                 ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2591
2592         auth_offset = req->cryptlen ?
2593                 (assoclen + IV + 1 + ccm_xtra) : 0;
2594         if (op_type == CHCR_DECRYPT_OP) {
2595                 if (crypto_aead_authsize(tfm) != req->cryptlen)
2596                         tag_offset = crypto_aead_authsize(tfm);
2597                 else
2598                         auth_offset = 0;
2599         }
2600
2602         sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
2603                                          2, assoclen + 1 + ccm_xtra);
2604         sec_cpl->pldlen =
2605                 htonl(assoclen + IV + req->cryptlen + ccm_xtra);
2606         /* For CCM there will always be a B0 block, so AAD start is always 1 */
2607         sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2608                                         1, assoclen + ccm_xtra, assoclen
2609                                         + IV + 1 + ccm_xtra, 0);
2610
2611         sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
2612                                         auth_offset, tag_offset,
2613                                         (op_type == CHCR_ENCRYPT_OP) ? 0 :
2614                                         crypto_aead_authsize(tfm));
2615         sec_cpl->seqno_numivs =  FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2616                                         (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
2617                                         cipher_mode, mac_mode,
2618                                         aeadctx->hmac_ctrl, IV >> 1);
2619
2620         sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
2621                                         0, dst_size);
2622 }
2623
2624 static int aead_ccm_validate_input(unsigned short op_type,
2625                                    struct aead_request *req,
2626                                    struct chcr_aead_ctx *aeadctx,
2627                                    unsigned int sub_type)
2628 {
2629         if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2630                 if (crypto_ccm_check_iv(req->iv)) {
2631                         pr_err("CCM: IV check fails\n");
2632                         return -EINVAL;
2633                 }
2634         } else {
2635                 if (req->assoclen != 16 && req->assoclen != 20) {
2636                         pr_err("RFC4309: Invalid AAD length %u\n",
2637                                req->assoclen);
2638                         return -EINVAL;
2639                 }
2640         }
2641         return 0;
2642 }
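
/*
 * Note: the 16/20-byte AAD lengths accepted for RFC 4309 are the ESP AAD
 * (SPI plus a 32- or 64-bit sequence number, 8 or 12 bytes) plus the
 * 8-byte per-packet IV that the rfc4309 template counts as associated
 * data.
 */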
2643
2644 static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
2645                                           unsigned short qid,
2646                                           int size,
2647                                           unsigned short op_type)
2648 {
2649         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2650         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2651         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2652         struct sk_buff *skb = NULL;
2653         struct chcr_wr *chcr_req;
2654         struct cpl_rx_phys_dsgl *phys_cpl;
2655         struct ulptx_sgl *ulptx;
2656         unsigned int transhdr_len;
2657         unsigned int dst_size = 0, kctx_len, dnents, temp;
2658         unsigned int sub_type, assoclen = req->assoclen;
2659         unsigned int authsize = crypto_aead_authsize(tfm);
2660         int error = -EINVAL;
2661         gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2662                 GFP_ATOMIC;
2663         struct adapter *adap = padap(a_ctx(tfm)->dev);
2664
2665         reqctx->b0_dma = 0;
2666         sub_type = get_aead_subtype(tfm);
2667         if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2668                 assoclen -= 8;
2669         dst_size = assoclen + req->cryptlen + (op_type ? -authsize :
2670                                                    authsize);
2671         error = chcr_aead_common_init(req, op_type);
2672         if (error)
2673                 return ERR_PTR(error);
2674
2675
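        /*
         * B0 is the 16-byte CCM first block of RFC 3610 (flags, nonce and
         * the encoded message length); when there is associated data it is
         * followed by the 2-byte AAD length field (CCM_AAD_FIELD_SIZE).
         */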
2676         reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
2677         error = aead_ccm_validate_input(op_type, req, aeadctx, sub_type);
2678         if (error)
2679                 goto err;
2680         if (dst_size) {
2681                 dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
2682                 dnents += sg_nents_xlen(req->dst, req->cryptlen
2683                                 + (op_type ? -authsize : authsize),
2684                                 CHCR_DST_SG_SIZE, req->assoclen);
2685                 dnents += MIN_CCM_SG; /* for IV and B0 */
2686         } else {
2687                 dnents = 0;
2688         }
2689         dst_size = get_space_for_phys_dsgl(dnents);
2690         kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) * 2;
2691         transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
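        /*
         * Send the payload as immediate data if the whole request fits in
         * one work request, otherwise hand it over as an ULPTX SGL. Either
         * way the WR length is rounded up to the 16-byte multiple the SGE
         * expects.
         */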
2692         reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen +
2693                        reqctx->b0_len) <= SGE_MAX_WR_LEN;
2694         temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV + req->cryptlen +
2695                                 reqctx->b0_len), 16) * 16) :
2696                 (sgl_len(reqctx->src_nents + reqctx->aad_nents +
2697                                 MIN_CCM_SG) * 8);
2698         transhdr_len += temp;
2699         transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
2700
2701         if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
2702                                     reqctx->b0_len, transhdr_len, op_type)) {
2703                 atomic_inc(&adap->chcr_stats.fallback);
2704                 chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2705                                     op_type);
2706                 return ERR_PTR(chcr_aead_fallback(req, op_type));
2707         }
2708         skb = alloc_skb(SGE_MAX_WR_LEN, flags);
2709
2710         if (!skb) {
2711                 error = -ENOMEM;
2712                 goto err;
2713         }
2714
2715         chcr_req = __skb_put_zero(skb, transhdr_len);
2716
2717         fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, op_type);
2718
2719         chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
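        /*
         * CCM runs the same AES key through both the CTR cipher and the
         * CBC-MAC, so the key context carries two copies of it (which is
         * also why kctx_len above is doubled).
         */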
2720         memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
2721         memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
2722                                         16), aeadctx->key, aeadctx->enckey_len);
2723
2724         phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2725         ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
2726         error = ccm_format_packet(req, aeadctx, sub_type, op_type);
2727         if (error)
2728                 goto dstmap_fail;
2729
2730         reqctx->b0_dma = dma_map_single(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev,
2731                                         &reqctx->scratch_pad, reqctx->b0_len,
2732                                         DMA_BIDIRECTIONAL);
2733         if (dma_mapping_error(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev,
2734                               reqctx->b0_dma)) {
2735                 error = -ENOMEM;
2736                 goto dstmap_fail;
2737         }
2738
2739         chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
2740         chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
2741
2742         atomic_inc(&adap->chcr_stats.aead_rqst);
2743         temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
2744                 kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen +
2745                 reqctx->b0_len) : 0);
2746         create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
2747                     transhdr_len, temp, 0);
2748         reqctx->skb = skb;
2749         reqctx->op = op_type;
2750
2751         return skb;
2752 dstmap_fail:
2753         kfree_skb(skb);
2754 err:
2755         chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type);
2756         return ERR_PTR(error);
2757 }
2758
2759 static struct sk_buff *create_gcm_wr(struct aead_request *req,
2760                                      unsigned short qid,
2761                                      int size,
2762                                      unsigned short op_type)
2763 {
2764         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2765         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2766         struct chcr_aead_reqctx  *reqctx = aead_request_ctx(req);
2767         struct sk_buff *skb = NULL;
2768         struct chcr_wr *chcr_req;
2769         struct cpl_rx_phys_dsgl *phys_cpl;
2770         struct ulptx_sgl *ulptx;
2771         unsigned int transhdr_len, dnents = 0;
2772         unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
2773         unsigned int authsize = crypto_aead_authsize(tfm);
2774         int error = -EINVAL;
2775         gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2776                 GFP_ATOMIC;
2777         struct adapter *adap = padap(a_ctx(tfm)->dev);
2778
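        /*
         * For RFC 4106 the request's assoclen includes the 8-byte IV that
         * the rfc4106 template appends to the AAD; strip it so only the
         * real associated data is authenticated as AAD.
         */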
2779         if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
2780                 assoclen = req->assoclen - 8;
2781
2782         reqctx->b0_dma = 0;
2783         dst_size = assoclen + req->cryptlen + (op_type ? -authsize : authsize);
2784         error = chcr_aead_common_init(req, op_type);
2785         if (error)
2786                 return ERR_PTR(error);
2787         if (dst_size) {
2788                 dnents = sg_nents_xlen(req->dst, assoclen, CHCR_DST_SG_SIZE, 0);
2789                 dnents += sg_nents_xlen(req->dst,
2790                         req->cryptlen + (op_type ? -authsize : authsize),
2791                                 CHCR_DST_SG_SIZE, req->assoclen);
2792                 dnents += MIN_GCM_SG; /* for IV */
2793         } else {
2794                 dnents = 0;
2795         }
2796         dst_size = get_space_for_phys_dsgl(dnents);
2797         kctx_len = ((DIV_ROUND_UP(aeadctx->enckey_len, 16)) << 4) +
2798                 AEAD_H_SIZE;
2799         transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2800         reqctx->imm = (transhdr_len + assoclen + IV + req->cryptlen) <=
2801                         SGE_MAX_WR_LEN;
2802         temp = reqctx->imm ? (DIV_ROUND_UP((assoclen + IV +
2803                         req->cryptlen), 16) * 16) :
2804                 (sgl_len(reqctx->src_nents + reqctx->aad_nents + MIN_GCM_SG) * 8);
2805         transhdr_len += temp;
2806         transhdr_len = DIV_ROUND_UP(transhdr_len, 16) * 16;
2807         if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2808                             transhdr_len, op_type)) {
2809                 atomic_inc(&adap->chcr_stats.fallback);
2810                 chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2811                                     op_type);
2812                 return ERR_PTR(chcr_aead_fallback(req, op_type));
2813         }
2814         skb = alloc_skb(SGE_MAX_WR_LEN, flags);
2815         if (!skb) {
2816                 error = -ENOMEM;
2817                 goto err;
2818         }
2819
2820         chcr_req = __skb_put_zero(skb, transhdr_len);
2821
2822         /* offset of tag from end */
2823         temp = (op_type == CHCR_ENCRYPT_OP) ? 0 : authsize;
2824         chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
2825                                         a_ctx(tfm)->dev->rx_channel_id, 2,
2826                                         (assoclen + 1));
2827         chcr_req->sec_cpl.pldlen =
2828                 htonl(assoclen + IV + req->cryptlen);
2829         chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2830                                         assoclen ? 1 : 0, assoclen,
2831                                         assoclen + IV + 1, 0);
2832         chcr_req->sec_cpl.cipherstop_lo_authinsert =
2833                 FILL_SEC_CPL_AUTHINSERT(0, assoclen + IV + 1,
2834                                         temp, temp);
2835         chcr_req->sec_cpl.seqno_numivs =
2836                 FILL_SEC_CPL_SCMD0_SEQNO(op_type, (op_type ==
2837                                 CHCR_ENCRYPT_OP) ? 1 : 0,
2838                                 CHCR_SCMD_CIPHER_MODE_AES_GCM,
2839                                 CHCR_SCMD_AUTH_MODE_GHASH,
2840                                 aeadctx->hmac_ctrl, IV >> 1);
2841         chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2842                                         0, 0, dst_size);
2843         chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2844         memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
2845         memcpy(chcr_req->key_ctx.key + (DIV_ROUND_UP(aeadctx->enckey_len, 16) *
2846                                 16), GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
2847
2848         /* prepare a 16 byte iv */
2849         /* S   A   L  T |  IV | 0x00000001 */
2850         if (get_aead_subtype(tfm) ==
2851             CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
2852                 memcpy(reqctx->iv, aeadctx->salt, 4);
2853                 memcpy(reqctx->iv + 4, req->iv, GCM_RFC4106_IV_SIZE);
2854         } else {
2855                 memcpy(reqctx->iv, req->iv, GCM_AES_IV_SIZE);
2856         }
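        /* the trailing 32-bit block counter starts at 1, giving
         * J0 = IV || 0^31 || 1 as GCM specifies for 96-bit IVs
         */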
2857         *((unsigned int *)(reqctx->iv + 12)) = htonl(0x01);
2858
2859         phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2860         ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
2861
2862         chcr_add_aead_dst_ent(req, phys_cpl, assoclen, op_type, qid);
2863         chcr_add_aead_src_ent(req, ulptx, assoclen, op_type);
2864         atomic_inc(&adap->chcr_stats.aead_rqst);
2865         temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size +
2866                 kctx_len + (reqctx->imm ? (assoclen + IV + req->cryptlen) : 0);
2867         create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2868                     transhdr_len, temp, reqctx->verify);
2869         reqctx->skb = skb;
2870         reqctx->op = op_type;
2871         return skb;
2872
2873 err:
2874         chcr_aead_dma_unmap(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req, op_type);
2875         return ERR_PTR(error);
2876 }
2877
2878
2879
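/*
 * A same-named software AEAD is allocated as a fallback for requests the
 * hardware cannot take (see chcr_aead_need_fallback()), and the request
 * context is sized to cover both the chcr and the fallback paths.
 */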
2880 static int chcr_aead_cra_init(struct crypto_aead *tfm)
2881 {
2882         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2883         struct aead_alg *alg = crypto_aead_alg(tfm);
2884
2885         aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
2886                                                CRYPTO_ALG_NEED_FALLBACK |
2887                                                CRYPTO_ALG_ASYNC);
2888         if (IS_ERR(aeadctx->sw_cipher))
2889                 return PTR_ERR(aeadctx->sw_cipher);
2890         crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
2891                                  sizeof(struct aead_request) +
2892                                  crypto_aead_reqsize(aeadctx->sw_cipher)));
2893         return chcr_device_init(a_ctx(tfm));
2894 }
2895
2896 static void chcr_aead_cra_exit(struct crypto_aead *tfm)
2897 {
2898         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2899
2900         crypto_free_aead(aeadctx->sw_cipher);
2901 }
2902
2903 static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
2904                                         unsigned int authsize)
2905 {
2906         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2907
2908         aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
2909         aeadctx->mayverify = VERIFY_HW;
2910         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2911 }
2912 static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
2913                                     unsigned int authsize)
2914 {
2915         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2916         u32 maxauth = crypto_aead_maxauthsize(tfm);
2917
2918         /* SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2
2919          * does not hold for SHA1, so the authsize == 12 check must come
2920          * before the authsize == (maxauth >> 1) check.
2921          */
2922         if (authsize == ICV_4) {
2923                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
2924                 aeadctx->mayverify = VERIFY_HW;
2925         } else if (authsize == ICV_6) {
2926                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
2927                 aeadctx->mayverify = VERIFY_HW;
2928         } else if (authsize == ICV_10) {
2929                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
2930                 aeadctx->mayverify = VERIFY_HW;
2931         } else if (authsize == ICV_12) {
2932                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2933                 aeadctx->mayverify = VERIFY_HW;
2934         } else if (authsize == ICV_14) {
2935                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
2936                 aeadctx->mayverify = VERIFY_HW;
2937         } else if (authsize == (maxauth >> 1)) {
2938                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2939                 aeadctx->mayverify = VERIFY_HW;
2940         } else if (authsize == maxauth) {
2941                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2942                 aeadctx->mayverify = VERIFY_HW;
2943         } else {
2944                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2945                 aeadctx->mayverify = VERIFY_SW;
2946         }
2947         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2948 }
2949
2950
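/*
 * hmac_ctrl picks how the hardware truncates the 16-byte GHASH tag; the
 * sizes it cannot produce directly (13 and 15 bytes) are marked VERIFY_SW,
 * meaning the full tag is generated and the comparison is done in software.
 */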
2951 static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
2952 {
2953         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2954
2955         switch (authsize) {
2956         case ICV_4:
2957                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
2958                 aeadctx->mayverify = VERIFY_HW;
2959                 break;
2960         case ICV_8:
2961                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2962                 aeadctx->mayverify = VERIFY_HW;
2963                 break;
2964         case ICV_12:
2965                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
2966                 aeadctx->mayverify = VERIFY_HW;
2967                 break;
2968         case ICV_14:
2969                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
2970                 aeadctx->mayverify = VERIFY_HW;
2971                 break;
2972         case ICV_16:
2973                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2974                 aeadctx->mayverify = VERIFY_HW;
2975                 break;
2976         case ICV_13:
2977         case ICV_15:
2978                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
2979                 aeadctx->mayverify = VERIFY_SW;
2980                 break;
2981         default:
2982
2983                 crypto_tfm_set_flags((struct crypto_tfm *)tfm,
2984                                      CRYPTO_TFM_RES_BAD_KEY_LEN);
2985                 return -EINVAL;
2986         }
2987         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
2988 }
2989
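/* RFC 4106 and RFC 4309 only allow ICV lengths of 8, 12 and 16 bytes. */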
2990 static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
2991                                           unsigned int authsize)
2992 {
2993         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2994
2995         switch (authsize) {
2996         case ICV_8:
2997                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
2998                 aeadctx->mayverify = VERIFY_HW;
2999                 break;
3000         case ICV_12:
3001                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3002                 aeadctx->mayverify = VERIFY_HW;
3003                 break;
3004         case ICV_16:
3005                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3006                 aeadctx->mayverify = VERIFY_HW;
3007                 break;
3008         default:
3009                 crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3010                                      CRYPTO_TFM_RES_BAD_KEY_LEN);
3011                 return -EINVAL;
3012         }
3013         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3014 }
3015
3016 static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3017                                 unsigned int authsize)
3018 {
3019         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3020
3021         switch (authsize) {
3022         case ICV_4:
3023                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3024                 aeadctx->mayverify = VERIFY_HW;
3025                 break;
3026         case ICV_6:
3027                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3028                 aeadctx->mayverify = VERIFY_HW;
3029                 break;
3030         case ICV_8:
3031                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3032                 aeadctx->mayverify = VERIFY_HW;
3033                 break;
3034         case ICV_10:
3035                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3036                 aeadctx->mayverify = VERIFY_HW;
3037                 break;
3038         case ICV_12:
3039                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3040                 aeadctx->mayverify = VERIFY_HW;
3041                 break;
3042         case ICV_14:
3043                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3044                 aeadctx->mayverify = VERIFY_HW;
3045                 break;
3046         case ICV_16:
3047                 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3048                 aeadctx->mayverify = VERIFY_HW;
3049                 break;
3050         default:
3051                 crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3052                                      CRYPTO_TFM_RES_BAD_KEY_LEN);
3053                 return -EINVAL;
3054         }
3055         return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3056 }
3057
3058 static int chcr_ccm_common_setkey(struct crypto_aead *aead,
3059                                 const u8 *key,
3060                                 unsigned int keylen)
3061 {
3062         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3063         unsigned char ck_size, mk_size;
3064         int key_ctx_size = 0;
3065
3066         key_ctx_size = sizeof(struct _key_ctx) +
3067                 ((DIV_ROUND_UP(keylen, 16)) << 4)  * 2;
3068         if (keylen == AES_KEYSIZE_128) {
3069                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3070                 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
3071         } else if (keylen == AES_KEYSIZE_192) {
3072                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3073                 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3074         } else if (keylen == AES_KEYSIZE_256) {
3075                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3076                 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3077         } else {
3078                 crypto_tfm_set_flags((struct crypto_tfm *)aead,
3079                                      CRYPTO_TFM_RES_BAD_KEY_LEN);
3080                 aeadctx->enckey_len = 0;
3081                 return -EINVAL;
3082         }
3083         aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3084                                                 key_ctx_size >> 4);
3085         memcpy(aeadctx->key, key, keylen);
3086         aeadctx->enckey_len = keylen;
3087
3088         return 0;
3089 }
3090
3091 static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3092                                 const u8 *key,
3093                                 unsigned int keylen)
3094 {
3095         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3096         int error;
3097
3098         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3099         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3100                               CRYPTO_TFM_REQ_MASK);
3101         error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3102         crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3103         crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3104                               CRYPTO_TFM_RES_MASK);
3105         if (error)
3106                 return error;
3107         return chcr_ccm_common_setkey(aead, key, keylen);
3108 }
3109
3110 static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3111                                     unsigned int keylen)
3112 {
3113         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3114         int error;
3115
3116         if (keylen < 3) {
3117                 crypto_tfm_set_flags((struct crypto_tfm *)aead,
3118                                      CRYPTO_TFM_RES_BAD_KEY_LEN);
3119                 aeadctx->enckey_len = 0;
3120                 return -EINVAL;
3121         }
3122         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3123         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3124                               CRYPTO_TFM_REQ_MASK);
3125         error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3126         crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3127         crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3128                               CRYPTO_TFM_RES_MASK);
3129         if (error)
3130                 return error;
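        /* per RFC 4309 the last 3 bytes of the key are the nonce salt */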
3131         keylen -= 3;
3132         memcpy(aeadctx->salt, key + keylen, 3);
3133         return chcr_ccm_common_setkey(aead, key, keylen);
3134 }
3135
3136 static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3137                            unsigned int keylen)
3138 {
3139         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3140         struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3141         struct crypto_cipher *cipher;
3142         unsigned int ck_size;
3143         int ret = 0, key_ctx_size = 0;
3144
3145         aeadctx->enckey_len = 0;
3146         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3147         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3148                               & CRYPTO_TFM_REQ_MASK);
3149         ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3150         crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3151         crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3152                               CRYPTO_TFM_RES_MASK);
3153         if (ret)
3154                 goto out;
3155
3156         if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3157             keylen > 3) {
3158                 keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
3159                 memcpy(aeadctx->salt, key + keylen, 4);
3160         }
3161         if (keylen == AES_KEYSIZE_128) {
3162                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3163         } else if (keylen == AES_KEYSIZE_192) {
3164                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3165         } else if (keylen == AES_KEYSIZE_256) {
3166                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3167         } else {
3168                 crypto_tfm_set_flags((struct crypto_tfm *)aead,
3169                                      CRYPTO_TFM_RES_BAD_KEY_LEN);
3170                 pr_err("GCM: Invalid key length %d\n", keylen);
3171                 ret = -EINVAL;
3172                 goto out;
3173         }
3174
3175         memcpy(aeadctx->key, key, keylen);
3176         aeadctx->enckey_len = keylen;
3177         key_ctx_size = sizeof(struct _key_ctx) +
3178                 ((DIV_ROUND_UP(keylen, 16)) << 4) +
3179                 AEAD_H_SIZE;
3180         aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3181                                         CHCR_KEYCTX_MAC_KEY_SIZE_128,
3182                                         0, 0,
3183                                         key_ctx_size >> 4);
3184         /* Calculate the H = CIPH(K, 0 repeated 16 times).
3185          * It will go in key context
3186          */
3187         cipher = crypto_alloc_cipher("aes-generic", 0, 0);
3188         if (IS_ERR(cipher)) {
3189                 aeadctx->enckey_len = 0;
3190                 ret = -ENOMEM;
3191                 goto out;
3192         }
3193
3194         ret = crypto_cipher_setkey(cipher, key, keylen);
3195         if (ret) {
3196                 aeadctx->enckey_len = 0;
3197                 goto out1;
3198         }
3199         memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3200         crypto_cipher_encrypt_one(cipher, gctx->ghash_h, gctx->ghash_h);
3201
3202 out1:
3203         crypto_free_cipher(cipher);
3204 out:
3205         return ret;
3206 }
3207
3208 static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3209                                    unsigned int keylen)
3210 {
3211         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3212         struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3213         /* holds both the authentication and cipher keys */
3214         struct crypto_authenc_keys keys;
3215         unsigned int bs;
3216         unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3217         int err = 0, i, key_ctx_len = 0;
3218         unsigned char ck_size = 0;
3219         unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
3220         struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
3221         struct algo_param param;
3222         int align;
3223         u8 *o_ptr = NULL;
3224
3225         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3226         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3227                               & CRYPTO_TFM_REQ_MASK);
3228         err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3229         crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3230         crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3231                               & CRYPTO_TFM_RES_MASK);
3232         if (err)
3233                 goto out;
3234
3235         if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3236                 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3237                 goto out;
3238         }
3239
3240         if (get_alg_config(&param, max_authsize)) {
3241                 pr_err("chcr : Unsupported digest size\n");
3242                 goto out;
3243         }
3244         if (keys.enckeylen == AES_KEYSIZE_128) {
3245                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3246         } else if (keys.enckeylen == AES_KEYSIZE_192) {
3247                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3248         } else if (keys.enckeylen == AES_KEYSIZE_256) {
3249                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3250         } else {
3251                 pr_err("chcr : Unsupported cipher key length\n");
3252                 goto out;
3253         }
3254
3255         /* Copy only the encryption key. The auth key is used only to
3256          * generate h(ipad) and h(opad), so it is not needed afterwards;
3257          * authkeylen equals the hash digest size.
3258          */
3259         memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3260         aeadctx->enckey_len = keys.enckeylen;
3261         get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3262                             aeadctx->enckey_len << 3);
3263
3264         base_hash = chcr_alloc_shash(max_authsize);
3265         if (IS_ERR(base_hash)) {
3266                 pr_err("chcr : Base driver cannot be loaded\n");
3267                 aeadctx->enckey_len = 0;
3268                 return -EINVAL;
3269         }
3270         {
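                /*
                 * Precompute the HMAC inner and outer partial hashes,
                 * H(key ^ ipad) and H(key ^ opad), so that per-request
                 * hashing can resume from these states instead of
                 * re-deriving them from the auth key each time.
                 */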
3271                 SHASH_DESC_ON_STACK(shash, base_hash);
3272                 shash->tfm = base_hash;
3273                 shash->flags = crypto_shash_get_flags(base_hash);
3274                 bs = crypto_shash_blocksize(base_hash);
3275                 align = KEYCTX_ALIGN_PAD(max_authsize);
3276                 o_ptr = actx->h_iopad + param.result_size + align;
3277
3278                 if (keys.authkeylen > bs) {
3279                         err = crypto_shash_digest(shash, keys.authkey,
3280                                                   keys.authkeylen,
3281                                                   o_ptr);
3282                         if (err) {
3283                                 pr_err("chcr : Hashing of the auth key failed\n");
3284                                 goto out;
3285                         }
3286                         keys.authkeylen = max_authsize;
3287                 } else
3288                         memcpy(o_ptr, keys.authkey, keys.authkeylen);
3289
3290                 /* Compute the ipad-digest*/
3291                 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3292                 memcpy(pad, o_ptr, keys.authkeylen);
3293                 for (i = 0; i < bs >> 2; i++)
3294                         *((unsigned int *)pad + i) ^= IPAD_DATA;
3295
3296                 if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3297                                               max_authsize))
3298                         goto out;
3299                 /* Compute the opad-digest */
3300                 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3301                 memcpy(pad, o_ptr, keys.authkeylen);
3302                 for (i = 0; i < bs >> 2; i++)
3303                         *((unsigned int *)pad + i) ^= OPAD_DATA;
3304
3305                 if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3306                         goto out;
3307
3308                 /* convert the ipad and opad digest to network order */
3309                 chcr_change_order(actx->h_iopad, param.result_size);
3310                 chcr_change_order(o_ptr, param.result_size);
3311                 key_ctx_len = sizeof(struct _key_ctx) +
3312                         ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4) +
3313                         (param.result_size + align) * 2;
3314                 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3315                                                 0, 1, key_ctx_len >> 4);
3316                 actx->auth_mode = param.auth_mode;
3317                 chcr_free_shash(base_hash);
3318
3319                 return 0;
3320         }
3321 out:
3322         aeadctx->enckey_len = 0;
3323         if (!IS_ERR(base_hash))
3324                 chcr_free_shash(base_hash);
3325         return -EINVAL;
3326 }
3327
3328 static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3329                                         const u8 *key, unsigned int keylen)
3330 {
3331         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3332         struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3333         struct crypto_authenc_keys keys;
3334         int err;
3335         /* keys holds both the authentication and cipher keys */
3336         int key_ctx_len = 0;
3337         unsigned char ck_size = 0;
3338
3339         crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3340         crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3341                               & CRYPTO_TFM_REQ_MASK);
3342         err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3343         crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3344         crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3345                               & CRYPTO_TFM_RES_MASK);
3346         if (err)
3347                 goto out;
3348
3349         if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3350                 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3351                 goto out;
3352         }
3353         if (keys.enckeylen == AES_KEYSIZE_128) {
3354                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3355         } else if (keys.enckeylen == AES_KEYSIZE_192) {
3356                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3357         } else if (keys.enckeylen == AES_KEYSIZE_256) {
3358                 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3359         } else {
3360                 pr_err("chcr : Unsupported cipher key length\n");
3361                 goto out;
3362         }
3363         memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3364         aeadctx->enckey_len = keys.enckeylen;
3365         get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3366                                     aeadctx->enckey_len << 3);
3367         key_ctx_len = sizeof(struct _key_ctx)
3368                 + ((DIV_ROUND_UP(keys.enckeylen, 16)) << 4);
3369
3370         aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3371                                                 0, key_ctx_len >> 4);
3372         actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3373         return 0;
3374 out:
3375         aeadctx->enckey_len = 0;
3376         return -EINVAL;
3377 }
3378 static int chcr_aead_encrypt(struct aead_request *req)
3379 {
3380         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3381         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3382
3383         reqctx->verify = VERIFY_HW;
3384
3385         switch (get_aead_subtype(tfm)) {
3386         case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
3387         case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
3388                 return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
3389                                     create_authenc_wr);
3390         case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3391         case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3392                 return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
3393                                     create_aead_ccm_wr);
3394         default:
3395                 return chcr_aead_op(req, CHCR_ENCRYPT_OP, 0,
3396                                     create_gcm_wr);
3397         }
3398 }
3399
3400 static int chcr_aead_decrypt(struct aead_request *req)
3401 {
3402         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3403         struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3404         struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3405         int size;
3406
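        /*
         * If the configured tag size is one the hardware cannot verify
         * directly, request the full-length digest and check the
         * truncated tag in software on completion.
         */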
3407         if (aeadctx->mayverify == VERIFY_SW) {
3408                 size = crypto_aead_maxauthsize(tfm);
3409                 reqctx->verify = VERIFY_SW;
3410         } else {
3411                 size = 0;
3412                 reqctx->verify = VERIFY_HW;
3413         }
3414
3415         switch (get_aead_subtype(tfm)) {
3416         case CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC:
3417         case CRYPTO_ALG_SUB_TYPE_AEAD_NULL:
3418                 return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
3419                                     create_authenc_wr);
3420         case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3421         case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3422                 return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
3423                                     create_aead_ccm_wr);
3424         default:
3425                 return chcr_aead_op(req, CHCR_DECRYPT_OP, size,
3426                                     create_gcm_wr);
3427         }
3428 }
3429
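/*
 *      chcr_aead_op - Build a work request for req via create_wr_fn and
 *      queue it to the adapter. Returns -EBUSY when the crypto queue is
 *      full and the request may not be backlogged, and -EINPROGRESS once
 *      the WR has been sent, per the usual asynchronous crypto API
 *      convention.
 */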
3430 static int chcr_aead_op(struct aead_request *req,
3431                           unsigned short op_type,
3432                           int size,
3433                           create_wr_t create_wr_fn)
3434 {
3435         struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3436         struct uld_ctx *u_ctx;
3437         struct sk_buff *skb;
3438
3439         if (!a_ctx(tfm)->dev) {
3440                 pr_err("chcr : %s : No crypto device.\n", __func__);
3441                 return -ENXIO;
3442         }
3443         u_ctx = ULD_CTX(a_ctx(tfm));
3444         if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
3445                                    a_ctx(tfm)->tx_qidx)) {
3446                 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3447                         return -EBUSY;
3448         }
3449
3450         /* Form a WR from req */
3451         skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size,
3452                            op_type);
3453
3454         if (IS_ERR(skb) || !skb)
3455                 return PTR_ERR(skb);
3456
3457         skb->dev = u_ctx->lldi.ports[0];
3458         set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
3459         chcr_send_wr(skb);
3460         return -EINPROGRESS;
3461 }
3462 static struct chcr_alg_template driver_algs[] = {
3463         /* AES-CBC */
3464         {
3465                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3466                 .is_registered = 0,
3467                 .alg.crypto = {
3468                         .cra_name               = "cbc(aes)",
3469                         .cra_driver_name        = "cbc-aes-chcr",
3470                         .cra_blocksize          = AES_BLOCK_SIZE,
3471                         .cra_init               = chcr_cra_init,
3472                         .cra_exit               = chcr_cra_exit,
3473                         .cra_u.ablkcipher       = {
3474                                 .min_keysize    = AES_MIN_KEY_SIZE,
3475                                 .max_keysize    = AES_MAX_KEY_SIZE,
3476                                 .ivsize         = AES_BLOCK_SIZE,
3477                                 .setkey                 = chcr_aes_cbc_setkey,
3478                                 .encrypt                = chcr_aes_encrypt,
3479                                 .decrypt                = chcr_aes_decrypt,
3480                         }
3481                 }
3482         },
3483         {
3484                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3485                 .is_registered = 0,
3486                 .alg.crypto =   {
3487                         .cra_name               = "xts(aes)",
3488                         .cra_driver_name        = "xts-aes-chcr",
3489                         .cra_blocksize          = AES_BLOCK_SIZE,
3490                         .cra_init               = chcr_cra_init,
3491                         .cra_exit               = NULL,
3492                         .cra_u .ablkcipher = {
3493                                         .min_keysize    = 2 * AES_MIN_KEY_SIZE,
3494                                         .max_keysize    = 2 * AES_MAX_KEY_SIZE,
3495                                         .ivsize         = AES_BLOCK_SIZE,
3496                                         .setkey         = chcr_aes_xts_setkey,
3497                                         .encrypt        = chcr_aes_encrypt,
3498                                         .decrypt        = chcr_aes_decrypt,
3499                                 }
3500                         }
3501         },
3502         {
3503                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3504                 .is_registered = 0,
3505                 .alg.crypto = {
3506                         .cra_name               = "ctr(aes)",
3507                         .cra_driver_name        = "ctr-aes-chcr",
3508                         .cra_blocksize          = 1,
3509                         .cra_init               = chcr_cra_init,
3510                         .cra_exit               = chcr_cra_exit,
3511                         .cra_u.ablkcipher       = {
3512                                 .min_keysize    = AES_MIN_KEY_SIZE,
3513                                 .max_keysize    = AES_MAX_KEY_SIZE,
3514                                 .ivsize         = AES_BLOCK_SIZE,
3515                                 .setkey         = chcr_aes_ctr_setkey,
3516                                 .encrypt        = chcr_aes_encrypt,
3517                                 .decrypt        = chcr_aes_decrypt,
3518                         }
3519                 }
3520         },
3521         {
3522                 .type = CRYPTO_ALG_TYPE_ABLKCIPHER |
3523                         CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
3524                 .is_registered = 0,
3525                 .alg.crypto = {
3526                         .cra_name               = "rfc3686(ctr(aes))",
3527                         .cra_driver_name        = "rfc3686-ctr-aes-chcr",
3528                         .cra_blocksize          = 1,
3529                         .cra_init               = chcr_rfc3686_init,
3530                         .cra_exit               = chcr_cra_exit,
3531                         .cra_u.ablkcipher       = {
3532                                 .min_keysize    = AES_MIN_KEY_SIZE +
3533                                         CTR_RFC3686_NONCE_SIZE,
3534                                 .max_keysize    = AES_MAX_KEY_SIZE +
3535                                         CTR_RFC3686_NONCE_SIZE,
3536                                 .ivsize         = CTR_RFC3686_IV_SIZE,
3537                                 .setkey         = chcr_aes_rfc3686_setkey,
3538                                 .encrypt        = chcr_aes_encrypt,
3539                                 .decrypt        = chcr_aes_decrypt,
3540                                 .geniv          = "seqiv",
3541                         }
3542                 }
3543         },
3544         /* SHA */
3545         {
3546                 .type = CRYPTO_ALG_TYPE_AHASH,
3547                 .is_registered = 0,
3548                 .alg.hash = {
3549                         .halg.digestsize = SHA1_DIGEST_SIZE,
3550                         .halg.base = {
3551                                 .cra_name = "sha1",
3552                                 .cra_driver_name = "sha1-chcr",
3553                                 .cra_blocksize = SHA1_BLOCK_SIZE,
3554                         }
3555                 }
3556         },
3557         {
3558                 .type = CRYPTO_ALG_TYPE_AHASH,
3559                 .is_registered = 0,
3560                 .alg.hash = {
3561                         .halg.digestsize = SHA256_DIGEST_SIZE,
3562                         .halg.base = {
3563                                 .cra_name = "sha256",
3564                                 .cra_driver_name = "sha256-chcr",
3565                                 .cra_blocksize = SHA256_BLOCK_SIZE,
3566                         }
3567                 }
3568         },
3569         {
3570                 .type = CRYPTO_ALG_TYPE_AHASH,
3571                 .is_registered = 0,
3572                 .alg.hash = {
3573                         .halg.digestsize = SHA224_DIGEST_SIZE,
3574                         .halg.base = {
3575                                 .cra_name = "sha224",
3576                                 .cra_driver_name = "sha224-chcr",
3577                                 .cra_blocksize = SHA224_BLOCK_SIZE,
3578                         }
3579                 }
3580         },
3581         {
3582                 .type = CRYPTO_ALG_TYPE_AHASH,
3583                 .is_registered = 0,
3584                 .alg.hash = {
3585                         .halg.digestsize = SHA384_DIGEST_SIZE,
3586                         .halg.base = {
3587                                 .cra_name = "sha384",
3588                                 .cra_driver_name = "sha384-chcr",
3589                                 .cra_blocksize = SHA384_BLOCK_SIZE,
3590                         }
3591                 }
3592         },
3593         {
3594                 .type = CRYPTO_ALG_TYPE_AHASH,
3595                 .is_registered = 0,
3596                 .alg.hash = {
3597                         .halg.digestsize = SHA512_DIGEST_SIZE,
3598                         .halg.base = {
3599                                 .cra_name = "sha512",
3600                                 .cra_driver_name = "sha512-chcr",
3601                                 .cra_blocksize = SHA512_BLOCK_SIZE,
3602                         }
3603                 }
3604         },
3605         /* HMAC */
3606         {
3607                 .type = CRYPTO_ALG_TYPE_HMAC,
3608                 .is_registered = 0,
3609                 .alg.hash = {
3610                         .halg.digestsize = SHA1_DIGEST_SIZE,
3611                         .halg.base = {
3612                                 .cra_name = "hmac(sha1)",
3613                                 .cra_driver_name = "hmac-sha1-chcr",
3614                                 .cra_blocksize = SHA1_BLOCK_SIZE,
3615                         }
3616                 }
3617         },
3618         {
3619                 .type = CRYPTO_ALG_TYPE_HMAC,
3620                 .is_registered = 0,
3621                 .alg.hash = {
3622                         .halg.digestsize = SHA224_DIGEST_SIZE,
3623                         .halg.base = {
3624                                 .cra_name = "hmac(sha224)",
3625                                 .cra_driver_name = "hmac-sha224-chcr",
3626                                 .cra_blocksize = SHA224_BLOCK_SIZE,
3627                         }
3628                 }
3629         },
3630         {
3631                 .type = CRYPTO_ALG_TYPE_HMAC,
3632                 .is_registered = 0,
3633                 .alg.hash = {
3634                         .halg.digestsize = SHA256_DIGEST_SIZE,
3635                         .halg.base = {
3636                                 .cra_name = "hmac(sha256)",
3637                                 .cra_driver_name = "hmac-sha256-chcr",
3638                                 .cra_blocksize = SHA256_BLOCK_SIZE,
3639                         }
3640                 }
3641         },
3642         {
3643                 .type = CRYPTO_ALG_TYPE_HMAC,
3644                 .is_registered = 0,
3645                 .alg.hash = {
3646                         .halg.digestsize = SHA384_DIGEST_SIZE,
3647                         .halg.base = {
3648                                 .cra_name = "hmac(sha384)",
3649                                 .cra_driver_name = "hmac-sha384-chcr",
3650                                 .cra_blocksize = SHA384_BLOCK_SIZE,
3651                         }
3652                 }
3653         },
3654         {
3655                 .type = CRYPTO_ALG_TYPE_HMAC,
3656                 .is_registered = 0,
3657                 .alg.hash = {
3658                         .halg.digestsize = SHA512_DIGEST_SIZE,
3659                         .halg.base = {
3660                                 .cra_name = "hmac(sha512)",
3661                                 .cra_driver_name = "hmac-sha512-chcr",
3662                                 .cra_blocksize = SHA512_BLOCK_SIZE,
3663                         }
3664                 }
3665         },
3666         /* Add AEAD Algorithms */
3667         {
3668                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
3669                 .is_registered = 0,
3670                 .alg.aead = {
3671                         .base = {
3672                                 .cra_name = "gcm(aes)",
3673                                 .cra_driver_name = "gcm-aes-chcr",
3674                                 .cra_blocksize  = 1,
3675                                 .cra_priority = CHCR_AEAD_PRIORITY,
3676                                 .cra_ctxsize =  sizeof(struct chcr_context) +
3677                                                 sizeof(struct chcr_aead_ctx) +
3678                                                 sizeof(struct chcr_gcm_ctx),
3679                         },
3680                         .ivsize = GCM_AES_IV_SIZE,
3681                         .maxauthsize = GHASH_DIGEST_SIZE,
3682                         .setkey = chcr_gcm_setkey,
3683                         .setauthsize = chcr_gcm_setauthsize,
3684                 }
3685         },
3686         {
3687                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
3688                 .is_registered = 0,
3689                 .alg.aead = {
3690                         .base = {
3691                                 .cra_name = "rfc4106(gcm(aes))",
3692                                 .cra_driver_name = "rfc4106-gcm-aes-chcr",
3693                                 .cra_blocksize   = 1,
3694                                 .cra_priority = CHCR_AEAD_PRIORITY + 1,
3695                                 .cra_ctxsize =  sizeof(struct chcr_context) +
3696                                                 sizeof(struct chcr_aead_ctx) +
3697                                                 sizeof(struct chcr_gcm_ctx),
3698
3699                         },
3700                         .ivsize = GCM_RFC4106_IV_SIZE,
3701                         .maxauthsize    = GHASH_DIGEST_SIZE,
3702                         .setkey = chcr_gcm_setkey,
3703                         .setauthsize    = chcr_4106_4309_setauthsize,
3704                 }
3705         },
3706         {
3707                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
3708                 .is_registered = 0,
3709                 .alg.aead = {
3710                         .base = {
3711                                 .cra_name = "ccm(aes)",
3712                                 .cra_driver_name = "ccm-aes-chcr",
3713                                 .cra_blocksize   = 1,
3714                                 .cra_priority = CHCR_AEAD_PRIORITY,
3715                                 .cra_ctxsize =  sizeof(struct chcr_context) +
3716                                                 sizeof(struct chcr_aead_ctx),
3717
3718                         },
3719                         .ivsize = AES_BLOCK_SIZE,
3720                         .maxauthsize    = GHASH_DIGEST_SIZE,
3721                         .setkey = chcr_aead_ccm_setkey,
3722                         .setauthsize    = chcr_ccm_setauthsize,
3723                 }
3724         },
3725         {
3726                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
3727                 .is_registered = 0,
3728                 .alg.aead = {
3729                         .base = {
3730                                 .cra_name = "rfc4309(ccm(aes))",
3731                                 .cra_driver_name = "rfc4309-ccm-aes-chcr",
3732                                 .cra_blocksize   = 1,
3733                                 .cra_priority = CHCR_AEAD_PRIORITY + 1,
3734                                 .cra_ctxsize =  sizeof(struct chcr_context) +
3735                                                 sizeof(struct chcr_aead_ctx),
3736
3737                         },
3738                         .ivsize = 8,
3739                         .maxauthsize    = GHASH_DIGEST_SIZE,
3740                         .setkey = chcr_aead_rfc4309_setkey,
3741                         .setauthsize = chcr_4106_4309_setauthsize,
3742                 }
3743         },
3744         {
3745                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
3746                 .is_registered = 0,
3747                 .alg.aead = {
3748                         .base = {
3749                                 .cra_name = "authenc(hmac(sha1),cbc(aes))",
3750                                 .cra_driver_name =
3751                                         "authenc-hmac-sha1-cbc-aes-chcr",
3752                                 .cra_blocksize   = AES_BLOCK_SIZE,
3753                                 .cra_priority = CHCR_AEAD_PRIORITY,
3754                                 .cra_ctxsize =  sizeof(struct chcr_context) +
3755                                                 sizeof(struct chcr_aead_ctx) +
3756                                                 sizeof(struct chcr_authenc_ctx),
3757
3758                         },
3759                         .ivsize = AES_BLOCK_SIZE,
3760                         .maxauthsize = SHA1_DIGEST_SIZE,
3761                         .setkey = chcr_authenc_setkey,
3762                         .setauthsize = chcr_authenc_setauthsize,
3763                 }
3764         },
3765         {
3766                 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
3767                 .is_registered = 0,
3768                 .alg.aead = {
3769                         .base = {
3770
3771                                 .cra_name = "authenc(hmac(sha256),cbc(aes))",
3772                                 .cra_driver_name =
3773                                         "authenc-hmac-sha256-cbc-aes-chcr",
                                .cra_blocksize = AES_BLOCK_SIZE,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha224),cbc(aes))",
                                .cra_driver_name =
                                        "authenc-hmac-sha224-cbc-aes-chcr",
                                .cra_blocksize = AES_BLOCK_SIZE,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA224_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha384),cbc(aes))",
                                .cra_driver_name =
                                        "authenc-hmac-sha384-cbc-aes-chcr",
                                .cra_blocksize = AES_BLOCK_SIZE,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA384_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_AUTHENC,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(hmac(sha512),cbc(aes))",
                                .cra_driver_name =
                                        "authenc-hmac-sha512-cbc-aes-chcr",
                                .cra_blocksize = AES_BLOCK_SIZE,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA512_DIGEST_SIZE,
                        .setkey = chcr_authenc_setkey,
                        .setauthsize = chcr_authenc_setauthsize,
                }
        },
        {
                .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_NULL,
                .is_registered = 0,
                .alg.aead = {
                        .base = {
                                .cra_name = "authenc(digest_null,cbc(aes))",
                                .cra_driver_name =
                                        "authenc-digest_null-cbc-aes-chcr",
                                .cra_blocksize = AES_BLOCK_SIZE,
                                .cra_priority = CHCR_AEAD_PRIORITY,
                                .cra_ctxsize = sizeof(struct chcr_context) +
                                               sizeof(struct chcr_aead_ctx) +
                                               sizeof(struct chcr_authenc_ctx),
                        },
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = 0,
                        .setkey = chcr_aead_digest_null_setkey,
                        .setauthsize = chcr_authenc_null_setauthsize,
                }
        },
};

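/*
 * Illustrative sketch, not part of the driver: a kernel consumer
 * reaches the AEADs declared in driver_algs[] by name through the
 * generic crypto API, and the "-chcr" implementation wins whenever
 * CHCR_AEAD_PRIORITY beats the competing software priorities.  The
 * helper below is hypothetical and exists only to make the table
 * concrete.
 */
static int __maybe_unused chcr_aead_usage_sketch(void)
{
        struct crypto_aead *tfm;
        int err;

        tfm = crypto_alloc_aead("authenc(hmac(sha256),cbc(aes))", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        /* Any tag length up to .maxauthsize (SHA256_DIGEST_SIZE here). */
        err = crypto_aead_setauthsize(tfm, SHA256_DIGEST_SIZE);

        crypto_free_aead(tfm);
        return err;
}
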
/*
 *      chcr_unregister_alg - Deregister crypto algorithms with the
 *      kernel framework.
 */
static int chcr_unregister_alg(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
                switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
                case CRYPTO_ALG_TYPE_ABLKCIPHER:
                        if (driver_algs[i].is_registered)
                                crypto_unregister_alg(
                                                &driver_algs[i].alg.crypto);
                        break;
                case CRYPTO_ALG_TYPE_AEAD:
                        if (driver_algs[i].is_registered)
                                crypto_unregister_aead(
                                                &driver_algs[i].alg.aead);
                        break;
                case CRYPTO_ALG_TYPE_AHASH:
                        if (driver_algs[i].is_registered)
                                crypto_unregister_ahash(
                                                &driver_algs[i].alg.hash);
                        break;
                }
                driver_algs[i].is_registered = 0;
        }
        return 0;
}

#define SZ_AHASH_CTX sizeof(struct chcr_context)
#define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
#define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
#define AHASH_CRA_FLAGS (CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC)

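/*
 * Illustrative sketch, not part of the driver: SZ_AHASH_REQ_CTX is
 * reused below as halg.statesize, i.e. the size of the opaque buffer
 * callers pass to crypto_ahash_export()/crypto_ahash_import() to
 * suspend and later resume a partial hash.  Hypothetical helper with
 * assumed names.
 */
static int __maybe_unused chcr_ahash_state_sketch(struct ahash_request *req)
{
        u8 state[SZ_AHASH_REQ_CTX];

        /* Snapshot the partial hash; the buffer must be statesize bytes. */
        return crypto_ahash_export(req, state);
}
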
/*
 *      chcr_register_alg - Register crypto algorithms with the kernel
 *      framework.
 */
static int chcr_register_alg(void)
{
        struct ahash_alg *a_hash;
        int err = 0, i;
        char *name = NULL;

        for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
                if (driver_algs[i].is_registered)
                        continue;
                switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
                case CRYPTO_ALG_TYPE_ABLKCIPHER:
                        driver_algs[i].alg.crypto.cra_priority =
                                CHCR_CRA_PRIORITY;
                        driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
                        driver_algs[i].alg.crypto.cra_flags =
                                CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
                                CRYPTO_ALG_NEED_FALLBACK;
                        driver_algs[i].alg.crypto.cra_ctxsize =
                                sizeof(struct chcr_context) +
                                sizeof(struct ablk_ctx);
                        driver_algs[i].alg.crypto.cra_alignmask = 0;
                        driver_algs[i].alg.crypto.cra_type =
                                &crypto_ablkcipher_type;
                        err = crypto_register_alg(&driver_algs[i].alg.crypto);
                        name = driver_algs[i].alg.crypto.cra_driver_name;
                        break;
                case CRYPTO_ALG_TYPE_AEAD:
                        driver_algs[i].alg.aead.base.cra_flags =
                                CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC |
                                CRYPTO_ALG_NEED_FALLBACK;
                        driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
                        driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
                        driver_algs[i].alg.aead.init = chcr_aead_cra_init;
                        driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
                        driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
                        err = crypto_register_aead(&driver_algs[i].alg.aead);
                        name = driver_algs[i].alg.aead.base.cra_driver_name;
                        break;
                case CRYPTO_ALG_TYPE_AHASH:
                        a_hash = &driver_algs[i].alg.hash;
                        a_hash->update = chcr_ahash_update;
                        a_hash->final = chcr_ahash_final;
                        a_hash->finup = chcr_ahash_finup;
                        a_hash->digest = chcr_ahash_digest;
                        a_hash->export = chcr_ahash_export;
                        a_hash->import = chcr_ahash_import;
                        a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
                        a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
                        a_hash->halg.base.cra_module = THIS_MODULE;
                        a_hash->halg.base.cra_flags = AHASH_CRA_FLAGS;
                        a_hash->halg.base.cra_alignmask = 0;
                        a_hash->halg.base.cra_exit = NULL;
                        a_hash->halg.base.cra_type = &crypto_ahash_type;

                        if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
                                a_hash->halg.base.cra_init = chcr_hmac_cra_init;
                                a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
                                a_hash->init = chcr_hmac_init;
                                a_hash->setkey = chcr_ahash_setkey;
                                a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
                        } else {
                                a_hash->init = chcr_sha_init;
                                a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
                                a_hash->halg.base.cra_init = chcr_sha_cra_init;
                        }
                        err = crypto_register_ahash(&driver_algs[i].alg.hash);
                        name = a_hash->halg.base.cra_driver_name;
                        break;
                }
                if (err) {
                        /* pr_fmt() already prepends "chcr:". */
                        pr_err("%s : Algorithm registration failed\n", name);
                        goto register_err;
                }
                driver_algs[i].is_registered = 1;
        }
        return 0;

register_err:
        /* Unwind any algorithms registered before the failure. */
        chcr_unregister_alg();
        return err;
}

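/*
 * Design note: registration is all-or-nothing.  On any failure the
 * register_err path above tears down everything that did register via
 * chcr_unregister_alg(), and the per-entry is_registered flag lets a
 * later chcr_register_alg() call skip entries that are already live.
 */
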
/*
 *      start_crypto - Register the crypto algorithms.
 *      This should be called once when the first device comes up. After
 *      this the kernel will start calling driver APIs for crypto
 *      operations.
 */
int start_crypto(void)
{
        return chcr_register_alg();
}

/*
 *      stop_crypto - Deregister all the crypto algorithms with the kernel.
 *      This should be called once when the last device goes down. After
 *      this the kernel will not call the driver API for crypto operations.
 */
int stop_crypto(void)
{
        chcr_unregister_alg();
        return 0;
}
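
/*
 * Illustrative sketch only (assumed names, not actual chcr_core.c
 * code): the core module is expected to pair these calls around
 * device lifetime, e.g. with a device counter:
 *
 *      if (atomic_inc_return(&chcr_dev_count) == 1)
 *              start_crypto();
 *      ...
 *      if (atomic_dec_and_test(&chcr_dev_count))
 *              stop_crypto();
 */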