/*
 * This file is part of the Chelsio T6 Crypto driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Written and Maintained by:
 */
#define pr_fmt(fmt) "chcr:" fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/authenc.h>
#include <crypto/ctr.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

#include "chcr_core.h"
#include "chcr_algo.h"
#include "chcr_crypto.h"

#define IV AES_BLOCK_SIZE
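
/*
 * Space, in bytes, that a given number of scatter/gather entries
 * occupies inside a work request: sgl_ent_len[] is indexed by the
 * number of ULPTX source SGL entries, dsgl_ent_len[] by the number of
 * destination phys DSGL entries.
 */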
static unsigned int sgl_ent_len[] = {
        0, 0, 16, 24, 40, 48, 64, 72, 88,
        96, 112, 120, 136, 144, 160, 168, 184,
        192, 208, 216, 232, 240, 256, 264, 280,
        288, 304, 312, 328, 336, 352, 360, 376
};
static unsigned int dsgl_ent_len[] = {
        0, 32, 32, 48, 48, 64, 64, 80, 80,
        112, 112, 128, 128, 144, 144, 160, 160,
        192, 192, 208, 208, 224, 224, 240, 240,
        272, 272, 288, 288, 304, 304, 320, 320
};
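
/*
 * AES key-schedule round constants (Rcon), kept in the most
 * significant byte to match the big-endian word handling in
 * get_aes_decrypt_key().
 */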
static u32 round_constant[11] = {
        0x01000000, 0x02000000, 0x04000000, 0x08000000,
        0x10000000, 0x20000000, 0x40000000, 0x80000000,
        0x1B000000, 0x36000000, 0x6C000000
};
static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
                                   unsigned char *input, int err);
static inline struct chcr_aead_ctx *AEAD_CTX(struct chcr_context *ctx)
{
        return ctx->crypto_ctx->aeadctx;
}

static inline struct ablk_ctx *ABLK_CTX(struct chcr_context *ctx)
{
        return ctx->crypto_ctx->ablkctx;
}

static inline struct hmac_ctx *HMAC_CTX(struct chcr_context *ctx)
{
        return ctx->crypto_ctx->hmacctx;
}

static inline struct chcr_gcm_ctx *GCM_CTX(struct chcr_aead_ctx *gctx)
{
        return gctx->ctx->gcm;
}

static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
{
        return gctx->ctx->authenc;
}

static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
{
        return container_of(ctx->dev, struct uld_ctx, dev);
}
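
/* True when the whole WR payload fits as immediate data in one WR. */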
static inline int is_ofld_imm(const struct sk_buff *skb)
{
        return (skb->len <= SGE_MAX_WR_LEN);
}

static inline void chcr_init_hctx_per_wr(struct chcr_ahash_req_ctx *reqctx)
{
        memset(&reqctx->hctx_wr, 0, sizeof(struct chcr_hctx_per_wr));
}
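
/*
 * Count the SGL entries needed to cover @reqlen bytes of @sg, skipping
 * the first @skip bytes, when one hardware entry holds at most @entlen
 * bytes.
 */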
static int sg_nents_xlen(struct scatterlist *sg, unsigned int reqlen,
                         unsigned int entlen, unsigned int skip)
{
        unsigned int nents = 0, less;
        unsigned int skip_len = 0;

        while (sg && skip) {
                if (sg_dma_len(sg) <= skip) {
                        skip -= sg_dma_len(sg);
                        sg = sg_next(sg);
                } else {
                        skip_len = skip;
                        skip = 0;
                }
        }
        while (sg && reqlen) {
                less = min(reqlen, sg_dma_len(sg) - skip_len);
                nents += DIV_ROUND_UP(less, entlen);
                reqlen -= less;
                skip_len = 0;
                sg = sg_next(sg);
        }
        return nents;
}
static inline int get_aead_subtype(struct crypto_aead *aead)
{
        struct aead_alg *alg = crypto_aead_alg(aead);
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(alg, struct chcr_alg_template, alg.aead);

        return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
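
/*
 * Compare the authentication tag computed by the hardware (returned in
 * the CPL_FW6_PLD message) against the tag carried with the request,
 * reporting any mismatch through @err.
 */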
void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
{
        u8 temp[SHA512_DIGEST_SIZE];
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        int authsize = crypto_aead_authsize(tfm);
        struct cpl_fw6_pld *fw6_pld;
        int cmp = 0;

        fw6_pld = (struct cpl_fw6_pld *)input;
        if ((get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) ||
            (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_GCM)) {
                cmp = crypto_memneq(&fw6_pld->data[2], (fw6_pld + 1), authsize);
        } else {
                sg_pcopy_to_buffer(req->src, sg_nents(req->src), temp,
                                   authsize, req->assoclen +
                                   req->cryptlen - authsize);
                cmp = crypto_memneq(temp, (fw6_pld + 1), authsize);
        }
        if (cmp)
                *err = -EBADMSG;
        else
                *err = 0;
}
static int chcr_inc_wrcount(struct chcr_dev *dev)
{
        if (dev->state == CHCR_DETACH)
                return 1;
        atomic_inc(&dev->inflight);
        return 0;
}

static inline void chcr_dec_wrcount(struct chcr_dev *dev)
{
        atomic_dec(&dev->inflight);
}
static inline int chcr_handle_aead_resp(struct aead_request *req,
                                        unsigned char *input,
                                        int err)
{
        struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct chcr_dev *dev = a_ctx(tfm)->dev;

        chcr_aead_common_exit(req);
        if (reqctx->verify == VERIFY_SW) {
                chcr_verify_tag(req, input, &err);
                reqctx->verify = VERIFY_HW;
        }
        chcr_dec_wrcount(dev);
        req->base.complete(&req->base, err);

        return err;
}
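
/*
 * Run the AES key schedule and emit the final round keys in reverse
 * order, as the hardware expects for decryption; this fills
 * ablkctx->rrkey (the "reverse round" key) used for CBC decrypt.
 */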
static void get_aes_decrypt_key(unsigned char *dec_key,
                                const unsigned char *key,
                                unsigned int keylength)
{
        u32 temp;
        u32 w_ring[MAX_NK];
        int i, j, k;
        u8 nr, nk;

        switch (keylength) {
        case AES_KEYLENGTH_128BIT:
                nk = KEYLENGTH_4BYTES;
                nr = NUMBER_OF_ROUNDS_10;
                break;
        case AES_KEYLENGTH_192BIT:
                nk = KEYLENGTH_6BYTES;
                nr = NUMBER_OF_ROUNDS_12;
                break;
        case AES_KEYLENGTH_256BIT:
                nk = KEYLENGTH_8BYTES;
                nr = NUMBER_OF_ROUNDS_14;
                break;
        default:
                return;
        }

        for (i = 0; i < nk; i++)
                w_ring[i] = be32_to_cpu(*(u32 *)&key[4 * i]);

        i = 0;
        temp = w_ring[nk - 1];
        while (i + nk < (nr + 1) * 4) {
                if (!(i % nk)) {
                        /* RotWord, SubWord, then XOR with Rcon */
                        temp = (temp << 8) | (temp >> 24);
                        temp = aes_ks_subword(temp);
                        temp ^= round_constant[i / nk];
                } else if (nk == 8 && (i % 4 == 0)) {
                        temp = aes_ks_subword(temp);
                }
                w_ring[i % nk] ^= temp;
                temp = w_ring[i % nk];
                i++;
        }
        i--;
        for (k = 0, j = i % nk; k < nk; k++) {
                *((u32 *)dec_key + k) = htonl(w_ring[j]);
                j--;
                if (j < 0)
                        j += nk;
        }
}
static struct crypto_shash *chcr_alloc_shash(unsigned int ds)
{
        struct crypto_shash *base_hash = ERR_PTR(-EINVAL);

        switch (ds) {
        case SHA1_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha1", 0, 0);
                break;
        case SHA224_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha224", 0, 0);
                break;
        case SHA256_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha256", 0, 0);
                break;
        case SHA384_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha384", 0, 0);
                break;
        case SHA512_DIGEST_SIZE:
                base_hash = crypto_alloc_shash("sha512", 0, 0);
                break;
        }

        return base_hash;
}
static int chcr_compute_partial_hash(struct shash_desc *desc,
                                     char *iopad, char *result_hash,
                                     int digest_size)
{
        struct sha1_state sha1_st;
        struct sha256_state sha256_st;
        struct sha512_state sha512_st;
        int error = 0;

        if (digest_size == SHA1_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA1_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha1_st);
                memcpy(result_hash, sha1_st.state, SHA1_DIGEST_SIZE);
        } else if (digest_size == SHA224_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha256_st);
                memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);

        } else if (digest_size == SHA256_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA256_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha256_st);
                memcpy(result_hash, sha256_st.state, SHA256_DIGEST_SIZE);

        } else if (digest_size == SHA384_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha512_st);
                memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);

        } else if (digest_size == SHA512_DIGEST_SIZE) {
                error = crypto_shash_init(desc) ?:
                        crypto_shash_update(desc, iopad, SHA512_BLOCK_SIZE) ?:
                        crypto_shash_export(desc, (void *)&sha512_st);
                memcpy(result_hash, sha512_st.state, SHA512_DIGEST_SIZE);
        } else {
                pr_err("Unknown digest size %d\n", digest_size);
                return -EINVAL;
        }
        return error;
}
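
/*
 * The hardware wants the partial-hash state words in big-endian order;
 * swap each 32-bit word (64-bit for the SHA-512 state) in place.
 */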
static void chcr_change_order(char *buf, int ds)
{
        int i;

        if (ds == SHA512_DIGEST_SIZE) {
                for (i = 0; i < (ds / sizeof(u64)); i++)
                        *((__be64 *)buf + i) =
                                cpu_to_be64(*((u64 *)buf + i));
        } else {
                for (i = 0; i < (ds / sizeof(u32)); i++)
                        *((__be32 *)buf + i) =
                                cpu_to_be32(*((u32 *)buf + i));
        }
}
static inline int is_hmac(struct crypto_tfm *tfm)
{
        struct crypto_alg *alg = tfm->__crt_alg;
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(__crypto_ahash_alg(alg), struct chcr_alg_template,
                             alg.hash);

        if (chcr_crypto_alg->type == CRYPTO_ALG_TYPE_HMAC)
                return 1;
        return 0;
}
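
/*
 * dsgl_walk_*() incrementally build the destination physical DSGL that
 * tells the hardware where to write its results; the ulptx_walk_*()
 * helpers below build the ULPTX source SGL the same way.
 */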
static inline void dsgl_walk_init(struct dsgl_walk *walk,
                                  struct cpl_rx_phys_dsgl *dsgl)
{
        walk->dsgl = dsgl;
        walk->nents = 0;
        walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}
static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
                                 int pci_chan_id)
{
        struct cpl_rx_phys_dsgl *phys_cpl;

        phys_cpl = walk->dsgl;

        phys_cpl->op_to_tid = htonl(CPL_RX_PHYS_DSGL_OPCODE_V(CPL_RX_PHYS_DSGL)
                                    | CPL_RX_PHYS_DSGL_ISRDMA_V(0));
        phys_cpl->pcirlxorder_to_noofsgentr =
                htonl(CPL_RX_PHYS_DSGL_PCIRLXORDER_V(0) |
                      CPL_RX_PHYS_DSGL_PCINOSNOOP_V(0) |
                      CPL_RX_PHYS_DSGL_PCITPHNTENB_V(0) |
                      CPL_RX_PHYS_DSGL_PCITPHNT_V(0) |
                      CPL_RX_PHYS_DSGL_DCAID_V(0) |
                      CPL_RX_PHYS_DSGL_NOOFSGENTR_V(walk->nents));
        phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
        phys_cpl->rss_hdr_int.qid = htons(qid);
        phys_cpl->rss_hdr_int.hash_val = 0;
        phys_cpl->rss_hdr_int.channel = pci_chan_id;
}
static inline void dsgl_walk_add_page(struct dsgl_walk *walk,
                                      size_t size, dma_addr_t addr)
{
        int j;

        if (!size)
                return;
        j = walk->nents;
        walk->to->len[j % 8] = htons(size);
        walk->to->addr[j % 8] = cpu_to_be64(addr);
        j++;
        if ((j % 8) == 0)
                walk->to++;
        walk->nents = j;
}
static void dsgl_walk_add_sg(struct dsgl_walk *walk,
                             struct scatterlist *sg,
                             unsigned int slen, unsigned int skip)
{
        int skip_len = 0, offset, ent_len;
        unsigned int left_size = slen, len = 0;
        unsigned int j = walk->nents;

        while (sg && skip) {
                if (sg_dma_len(sg) <= skip) {
                        skip -= sg_dma_len(sg);
                        sg = sg_next(sg);
                } else {
                        skip_len = skip;
                        skip = 0;
                }
        }
        while (left_size && sg) {
                len = min_t(u32, left_size, sg_dma_len(sg) - skip_len);
                offset = 0;
                while (len) {
                        ent_len = min_t(u32, len, CHCR_DST_SG_SIZE);
                        walk->to->len[j % 8] = htons(ent_len);
                        walk->to->addr[j % 8] = cpu_to_be64(sg_dma_address(sg) +
                                                            skip_len + offset);
                        offset += ent_len;
                        len -= ent_len;
                        j++;
                        if ((j % 8) == 0)
                                walk->to++;
                }
                walk->last_sg = sg;
                walk->last_sg_len = min_t(u32, left_size, sg_dma_len(sg) -
                                          skip_len) + skip_len;
                left_size -= min_t(u32, left_size, sg_dma_len(sg) - skip_len);
                skip_len = 0;
                sg = sg_next(sg);
        }
        walk->nents = j;
}
static inline void ulptx_walk_init(struct ulptx_walk *walk,
                                   struct ulptx_sgl *ulp)
{
        walk->sgl = ulp;
        walk->nents = 0;
        walk->pair_idx = 0;
        walk->pair = ulp->sge;
        walk->last_sg = NULL;
        walk->last_sg_len = 0;
}
static inline void ulptx_walk_end(struct ulptx_walk *walk)
{
        walk->sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
                                    ULPTX_NSGE_V(walk->nents));
}
static inline void ulptx_walk_add_page(struct ulptx_walk *walk,
                                       size_t size, dma_addr_t addr)
{
        if (!size)
                return;

        if (walk->nents == 0) {
                walk->sgl->len0 = cpu_to_be32(size);
                walk->sgl->addr0 = cpu_to_be64(addr);
        } else {
                walk->pair->addr[walk->pair_idx] = cpu_to_be64(addr);
                walk->pair->len[walk->pair_idx] = cpu_to_be32(size);
                walk->pair_idx = !walk->pair_idx;
                if (!walk->pair_idx)
                        walk->pair++;
        }
        walk->nents++;
}
static void ulptx_walk_add_sg(struct ulptx_walk *walk,
                              struct scatterlist *sg,
                              unsigned int len, unsigned int skip)
{
        int small;
        int skip_len = 0;
        unsigned int sgmin;

        if (!len)
                return;
        while (sg && skip) {
                if (sg_dma_len(sg) <= skip) {
                        skip -= sg_dma_len(sg);
                        sg = sg_next(sg);
                } else {
                        skip_len = skip;
                        skip = 0;
                }
        }
        WARN(!sg, "SG should not be null here\n");
        if (sg && (walk->nents == 0)) {
                small = min_t(unsigned int, sg_dma_len(sg) - skip_len, len);
                sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
                walk->sgl->len0 = cpu_to_be32(sgmin);
                walk->sgl->addr0 = cpu_to_be64(sg_dma_address(sg) + skip_len);
                walk->nents++;
                len -= sgmin;
                walk->last_sg = sg;
                walk->last_sg_len = sgmin + skip_len;
                skip_len += sgmin;
                if (sg_dma_len(sg) == skip_len) {
                        sg = sg_next(sg);
                        skip_len = 0;
                }
        }

        while (sg && len) {
                small = min(sg_dma_len(sg) - skip_len, len);
                sgmin = min_t(unsigned int, small, CHCR_SRC_SG_SIZE);
                walk->pair->len[walk->pair_idx] = cpu_to_be32(sgmin);
                walk->pair->addr[walk->pair_idx] =
                        cpu_to_be64(sg_dma_address(sg) + skip_len);
                walk->pair_idx = !walk->pair_idx;
                walk->nents++;
                if (!walk->pair_idx)
                        walk->pair++;
                len -= sgmin;
                skip_len += sgmin;
                walk->last_sg = sg;
                walk->last_sg_len = skip_len;
                if (sg_dma_len(sg) == skip_len) {
                        sg = sg_next(sg);
                        skip_len = 0;
                }
        }
}
static inline int get_cryptoalg_subtype(struct crypto_tfm *tfm)
{
        struct crypto_alg *alg = tfm->__crt_alg;
        struct chcr_alg_template *chcr_crypto_alg =
                container_of(alg, struct chcr_alg_template, alg.crypto);

        return chcr_crypto_alg->type & CRYPTO_ALG_SUB_TYPE_MASK;
}
static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
{
        struct adapter *adap = netdev2adap(dev);
        struct sge_uld_txq_info *txq_info =
                adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
        struct sge_uld_txq *txq;
        int ret = 0;

        local_bh_disable();
        txq = &txq_info->uldtxq[idx];
        spin_lock(&txq->sendq.lock);
        if (txq->full)
                ret = -1;
        spin_unlock(&txq->sendq.lock);
        local_bh_enable();
        return ret;
}
static int generate_copy_rrkey(struct ablk_ctx *ablkctx,
                               struct _key_ctx *key_ctx)
{
        if (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) {
                memcpy(key_ctx->key, ablkctx->rrkey, ablkctx->enckey_len);
        } else {
                memcpy(key_ctx->key,
                       ablkctx->key + (ablkctx->enckey_len >> 1),
                       ablkctx->enckey_len >> 1);
                memcpy(key_ctx->key + (ablkctx->enckey_len >> 1),
                       ablkctx->rrkey, ablkctx->enckey_len >> 1);
        }
        return 0;
}
static int chcr_hash_ent_in_wr(struct scatterlist *src,
                               unsigned int minsg,
                               unsigned int space,
                               unsigned int srcskip)
{
        int srclen = 0;
        int srcsg = minsg;
        int soffset = 0, sless;

        if (sg_dma_len(src) == srcskip) {
                src = sg_next(src);
                srcskip = 0;
        }
        while (src && space > (sgl_ent_len[srcsg + 1])) {
                sless = min_t(unsigned int, sg_dma_len(src) - soffset - srcskip,
                              CHCR_SRC_SG_SIZE);
                srclen += sless;
                soffset += sless;
                srcsg++;
                if (sg_dma_len(src) == (soffset + srcskip)) {
                        src = sg_next(src);
                        soffset = 0;
                        srcskip = 0;
                }
        }
        return srclen;
}
static int chcr_sg_ent_in_wr(struct scatterlist *src,
                             struct scatterlist *dst,
                             unsigned int minsg,
                             unsigned int space,
                             unsigned int srcskip,
                             unsigned int dstskip)
{
        int srclen = 0, dstlen = 0;
        int srcsg = minsg, dstsg = minsg;
        int offset = 0, soffset = 0, less, sless = 0;

        if (sg_dma_len(src) == srcskip) {
                src = sg_next(src);
                srcskip = 0;
        }
        if (sg_dma_len(dst) == dstskip) {
                dst = sg_next(dst);
                dstskip = 0;
        }

        while (src && dst &&
               space > (sgl_ent_len[srcsg + 1] + dsgl_ent_len[dstsg])) {
                sless = min_t(unsigned int, sg_dma_len(src) - srcskip - soffset,
                              CHCR_SRC_SG_SIZE);
                srclen += sless;
                srcsg++;
                while (dst && ((dstsg + 1) <= MAX_DSGL_ENT) &&
                       space > (sgl_ent_len[srcsg] + dsgl_ent_len[dstsg + 1])) {
                        if (srclen <= dstlen)
                                break;
                        less = min_t(unsigned int, sg_dma_len(dst) - offset -
                                     dstskip, CHCR_DST_SG_SIZE);
                        dstlen += less;
                        dstsg++;
                        offset += less;
                        if ((offset + dstskip) == sg_dma_len(dst)) {
                                dst = sg_next(dst);
                                offset = 0;
                                dstskip = 0;
                        }
                }
                soffset += sless;
                if ((soffset + srcskip) == sg_dma_len(src)) {
                        src = sg_next(src);
                        soffset = 0;
                        srcskip = 0;
                }
        }
        return min(srclen, dstlen);
}
static int chcr_cipher_fallback(struct crypto_sync_skcipher *cipher,
                                u32 flags,
                                struct scatterlist *src,
                                struct scatterlist *dst,
                                unsigned int nbytes,
                                u8 *iv,
                                unsigned short op_type)
{
        int err;

        SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, cipher);

        skcipher_request_set_sync_tfm(subreq, cipher);
        skcipher_request_set_callback(subreq, flags, NULL, NULL);
        skcipher_request_set_crypt(subreq, src, dst,
                                   nbytes, iv);

        err = op_type ? crypto_skcipher_decrypt(subreq) :
                crypto_skcipher_encrypt(subreq);
        skcipher_request_zero(subreq);

        return err;
}
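
/*
 * Fill in the common FW_CRYPTO_LOOKASIDE_WR header and ULPTX preamble
 * shared by the cipher, hash and AEAD request builders.
 */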
static inline void create_wreq(struct chcr_context *ctx,
                               struct chcr_wr *chcr_req,
                               struct crypto_async_request *req,
                               unsigned int imm,
                               int hash_sz, unsigned int len16,
                               unsigned int sc_len, unsigned int lcb)
{
        struct uld_ctx *u_ctx = ULD_CTX(ctx);
        int qid = u_ctx->lldi.rxq_ids[ctx->rx_qidx];

        chcr_req->wreq.op_to_cctx_size = FILL_WR_OP_CCTX_SIZE;
        chcr_req->wreq.pld_size_hash_size =
                htonl(FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE_V(hash_sz));
        chcr_req->wreq.len16_pkd =
                htonl(FW_CRYPTO_LOOKASIDE_WR_LEN16_V(DIV_ROUND_UP(len16, 16)));
        chcr_req->wreq.cookie = cpu_to_be64((uintptr_t)req);
        chcr_req->wreq.rx_chid_to_rx_q_id =
                FILL_WR_RX_Q_ID(ctx->tx_chan_id, qid,
                                !!lcb, ctx->tx_qidx);

        chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
                                                       qid);
        chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
                                     ((sizeof(chcr_req->wreq)) >> 4)));

        chcr_req->sc_imm.cmd_more = FILL_CMD_MORE(!imm);
        chcr_req->sc_imm.len = cpu_to_be32(sizeof(struct cpl_tx_sec_pdu) +
                                           sizeof(chcr_req->key_ctx) + sc_len);
}
/**
 * create_cipher_wr - form the WR for cipher operations
 * @wrparam: cipher WR parameters: the request itself, the ingress qid
 *	where the response of this WR should be received, and the number
 *	of bytes to be processed (encryption or decryption).
 */
static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct sk_buff *skb = NULL;
        struct chcr_wr *chcr_req;
        struct cpl_rx_phys_dsgl *phys_cpl;
        struct ulptx_sgl *ulptx;
        struct chcr_blkcipher_req_ctx *reqctx =
                ablkcipher_request_ctx(wrparam->req);
        unsigned int temp = 0, transhdr_len, dst_size;
        int error;
        int nents;
        unsigned int kctx_len;
        gfp_t flags = wrparam->req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
                        GFP_KERNEL : GFP_ATOMIC;
        struct adapter *adap = padap(c_ctx(tfm)->dev);

        nents = sg_nents_xlen(reqctx->dstsg, wrparam->bytes, CHCR_DST_SG_SIZE,
                              reqctx->dst_ofst);
        dst_size = get_space_for_phys_dsgl(nents);
        kctx_len = roundup(ablkctx->enckey_len, 16);
        transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
        nents = sg_nents_xlen(reqctx->srcsg, wrparam->bytes,
                              CHCR_SRC_SG_SIZE, reqctx->src_ofst);
        temp = reqctx->imm ? roundup(wrparam->bytes, 16) :
                                (sgl_len(nents) * 8);
        transhdr_len += temp;
        transhdr_len = roundup(transhdr_len, 16);
        skb = alloc_skb(SGE_MAX_WR_LEN, flags);
        if (!skb) {
                error = -ENOMEM;
                goto err;
        }
        chcr_req = __skb_put_zero(skb, transhdr_len);
        chcr_req->sec_cpl.op_ivinsrtofst =
                FILL_SEC_CPL_OP_IVINSR(c_ctx(tfm)->tx_chan_id, 2, 1);

        chcr_req->sec_cpl.pldlen = htonl(IV + wrparam->bytes);
        chcr_req->sec_cpl.aadstart_cipherstop_hi =
                FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, IV + 1, 0);

        chcr_req->sec_cpl.cipherstop_lo_authinsert =
                FILL_SEC_CPL_AUTHINSERT(0, 0, 0, 0);
        chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, 0,
                                                        ablkctx->ciph_mode,
                                                        0, 0, IV >> 1);
        chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 0,
                                                        0, 0, dst_size);

        chcr_req->key_ctx.ctx_hdr = ablkctx->key_ctx_hdr;
        if ((reqctx->op == CHCR_DECRYPT_OP) &&
            (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
               CRYPTO_ALG_SUB_TYPE_CTR)) &&
            (!(get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
               CRYPTO_ALG_SUB_TYPE_CTR_RFC3686))) {
                generate_copy_rrkey(ablkctx, &chcr_req->key_ctx);
        } else {
                if ((ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC) ||
                    (ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CTR)) {
                        memcpy(chcr_req->key_ctx.key, ablkctx->key,
                               ablkctx->enckey_len);
                } else {
                        memcpy(chcr_req->key_ctx.key, ablkctx->key +
                               (ablkctx->enckey_len >> 1),
                               ablkctx->enckey_len >> 1);
                        memcpy(chcr_req->key_ctx.key +
                               (ablkctx->enckey_len >> 1),
                               ablkctx->key,
                               ablkctx->enckey_len >> 1);
                }
        }
        phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
        ulptx = (struct ulptx_sgl *)((u8 *)(phys_cpl + 1) + dst_size);
        chcr_add_cipher_src_ent(wrparam->req, ulptx, wrparam);
        chcr_add_cipher_dst_ent(wrparam->req, phys_cpl, wrparam, wrparam->qid);

        atomic_inc(&adap->chcr_stats.cipher_rqst);
        temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + kctx_len + IV
                + (reqctx->imm ? (wrparam->bytes) : 0);
        create_wreq(c_ctx(tfm), chcr_req, &(wrparam->req->base), reqctx->imm, 0,
                    transhdr_len, temp,
                    ablkctx->ciph_mode == CHCR_SCMD_CIPHER_MODE_AES_CBC);
        reqctx->skb = skb;

        if (reqctx->op && (ablkctx->ciph_mode ==
                           CHCR_SCMD_CIPHER_MODE_AES_CBC))
                sg_pcopy_to_buffer(wrparam->req->src,
                        sg_nents(wrparam->req->src), wrparam->req->info, 16,
                        reqctx->processed + wrparam->bytes - AES_BLOCK_SIZE);

        return skb;
err:
        return ERR_PTR(error);
}
static inline int chcr_keyctx_ck_size(unsigned int keylen)
{
        int ck_size = 0;

        if (keylen == AES_KEYSIZE_128)
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
        else if (keylen == AES_KEYSIZE_192)
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
        else if (keylen == AES_KEYSIZE_256)
                ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
        else
                ck_size = 0;

        return ck_size;
}
static int chcr_cipher_fallback_setkey(struct crypto_ablkcipher *cipher,
                                       const u8 *key,
                                       unsigned int keylen)
{
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        int err = 0;

        crypto_sync_skcipher_clear_flags(ablkctx->sw_cipher,
                                         CRYPTO_TFM_REQ_MASK);
        crypto_sync_skcipher_set_flags(ablkctx->sw_cipher,
                                       cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
        err = crypto_sync_skcipher_setkey(ablkctx->sw_cipher, key, keylen);
        tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
        tfm->crt_flags |=
                crypto_sync_skcipher_get_flags(ablkctx->sw_cipher) &
                CRYPTO_TFM_RES_MASK;
        return err;
}
static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *cipher,
                               const u8 *key,
                               unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned int ck_size, context_size;
        u16 alignment = 0;
        int err;

        err = chcr_cipher_fallback_setkey(cipher, key, keylen);
        if (err)
                goto badkey_err;

        ck_size = chcr_keyctx_ck_size(keylen);
        alignment = ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192 ? 8 : 0;
        memcpy(ablkctx->key, key, keylen);
        ablkctx->enckey_len = keylen;
        get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, keylen << 3);
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;

        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
                                                0, 0, context_size);
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC;
        return 0;
badkey_err:
        crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
        ablkctx->enckey_len = 0;

        return err;
}
static int chcr_aes_ctr_setkey(struct crypto_ablkcipher *cipher,
                               const u8 *key,
                               unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned int ck_size, context_size;
        u16 alignment = 0;
        int err;

        err = chcr_cipher_fallback_setkey(cipher, key, keylen);
        if (err)
                goto badkey_err;
        ck_size = chcr_keyctx_ck_size(keylen);
        alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
        memcpy(ablkctx->key, key, keylen);
        ablkctx->enckey_len = keylen;
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;

        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
                                                0, 0, context_size);
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
        return 0;
badkey_err:
        crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
        ablkctx->enckey_len = 0;

        return err;
}
static int chcr_aes_rfc3686_setkey(struct crypto_ablkcipher *cipher,
                                   const u8 *key,
                                   unsigned int keylen)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned int ck_size, context_size;
        u16 alignment = 0;
        int err;

        if (keylen < CTR_RFC3686_NONCE_SIZE)
                return -EINVAL;
        memcpy(ablkctx->nonce, key + (keylen - CTR_RFC3686_NONCE_SIZE),
               CTR_RFC3686_NONCE_SIZE);

        keylen -= CTR_RFC3686_NONCE_SIZE;
        err = chcr_cipher_fallback_setkey(cipher, key, keylen);
        if (err)
                goto badkey_err;

        ck_size = chcr_keyctx_ck_size(keylen);
        alignment = (ck_size == CHCR_KEYCTX_CIPHER_KEY_SIZE_192) ? 8 : 0;
        memcpy(ablkctx->key, key, keylen);
        ablkctx->enckey_len = keylen;
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD +
                        keylen + alignment) >> 4;

        ablkctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY,
                                                0, 0, context_size);
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR;
        return 0;
badkey_err:
        crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
        ablkctx->enckey_len = 0;

        return err;
}
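
/*
 * Advance the 128-bit big-endian counter in @srciv by @add blocks and
 * store the result in @dstiv, propagating any carry through the more
 * significant 32-bit words (e.g. a low word of 0xffffffff advanced by
 * one carries into the next word instead of silently wrapping).
 */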
static void ctr_add_iv(u8 *dstiv, u8 *srciv, u32 add)
{
        unsigned int size = AES_BLOCK_SIZE;
        __be32 *b = (__be32 *)(dstiv + size);
        u32 c, prev;

        memcpy(dstiv, srciv, AES_BLOCK_SIZE);
        for (; size >= 4; size -= 4) {
                prev = be32_to_cpu(*--b);
                c = prev + add;
                *b = cpu_to_be32(c);
                if (prev < c)
                        break;
                add = 1;
        }
}
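
/*
 * Clamp @bytes so the low 32-bit word of the CTR counter cannot wrap
 * inside one request; any remainder is issued as a follow-up WR once
 * the IV has been updated.
 */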
static unsigned int adjust_ctr_overflow(u8 *iv, u32 bytes)
{
        __be32 *b = (__be32 *)(iv + AES_BLOCK_SIZE);
        u64 c;
        u32 temp = be32_to_cpu(*--b);

        c = (u64)temp + 1; /* Number of blocks that can be processed without overflow */
        if ((bytes / AES_BLOCK_SIZE) > c)
                bytes = c * AES_BLOCK_SIZE;
        return bytes;
}
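
/*
 * Recompute the XTS tweak for the next chunk of a multi-WR request:
 * encrypt the original IV with the second half of the key, then
 * advance it by one GF(2^128) multiplication per AES block already
 * processed (eight at a time via gf128mul_x8_ble()).
 */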
static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv,
                             u32 isfinal)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        struct crypto_aes_ctx aes;
        int ret, i;
        u8 *key;
        unsigned int keylen;
        int round = reqctx->last_req_len / AES_BLOCK_SIZE;
        int round8 = round / 8;

        memcpy(iv, reqctx->iv, AES_BLOCK_SIZE);

        keylen = ablkctx->enckey_len / 2;
        key = ablkctx->key + keylen;
        ret = aes_expandkey(&aes, key, keylen);
        if (ret)
                return ret;
        aes_encrypt(&aes, iv, iv);
        for (i = 0; i < round8; i++)
                gf128mul_x8_ble((le128 *)iv, (le128 *)iv);

        for (i = 0; i < (round % 8); i++)
                gf128mul_x_ble((le128 *)iv, (le128 *)iv);

        if (!isfinal)
                aes_decrypt(&aes, iv, iv);

        memzero_explicit(&aes, sizeof(aes));
        return 0;
}
static int chcr_update_cipher_iv(struct ablkcipher_request *req,
                                 struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
        int ret = 0;

        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
                ctr_add_iv(iv, req->info, (reqctx->processed /
                                           AES_BLOCK_SIZE));
        else if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_RFC3686)
                *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
                            CTR_RFC3686_IV_SIZE) = cpu_to_be32((reqctx->processed /
                                                                AES_BLOCK_SIZE) + 1);
        else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
                ret = chcr_update_tweak(req, iv, 0);
        else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
                if (reqctx->op)
                        /* Updated before sending last WR */
                        memcpy(iv, req->info, AES_BLOCK_SIZE);
                else
                        memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
        }

        return ret;
}
/*
 * We need a separate function for the final IV because in RFC3686 the
 * initial counter starts from 1 and the 8-byte IV buffer stays
 * constant across the subsequent update requests.
 */
static int chcr_final_cipher_iv(struct ablkcipher_request *req,
                                struct cpl_fw6_pld *fw6_pld, u8 *iv)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        int subtype = get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm));
        int ret = 0;

        if (subtype == CRYPTO_ALG_SUB_TYPE_CTR)
                ctr_add_iv(iv, req->info, DIV_ROUND_UP(reqctx->processed,
                                                       AES_BLOCK_SIZE));
        else if (subtype == CRYPTO_ALG_SUB_TYPE_XTS)
                ret = chcr_update_tweak(req, iv, 1);
        else if (subtype == CRYPTO_ALG_SUB_TYPE_CBC) {
                /* Already updated for Decrypt */
                if (!reqctx->op)
                        memcpy(iv, &fw6_pld->data[2], AES_BLOCK_SIZE);
        }

        return ret;
}
static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
                                   unsigned char *input, int err)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct sk_buff *skb;
        struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        struct cipher_wr_param wrparam;
        struct chcr_dev *dev = c_ctx(tfm)->dev;
        int bytes;

        if (err)
                goto unmap;
        if (req->nbytes == reqctx->processed) {
                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                      req);
                err = chcr_final_cipher_iv(req, fw6_pld, req->info);
                goto complete;
        }

        if (!reqctx->imm) {
                bytes = chcr_sg_ent_in_wr(reqctx->srcsg, reqctx->dstsg, 0,
                                          CIP_SPACE_LEFT(ablkctx->enckey_len),
                                          reqctx->src_ofst, reqctx->dst_ofst);
                if ((bytes + reqctx->processed) >= req->nbytes)
                        bytes = req->nbytes - reqctx->processed;
                else
                        bytes = rounddown(bytes, 16);
        } else {
                /* CTR mode counter overflow */
                bytes = req->nbytes - reqctx->processed;
        }
        err = chcr_update_cipher_iv(req, fw6_pld, reqctx->iv);
        if (err)
                goto unmap;

        if (unlikely(bytes == 0)) {
                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                      req);
                err = chcr_cipher_fallback(ablkctx->sw_cipher,

                goto complete;
        }

        if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
            CRYPTO_ALG_SUB_TYPE_CTR)
                bytes = adjust_ctr_overflow(reqctx->iv, bytes);
        wrparam.qid = u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx];
        wrparam.req = req;
        wrparam.bytes = bytes;
        skb = create_cipher_wr(&wrparam);
        if (IS_ERR(skb)) {
                pr_err("chcr : %s : Failed to form WR. No memory\n", __func__);
                err = PTR_ERR(skb);
                goto unmap;
        }
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);

        reqctx->last_req_len = bytes;
        reqctx->processed += bytes;
        return 0;
unmap:
        chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
complete:
        chcr_dec_wrcount(dev);
        req->base.complete(&req->base, err);
        return err;
}
static int process_cipher(struct ablkcipher_request *req,
                          unsigned short qid,
                          struct sk_buff **skb,
                          unsigned short op_type)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
        struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(tfm));
        struct cipher_wr_param wrparam;
        int bytes, err = -EINVAL;

        reqctx->processed = 0;

        if ((ablkctx->enckey_len == 0) || (ivsize > AES_BLOCK_SIZE) ||
            (req->nbytes == 0) ||
            (req->nbytes % crypto_ablkcipher_blocksize(tfm))) {
                pr_err("AES: Invalid value of Key Len %d nbytes %d IV Len %d\n",
                       ablkctx->enckey_len, req->nbytes, ivsize);
                goto error;
        }

        err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
        if (err)
                goto error;
        if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
                                             sizeof(struct cpl_rx_phys_dsgl) +

                /* Can be sent as immediate data */
                unsigned int dnents = 0, transhdr_len, phys_dsgl, kctx_len;

                dnents = sg_nents_xlen(req->dst, req->nbytes,
                                       CHCR_DST_SG_SIZE, 0);
                phys_dsgl = get_space_for_phys_dsgl(dnents);
                kctx_len = roundup(ablkctx->enckey_len, 16);
                transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, phys_dsgl);
                reqctx->imm = (transhdr_len + IV + req->nbytes) <=
                        SGE_MAX_WR_LEN;
                bytes = IV + req->nbytes;
        }

        if (!reqctx->imm) {
                bytes = chcr_sg_ent_in_wr(req->src, req->dst, 0,
                                          CIP_SPACE_LEFT(ablkctx->enckey_len),
                                          0, 0);
                if ((bytes + reqctx->processed) >= req->nbytes)
                        bytes = req->nbytes - reqctx->processed;
                else
                        bytes = rounddown(bytes, 16);
        } else {
                bytes = req->nbytes;
        }
        if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
            CRYPTO_ALG_SUB_TYPE_CTR) {
                bytes = adjust_ctr_overflow(req->info, bytes);
        }
        if (get_cryptoalg_subtype(crypto_ablkcipher_tfm(tfm)) ==
            CRYPTO_ALG_SUB_TYPE_CTR_RFC3686) {
                memcpy(reqctx->iv, ablkctx->nonce, CTR_RFC3686_NONCE_SIZE);
                memcpy(reqctx->iv + CTR_RFC3686_NONCE_SIZE, req->info,
                       CTR_RFC3686_IV_SIZE);

                /* initialize counter portion of counter block */
                *(__be32 *)(reqctx->iv + CTR_RFC3686_NONCE_SIZE +
                            CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
        } else {
                memcpy(reqctx->iv, req->info, IV);
        }
        if (unlikely(bytes == 0)) {
                chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev,
                                      req);
                err = chcr_cipher_fallback(ablkctx->sw_cipher,

                goto error;
        }
        reqctx->op = op_type;
        reqctx->srcsg = req->src;
        reqctx->dstsg = req->dst;
        reqctx->src_ofst = 0;
        reqctx->dst_ofst = 0;
        wrparam.qid = qid;
        wrparam.req = req;
        wrparam.bytes = bytes;
        *skb = create_cipher_wr(&wrparam);
        if (IS_ERR(*skb)) {
                err = PTR_ERR(*skb);
                goto unmap;
        }
        reqctx->processed = bytes;
        reqctx->last_req_len = bytes;

        return 0;
unmap:
        chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
error:
        return err;
}
static int chcr_aes_encrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct chcr_dev *dev = c_ctx(tfm)->dev;
        struct sk_buff *skb = NULL;
        int err, isfull = 0;
        struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));

        err = chcr_inc_wrcount(dev);
        if (err)
                return -ENXIO;
        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
                                            c_ctx(tfm)->tx_qidx))) {
                isfull = 1;
                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
                        err = -ENOSPC;
                        goto error;
                }
        }

        err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
                             &skb, CHCR_ENCRYPT_OP);
        if (err || !skb)
                goto error;
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);

        return isfull ? -EBUSY : -EINPROGRESS;
error:
        chcr_dec_wrcount(dev);
        return err;
}
static int chcr_aes_decrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
        struct chcr_dev *dev = c_ctx(tfm)->dev;
        struct sk_buff *skb = NULL;
        int err, isfull = 0;

        err = chcr_inc_wrcount(dev);
        if (err)
                return -ENXIO;

        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
                                            c_ctx(tfm)->tx_qidx))) {
                isfull = 1;
                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
                        return -ENOSPC;
        }

        err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
                             &skb, CHCR_DECRYPT_OP);
        if (err || !skb)
                return err;
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);

        return isfull ? -EBUSY : -EINPROGRESS;
}
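
/*
 * Bind a transform context to a chcr device and choose the TX/RX
 * queues it will use, spreading contexts across the device's channels
 * in round-robin fashion.
 */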
static int chcr_device_init(struct chcr_context *ctx)
{
        struct uld_ctx *u_ctx = NULL;
        unsigned int id;
        int txq_perchan, txq_idx, ntxq;
        int err = 0, rxq_perchan, rxq_idx;

        id = smp_processor_id();

        u_ctx = assign_chcr_device();
        if (!u_ctx) {
                err = -ENXIO;
                pr_err("chcr device assignment fails\n");
                goto out;
        }
        ctx->dev = &u_ctx->dev;
        ntxq = u_ctx->lldi.ntxq;
        rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
        txq_perchan = ntxq / u_ctx->lldi.nchan;
        spin_lock(&ctx->dev->lock_chcr_dev);
        ctx->tx_chan_id = ctx->dev->tx_channel_id;
        ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
        spin_unlock(&ctx->dev->lock_chcr_dev);
        rxq_idx = ctx->tx_chan_id * rxq_perchan;
        rxq_idx += id % rxq_perchan;
        txq_idx = ctx->tx_chan_id * txq_perchan;
        txq_idx += id % txq_perchan;
        ctx->rx_qidx = rxq_idx;
        ctx->tx_qidx = txq_idx;
        /* Channel ID used by SGE to forward packets to the host.
         * The same value should be used in the cpl_fw6_pld RSS_CH field
         * by FW. The driver programs the PCI channel ID to be used in FW
         * at the time of queue allocation with the value "pi->tx_chan".
         */
        ctx->pci_chan_id = txq_idx / txq_perchan;
out:
        return err;
}
static int chcr_cra_init(struct crypto_tfm *tfm)
{
        struct crypto_alg *alg = tfm->__crt_alg;
        struct chcr_context *ctx = crypto_tfm_ctx(tfm);
        struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

        ablkctx->sw_cipher = crypto_alloc_sync_skcipher(alg->cra_name, 0,
                                                        CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ablkctx->sw_cipher)) {
                pr_err("failed to allocate fallback for %s\n", alg->cra_name);
                return PTR_ERR(ablkctx->sw_cipher);
        }

        tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
        return chcr_device_init(crypto_tfm_ctx(tfm));
}
static int chcr_rfc3686_init(struct crypto_tfm *tfm)
{
        struct crypto_alg *alg = tfm->__crt_alg;
        struct chcr_context *ctx = crypto_tfm_ctx(tfm);
        struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

        /*
         * RFC3686 initialises the IV counter value to 1, so rfc3686(ctr(aes))
         * cannot be used as a fallback in chcr_handle_cipher_resp().
         */
        ablkctx->sw_cipher = crypto_alloc_sync_skcipher("ctr(aes)", 0,
                                                        CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ablkctx->sw_cipher)) {
                pr_err("failed to allocate fallback for %s\n", alg->cra_name);
                return PTR_ERR(ablkctx->sw_cipher);
        }
        tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx);
        return chcr_device_init(crypto_tfm_ctx(tfm));
}
static void chcr_cra_exit(struct crypto_tfm *tfm)
{
        struct chcr_context *ctx = crypto_tfm_ctx(tfm);
        struct ablk_ctx *ablkctx = ABLK_CTX(ctx);

        crypto_free_sync_skcipher(ablkctx->sw_cipher);
}
static int get_alg_config(struct algo_param *params,
                          unsigned int auth_size)
{
        switch (auth_size) {
        case SHA1_DIGEST_SIZE:
                params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
                params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA1;
                params->result_size = SHA1_DIGEST_SIZE;
                break;
        case SHA224_DIGEST_SIZE:
                params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
                params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA224;
                params->result_size = SHA256_DIGEST_SIZE;
                break;
        case SHA256_DIGEST_SIZE:
                params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
                params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA256;
                params->result_size = SHA256_DIGEST_SIZE;
                break;
        case SHA384_DIGEST_SIZE:
                params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
                params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384;
                params->result_size = SHA512_DIGEST_SIZE;
                break;
        case SHA512_DIGEST_SIZE:
                params->mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
                params->auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512;
                params->result_size = SHA512_DIGEST_SIZE;
                break;
        default:
                pr_err("chcr : ERROR, unsupported digest size\n");
                return -EINVAL;
        }
        return 0;
}
static inline void chcr_free_shash(struct crypto_shash *base_hash)
{
        crypto_free_shash(base_hash);
}
/**
 * create_hash_wr - Create hash work request
 * @req: hash request
 * @param: hash WR parameters
 */
static struct sk_buff *create_hash_wr(struct ahash_request *req,
                                      struct hash_wr_param *param)
{
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
        struct sk_buff *skb = NULL;
        struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
        struct chcr_wr *chcr_req;
        struct ulptx_sgl *ulptx;
        unsigned int nents = 0, transhdr_len;
        unsigned int temp = 0;
        gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
                GFP_ATOMIC;
        struct adapter *adap = padap(h_ctx(tfm)->dev);
        int error = 0;

        transhdr_len = HASH_TRANSHDR_SIZE(param->kctx_len);
        req_ctx->hctx_wr.imm = (transhdr_len + param->bfr_len +
                                param->sg_len) <= SGE_MAX_WR_LEN;
        nents = sg_nents_xlen(req_ctx->hctx_wr.srcsg, param->sg_len,
                              CHCR_SRC_SG_SIZE, req_ctx->hctx_wr.src_ofst);
        nents += param->bfr_len ? 1 : 0;
        transhdr_len += req_ctx->hctx_wr.imm ? roundup(param->bfr_len +
                        param->sg_len, 16) : (sgl_len(nents) * 8);
        transhdr_len = roundup(transhdr_len, 16);

        skb = alloc_skb(transhdr_len, flags);
        if (!skb)
                return ERR_PTR(-ENOMEM);
        chcr_req = __skb_put_zero(skb, transhdr_len);

        chcr_req->sec_cpl.op_ivinsrtofst =
                FILL_SEC_CPL_OP_IVINSR(h_ctx(tfm)->tx_chan_id, 2, 0);
        chcr_req->sec_cpl.pldlen = htonl(param->bfr_len + param->sg_len);

        chcr_req->sec_cpl.aadstart_cipherstop_hi =
                FILL_SEC_CPL_CIPHERSTOP_HI(0, 0, 0, 0);
        chcr_req->sec_cpl.cipherstop_lo_authinsert =
                FILL_SEC_CPL_AUTHINSERT(0, 1, 0, 0);
        chcr_req->sec_cpl.seqno_numivs =
                FILL_SEC_CPL_SCMD0_SEQNO(0, 0, 0, param->alg_prm.auth_mode,
                                         param->opad_needed, 0);

        chcr_req->sec_cpl.ivgen_hdrlen =
                FILL_SEC_CPL_IVGEN_HDRLEN(param->last, param->more, 0, 1, 0, 0);

        memcpy(chcr_req->key_ctx.key, req_ctx->partial_hash,
               param->alg_prm.result_size);

        if (param->opad_needed)
                memcpy(chcr_req->key_ctx.key +
                       ((param->alg_prm.result_size <= 32) ? 32 :
                        CHCR_HASH_MAX_DIGEST_SIZE),
                       hmacctx->opad, param->alg_prm.result_size);

        chcr_req->key_ctx.ctx_hdr = FILL_KEY_CTX_HDR(CHCR_KEYCTX_NO_KEY,
                                                     param->alg_prm.mk_size, 0,
                                                     param->opad_needed,
                                                     ((param->kctx_len +
                                                       sizeof(chcr_req->key_ctx)) >> 4));
        chcr_req->sec_cpl.scmd1 = cpu_to_be64((u64)param->scmd1);
        ulptx = (struct ulptx_sgl *)((u8 *)(chcr_req + 1) + param->kctx_len +
                                     DUMMY_BYTES);
        if (param->bfr_len != 0) {
                req_ctx->hctx_wr.dma_addr =
                        dma_map_single(&u_ctx->lldi.pdev->dev, req_ctx->reqbfr,
                                       param->bfr_len, DMA_TO_DEVICE);
                if (dma_mapping_error(&u_ctx->lldi.pdev->dev,
                                      req_ctx->hctx_wr.dma_addr)) {
                        error = -ENOMEM;
                        goto err;
                }
                req_ctx->hctx_wr.dma_len = param->bfr_len;
        } else {
                req_ctx->hctx_wr.dma_addr = 0;
        }
        chcr_add_hash_src_ent(req, ulptx, param);
        /* Request up to the max WR size */
        temp = param->kctx_len + DUMMY_BYTES + (req_ctx->hctx_wr.imm ?
                (param->sg_len + param->bfr_len) : 0);
        atomic_inc(&adap->chcr_stats.digest_rqst);
        create_wreq(h_ctx(tfm), chcr_req, &req->base, req_ctx->hctx_wr.imm,
                    param->hash_size, transhdr_len,
                    temp, 0);
        req_ctx->hctx_wr.skb = skb;
        return skb;
err:
        kfree_skb(skb);
        return ERR_PTR(error);
}
static int chcr_ahash_update(struct ahash_request *req)
{
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
        struct uld_ctx *u_ctx = NULL;
        struct chcr_dev *dev = h_ctx(rtfm)->dev;
        struct sk_buff *skb;
        u8 remainder = 0, bs;
        unsigned int nbytes = req->nbytes;
        struct hash_wr_param params;
        int error, isfull = 0;

        bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
        u_ctx = ULD_CTX(h_ctx(rtfm));

        if (nbytes + req_ctx->reqlen >= bs) {
                remainder = (nbytes + req_ctx->reqlen) % bs;
                nbytes = nbytes + req_ctx->reqlen - remainder;
        } else {
                sg_pcopy_to_buffer(req->src, sg_nents(req->src), req_ctx->reqbfr
                                   + req_ctx->reqlen, nbytes, 0);
                req_ctx->reqlen += nbytes;
                return 0;
        }
        error = chcr_inc_wrcount(dev);
        if (error)
                return -ENXIO;
        /* The CHCR detach state means lldi or padap has been freed;
         * increasing the inflight count for the dev guarantees that lldi
         * and padap stay valid.
         */
        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
                                            h_ctx(rtfm)->tx_qidx))) {
                isfull = 1;
                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
                        error = -ENOSPC;
                        goto err;
                }
        }

        chcr_init_hctx_per_wr(req_ctx);
        error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
        if (error) {
                error = -ENOMEM;
                goto err;
        }
        get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
        params.kctx_len = roundup(params.alg_prm.result_size, 16);
        params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
                                            HASH_SPACE_LEFT(params.kctx_len), 0);
        if (params.sg_len > req->nbytes)
                params.sg_len = req->nbytes;
        params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs) -
                        req_ctx->reqlen;
        params.opad_needed = 0;
        params.more = 1;
        params.last = 0;
        params.bfr_len = req_ctx->reqlen;
        params.scmd1 = 0;
        req_ctx->hctx_wr.srcsg = req->src;

        params.hash_size = params.alg_prm.result_size;
        req_ctx->data_len += params.sg_len + params.bfr_len;
        skb = create_hash_wr(req, &params);
        if (IS_ERR(skb)) {
                error = PTR_ERR(skb);
                goto unmap;
        }

        req_ctx->hctx_wr.processed += params.sg_len;
        if (remainder) {
                swap(req_ctx->reqbfr, req_ctx->skbfr);
                sg_pcopy_to_buffer(req->src, sg_nents(req->src),
                                   req_ctx->reqbfr, remainder, req->nbytes -
                                   remainder);
        }
        req_ctx->reqlen = remainder;
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);

        return isfull ? -EBUSY : -EINPROGRESS;
unmap:
        chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
        chcr_dec_wrcount(dev);
        return error;
}
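
/*
 * Hand-build the final hash block: a 0x80 terminator byte, zero
 * padding, and the total message length in bits stored big-endian in
 * the last eight bytes (offset 56 for 64-byte blocks, 120 for 128-byte
 * blocks).
 */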
static void create_last_hash_block(char *bfr_ptr, unsigned int bs, u64 scmd1)
{
        memset(bfr_ptr, 0, bs);
        *bfr_ptr = 0x80;
        if (bs == 64)
                *(__be64 *)(bfr_ptr + 56) = cpu_to_be64(scmd1 << 3);
        else
                *(__be64 *)(bfr_ptr + 120) = cpu_to_be64(scmd1 << 3);
}
static int chcr_ahash_final(struct ahash_request *req)
{
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
        struct chcr_dev *dev = h_ctx(rtfm)->dev;
        struct hash_wr_param params;
        struct sk_buff *skb;
        struct uld_ctx *u_ctx = NULL;
        u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
        int error = -EINVAL;

        error = chcr_inc_wrcount(dev);
        if (error)
                return -ENXIO;

        chcr_init_hctx_per_wr(req_ctx);
        u_ctx = ULD_CTX(h_ctx(rtfm));
        if (is_hmac(crypto_ahash_tfm(rtfm)))
                params.opad_needed = 1;
        else
                params.opad_needed = 0;
        params.sg_len = 0;
        req_ctx->hctx_wr.isfinal = 1;
        get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
        params.kctx_len = roundup(params.alg_prm.result_size, 16);
        if (is_hmac(crypto_ahash_tfm(rtfm))) {
                params.opad_needed = 1;
                params.kctx_len *= 2;
        } else {
                params.opad_needed = 0;
        }

        req_ctx->hctx_wr.result = 1;
        params.bfr_len = req_ctx->reqlen;
        req_ctx->data_len += params.bfr_len + params.sg_len;
        req_ctx->hctx_wr.srcsg = req->src;
        if (req_ctx->reqlen == 0) {
                create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
                params.last = 0;
                params.more = 1;
                params.scmd1 = 0;
                params.bfr_len = bs;
        } else {
                params.scmd1 = req_ctx->data_len;
                params.last = 1;
                params.more = 0;
        }
        params.hash_size = crypto_ahash_digestsize(rtfm);
        skb = create_hash_wr(req, &params);
        if (IS_ERR(skb)) {
                error = PTR_ERR(skb);
                goto err;
        }
        req_ctx->reqlen = 0;
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);

        return -EINPROGRESS;
err:
        chcr_dec_wrcount(dev);
        return error;
}
static int chcr_ahash_finup(struct ahash_request *req)
{
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
        struct chcr_dev *dev = h_ctx(rtfm)->dev;
        struct uld_ctx *u_ctx = NULL;
        struct sk_buff *skb;
        struct hash_wr_param params;
        u8 bs;
        int error, isfull = 0;

        bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
        u_ctx = ULD_CTX(h_ctx(rtfm));
        error = chcr_inc_wrcount(dev);
        if (error)
                return -ENXIO;

        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
                                            h_ctx(rtfm)->tx_qidx))) {
                isfull = 1;
                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
                        error = -ENOSPC;
                        goto err;
                }
        }
        chcr_init_hctx_per_wr(req_ctx);
        error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
        if (error) {
                error = -ENOMEM;
                goto err;
        }

        get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
        params.kctx_len = roundup(params.alg_prm.result_size, 16);
        if (is_hmac(crypto_ahash_tfm(rtfm))) {
                params.kctx_len *= 2;
                params.opad_needed = 1;
        } else {
                params.opad_needed = 0;
        }

        params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
                                            HASH_SPACE_LEFT(params.kctx_len), 0);
        if (params.sg_len < req->nbytes) {
                if (is_hmac(crypto_ahash_tfm(rtfm))) {
                        params.kctx_len /= 2;
                        params.opad_needed = 0;
                }
                params.last = 0;
                params.more = 1;
                params.sg_len = rounddown(params.sg_len + req_ctx->reqlen, bs)
                                - req_ctx->reqlen;
                params.hash_size = params.alg_prm.result_size;
                params.scmd1 = 0;
        } else {
                params.last = 1;
                params.more = 0;
                params.sg_len = req->nbytes;
                params.hash_size = crypto_ahash_digestsize(rtfm);
                params.scmd1 = req_ctx->data_len + req_ctx->reqlen +
                               req->nbytes;
        }
        params.bfr_len = req_ctx->reqlen;
        req_ctx->data_len += params.bfr_len + params.sg_len;
        req_ctx->hctx_wr.result = 1;
        req_ctx->hctx_wr.srcsg = req->src;
        if ((req_ctx->reqlen + req->nbytes) == 0) {
                create_last_hash_block(req_ctx->reqbfr, bs, req_ctx->data_len);
                params.last = 0;
                params.more = 1;
                params.scmd1 = 0;
                params.bfr_len = bs;
        }
        skb = create_hash_wr(req, &params);
        if (IS_ERR(skb)) {
                error = PTR_ERR(skb);
                goto unmap;
        }
        req_ctx->reqlen = 0;
        req_ctx->hctx_wr.processed += params.sg_len;
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);

        return isfull ? -EBUSY : -EINPROGRESS;
unmap:
        chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
        chcr_dec_wrcount(dev);
        return error;
}
static int chcr_ahash_digest(struct ahash_request *req)
{
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
        struct chcr_dev *dev = h_ctx(rtfm)->dev;
        struct uld_ctx *u_ctx = NULL;
        struct sk_buff *skb;
        struct hash_wr_param params;
        u8 bs;
        int error, isfull = 0;

        bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
        error = chcr_inc_wrcount(dev);
        if (error)
                return -ENXIO;

        u_ctx = ULD_CTX(h_ctx(rtfm));
        if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
                                            h_ctx(rtfm)->tx_qidx))) {
                isfull = 1;
                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
                        error = -ENOSPC;
                        goto err;
                }
        }

        chcr_init_hctx_per_wr(req_ctx);
        error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
        if (error) {
                error = -ENOMEM;
                goto err;
        }

        get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
        params.kctx_len = roundup(params.alg_prm.result_size, 16);
        if (is_hmac(crypto_ahash_tfm(rtfm))) {
                params.kctx_len *= 2;
                params.opad_needed = 1;
        } else {
                params.opad_needed = 0;
        }
        params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
                                            HASH_SPACE_LEFT(params.kctx_len), 0);
        if (params.sg_len < req->nbytes) {
                if (is_hmac(crypto_ahash_tfm(rtfm))) {
                        params.kctx_len /= 2;
                        params.opad_needed = 0;
                }
                params.last = 0;
                params.more = 1;
                params.scmd1 = 0;
                params.sg_len = rounddown(params.sg_len, bs);
                params.hash_size = params.alg_prm.result_size;
        } else {
                params.sg_len = req->nbytes;
                params.hash_size = crypto_ahash_digestsize(rtfm);
                params.last = 1;
                params.more = 0;
                params.scmd1 = req->nbytes + req_ctx->data_len;
        }
        params.bfr_len = 0;
        req_ctx->hctx_wr.result = 1;
        req_ctx->hctx_wr.srcsg = req->src;
        req_ctx->data_len += params.bfr_len + params.sg_len;

        if (req->nbytes == 0) {
                create_last_hash_block(req_ctx->reqbfr, bs, 0);
                params.more = 1;
                params.bfr_len = bs;
        }

        skb = create_hash_wr(req, &params);
        if (IS_ERR(skb)) {
                error = PTR_ERR(skb);
                goto unmap;
        }
        req_ctx->hctx_wr.processed += params.sg_len;
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);

        return isfull ? -EBUSY : -EINPROGRESS;
unmap:
        chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
err:
        chcr_dec_wrcount(dev);
        return error;
}
static int chcr_ahash_continue(struct ahash_request *req)
{
        struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
        struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
        struct uld_ctx *u_ctx = NULL;
        struct sk_buff *skb;
        struct hash_wr_param params;
        u8 bs;
        int error;

        bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
        u_ctx = ULD_CTX(h_ctx(rtfm));
        get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
        params.kctx_len = roundup(params.alg_prm.result_size, 16);
        if (is_hmac(crypto_ahash_tfm(rtfm))) {
                params.kctx_len *= 2;
                params.opad_needed = 1;
        } else {
                params.opad_needed = 0;
        }
        params.sg_len = chcr_hash_ent_in_wr(hctx_wr->srcsg, 0,
                                            HASH_SPACE_LEFT(params.kctx_len),
                                            hctx_wr->src_ofst);
        if ((params.sg_len + hctx_wr->processed) > req->nbytes)
                params.sg_len = req->nbytes - hctx_wr->processed;
        if (!hctx_wr->result ||
            ((params.sg_len + hctx_wr->processed) < req->nbytes)) {
                if (is_hmac(crypto_ahash_tfm(rtfm))) {
                        params.kctx_len /= 2;
                        params.opad_needed = 0;
                }
                params.last = 0;
                params.more = 1;
                params.sg_len = rounddown(params.sg_len, bs);
                params.hash_size = params.alg_prm.result_size;
                params.scmd1 = 0;
        } else {
                params.last = 1;
                params.more = 0;
                params.hash_size = crypto_ahash_digestsize(rtfm);
                params.scmd1 = reqctx->data_len + params.sg_len;
        }
        params.bfr_len = 0;
        reqctx->data_len += params.sg_len;
        skb = create_hash_wr(req, &params);
        if (IS_ERR(skb)) {
                error = PTR_ERR(skb);
                goto err;
        }
        hctx_wr->processed += params.sg_len;
        skb->dev = u_ctx->lldi.ports[0];
        set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);

        return 0;
err:
        return error;
}
static inline void chcr_handle_ahash_resp(struct ahash_request *req,
                                          unsigned char *input,
                                          int err)
{
        struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
        struct chcr_hctx_per_wr *hctx_wr = &reqctx->hctx_wr;
        int digestsize, updated_digestsize;
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
        struct chcr_dev *dev = h_ctx(tfm)->dev;

        if (input == NULL)
                goto out;
        digestsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
        updated_digestsize = digestsize;
        if (digestsize == SHA224_DIGEST_SIZE)
                updated_digestsize = SHA256_DIGEST_SIZE;
        else if (digestsize == SHA384_DIGEST_SIZE)
                updated_digestsize = SHA512_DIGEST_SIZE;

        if (hctx_wr->dma_addr) {
                dma_unmap_single(&u_ctx->lldi.pdev->dev, hctx_wr->dma_addr,
                                 hctx_wr->dma_len, DMA_TO_DEVICE);
                hctx_wr->dma_addr = 0;
        }
        if (hctx_wr->isfinal || ((hctx_wr->processed + reqctx->reqlen) ==
                                 req->nbytes)) {
                if (hctx_wr->result == 1) {
                        hctx_wr->result = 0;
                        memcpy(req->result, input + sizeof(struct cpl_fw6_pld),
                               digestsize);
                } else {
                        memcpy(reqctx->partial_hash,
                               input + sizeof(struct cpl_fw6_pld),
                               updated_digestsize);
                }
                goto unmap;
        }
        memcpy(reqctx->partial_hash, input + sizeof(struct cpl_fw6_pld),
               updated_digestsize);

        err = chcr_ahash_continue(req);
        if (err)
                goto unmap;
        return;
unmap:
        if (hctx_wr->is_sg_map)
                chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
out:
        chcr_dec_wrcount(dev);
        req->base.complete(&req->base, err);
}
/**
 * chcr_handle_resp - Unmap the DMA buffers associated with the request
 * @req: crypto request
 */
int chcr_handle_resp(struct crypto_async_request *req, unsigned char *input,
                     int err)
{
        struct crypto_tfm *tfm = req->tfm;
        struct chcr_context *ctx = crypto_tfm_ctx(tfm);
        struct adapter *adap = padap(ctx->dev);

        switch (tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_AEAD:
                err = chcr_handle_aead_resp(aead_request_cast(req), input, err);
                break;

        case CRYPTO_ALG_TYPE_ABLKCIPHER:
                chcr_handle_cipher_resp(ablkcipher_request_cast(req),
                                        input, err);
                break;
        case CRYPTO_ALG_TYPE_AHASH:
                chcr_handle_ahash_resp(ahash_request_cast(req), input, err);
        }
        atomic_inc(&adap->chcr_stats.complete);
        return err;
}
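
/*
 * Export/import only the software-visible state (pending buffer,
 * running length and partial hash), as the generic ahash
 * export()/import() contract requires; per-WR bookkeeping is reset on
 * import.
 */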
static int chcr_ahash_export(struct ahash_request *areq, void *out)
{
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
        struct chcr_ahash_req_ctx *state = out;

        state->reqlen = req_ctx->reqlen;
        state->data_len = req_ctx->data_len;
        memcpy(state->bfr1, req_ctx->reqbfr, req_ctx->reqlen);
        memcpy(state->partial_hash, req_ctx->partial_hash,
               CHCR_HASH_MAX_DIGEST_SIZE);
        chcr_init_hctx_per_wr(state);
        return 0;
}
static int chcr_ahash_import(struct ahash_request *areq, const void *in)
{
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
        struct chcr_ahash_req_ctx *state = (struct chcr_ahash_req_ctx *)in;

        req_ctx->reqlen = state->reqlen;
        req_ctx->data_len = state->data_len;
        req_ctx->reqbfr = req_ctx->bfr1;
        req_ctx->skbfr = req_ctx->bfr2;
        memcpy(req_ctx->bfr1, state->bfr1, CHCR_HASH_MAX_BLOCK_SIZE_128);
        memcpy(req_ctx->partial_hash, state->partial_hash,
               CHCR_HASH_MAX_DIGEST_SIZE);
        chcr_init_hctx_per_wr(req_ctx);
        return 0;
}
static int chcr_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
                             unsigned int keylen)
{
        struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(tfm));
        unsigned int digestsize = crypto_ahash_digestsize(tfm);
        unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
        unsigned int i, err = 0, updated_digestsize;

        SHASH_DESC_ON_STACK(shash, hmacctx->base_hash);

        /* Use the key to calculate the ipad and opad. The ipad will be
         * sent with the first request's data, the opad with the final
         * hash result; they live in hmacctx->ipad and hmacctx->opad.
         */
        shash->tfm = hmacctx->base_hash;
        if (keylen > bs) {
                err = crypto_shash_digest(shash, key, keylen,
                                          hmacctx->ipad);
                if (err)
                        goto out;
                keylen = digestsize;
        } else {
                memcpy(hmacctx->ipad, key, keylen);
        }
        memset(hmacctx->ipad + keylen, 0, bs - keylen);
        memcpy(hmacctx->opad, hmacctx->ipad, bs);

        for (i = 0; i < bs / sizeof(int); i++) {
                *((unsigned int *)(&hmacctx->ipad) + i) ^= IPAD_DATA;
                *((unsigned int *)(&hmacctx->opad) + i) ^= OPAD_DATA;
        }

        updated_digestsize = digestsize;
        if (digestsize == SHA224_DIGEST_SIZE)
                updated_digestsize = SHA256_DIGEST_SIZE;
        else if (digestsize == SHA384_DIGEST_SIZE)
                updated_digestsize = SHA512_DIGEST_SIZE;
        err = chcr_compute_partial_hash(shash, hmacctx->ipad,
                                        hmacctx->ipad, digestsize);
        if (err)
                goto out;
        chcr_change_order(hmacctx->ipad, updated_digestsize);

        err = chcr_compute_partial_hash(shash, hmacctx->opad,
                                        hmacctx->opad, digestsize);
        if (err)
                goto out;
        chcr_change_order(hmacctx->opad, updated_digestsize);
out:
        return err;
}
static int chcr_aes_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
                               unsigned int key_len)
{
        struct ablk_ctx *ablkctx = ABLK_CTX(c_ctx(cipher));
        unsigned short context_size = 0;
        int err;

        err = chcr_cipher_fallback_setkey(cipher, key, key_len);
        if (err)
                goto badkey_err;

        memcpy(ablkctx->key, key, key_len);
        ablkctx->enckey_len = key_len;
        get_aes_decrypt_key(ablkctx->rrkey, ablkctx->key, key_len << 2);
        context_size = (KEY_CONTEXT_HDR_SALT_AND_PAD + key_len) >> 4;
        ablkctx->key_ctx_hdr =
                FILL_KEY_CTX_HDR((key_len == AES_KEYSIZE_256) ?
                                 CHCR_KEYCTX_CIPHER_KEY_SIZE_128 :
                                 CHCR_KEYCTX_CIPHER_KEY_SIZE_256,
                                 CHCR_KEYCTX_NO_KEY, 1,
                                 0, context_size);
        ablkctx->ciph_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS;
        return 0;
badkey_err:
        crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
        ablkctx->enckey_len = 0;

        return err;
}
static int chcr_sha_init(struct ahash_request *areq)
{
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
        int digestsize = crypto_ahash_digestsize(tfm);

        req_ctx->data_len = 0;
        req_ctx->reqlen = 0;
        req_ctx->reqbfr = req_ctx->bfr1;
        req_ctx->skbfr = req_ctx->bfr2;
        copy_hash_init_values(req_ctx->partial_hash, digestsize);

        return 0;
}
static int chcr_sha_cra_init(struct crypto_tfm *tfm)
{
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct chcr_ahash_req_ctx));
        return chcr_device_init(crypto_tfm_ctx(tfm));
}
static int chcr_hmac_init(struct ahash_request *areq)
{
        struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
        struct crypto_ahash *rtfm = crypto_ahash_reqtfm(areq);
        struct hmac_ctx *hmacctx = HMAC_CTX(h_ctx(rtfm));
        unsigned int digestsize = crypto_ahash_digestsize(rtfm);
        unsigned int bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));

        chcr_sha_init(areq);
        req_ctx->data_len = bs;
        if (is_hmac(crypto_ahash_tfm(rtfm))) {
                if (digestsize == SHA224_DIGEST_SIZE)
                        memcpy(req_ctx->partial_hash, hmacctx->ipad,
                               SHA256_DIGEST_SIZE);
                else if (digestsize == SHA384_DIGEST_SIZE)
                        memcpy(req_ctx->partial_hash, hmacctx->ipad,
                               SHA512_DIGEST_SIZE);
                else
                        memcpy(req_ctx->partial_hash, hmacctx->ipad,
                               digestsize);
        }
        return 0;
}
static int chcr_hmac_cra_init(struct crypto_tfm *tfm)
{
        struct chcr_context *ctx = crypto_tfm_ctx(tfm);
        struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
        unsigned int digestsize =
                crypto_ahash_digestsize(__crypto_ahash_cast(tfm));

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct chcr_ahash_req_ctx));
        hmacctx->base_hash = chcr_alloc_shash(digestsize);
        if (IS_ERR(hmacctx->base_hash))
                return PTR_ERR(hmacctx->base_hash);
        return chcr_device_init(crypto_tfm_ctx(tfm));
}
2242 static void chcr_hmac_cra_exit(struct crypto_tfm *tfm)
2244 struct chcr_context *ctx = crypto_tfm_ctx(tfm);
2245 struct hmac_ctx *hmacctx = HMAC_CTX(ctx);
2247 if (hmacctx->base_hash) {
2248 chcr_free_shash(hmacctx->base_hash);
2249 hmacctx->base_hash = NULL;
2250 }
2253 inline void chcr_aead_common_exit(struct aead_request *req)
2255 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2256 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2257 struct uld_ctx *u_ctx = ULD_CTX(a_ctx(tfm));
2259 chcr_aead_dma_unmap(&u_ctx->lldi.pdev->dev, req, reqctx->op);
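/*
 * Hedged flow summary added by the editor: chcr_aead_common_init()
 * below validates that a key has been programmed, rejects decrypt
 * requests shorter than the ICV, points the CCM scratch pad just past
 * the IV copy when a B0 block is needed, and DMA-maps the request.
 * It must always be paired with chcr_aead_common_exit() above.
 */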
2262 static int chcr_aead_common_init(struct aead_request *req)
2264 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2265 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2266 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2267 unsigned int authsize = crypto_aead_authsize(tfm);
2268 int error = -EINVAL;
2270 /* validate key size */
2271 if (aeadctx->enckey_len == 0)
2272 goto err;
2273 if (reqctx->op && req->cryptlen < authsize)
2274 goto err;
2275 if (reqctx->b0_len)
2276 reqctx->scratch_pad = reqctx->iv + IV;
2277 else
2278 reqctx->scratch_pad = NULL;
2280 error = chcr_aead_dma_map(&ULD_CTX(a_ctx(tfm))->lldi.pdev->dev, req,
2281 reqctx->op);
2282 if (error) {
2283 error = -ENOMEM;
2284 goto err;
2285 }
2287 return 0;
2288 err:
2289 return error;
2292 static int chcr_aead_need_fallback(struct aead_request *req, int dst_nents,
2293 int aadmax, int wrlen,
2294 unsigned short op_type)
2296 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req));
2298 if (((req->cryptlen - (op_type ? authsize : 0)) == 0) ||
2299 dst_nents > MAX_DSGL_ENT ||
2300 (req->assoclen > aadmax) ||
2301 (wrlen > SGE_MAX_WR_LEN))
2302 return 1;
2303 return 0;
2306 static int chcr_aead_fallback(struct aead_request *req, unsigned short op_type)
2308 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2309 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2310 struct aead_request *subreq = aead_request_ctx(req);
2312 aead_request_set_tfm(subreq, aeadctx->sw_cipher);
2313 aead_request_set_callback(subreq, req->base.flags,
2314 req->base.complete, req->base.data);
2315 aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
2316 req->iv);
2317 aead_request_set_ad(subreq, req->assoclen);
2318 return op_type ? crypto_aead_decrypt(subreq) :
2319 crypto_aead_encrypt(subreq);
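/*
 * Editor's sketch of the work request built by create_authenc_wr()
 * below (layout follows from the pointer arithmetic in the code, not
 * from a separate spec dump):
 *
 *	chcr_wr (transhdr + sec_cpl) | key context (kctx_len) |
 *	cpl_rx_phys_dsgl + dst SGL (dst_size) | 16-byte IV |
 *	ulptx SGL or immediate payload
 *
 * e.g. an immediate request carries roundup(assoclen + cryptlen, 16)
 * payload bytes in place of the source SGL, and transhdr_len is the
 * 16-byte-rounded sum of all the fixed parts.
 */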
2322 static struct sk_buff *create_authenc_wr(struct aead_request *req,
2323 unsigned short qid,
2324 int size)
2326 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2327 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2328 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
2329 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2330 struct sk_buff *skb = NULL;
2331 struct chcr_wr *chcr_req;
2332 struct cpl_rx_phys_dsgl *phys_cpl;
2333 struct ulptx_sgl *ulptx;
2334 unsigned int transhdr_len;
2335 unsigned int dst_size = 0, temp, subtype = get_aead_subtype(tfm);
2336 unsigned int kctx_len = 0, dnents, snents;
2337 unsigned int authsize = crypto_aead_authsize(tfm);
2338 int error = -EINVAL;
2339 u8 *ivptr;
2340 int null = 0;
2341 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2342 GFP_ATOMIC;
2343 struct adapter *adap = padap(a_ctx(tfm)->dev);
2345 if (req->cryptlen == 0)
2346 return NULL;
2349 error = chcr_aead_common_init(req);
2350 if (error)
2351 return ERR_PTR(error);
2353 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL ||
2354 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2355 null = 1;
2356 }
2357 dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
2358 (reqctx->op ? -authsize : authsize), CHCR_DST_SG_SIZE, 0);
2359 dnents += MIN_AUTH_SG; // For IV
2360 snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2361 CHCR_SRC_SG_SIZE, 0);
2362 dst_size = get_space_for_phys_dsgl(dnents);
2363 kctx_len = (ntohl(KEY_CONTEXT_CTX_LEN_V(aeadctx->key_ctx_hdr)) << 4)
2364 - sizeof(chcr_req->key_ctx);
2365 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2366 reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <
2367 SGE_MAX_WR_LEN;
2368 temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16)
2369 : (sgl_len(snents) * 8);
2370 transhdr_len += temp;
2371 transhdr_len = roundup(transhdr_len, 16);
2373 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
2374 transhdr_len, reqctx->op)) {
2375 atomic_inc(&adap->chcr_stats.fallback);
2376 chcr_aead_common_exit(req);
2377 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2379 skb = alloc_skb(transhdr_len, flags);
2380 if (!skb) {
2381 error = -ENOMEM;
2382 goto err;
2383 }
2385 chcr_req = __skb_put_zero(skb, transhdr_len);
2387 temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
2390 * Input order is AAD, IV and payload, where the IV should be included
2391 * as part of the authdata. All other fields should be filled according
2392 * to the hardware spec.
2394 chcr_req->sec_cpl.op_ivinsrtofst =
2395 FILL_SEC_CPL_OP_IVINSR(a_ctx(tfm)->tx_chan_id, 2, 1);
2396 chcr_req->sec_cpl.pldlen = htonl(req->assoclen + IV + req->cryptlen);
2397 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2398 null ? 0 : 1 + IV,
2399 null ? 0 : IV + req->assoclen,
2400 req->assoclen + IV + 1,
2401 (temp & 0x1F0) >> 4);
2402 chcr_req->sec_cpl.cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(
2403 temp & 0xF,
2404 null ? 0 : req->assoclen + IV + 1,
2405 temp, temp);
2406 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL ||
2407 subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA)
2408 temp = CHCR_SCMD_CIPHER_MODE_AES_CTR;
2410 temp = CHCR_SCMD_CIPHER_MODE_AES_CBC;
2411 chcr_req->sec_cpl.seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op,
2412 (reqctx->op == CHCR_ENCRYPT_OP) ? 1 : 0,
2413 temp,
2414 actx->auth_mode, aeadctx->hmac_ctrl,
2415 IV >> 1);
2416 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
2417 0, 0, dst_size);
2419 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2420 if (reqctx->op == CHCR_ENCRYPT_OP ||
2421 subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2422 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL)
2423 memcpy(chcr_req->key_ctx.key, aeadctx->key,
2424 aeadctx->enckey_len);
2425 else
2426 memcpy(chcr_req->key_ctx.key, actx->dec_rrkey,
2427 aeadctx->enckey_len);
2429 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2430 actx->h_iopad, kctx_len - roundup(aeadctx->enckey_len, 16));
2431 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2432 ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2433 ulptx = (struct ulptx_sgl *)(ivptr + IV);
2434 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
2435 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
2436 memcpy(ivptr, aeadctx->nonce, CTR_RFC3686_NONCE_SIZE);
2437 memcpy(ivptr + CTR_RFC3686_NONCE_SIZE, req->iv,
2438 CTR_RFC3686_IV_SIZE);
2439 *(__be32 *)(ivptr + CTR_RFC3686_NONCE_SIZE +
2440 CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);
2441 } else {
2442 memcpy(ivptr, req->iv, IV);
2443 }
2444 chcr_add_aead_dst_ent(req, phys_cpl, qid);
2445 chcr_add_aead_src_ent(req, ulptx);
2446 atomic_inc(&adap->chcr_stats.cipher_rqst);
2447 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2448 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
2449 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
2450 transhdr_len, temp, 0);
2455 chcr_aead_common_exit(req);
2457 return ERR_PTR(error);
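/*
 * DMA mapping policy (editor's summary, assuming standard kernel DMA
 * API semantics): the IV/B0 scratch area is mapped once as a single
 * DMA_BIDIRECTIONAL buffer; in-place requests (src == dst) get one
 * bidirectional scatterlist mapping, otherwise src is mapped
 * DMA_TO_DEVICE and dst DMA_FROM_DEVICE, and a failed dst map unwinds
 * the src mapping before the -ENOMEM return.
 */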
2460 int chcr_aead_dma_map(struct device *dev,
2461 struct aead_request *req,
2462 unsigned short op_type)
2465 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2466 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2467 unsigned int authsize = crypto_aead_authsize(tfm);
2468 int dst_size;
2469 int error = -EINVAL;
2470 dst_size = req->assoclen + req->cryptlen + (op_type ?
2471 -authsize : authsize);
2472 if (!req->cryptlen || !dst_size)
2473 return 0;
2474 reqctx->iv_dma = dma_map_single(dev, reqctx->iv, (IV + reqctx->b0_len),
2475 DMA_BIDIRECTIONAL);
2476 if (dma_mapping_error(dev, reqctx->iv_dma))
2477 return -ENOMEM;
2478 if (reqctx->b0_len)
2479 reqctx->b0_dma = reqctx->iv_dma + IV;
2480 else
2481 reqctx->b0_dma = 0;
2482 if (req->src == req->dst) {
2483 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2484 DMA_BIDIRECTIONAL);
2485 if (!error)
2486 goto err;
2487 } else {
2488 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2489 DMA_TO_DEVICE);
2490 if (!error)
2491 goto err;
2492 error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2493 DMA_FROM_DEVICE);
2494 if (!error) {
2495 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2496 DMA_TO_DEVICE);
2497 goto err;
2498 }
2499 }
2501 return 0;
2502 err:
2503 dma_unmap_single(dev, reqctx->iv_dma, IV, DMA_BIDIRECTIONAL);
2504 return -ENOMEM;
2507 void chcr_aead_dma_unmap(struct device *dev,
2508 struct aead_request *req,
2509 unsigned short op_type)
2511 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2512 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2513 unsigned int authsize = crypto_aead_authsize(tfm);
2514 int dst_size;
2516 dst_size = req->assoclen + req->cryptlen + (op_type ?
2517 -authsize : authsize);
2518 if (!req->cryptlen || !dst_size)
2519 return;
2521 dma_unmap_single(dev, reqctx->iv_dma, (IV + reqctx->b0_len),
2522 DMA_BIDIRECTIONAL);
2523 if (req->src == req->dst) {
2524 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2525 DMA_BIDIRECTIONAL);
2526 } else {
2527 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2528 DMA_TO_DEVICE);
2529 dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2530 DMA_FROM_DEVICE);
2531 }
2534 void chcr_add_aead_src_ent(struct aead_request *req,
2535 struct ulptx_sgl *ulptx)
2537 struct ulptx_walk ulp_walk;
2538 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2540 if (reqctx->imm) {
2541 u8 *buf = (u8 *)ulptx;
2543 if (reqctx->b0_len) {
2544 memcpy(buf, reqctx->scratch_pad, reqctx->b0_len);
2545 buf += reqctx->b0_len;
2546 }
2547 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2548 buf, req->cryptlen + req->assoclen, 0);
2549 } else {
2550 ulptx_walk_init(&ulp_walk, ulptx);
2551 if (reqctx->b0_len)
2552 ulptx_walk_add_page(&ulp_walk, reqctx->b0_len,
2553 reqctx->b0_dma);
2554 ulptx_walk_add_sg(&ulp_walk, req->src, req->cryptlen +
2555 req->assoclen, 0);
2556 ulptx_walk_end(&ulp_walk);
2557 }
2560 void chcr_add_aead_dst_ent(struct aead_request *req,
2561 struct cpl_rx_phys_dsgl *phys_cpl,
2562 unsigned short qid)
2564 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2565 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2566 struct dsgl_walk dsgl_walk;
2567 unsigned int authsize = crypto_aead_authsize(tfm);
2568 struct chcr_context *ctx = a_ctx(tfm);
2569 u32 temp;
2571 dsgl_walk_init(&dsgl_walk, phys_cpl);
2572 dsgl_walk_add_page(&dsgl_walk, IV + reqctx->b0_len, reqctx->iv_dma);
2573 temp = req->assoclen + req->cryptlen +
2574 (reqctx->op ? -authsize : authsize);
2575 dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, 0);
2576 dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
2579 void chcr_add_cipher_src_ent(struct ablkcipher_request *req,
2580 void *ulptx,
2581 struct cipher_wr_param *wrparam)
2583 struct ulptx_walk ulp_walk;
2584 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2585 u8 *buf = ulptx;
2587 memcpy(buf, reqctx->iv, IV);
2588 buf += IV;
2589 if (reqctx->imm) {
2590 sg_pcopy_to_buffer(req->src, sg_nents(req->src),
2591 buf, wrparam->bytes, reqctx->processed);
2592 } else {
2593 ulptx_walk_init(&ulp_walk, (struct ulptx_sgl *)buf);
2594 ulptx_walk_add_sg(&ulp_walk, reqctx->srcsg, wrparam->bytes,
2595 reqctx->src_ofst);
2596 reqctx->srcsg = ulp_walk.last_sg;
2597 reqctx->src_ofst = ulp_walk.last_sg_len;
2598 ulptx_walk_end(&ulp_walk);
2599 }
2602 void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
2603 struct cpl_rx_phys_dsgl *phys_cpl,
2604 struct cipher_wr_param *wrparam,
2605 unsigned short qid)
2607 struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
2608 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
2609 struct chcr_context *ctx = c_ctx(tfm);
2610 struct dsgl_walk dsgl_walk;
2612 dsgl_walk_init(&dsgl_walk, phys_cpl);
2613 dsgl_walk_add_sg(&dsgl_walk, reqctx->dstsg, wrparam->bytes,
2614 reqctx->dst_ofst);
2615 reqctx->dstsg = dsgl_walk.last_sg;
2616 reqctx->dst_ofst = dsgl_walk.last_sg_len;
2618 dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
2621 void chcr_add_hash_src_ent(struct ahash_request *req,
2622 struct ulptx_sgl *ulptx,
2623 struct hash_wr_param *param)
2625 struct ulptx_walk ulp_walk;
2626 struct chcr_ahash_req_ctx *reqctx = ahash_request_ctx(req);
2628 if (reqctx->hctx_wr.imm) {
2629 u8 *buf = (u8 *)ulptx;
2631 if (param->bfr_len) {
2632 memcpy(buf, reqctx->reqbfr, param->bfr_len);
2633 buf += param->bfr_len;
2634 }
2636 sg_pcopy_to_buffer(reqctx->hctx_wr.srcsg,
2637 sg_nents(reqctx->hctx_wr.srcsg), buf,
2638 param->sg_len, 0);
2639 } else {
2640 ulptx_walk_init(&ulp_walk, ulptx);
2641 if (param->bfr_len)
2642 ulptx_walk_add_page(&ulp_walk, param->bfr_len,
2643 reqctx->hctx_wr.dma_addr);
2644 ulptx_walk_add_sg(&ulp_walk, reqctx->hctx_wr.srcsg,
2645 param->sg_len, reqctx->hctx_wr.src_ofst);
2646 reqctx->hctx_wr.srcsg = ulp_walk.last_sg;
2647 reqctx->hctx_wr.src_ofst = ulp_walk.last_sg_len;
2648 ulptx_walk_end(&ulp_walk);
2649 }
2652 int chcr_hash_dma_map(struct device *dev,
2653 struct ahash_request *req)
2655 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2656 int error = 0;
2658 if (!req->nbytes)
2659 return 0;
2660 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2661 DMA_TO_DEVICE);
2662 if (!error)
2663 return -ENOMEM;
2664 req_ctx->hctx_wr.is_sg_map = 1;
2665 return 0;
2668 void chcr_hash_dma_unmap(struct device *dev,
2669 struct ahash_request *req)
2671 struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
2673 if (!req->nbytes)
2674 return;
2676 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2677 DMA_TO_DEVICE);
2678 req_ctx->hctx_wr.is_sg_map = 0;
2682 int chcr_cipher_dma_map(struct device *dev,
2683 struct ablkcipher_request *req)
2685 int error;
2687 if (req->src == req->dst) {
2688 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2689 DMA_BIDIRECTIONAL);
2690 if (!error)
2691 goto err;
2692 } else {
2693 error = dma_map_sg(dev, req->src, sg_nents(req->src),
2694 DMA_TO_DEVICE);
2695 if (!error)
2696 goto err;
2697 error = dma_map_sg(dev, req->dst, sg_nents(req->dst),
2698 DMA_FROM_DEVICE);
2699 if (!error) {
2700 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2701 DMA_TO_DEVICE);
2702 goto err;
2703 }
2704 }
2706 return 0;
2707 err:
2708 return -ENOMEM;
2711 void chcr_cipher_dma_unmap(struct device *dev,
2712 struct ablkcipher_request *req)
2714 if (req->src == req->dst) {
2715 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2716 DMA_BIDIRECTIONAL);
2717 } else {
2718 dma_unmap_sg(dev, req->src, sg_nents(req->src),
2719 DMA_TO_DEVICE);
2720 dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
2721 DMA_FROM_DEVICE);
2722 }
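/*
 * set_msg_len() writes the CCM length field. Worked example added by
 * the editor: msglen = 0x1234 with csize = 4 stores the big-endian
 * bytes 00 00 12 34 in the last csize bytes of the 16-byte B0 block;
 * with csize = 1 any msglen above 2^8 cannot be encoded and
 * -EOVERFLOW is returned.
 */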
2725 static int set_msg_len(u8 *block, unsigned int msglen, int csize)
2727 __be32 data;
2729 memset(block, 0, csize);
2730 block += csize;
2732 if (csize >= 4)
2733 csize = 4;
2734 else if (msglen > (unsigned int)(1 << (8 * csize)))
2735 return -EOVERFLOW;
2737 data = cpu_to_be32(msglen);
2738 memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
2740 return 0;
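/*
 * generate_b0() builds the CCM B0 block (editor's sketch of the
 * NIST SP 800-38C layout): byte 0 holds the flags - bit 6 set when AAD
 * is present, bits 3..5 encode (authsize - 2) / 2, bits 0..2 hold
 * L' = L - 1 - followed by the nonce and the L-byte message length.
 * e.g. authsize m = 8 with iv[0] = 3 yields flags |= 64 | (8 * 3) and
 * a 4-byte length field.
 */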
2743 static int generate_b0(struct aead_request *req, u8 *ivptr,
2744 unsigned short op_type)
2746 unsigned int l, lp, m;
2747 int rc;
2748 struct crypto_aead *aead = crypto_aead_reqtfm(req);
2749 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2750 u8 *b0 = reqctx->scratch_pad;
2752 m = crypto_aead_authsize(aead);
2754 memcpy(b0, ivptr, 16);
2756 lp = b0[0];
2757 l = lp + 1;
2759 /* set m, bits 3-5 */
2760 *b0 |= (8 * ((m - 2) / 2));
2762 /* set adata, bit 6, if associated data is used */
2763 if (req->assoclen)
2764 *b0 |= 64;
2765 rc = set_msg_len(b0 + 16 - l,
2766 (op_type == CHCR_DECRYPT_OP) ?
2767 req->cryptlen - m : req->cryptlen, l);
2769 return rc;
2772 static inline int crypto_ccm_check_iv(const u8 *iv)
2774 /* 2 <= L <= 8, so 1 <= L' <= 7. */
2775 if (iv[0] < 1 || iv[0] > 7)
2776 return -EINVAL;
2778 return 0;
2781 static int ccm_format_packet(struct aead_request *req,
2782 u8 *ivptr,
2783 unsigned int sub_type,
2784 unsigned short op_type,
2785 unsigned int assoclen)
2787 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2788 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2789 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2790 int rc = 0;
2792 if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2793 ivptr[0] = 3;
2794 memcpy(ivptr + 1, &aeadctx->salt[0], 3);
2795 memcpy(ivptr + 4, req->iv, 8);
2796 memset(ivptr + 12, 0, 4);
2797 } else {
2798 memcpy(ivptr, req->iv, 16);
2799 }
2800 if (assoclen)
2801 *((unsigned short *)(reqctx->scratch_pad + 16)) =
2802 htons(assoclen);
2804 rc = generate_b0(req, ivptr, op_type);
2805 /* zero the ctr value */
2806 memset(ivptr + 15 - ivptr[0], 0, ivptr[0] + 1);
2807 return rc;
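/*
 * Editor's note on fill_sec_cpl_for_aead() below: ccm_xtra accounts
 * for the 16-byte B0 block plus, when AAD is present, the 2-byte AAD
 * length field that the hardware authenticates in addition to the
 * request payload, so every start/stop offset in the CPL is shifted
 * by that amount.
 */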
2810 static void fill_sec_cpl_for_aead(struct cpl_tx_sec_pdu *sec_cpl,
2811 unsigned int dst_size,
2812 struct aead_request *req,
2813 unsigned short op_type)
2815 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2816 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2817 unsigned int cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CCM;
2818 unsigned int mac_mode = CHCR_SCMD_AUTH_MODE_CBCMAC;
2819 unsigned int c_id = a_ctx(tfm)->tx_chan_id;
2820 unsigned int ccm_xtra;
2821 unsigned char tag_offset = 0, auth_offset = 0;
2822 unsigned int assoclen;
2824 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2825 assoclen = req->assoclen - 8;
2827 assoclen = req->assoclen;
2828 ccm_xtra = CCM_B0_SIZE +
2829 ((assoclen) ? CCM_AAD_FIELD_SIZE : 0);
2831 auth_offset = req->cryptlen ?
2832 (req->assoclen + IV + 1 + ccm_xtra) : 0;
2833 if (op_type == CHCR_DECRYPT_OP) {
2834 if (crypto_aead_authsize(tfm) != req->cryptlen)
2835 tag_offset = crypto_aead_authsize(tfm);
2836 else
2837 auth_offset = 0;
2838 }
2841 sec_cpl->op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(c_id,
2842 2, 1);
2843 sec_cpl->pldlen =
2844 htonl(req->assoclen + IV + req->cryptlen + ccm_xtra);
2845 /* For CCM there will always be B0, so the AAD start is always 1 */
2846 sec_cpl->aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
2847 1 + IV, IV + assoclen + ccm_xtra,
2848 req->assoclen + IV + 1 + ccm_xtra, 0);
2850 sec_cpl->cipherstop_lo_authinsert = FILL_SEC_CPL_AUTHINSERT(0,
2851 auth_offset, tag_offset,
2852 (op_type == CHCR_ENCRYPT_OP) ? 0 :
2853 crypto_aead_authsize(tfm));
2854 sec_cpl->seqno_numivs = FILL_SEC_CPL_SCMD0_SEQNO(op_type,
2855 (op_type == CHCR_ENCRYPT_OP) ? 0 : 1,
2856 cipher_mode, mac_mode,
2857 aeadctx->hmac_ctrl, IV >> 1);
2859 sec_cpl->ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1, 0,
2860 ccm_xtra);
2863 static int aead_ccm_validate_input(unsigned short op_type,
2864 struct aead_request *req,
2865 struct chcr_aead_ctx *aeadctx,
2866 unsigned int sub_type)
2868 if (sub_type != CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309) {
2869 if (crypto_ccm_check_iv(req->iv)) {
2870 pr_err("CCM: IV check fails\n");
2871 return -EINVAL;
2872 }
2873 } else {
2874 if (req->assoclen != 16 && req->assoclen != 20) {
2875 pr_err("RFC4309: Invalid AAD length %d\n",
2876 req->assoclen);
2877 return -EINVAL;
2878 }
2879 }
2880 return 0;
2883 static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
2884 unsigned short qid,
2885 int size)
2887 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2888 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2889 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2890 struct sk_buff *skb = NULL;
2891 struct chcr_wr *chcr_req;
2892 struct cpl_rx_phys_dsgl *phys_cpl;
2893 struct ulptx_sgl *ulptx;
2894 unsigned int transhdr_len;
2895 unsigned int dst_size = 0, kctx_len, dnents, temp, snents;
2896 unsigned int sub_type, assoclen = req->assoclen;
2897 unsigned int authsize = crypto_aead_authsize(tfm);
2898 int error = -EINVAL;
2900 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2901 GFP_ATOMIC;
2902 struct adapter *adap = padap(a_ctx(tfm)->dev);
2904 sub_type = get_aead_subtype(tfm);
2905 if (sub_type == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309)
2906 assoclen -= 8;
2907 reqctx->b0_len = CCM_B0_SIZE + (assoclen ? CCM_AAD_FIELD_SIZE : 0);
2908 error = chcr_aead_common_init(req);
2909 if (error)
2910 return ERR_PTR(error);
2912 error = aead_ccm_validate_input(reqctx->op, req, aeadctx, sub_type);
2913 if (error)
2914 goto err;
2915 dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen
2916 + (reqctx->op ? -authsize : authsize),
2917 CHCR_DST_SG_SIZE, 0);
2918 dnents += MIN_CCM_SG; // For IV and B0
2919 dst_size = get_space_for_phys_dsgl(dnents);
2920 snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
2921 CHCR_SRC_SG_SIZE, 0);
2922 snents += MIN_CCM_SG; //For B0
2923 kctx_len = roundup(aeadctx->enckey_len, 16) * 2;
2924 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
2925 reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen +
2926 reqctx->b0_len) <= SGE_MAX_WR_LEN;
2927 temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen +
2928 reqctx->b0_len, 16) :
2929 (sgl_len(snents) * 8);
2930 transhdr_len += temp;
2931 transhdr_len = roundup(transhdr_len, 16);
2933 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE -
2934 reqctx->b0_len, transhdr_len, reqctx->op)) {
2935 atomic_inc(&adap->chcr_stats.fallback);
2936 chcr_aead_common_exit(req);
2937 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
2939 skb = alloc_skb(transhdr_len, flags);
2940 if (!skb) {
2941 error = -ENOMEM;
2942 goto err;
2943 }
2946 chcr_req = __skb_put_zero(skb, transhdr_len);
2948 fill_sec_cpl_for_aead(&chcr_req->sec_cpl, dst_size, req, reqctx->op);
2950 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
2951 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
2952 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
2953 aeadctx->key, aeadctx->enckey_len);
2955 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
2956 ivptr = (u8 *)(phys_cpl + 1) + dst_size;
2957 ulptx = (struct ulptx_sgl *)(ivptr + IV);
2958 error = ccm_format_packet(req, ivptr, sub_type, reqctx->op, assoclen);
2959 if (error)
2960 goto dstmap_fail;
2961 chcr_add_aead_dst_ent(req, phys_cpl, qid);
2962 chcr_add_aead_src_ent(req, ulptx);
2964 atomic_inc(&adap->chcr_stats.aead_rqst);
2965 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
2966 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen +
2967 reqctx->b0_len) : 0);
2968 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, 0,
2969 transhdr_len, temp, 0);
2970 reqctx->skb = skb;
2971 return skb;
2973 dstmap_fail:
2974 kfree_skb(skb);
2975 err:
2976 chcr_aead_common_exit(req);
2977 return ERR_PTR(error);
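/*
 * create_gcm_wr() (hedged summary by the editor): the key context
 * carries the AES key followed by the GHASH subkey H = E_K(0^128)
 * precomputed at setkey time, and the 16-byte IV is laid out as
 * salt | explicit IV | ctr = 1 for rfc4106, or IV | ctr = 1 for plain
 * gcm(aes), matching GCM's J0 construction.
 */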
2980 static struct sk_buff *create_gcm_wr(struct aead_request *req,
2981 unsigned short qid,
2982 int size)
2984 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
2985 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
2986 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
2987 struct sk_buff *skb = NULL;
2988 struct chcr_wr *chcr_req;
2989 struct cpl_rx_phys_dsgl *phys_cpl;
2990 struct ulptx_sgl *ulptx;
2991 unsigned int transhdr_len, dnents = 0, snents;
2992 unsigned int dst_size = 0, temp = 0, kctx_len, assoclen = req->assoclen;
2993 unsigned int authsize = crypto_aead_authsize(tfm);
2994 int error = -EINVAL;
2995 u8 *ivptr;
2996 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
2997 GFP_ATOMIC;
2998 struct adapter *adap = padap(a_ctx(tfm)->dev);
3000 if (get_aead_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106)
3001 assoclen = req->assoclen - 8;
3004 error = chcr_aead_common_init(req);
3005 if (error)
3006 return ERR_PTR(error);
3007 dnents = sg_nents_xlen(req->dst, req->assoclen + req->cryptlen +
3008 (reqctx->op ? -authsize : authsize),
3009 CHCR_DST_SG_SIZE, 0);
3010 snents = sg_nents_xlen(req->src, req->assoclen + req->cryptlen,
3011 CHCR_SRC_SG_SIZE, 0);
3012 dnents += MIN_GCM_SG; // For IV
3013 dst_size = get_space_for_phys_dsgl(dnents);
3014 kctx_len = roundup(aeadctx->enckey_len, 16) + AEAD_H_SIZE;
3015 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dst_size);
3016 reqctx->imm = (transhdr_len + req->assoclen + req->cryptlen) <=
3017 SGE_MAX_WR_LEN;
3018 temp = reqctx->imm ? roundup(req->assoclen + req->cryptlen, 16) :
3019 (sgl_len(snents) * 8);
3020 transhdr_len += temp;
3021 transhdr_len = roundup(transhdr_len, 16);
3022 if (chcr_aead_need_fallback(req, dnents, T6_MAX_AAD_SIZE,
3023 transhdr_len, reqctx->op)) {
3025 atomic_inc(&adap->chcr_stats.fallback);
3026 chcr_aead_common_exit(req);
3027 return ERR_PTR(chcr_aead_fallback(req, reqctx->op));
3029 skb = alloc_skb(transhdr_len, flags);
3030 if (!skb) {
3031 error = -ENOMEM;
3032 goto err;
3033 }
3035 chcr_req = __skb_put_zero(skb, transhdr_len);
3037 //Offset of tag from end
3038 temp = (reqctx->op == CHCR_ENCRYPT_OP) ? 0 : authsize;
3039 chcr_req->sec_cpl.op_ivinsrtofst = FILL_SEC_CPL_OP_IVINSR(
3040 a_ctx(tfm)->tx_chan_id, 2, 1);
3041 chcr_req->sec_cpl.pldlen =
3042 htonl(req->assoclen + IV + req->cryptlen);
3043 chcr_req->sec_cpl.aadstart_cipherstop_hi = FILL_SEC_CPL_CIPHERSTOP_HI(
3044 assoclen ? 1 + IV : 0,
3045 assoclen ? IV + assoclen : 0,
3046 req->assoclen + IV + 1, 0);
3047 chcr_req->sec_cpl.cipherstop_lo_authinsert =
3048 FILL_SEC_CPL_AUTHINSERT(0, req->assoclen + IV + 1,
3049 temp, temp);
3050 chcr_req->sec_cpl.seqno_numivs =
3051 FILL_SEC_CPL_SCMD0_SEQNO(reqctx->op, (reqctx->op ==
3052 CHCR_ENCRYPT_OP) ? 1 : 0,
3053 CHCR_SCMD_CIPHER_MODE_AES_GCM,
3054 CHCR_SCMD_AUTH_MODE_GHASH,
3055 aeadctx->hmac_ctrl, IV >> 1);
3056 chcr_req->sec_cpl.ivgen_hdrlen = FILL_SEC_CPL_IVGEN_HDRLEN(0, 0, 1,
3057 0, 0, dst_size);
3058 chcr_req->key_ctx.ctx_hdr = aeadctx->key_ctx_hdr;
3059 memcpy(chcr_req->key_ctx.key, aeadctx->key, aeadctx->enckey_len);
3060 memcpy(chcr_req->key_ctx.key + roundup(aeadctx->enckey_len, 16),
3061 GCM_CTX(aeadctx)->ghash_h, AEAD_H_SIZE);
3063 phys_cpl = (struct cpl_rx_phys_dsgl *)((u8 *)(chcr_req + 1) + kctx_len);
3064 ivptr = (u8 *)(phys_cpl + 1) + dst_size;
3065 /* prepare a 16-byte IV: S A L T | IV | 0x00000001 */
3067 if (get_aead_subtype(tfm) ==
3068 CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106) {
3069 memcpy(ivptr, aeadctx->salt, 4);
3070 memcpy(ivptr + 4, req->iv, GCM_RFC4106_IV_SIZE);
3071 } else {
3072 memcpy(ivptr, req->iv, GCM_AES_IV_SIZE);
3073 }
3074 *((unsigned int *)(ivptr + 12)) = htonl(0x01);
3076 ulptx = (struct ulptx_sgl *)(ivptr + 16);
3078 chcr_add_aead_dst_ent(req, phys_cpl, qid);
3079 chcr_add_aead_src_ent(req, ulptx);
3080 atomic_inc(&adap->chcr_stats.aead_rqst);
3081 temp = sizeof(struct cpl_rx_phys_dsgl) + dst_size + IV +
3082 kctx_len + (reqctx->imm ? (req->assoclen + req->cryptlen) : 0);
3083 create_wreq(a_ctx(tfm), chcr_req, &req->base, reqctx->imm, size,
3084 transhdr_len, temp, reqctx->verify);
3085 reqctx->skb = skb;
3086 return skb;
3088 err:
3089 chcr_aead_common_exit(req);
3090 return ERR_PTR(error);
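/*
 * Fallback strategy (editor's note): every chcr AEAD keeps a software
 * sw_cipher allocated with CRYPTO_ALG_NEED_FALLBACK so oversized AAD,
 * overlong work requests or a detaching device can be serviced by the
 * generic implementation; the request size is the max of this driver's
 * context and the fallback's, so one request buffer serves both paths.
 */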
3095 static int chcr_aead_cra_init(struct crypto_aead *tfm)
3097 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3098 struct aead_alg *alg = crypto_aead_alg(tfm);
3100 aeadctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
3101 CRYPTO_ALG_NEED_FALLBACK |
3102 CRYPTO_ALG_ASYNC);
3103 if (IS_ERR(aeadctx->sw_cipher))
3104 return PTR_ERR(aeadctx->sw_cipher);
3105 crypto_aead_set_reqsize(tfm, max(sizeof(struct chcr_aead_reqctx),
3106 sizeof(struct aead_request) +
3107 crypto_aead_reqsize(aeadctx->sw_cipher)));
3108 return chcr_device_init(a_ctx(tfm));
3111 static void chcr_aead_cra_exit(struct crypto_aead *tfm)
3113 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3115 crypto_free_aead(aeadctx->sw_cipher);
3118 static int chcr_authenc_null_setauthsize(struct crypto_aead *tfm,
3119 unsigned int authsize)
3121 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3123 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NOP;
3124 aeadctx->mayverify = VERIFY_HW;
3125 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3127 static int chcr_authenc_setauthsize(struct crypto_aead *tfm,
3128 unsigned int authsize)
3130 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3131 u32 maxauth = crypto_aead_maxauthsize(tfm);
3133 /* SHA1 authsize in IPsec is 12 instead of 10, i.e. maxauthsize / 2
3134 * does not hold for SHA1. The authsize == 12 check must therefore
3135 * come before authsize == (maxauth >> 1).
3136 */
3137 if (authsize == ICV_4) {
3138 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3139 aeadctx->mayverify = VERIFY_HW;
3140 } else if (authsize == ICV_6) {
3141 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3142 aeadctx->mayverify = VERIFY_HW;
3143 } else if (authsize == ICV_10) {
3144 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3145 aeadctx->mayverify = VERIFY_HW;
3146 } else if (authsize == ICV_12) {
3147 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3148 aeadctx->mayverify = VERIFY_HW;
3149 } else if (authsize == ICV_14) {
3150 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3151 aeadctx->mayverify = VERIFY_HW;
3152 } else if (authsize == (maxauth >> 1)) {
3153 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3154 aeadctx->mayverify = VERIFY_HW;
3155 } else if (authsize == maxauth) {
3156 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3157 aeadctx->mayverify = VERIFY_HW;
3158 } else {
3159 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3160 aeadctx->mayverify = VERIFY_SW;
3161 }
3162 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3166 static int chcr_gcm_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
3168 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3170 switch (authsize) {
3171 case ICV_4:
3172 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3173 aeadctx->mayverify = VERIFY_HW;
3174 break;
3175 case ICV_8:
3176 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3177 aeadctx->mayverify = VERIFY_HW;
3178 break;
3179 case ICV_12:
3180 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3181 aeadctx->mayverify = VERIFY_HW;
3182 break;
3183 case ICV_14:
3184 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3185 aeadctx->mayverify = VERIFY_HW;
3186 break;
3187 case ICV_16:
3188 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3189 aeadctx->mayverify = VERIFY_HW;
3190 break;
3191 case ICV_13:
3192 case ICV_15:
3193 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3194 aeadctx->mayverify = VERIFY_SW;
3195 break;
3197 default:
3198 crypto_tfm_set_flags((struct crypto_tfm *) tfm,
3199 CRYPTO_TFM_RES_BAD_KEY_LEN);
3200 return -EINVAL;
3201 }
3202 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3205 static int chcr_4106_4309_setauthsize(struct crypto_aead *tfm,
3206 unsigned int authsize)
3208 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3210 switch (authsize) {
3211 case ICV_8:
3212 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3213 aeadctx->mayverify = VERIFY_HW;
3214 break;
3215 case ICV_12:
3216 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3217 aeadctx->mayverify = VERIFY_HW;
3218 break;
3219 case ICV_16:
3220 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3221 aeadctx->mayverify = VERIFY_HW;
3222 break;
3223 default:
3224 crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3225 CRYPTO_TFM_RES_BAD_KEY_LEN);
3226 return -EINVAL;
3227 }
3228 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3231 static int chcr_ccm_setauthsize(struct crypto_aead *tfm,
3232 unsigned int authsize)
3234 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3236 switch (authsize) {
3237 case ICV_4:
3238 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL1;
3239 aeadctx->mayverify = VERIFY_HW;
3240 break;
3241 case ICV_6:
3242 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL2;
3243 aeadctx->mayverify = VERIFY_HW;
3244 break;
3245 case ICV_8:
3246 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_DIV2;
3247 aeadctx->mayverify = VERIFY_HW;
3248 break;
3249 case ICV_10:
3250 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366;
3251 aeadctx->mayverify = VERIFY_HW;
3252 break;
3253 case ICV_12:
3254 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT;
3255 aeadctx->mayverify = VERIFY_HW;
3256 break;
3257 case ICV_14:
3258 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_PL3;
3259 aeadctx->mayverify = VERIFY_HW;
3260 break;
3261 case ICV_16:
3262 aeadctx->hmac_ctrl = CHCR_SCMD_HMAC_CTRL_NO_TRUNC;
3263 aeadctx->mayverify = VERIFY_HW;
3264 break;
3265 default:
3266 crypto_tfm_set_flags((struct crypto_tfm *)tfm,
3267 CRYPTO_TFM_RES_BAD_KEY_LEN);
3268 return -EINVAL;
3269 }
3270 return crypto_aead_setauthsize(aeadctx->sw_cipher, authsize);
3273 static int chcr_ccm_common_setkey(struct crypto_aead *aead,
3275 unsigned int keylen)
3277 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3278 unsigned char ck_size, mk_size;
3279 int key_ctx_size = 0;
3281 key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) * 2;
3282 if (keylen == AES_KEYSIZE_128) {
3283 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3284 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
3285 } else if (keylen == AES_KEYSIZE_192) {
3286 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3287 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
3288 } else if (keylen == AES_KEYSIZE_256) {
3289 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3290 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
3291 } else {
3292 crypto_tfm_set_flags((struct crypto_tfm *)aead,
3293 CRYPTO_TFM_RES_BAD_KEY_LEN);
3294 aeadctx->enckey_len = 0;
3295 return -EINVAL;
3296 }
3297 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, mk_size, 0, 0,
3298 key_ctx_size >> 4);
3299 memcpy(aeadctx->key, key, keylen);
3300 aeadctx->enckey_len = keylen;
3302 return 0;
3305 static int chcr_aead_ccm_setkey(struct crypto_aead *aead,
3307 unsigned int keylen)
3309 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3312 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3313 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3314 CRYPTO_TFM_REQ_MASK);
3315 error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3316 crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3317 crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3318 CRYPTO_TFM_RES_MASK);
3319 if (error)
3320 return error;
3321 return chcr_ccm_common_setkey(aead, key, keylen);
3324 static int chcr_aead_rfc4309_setkey(struct crypto_aead *aead, const u8 *key,
3325 unsigned int keylen)
3327 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3330 if (keylen < 3) {
3331 crypto_tfm_set_flags((struct crypto_tfm *)aead,
3332 CRYPTO_TFM_RES_BAD_KEY_LEN);
3333 aeadctx->enckey_len = 0;
3334 return -EINVAL;
3335 }
3336 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3337 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead) &
3338 CRYPTO_TFM_REQ_MASK);
3339 error = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3340 crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3341 crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3342 CRYPTO_TFM_RES_MASK);
3343 if (error)
3344 return error;
3345 keylen -= 3;
3346 memcpy(aeadctx->salt, key + keylen, 3);
3347 return chcr_ccm_common_setkey(aead, key, keylen);
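/*
 * chcr_gcm_setkey() below also precomputes the GHASH hash subkey.
 * A minimal sketch of the same computation with the in-kernel AES
 * library (mirrors the code below; added by the editor as an
 * illustration):
 *
 *	struct crypto_aes_ctx aes;
 *	u8 h[AES_BLOCK_SIZE] = { 0 };
 *
 *	if (!aes_expandkey(&aes, key, keylen))
 *		aes_encrypt(&aes, h, h);	// h = E_K(0^128)
 *	memzero_explicit(&aes, sizeof(aes));
 */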
3350 static int chcr_gcm_setkey(struct crypto_aead *aead, const u8 *key,
3351 unsigned int keylen)
3353 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(aead));
3354 struct chcr_gcm_ctx *gctx = GCM_CTX(aeadctx);
3355 unsigned int ck_size;
3356 int ret = 0, key_ctx_size = 0;
3357 struct crypto_aes_ctx aes;
3359 aeadctx->enckey_len = 0;
3360 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3361 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(aead)
3362 & CRYPTO_TFM_REQ_MASK);
3363 ret = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3364 crypto_aead_clear_flags(aead, CRYPTO_TFM_RES_MASK);
3365 crypto_aead_set_flags(aead, crypto_aead_get_flags(aeadctx->sw_cipher) &
3366 CRYPTO_TFM_RES_MASK);
3367 if (ret)
3368 goto out;
3370 if (get_aead_subtype(aead) == CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106 &&
3371 keylen > 16) {
3372 keylen -= 4;  /* nonce/salt is present in the last 4 bytes */
3373 memcpy(aeadctx->salt, key + keylen, 4);
3374 }
3375 if (keylen == AES_KEYSIZE_128) {
3376 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3377 } else if (keylen == AES_KEYSIZE_192) {
3378 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3379 } else if (keylen == AES_KEYSIZE_256) {
3380 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3381 } else {
3382 crypto_tfm_set_flags((struct crypto_tfm *)aead,
3383 CRYPTO_TFM_RES_BAD_KEY_LEN);
3384 pr_err("GCM: Invalid key length %d\n", keylen);
3385 ret = -EINVAL;
3386 goto out;
3387 }
3389 memcpy(aeadctx->key, key, keylen);
3390 aeadctx->enckey_len = keylen;
3391 key_ctx_size = sizeof(struct _key_ctx) + roundup(keylen, 16) +
3392 AEAD_H_SIZE;
3393 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size,
3394 CHCR_KEYCTX_MAC_KEY_SIZE_128,
3395 0, 0,
3396 key_ctx_size >> 4);
3397 /* Calculate the H = CIPH(K, 0 repeated 16 times).
3398 * It goes into the key context.
3399 */
3400 ret = aes_expandkey(&aes, key, keylen);
3401 if (ret) {
3402 aeadctx->enckey_len = 0;
3403 goto out;
3404 }
3405 memset(gctx->ghash_h, 0, AEAD_H_SIZE);
3406 aes_encrypt(&aes, gctx->ghash_h, gctx->ghash_h);
3407 memzero_explicit(&aes, sizeof(aes));
3409 out:
3410 return ret;
3413 static int chcr_authenc_setkey(struct crypto_aead *authenc, const u8 *key,
3414 unsigned int keylen)
3416 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3417 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3418 /* it contains both the auth and cipher keys */
3419 struct crypto_authenc_keys keys;
3420 unsigned int bs, subtype;
3421 unsigned int max_authsize = crypto_aead_alg(authenc)->maxauthsize;
3422 int err = 0, i, key_ctx_len = 0;
3423 unsigned char ck_size = 0;
3424 unsigned char pad[CHCR_HASH_MAX_BLOCK_SIZE_128] = { 0 };
3425 struct crypto_shash *base_hash = ERR_PTR(-EINVAL);
3426 struct algo_param param;
3427 int align;
3428 u8 *o_ptr = NULL;
3430 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3431 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3432 & CRYPTO_TFM_REQ_MASK);
3433 err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3434 crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3435 crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3436 & CRYPTO_TFM_RES_MASK);
3437 if (err)
3438 goto out;
3440 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3441 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3442 goto out;
3443 }
3445 if (get_alg_config(¶m, max_authsize)) {
3446 pr_err("chcr : Unsupported digest size\n");
3449 subtype = get_aead_subtype(authenc);
3450 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3451 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3452 if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3453 goto out;
3454 memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3455 - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3456 keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3457 }
3458 if (keys.enckeylen == AES_KEYSIZE_128) {
3459 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3460 } else if (keys.enckeylen == AES_KEYSIZE_192) {
3461 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3462 } else if (keys.enckeylen == AES_KEYSIZE_256) {
3463 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3465 pr_err("chcr : Unsupported cipher key\n");
3469 /* Copy only the encryption key. We use the authkey to generate h(ipad)
3470 * and h(opad) so the authkey is not needed again. authkeylen equals
3471 * the hash digest size.
3472 */
3473 memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3474 aeadctx->enckey_len = keys.enckeylen;
3475 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3476 subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3478 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3479 aeadctx->enckey_len << 3);
3480 }
3481 base_hash = chcr_alloc_shash(max_authsize);
3482 if (IS_ERR(base_hash)) {
3483 pr_err("chcr : Base driver cannot be loaded\n");
3484 aeadctx->enckey_len = 0;
3485 memzero_explicit(&keys, sizeof(keys));
3486 return -EINVAL;
3487 }
3488 {
3489 SHASH_DESC_ON_STACK(shash, base_hash);
3491 shash->tfm = base_hash;
3492 bs = crypto_shash_blocksize(base_hash);
3493 align = KEYCTX_ALIGN_PAD(max_authsize);
3494 o_ptr = actx->h_iopad + param.result_size + align;
3496 if (keys.authkeylen > bs) {
3497 err = crypto_shash_digest(shash, keys.authkey,
3498 keys.authkeylen,
3499 o_ptr);
3500 if (err) {
3501 pr_err("chcr : Base driver cannot be loaded\n");
3502 goto out;
3503 }
3504 keys.authkeylen = max_authsize;
3505 } else
3506 memcpy(o_ptr, keys.authkey, keys.authkeylen);
3508 /* Compute the ipad digest */
3509 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3510 memcpy(pad, o_ptr, keys.authkeylen);
3511 for (i = 0; i < bs >> 2; i++)
3512 *((unsigned int *)pad + i) ^= IPAD_DATA;
3514 if (chcr_compute_partial_hash(shash, pad, actx->h_iopad,
3515 max_authsize))
3516 goto out;
3517 /* Compute the opad-digest */
3518 memset(pad + keys.authkeylen, 0, bs - keys.authkeylen);
3519 memcpy(pad, o_ptr, keys.authkeylen);
3520 for (i = 0; i < bs >> 2; i++)
3521 *((unsigned int *)pad + i) ^= OPAD_DATA;
3523 if (chcr_compute_partial_hash(shash, pad, o_ptr, max_authsize))
3524 goto out;
3526 /* convert the ipad and opad digest to network order */
3527 chcr_change_order(actx->h_iopad, param.result_size);
3528 chcr_change_order(o_ptr, param.result_size);
3529 key_ctx_len = sizeof(struct _key_ctx) +
3530 roundup(keys.enckeylen, 16) +
3531 (param.result_size + align) * 2;
3532 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, param.mk_size,
3533 0, 1, key_ctx_len >> 4);
3534 actx->auth_mode = param.auth_mode;
3535 chcr_free_shash(base_hash);
3537 memzero_explicit(&keys, sizeof(keys));
3538 return 0;
3539 }
3540 out:
3541 aeadctx->enckey_len = 0;
3542 memzero_explicit(&keys, sizeof(keys));
3543 if (!IS_ERR(base_hash))
3544 chcr_free_shash(base_hash);
3545 return -EINVAL;
3548 static int chcr_aead_digest_null_setkey(struct crypto_aead *authenc,
3549 const u8 *key, unsigned int keylen)
3551 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(authenc));
3552 struct chcr_authenc_ctx *actx = AUTHENC_CTX(aeadctx);
3553 struct crypto_authenc_keys keys;
3555 /* it contains both the auth and cipher keys */
3556 unsigned int subtype;
3557 int key_ctx_len = 0;
3558 unsigned char ck_size = 0;
3559 int err;
3560 crypto_aead_clear_flags(aeadctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
3561 crypto_aead_set_flags(aeadctx->sw_cipher, crypto_aead_get_flags(authenc)
3562 & CRYPTO_TFM_REQ_MASK);
3563 err = crypto_aead_setkey(aeadctx->sw_cipher, key, keylen);
3564 crypto_aead_clear_flags(authenc, CRYPTO_TFM_RES_MASK);
3565 crypto_aead_set_flags(authenc, crypto_aead_get_flags(aeadctx->sw_cipher)
3566 & CRYPTO_TFM_RES_MASK);
3567 if (err)
3568 goto out;
3570 if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) {
3571 crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
3572 goto out;
3573 }
3574 subtype = get_aead_subtype(authenc);
3575 if (subtype == CRYPTO_ALG_SUB_TYPE_CTR_SHA ||
3576 subtype == CRYPTO_ALG_SUB_TYPE_CTR_NULL) {
3577 if (keys.enckeylen < CTR_RFC3686_NONCE_SIZE)
3578 goto out;
3579 memcpy(aeadctx->nonce, keys.enckey + (keys.enckeylen
3580 - CTR_RFC3686_NONCE_SIZE), CTR_RFC3686_NONCE_SIZE);
3581 keys.enckeylen -= CTR_RFC3686_NONCE_SIZE;
3582 }
3583 if (keys.enckeylen == AES_KEYSIZE_128) {
3584 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
3585 } else if (keys.enckeylen == AES_KEYSIZE_192) {
3586 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
3587 } else if (keys.enckeylen == AES_KEYSIZE_256) {
3588 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
3590 pr_err("chcr : Unsupported cipher key %d\n", keys.enckeylen);
3593 memcpy(aeadctx->key, keys.enckey, keys.enckeylen);
3594 aeadctx->enckey_len = keys.enckeylen;
3595 if (subtype == CRYPTO_ALG_SUB_TYPE_CBC_SHA ||
3596 subtype == CRYPTO_ALG_SUB_TYPE_CBC_NULL) {
3597 get_aes_decrypt_key(actx->dec_rrkey, aeadctx->key,
3598 aeadctx->enckey_len << 3);
3599 }
3600 key_ctx_len = sizeof(struct _key_ctx) + roundup(keys.enckeylen, 16);
3602 aeadctx->key_ctx_hdr = FILL_KEY_CTX_HDR(ck_size, CHCR_KEYCTX_NO_KEY, 0,
3603 0, key_ctx_len >> 4);
3604 actx->auth_mode = CHCR_SCMD_AUTH_MODE_NOP;
3605 memzero_explicit(&keys, sizeof(keys));
3606 return 0;
3607 out:
3608 aeadctx->enckey_len = 0;
3609 memzero_explicit(&keys, sizeof(keys));
3610 return -EINVAL;
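/*
 * chcr_aead_op() is the submission path shared by all AEAD modes
 * (editor's flow summary): bump the in-flight WR count, fall back to
 * software while the device is detaching, honour
 * CRYPTO_TFM_REQ_MAY_BACKLOG when the TX queue is full, then hand the
 * skb built by create_wr_fn to the LLD; -EINPROGRESS (or -EBUSY when
 * backlogged) is the normal asynchronous return.
 */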
3613 static int chcr_aead_op(struct aead_request *req,
3615 create_wr_t create_wr_fn)
3617 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3618 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3619 struct uld_ctx *u_ctx;
3620 struct sk_buff *skb;
3621 int isfull = 0;
3622 struct chcr_dev *cdev;
3624 cdev = a_ctx(tfm)->dev;
3626 pr_err("chcr : %s : No crypto device.\n", __func__);
3630 if (chcr_inc_wrcount(cdev)) {
3631 /* Detach state for CHCR means lldi or padap is freed;
3632 * we cannot increment the fallback counter here.
3633 */
3634 return chcr_aead_fallback(req, reqctx->op);
3637 u_ctx = ULD_CTX(a_ctx(tfm));
3638 if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
3639 a_ctx(tfm)->tx_qidx)) {
3640 isfull = 1;
3641 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
3642 chcr_dec_wrcount(cdev);
3643 return -ENOSPC;
3644 }
3645 }
3647 /* Form a WR from req */
3648 skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
3650 if (IS_ERR_OR_NULL(skb)) {
3651 chcr_dec_wrcount(cdev);
3652 return PTR_ERR_OR_ZERO(skb);
3655 skb->dev = u_ctx->lldi.ports[0];
3656 set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
3657 chcr_send_wr(skb);
3658 return isfull ? -EBUSY : -EINPROGRESS;
3661 static int chcr_aead_encrypt(struct aead_request *req)
3663 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3664 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3666 reqctx->verify = VERIFY_HW;
3667 reqctx->op = CHCR_ENCRYPT_OP;
3669 switch (get_aead_subtype(tfm)) {
3670 case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3671 case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3672 case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3673 case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3674 return chcr_aead_op(req, 0, create_authenc_wr);
3675 case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3676 case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3677 return chcr_aead_op(req, 0, create_aead_ccm_wr);
3678 default:
3679 return chcr_aead_op(req, 0, create_gcm_wr);
3680 }
3683 static int chcr_aead_decrypt(struct aead_request *req)
3685 struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3686 struct chcr_aead_ctx *aeadctx = AEAD_CTX(a_ctx(tfm));
3687 struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
3688 int size;
3690 if (aeadctx->mayverify == VERIFY_SW) {
3691 size = crypto_aead_maxauthsize(tfm);
3692 reqctx->verify = VERIFY_SW;
3693 } else {
3694 size = 0;
3695 reqctx->verify = VERIFY_HW;
3696 }
3697 reqctx->op = CHCR_DECRYPT_OP;
3698 switch (get_aead_subtype(tfm)) {
3699 case CRYPTO_ALG_SUB_TYPE_CBC_SHA:
3700 case CRYPTO_ALG_SUB_TYPE_CTR_SHA:
3701 case CRYPTO_ALG_SUB_TYPE_CBC_NULL:
3702 case CRYPTO_ALG_SUB_TYPE_CTR_NULL:
3703 return chcr_aead_op(req, size, create_authenc_wr);
3704 case CRYPTO_ALG_SUB_TYPE_AEAD_CCM:
3705 case CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309:
3706 return chcr_aead_op(req, size, create_aead_ccm_wr);
3707 default:
3708 return chcr_aead_op(req, size, create_gcm_wr);
3709 }
3712 static struct chcr_alg_template driver_algs[] = {
3715 .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CBC,
3718 .cra_name = "cbc(aes)",
3719 .cra_driver_name = "cbc-aes-chcr",
3720 .cra_blocksize = AES_BLOCK_SIZE,
3721 .cra_init = chcr_cra_init,
3722 .cra_exit = chcr_cra_exit,
3723 .cra_u.ablkcipher = {
3724 .min_keysize = AES_MIN_KEY_SIZE,
3725 .max_keysize = AES_MAX_KEY_SIZE,
3726 .ivsize = AES_BLOCK_SIZE,
3727 .setkey = chcr_aes_cbc_setkey,
3728 .encrypt = chcr_aes_encrypt,
3729 .decrypt = chcr_aes_decrypt,
3734 .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_XTS,
3737 .cra_name = "xts(aes)",
3738 .cra_driver_name = "xts-aes-chcr",
3739 .cra_blocksize = AES_BLOCK_SIZE,
3740 .cra_init = chcr_cra_init,
3742 .cra_u .ablkcipher = {
3743 .min_keysize = 2 * AES_MIN_KEY_SIZE,
3744 .max_keysize = 2 * AES_MAX_KEY_SIZE,
3745 .ivsize = AES_BLOCK_SIZE,
3746 .setkey = chcr_aes_xts_setkey,
3747 .encrypt = chcr_aes_encrypt,
3748 .decrypt = chcr_aes_decrypt,
3753 .type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_SUB_TYPE_CTR,
3756 .cra_name = "ctr(aes)",
3757 .cra_driver_name = "ctr-aes-chcr",
3759 .cra_init = chcr_cra_init,
3760 .cra_exit = chcr_cra_exit,
3761 .cra_u.ablkcipher = {
3762 .min_keysize = AES_MIN_KEY_SIZE,
3763 .max_keysize = AES_MAX_KEY_SIZE,
3764 .ivsize = AES_BLOCK_SIZE,
3765 .setkey = chcr_aes_ctr_setkey,
3766 .encrypt = chcr_aes_encrypt,
3767 .decrypt = chcr_aes_decrypt,
3772 .type = CRYPTO_ALG_TYPE_ABLKCIPHER |
3773 CRYPTO_ALG_SUB_TYPE_CTR_RFC3686,
3776 .cra_name = "rfc3686(ctr(aes))",
3777 .cra_driver_name = "rfc3686-ctr-aes-chcr",
3779 .cra_init = chcr_rfc3686_init,
3780 .cra_exit = chcr_cra_exit,
3781 .cra_u.ablkcipher = {
3782 .min_keysize = AES_MIN_KEY_SIZE +
3783 CTR_RFC3686_NONCE_SIZE,
3784 .max_keysize = AES_MAX_KEY_SIZE +
3785 CTR_RFC3686_NONCE_SIZE,
3786 .ivsize = CTR_RFC3686_IV_SIZE,
3787 .setkey = chcr_aes_rfc3686_setkey,
3788 .encrypt = chcr_aes_encrypt,
3789 .decrypt = chcr_aes_decrypt,
3795 .type = CRYPTO_ALG_TYPE_AHASH,
3798 .halg.digestsize = SHA1_DIGEST_SIZE,
3801 .cra_driver_name = "sha1-chcr",
3802 .cra_blocksize = SHA1_BLOCK_SIZE,
3807 .type = CRYPTO_ALG_TYPE_AHASH,
3810 .halg.digestsize = SHA256_DIGEST_SIZE,
3812 .cra_name = "sha256",
3813 .cra_driver_name = "sha256-chcr",
3814 .cra_blocksize = SHA256_BLOCK_SIZE,
3819 .type = CRYPTO_ALG_TYPE_AHASH,
3822 .halg.digestsize = SHA224_DIGEST_SIZE,
3824 .cra_name = "sha224",
3825 .cra_driver_name = "sha224-chcr",
3826 .cra_blocksize = SHA224_BLOCK_SIZE,
3831 .type = CRYPTO_ALG_TYPE_AHASH,
3834 .halg.digestsize = SHA384_DIGEST_SIZE,
3836 .cra_name = "sha384",
3837 .cra_driver_name = "sha384-chcr",
3838 .cra_blocksize = SHA384_BLOCK_SIZE,
3843 .type = CRYPTO_ALG_TYPE_AHASH,
3846 .halg.digestsize = SHA512_DIGEST_SIZE,
3848 .cra_name = "sha512",
3849 .cra_driver_name = "sha512-chcr",
3850 .cra_blocksize = SHA512_BLOCK_SIZE,
3856 .type = CRYPTO_ALG_TYPE_HMAC,
3859 .halg.digestsize = SHA1_DIGEST_SIZE,
3861 .cra_name = "hmac(sha1)",
3862 .cra_driver_name = "hmac-sha1-chcr",
3863 .cra_blocksize = SHA1_BLOCK_SIZE,
3868 .type = CRYPTO_ALG_TYPE_HMAC,
3871 .halg.digestsize = SHA224_DIGEST_SIZE,
3873 .cra_name = "hmac(sha224)",
3874 .cra_driver_name = "hmac-sha224-chcr",
3875 .cra_blocksize = SHA224_BLOCK_SIZE,
3880 .type = CRYPTO_ALG_TYPE_HMAC,
3883 .halg.digestsize = SHA256_DIGEST_SIZE,
3885 .cra_name = "hmac(sha256)",
3886 .cra_driver_name = "hmac-sha256-chcr",
3887 .cra_blocksize = SHA256_BLOCK_SIZE,
3892 .type = CRYPTO_ALG_TYPE_HMAC,
3895 .halg.digestsize = SHA384_DIGEST_SIZE,
3897 .cra_name = "hmac(sha384)",
3898 .cra_driver_name = "hmac-sha384-chcr",
3899 .cra_blocksize = SHA384_BLOCK_SIZE,
3904 .type = CRYPTO_ALG_TYPE_HMAC,
3907 .halg.digestsize = SHA512_DIGEST_SIZE,
3909 .cra_name = "hmac(sha512)",
3910 .cra_driver_name = "hmac-sha512-chcr",
3911 .cra_blocksize = SHA512_BLOCK_SIZE,
3915 /* Add AEAD Algorithms */
3917 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_GCM,
3921 .cra_name = "gcm(aes)",
3922 .cra_driver_name = "gcm-aes-chcr",
3924 .cra_priority = CHCR_AEAD_PRIORITY,
3925 .cra_ctxsize = sizeof(struct chcr_context) +
3926 sizeof(struct chcr_aead_ctx) +
3927 sizeof(struct chcr_gcm_ctx),
3929 .ivsize = GCM_AES_IV_SIZE,
3930 .maxauthsize = GHASH_DIGEST_SIZE,
3931 .setkey = chcr_gcm_setkey,
3932 .setauthsize = chcr_gcm_setauthsize,
3936 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4106,
3940 .cra_name = "rfc4106(gcm(aes))",
3941 .cra_driver_name = "rfc4106-gcm-aes-chcr",
3943 .cra_priority = CHCR_AEAD_PRIORITY + 1,
3944 .cra_ctxsize = sizeof(struct chcr_context) +
3945 sizeof(struct chcr_aead_ctx) +
3946 sizeof(struct chcr_gcm_ctx),
3949 .ivsize = GCM_RFC4106_IV_SIZE,
3950 .maxauthsize = GHASH_DIGEST_SIZE,
3951 .setkey = chcr_gcm_setkey,
3952 .setauthsize = chcr_4106_4309_setauthsize,
3956 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_CCM,
3960 .cra_name = "ccm(aes)",
3961 .cra_driver_name = "ccm-aes-chcr",
3963 .cra_priority = CHCR_AEAD_PRIORITY,
3964 .cra_ctxsize = sizeof(struct chcr_context) +
3965 sizeof(struct chcr_aead_ctx),
3968 .ivsize = AES_BLOCK_SIZE,
3969 .maxauthsize = GHASH_DIGEST_SIZE,
3970 .setkey = chcr_aead_ccm_setkey,
3971 .setauthsize = chcr_ccm_setauthsize,
3975 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_AEAD_RFC4309,
3979 .cra_name = "rfc4309(ccm(aes))",
3980 .cra_driver_name = "rfc4309-ccm-aes-chcr",
3982 .cra_priority = CHCR_AEAD_PRIORITY + 1,
3983 .cra_ctxsize = sizeof(struct chcr_context) +
3984 sizeof(struct chcr_aead_ctx),
3988 .maxauthsize = GHASH_DIGEST_SIZE,
3989 .setkey = chcr_aead_rfc4309_setkey,
3990 .setauthsize = chcr_4106_4309_setauthsize,
3994 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
3998 .cra_name = "authenc(hmac(sha1),cbc(aes))",
4000 "authenc-hmac-sha1-cbc-aes-chcr",
4001 .cra_blocksize = AES_BLOCK_SIZE,
4002 .cra_priority = CHCR_AEAD_PRIORITY,
4003 .cra_ctxsize = sizeof(struct chcr_context) +
4004 sizeof(struct chcr_aead_ctx) +
4005 sizeof(struct chcr_authenc_ctx),
4008 .ivsize = AES_BLOCK_SIZE,
4009 .maxauthsize = SHA1_DIGEST_SIZE,
4010 .setkey = chcr_authenc_setkey,
4011 .setauthsize = chcr_authenc_setauthsize,
4015 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4020 .cra_name = "authenc(hmac(sha256),cbc(aes))",
4022 "authenc-hmac-sha256-cbc-aes-chcr",
4023 .cra_blocksize = AES_BLOCK_SIZE,
4024 .cra_priority = CHCR_AEAD_PRIORITY,
4025 .cra_ctxsize = sizeof(struct chcr_context) +
4026 sizeof(struct chcr_aead_ctx) +
4027 sizeof(struct chcr_authenc_ctx),
4030 .ivsize = AES_BLOCK_SIZE,
4031 .maxauthsize = SHA256_DIGEST_SIZE,
4032 .setkey = chcr_authenc_setkey,
4033 .setauthsize = chcr_authenc_setauthsize,
4037 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4041 .cra_name = "authenc(hmac(sha224),cbc(aes))",
4043 "authenc-hmac-sha224-cbc-aes-chcr",
4044 .cra_blocksize = AES_BLOCK_SIZE,
4045 .cra_priority = CHCR_AEAD_PRIORITY,
4046 .cra_ctxsize = sizeof(struct chcr_context) +
4047 sizeof(struct chcr_aead_ctx) +
4048 sizeof(struct chcr_authenc_ctx),
4050 .ivsize = AES_BLOCK_SIZE,
4051 .maxauthsize = SHA224_DIGEST_SIZE,
4052 .setkey = chcr_authenc_setkey,
4053 .setauthsize = chcr_authenc_setauthsize,
4057 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4061 .cra_name = "authenc(hmac(sha384),cbc(aes))",
4063 "authenc-hmac-sha384-cbc-aes-chcr",
4064 .cra_blocksize = AES_BLOCK_SIZE,
4065 .cra_priority = CHCR_AEAD_PRIORITY,
4066 .cra_ctxsize = sizeof(struct chcr_context) +
4067 sizeof(struct chcr_aead_ctx) +
4068 sizeof(struct chcr_authenc_ctx),
4071 .ivsize = AES_BLOCK_SIZE,
4072 .maxauthsize = SHA384_DIGEST_SIZE,
4073 .setkey = chcr_authenc_setkey,
4074 .setauthsize = chcr_authenc_setauthsize,
4078 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_SHA,
4082 .cra_name = "authenc(hmac(sha512),cbc(aes))",
4084 "authenc-hmac-sha512-cbc-aes-chcr",
4085 .cra_blocksize = AES_BLOCK_SIZE,
4086 .cra_priority = CHCR_AEAD_PRIORITY,
4087 .cra_ctxsize = sizeof(struct chcr_context) +
4088 sizeof(struct chcr_aead_ctx) +
4089 sizeof(struct chcr_authenc_ctx),
4092 .ivsize = AES_BLOCK_SIZE,
4093 .maxauthsize = SHA512_DIGEST_SIZE,
4094 .setkey = chcr_authenc_setkey,
4095 .setauthsize = chcr_authenc_setauthsize,
4099 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CBC_NULL,
4103 .cra_name = "authenc(digest_null,cbc(aes))",
4105 "authenc-digest_null-cbc-aes-chcr",
4106 .cra_blocksize = AES_BLOCK_SIZE,
4107 .cra_priority = CHCR_AEAD_PRIORITY,
4108 .cra_ctxsize = sizeof(struct chcr_context) +
4109 sizeof(struct chcr_aead_ctx) +
4110 sizeof(struct chcr_authenc_ctx),
4113 .ivsize = AES_BLOCK_SIZE,
4115 .setkey = chcr_aead_digest_null_setkey,
4116 .setauthsize = chcr_authenc_null_setauthsize,
4120 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4124 .cra_name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
4126 "authenc-hmac-sha1-rfc3686-ctr-aes-chcr",
4128 .cra_priority = CHCR_AEAD_PRIORITY,
4129 .cra_ctxsize = sizeof(struct chcr_context) +
4130 sizeof(struct chcr_aead_ctx) +
4131 sizeof(struct chcr_authenc_ctx),
4134 .ivsize = CTR_RFC3686_IV_SIZE,
4135 .maxauthsize = SHA1_DIGEST_SIZE,
4136 .setkey = chcr_authenc_setkey,
4137 .setauthsize = chcr_authenc_setauthsize,
4141 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4146 .cra_name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
4148 "authenc-hmac-sha256-rfc3686-ctr-aes-chcr",
4150 .cra_priority = CHCR_AEAD_PRIORITY,
4151 .cra_ctxsize = sizeof(struct chcr_context) +
4152 sizeof(struct chcr_aead_ctx) +
4153 sizeof(struct chcr_authenc_ctx),
4156 .ivsize = CTR_RFC3686_IV_SIZE,
4157 .maxauthsize = SHA256_DIGEST_SIZE,
4158 .setkey = chcr_authenc_setkey,
4159 .setauthsize = chcr_authenc_setauthsize,
4163 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4167 .cra_name = "authenc(hmac(sha224),rfc3686(ctr(aes)))",
4169 "authenc-hmac-sha224-rfc3686-ctr-aes-chcr",
4171 .cra_priority = CHCR_AEAD_PRIORITY,
4172 .cra_ctxsize = sizeof(struct chcr_context) +
4173 sizeof(struct chcr_aead_ctx) +
4174 sizeof(struct chcr_authenc_ctx),
4176 .ivsize = CTR_RFC3686_IV_SIZE,
4177 .maxauthsize = SHA224_DIGEST_SIZE,
4178 .setkey = chcr_authenc_setkey,
4179 .setauthsize = chcr_authenc_setauthsize,
4183 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4187 .cra_name = "authenc(hmac(sha384),rfc3686(ctr(aes)))",
4189 "authenc-hmac-sha384-rfc3686-ctr-aes-chcr",
4191 .cra_priority = CHCR_AEAD_PRIORITY,
4192 .cra_ctxsize = sizeof(struct chcr_context) +
4193 sizeof(struct chcr_aead_ctx) +
4194 sizeof(struct chcr_authenc_ctx),
4197 .ivsize = CTR_RFC3686_IV_SIZE,
4198 .maxauthsize = SHA384_DIGEST_SIZE,
4199 .setkey = chcr_authenc_setkey,
4200 .setauthsize = chcr_authenc_setauthsize,
4204 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_SHA,
4208 .cra_name = "authenc(hmac(sha512),rfc3686(ctr(aes)))",
4210 "authenc-hmac-sha512-rfc3686-ctr-aes-chcr",
4212 .cra_priority = CHCR_AEAD_PRIORITY,
4213 .cra_ctxsize = sizeof(struct chcr_context) +
4214 sizeof(struct chcr_aead_ctx) +
4215 sizeof(struct chcr_authenc_ctx),
4218 .ivsize = CTR_RFC3686_IV_SIZE,
4219 .maxauthsize = SHA512_DIGEST_SIZE,
4220 .setkey = chcr_authenc_setkey,
4221 .setauthsize = chcr_authenc_setauthsize,
4225 .type = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_SUB_TYPE_CTR_NULL,
4229 .cra_name = "authenc(digest_null,rfc3686(ctr(aes)))",
4231 "authenc-digest_null-rfc3686-ctr-aes-chcr",
4233 .cra_priority = CHCR_AEAD_PRIORITY,
4234 .cra_ctxsize = sizeof(struct chcr_context) +
4235 sizeof(struct chcr_aead_ctx) +
4236 sizeof(struct chcr_authenc_ctx),
4239 .ivsize = CTR_RFC3686_IV_SIZE,
4241 .setkey = chcr_aead_digest_null_setkey,
4242 .setauthsize = chcr_authenc_null_setauthsize,
4243 }
4244 },
4245 };
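/*
 * Usage from another kernel module (editor's illustration, assuming
 * only the standard crypto API; not part of this driver): once the
 * table above is registered, a consumer binds to the hardware
 * implementation by name and priority, e.g.
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	crypto_aead_setkey(tfm, key, 16);
 *	crypto_aead_setauthsize(tfm, 16);
 *	...
 *	crypto_free_aead(tfm);
 *
 * "gcm-aes-chcr" wins automatically when its cra_priority is highest.
 */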
4248 * chcr_unregister_alg - Deregister crypto algorithms with
4249 *	the kernel framework.
4250 */
4251 static int chcr_unregister_alg(void)
4255 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4256 switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4257 case CRYPTO_ALG_TYPE_ABLKCIPHER:
4258 if (driver_algs[i].is_registered)
4259 crypto_unregister_alg(
4260 &driver_algs[i].alg.crypto);
4261 break;
4262 case CRYPTO_ALG_TYPE_AEAD:
4263 if (driver_algs[i].is_registered)
4264 crypto_unregister_aead(
4265 &driver_algs[i].alg.aead);
4266 break;
4267 case CRYPTO_ALG_TYPE_AHASH:
4268 if (driver_algs[i].is_registered)
4269 crypto_unregister_ahash(
4270 &driver_algs[i].alg.hash);
4271 break;
4272 }
4273 driver_algs[i].is_registered = 0;
4274 }
4275 return 0;
4278 #define SZ_AHASH_CTX sizeof(struct chcr_context)
4279 #define SZ_AHASH_H_CTX (sizeof(struct chcr_context) + sizeof(struct hmac_ctx))
4280 #define SZ_AHASH_REQ_CTX sizeof(struct chcr_ahash_req_ctx)
4283 * chcr_register_alg - Register crypto algorithms with kernel framework.
4285 static int chcr_register_alg(void)
4287 struct crypto_alg ai;
4288 struct ahash_alg *a_hash;
4289 int err = 0, i;
4290 char *name = NULL;
4292 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
4293 if (driver_algs[i].is_registered)
4294 continue;
4295 switch (driver_algs[i].type & CRYPTO_ALG_TYPE_MASK) {
4296 case CRYPTO_ALG_TYPE_ABLKCIPHER:
4297 driver_algs[i].alg.crypto.cra_priority =
4298 CHCR_CRA_PRIORITY;
4299 driver_algs[i].alg.crypto.cra_module = THIS_MODULE;
4300 driver_algs[i].alg.crypto.cra_flags =
4301 CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
4302 CRYPTO_ALG_NEED_FALLBACK;
4303 driver_algs[i].alg.crypto.cra_ctxsize =
4304 sizeof(struct chcr_context) +
4305 sizeof(struct ablk_ctx);
4306 driver_algs[i].alg.crypto.cra_alignmask = 0;
4307 driver_algs[i].alg.crypto.cra_type =
4308 &crypto_ablkcipher_type;
4309 err = crypto_register_alg(&driver_algs[i].alg.crypto);
4310 name = driver_algs[i].alg.crypto.cra_driver_name;
4311 break;
4312 case CRYPTO_ALG_TYPE_AEAD:
4313 driver_algs[i].alg.aead.base.cra_flags =
4314 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK;
4315 driver_algs[i].alg.aead.encrypt = chcr_aead_encrypt;
4316 driver_algs[i].alg.aead.decrypt = chcr_aead_decrypt;
4317 driver_algs[i].alg.aead.init = chcr_aead_cra_init;
4318 driver_algs[i].alg.aead.exit = chcr_aead_cra_exit;
4319 driver_algs[i].alg.aead.base.cra_module = THIS_MODULE;
4320 err = crypto_register_aead(&driver_algs[i].alg.aead);
4321 name = driver_algs[i].alg.aead.base.cra_driver_name;
4322 break;
4323 case CRYPTO_ALG_TYPE_AHASH:
4324 a_hash = &driver_algs[i].alg.hash;
4325 a_hash->update = chcr_ahash_update;
4326 a_hash->final = chcr_ahash_final;
4327 a_hash->finup = chcr_ahash_finup;
4328 a_hash->digest = chcr_ahash_digest;
4329 a_hash->export = chcr_ahash_export;
4330 a_hash->import = chcr_ahash_import;
4331 a_hash->halg.statesize = SZ_AHASH_REQ_CTX;
4332 a_hash->halg.base.cra_priority = CHCR_CRA_PRIORITY;
4333 a_hash->halg.base.cra_module = THIS_MODULE;
4334 a_hash->halg.base.cra_flags = CRYPTO_ALG_ASYNC;
4335 a_hash->halg.base.cra_alignmask = 0;
4336 a_hash->halg.base.cra_exit = NULL;
4338 if (driver_algs[i].type == CRYPTO_ALG_TYPE_HMAC) {
4339 a_hash->halg.base.cra_init = chcr_hmac_cra_init;
4340 a_hash->halg.base.cra_exit = chcr_hmac_cra_exit;
4341 a_hash->init = chcr_hmac_init;
4342 a_hash->setkey = chcr_ahash_setkey;
4343 a_hash->halg.base.cra_ctxsize = SZ_AHASH_H_CTX;
4344 } else {
4345 a_hash->init = chcr_sha_init;
4346 a_hash->halg.base.cra_ctxsize = SZ_AHASH_CTX;
4347 a_hash->halg.base.cra_init = chcr_sha_cra_init;
4348 }
4349 err = crypto_register_ahash(&driver_algs[i].alg.hash);
4350 ai = driver_algs[i].alg.hash.halg.base;
4351 name = ai.cra_driver_name;
4352 break;
4353 }
4354 if (err) {
4355 pr_err("chcr : %s : Algorithm registration failed\n",
4356 name);
4357 goto register_err;
4358 }
4359 driver_algs[i].is_registered = 1;
4360 }
4361 return 0;
4364 register_err:
4365 chcr_unregister_alg();
4366 return err;
4370 * start_crypto - Register the crypto algorithms.
4371 * This should be called once when the first device comes up. After this,
4372 * the kernel will start calling the driver APIs for crypto operations.
4374 int start_crypto(void)
4376 return chcr_register_alg();
4380 * stop_crypto - Deregister all the crypto algorithms with kernel.
4381 * This should be called once when the last device goes down. After this
4382 * the kernel will not call the driver APIs for crypto operations.
4384 int stop_crypto(void)
4386 chcr_unregister_alg();
4387 return 0;