// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright 2015-2016 Freescale Semiconductor Inc.
 * Copyright 2017-2019 NXP
 */

#include "caamalg_qi2.h"
#include "dpseci_cmd.h"
#include "desc_constr.h"
#include "sg_sw_sec4.h"
#include "sg_sw_qm2.h"
#include "caamalg_desc.h"
#include "caamhash_desc.h"
#include "dpseci-debugfs.h"
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
#include <soc/fsl/dpaa2-fd.h>
#include <crypto/xts.h>
#include <asm/unaligned.h>

#define CAAM_CRA_PRIORITY	2000
/* max key is sum of AES_MAX_KEY_SIZE, RFC3686 nonce size, max split key size */
#define CAAM_MAX_KEY_SIZE	(AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE + \
				 SHA512_DIGEST_SIZE * 2)
/*
 * This is a cache of buffers, from which the users of the CAAM QI driver
 * can allocate short buffers. It's speedier than doing kmalloc on the
 * hotpath.
 * NOTE: A more elegant solution would be to have some headroom in the frames
 *       being processed. This could be added by the dpaa2-eth driver, but it
 *       would pose a problem for userspace application processing, which
 *       cannot know of this limitation. So for now, this will work.
 * NOTE: The memcache is SMP-safe; no explicit locking is needed here.
 */
static struct kmem_cache *qi_cache;
struct caam_alg_entry {
	struct device *dev;
	int class1_alg_type;
	int class2_alg_type;
	bool rfc3686;
	bool geniv;
	bool nodkp;
};
struct caam_aead_alg {
	struct aead_alg aead;
	struct caam_alg_entry caam;
	bool registered;
};
struct caam_skcipher_alg {
	struct skcipher_alg skcipher;
	struct caam_alg_entry caam;
	bool registered;
};
/**
 * struct caam_ctx - per-session context
 * @flc: Flow Contexts array
 * @key: [authentication key], encryption key
 * @flc_dma: I/O virtual addresses of the Flow Contexts
 * @key_dma: I/O virtual address of the key
 * @dir: DMA direction for mapping key and Flow Contexts
 * @dev: dpseci device
 * @adata: authentication algorithm details
 * @cdata: encryption algorithm details
 * @authsize: authentication tag (a.k.a. ICV / MAC) size
 * @xts_key_fallback: true if fallback tfm needs to be used due
 *                    to unsupported xts key lengths
 * @fallback: xts fallback tfm
 */
struct caam_ctx {
	struct caam_flc flc[NUM_OP];
	u8 key[CAAM_MAX_KEY_SIZE];
	dma_addr_t flc_dma[NUM_OP];
	dma_addr_t key_dma;
	enum dma_data_direction dir;
	struct device *dev;
	struct alginfo adata;
	struct alginfo cdata;
	unsigned int authsize;
	bool xts_key_fallback;
	struct crypto_skcipher *fallback;
};
static void *dpaa2_caam_iova_to_virt(struct dpaa2_caam_priv *priv,
				     dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = priv->domain ? iommu_iova_to_phys(priv->domain, iova_addr) :
				   iova_addr;

	return phys_to_virt(phys_addr);
}
/**
 * qi_cache_zalloc - Allocate buffers from CAAM-QI cache
 * @flags: flags that would be used for the equivalent kmalloc(..) call
 *
 * Allocate data on the hotpath. Instead of using kzalloc, one can use the
 * services of the CAAM QI memory cache (backed by kmem_cache). The buffers
 * have a size of CAAM_QI_MEMCACHE_SIZE, which should be sufficient for
 * hosting 16 SG entries.
 *
 * Returns a pointer to a retrieved buffer on success or NULL on failure.
 */
static inline void *qi_cache_zalloc(gfp_t flags)
{
	return kmem_cache_zalloc(qi_cache, flags);
}
/**
 * qi_cache_free - Free buffers allocated from the CAAM-QI cache
 * @obj: buffer previously allocated by qi_cache_zalloc
 *
 * No checking is done; the call is passed straight through to
 * kmem_cache_free(...).
 */
static inline void qi_cache_free(void *obj)
{
	kmem_cache_free(qi_cache, obj);
}
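/*
 * Example hotpath usage of the cache (illustrative sketch, not part of the
 * driver; the real callers are the *_edesc_alloc() functions below):
 *
 *	struct aead_edesc *edesc;
 *
 *	edesc = qi_cache_zalloc(GFP_ATOMIC);
 *	if (unlikely(!edesc))
 *		return ERR_PTR(-ENOMEM);
 *	...
 *	qi_cache_free(edesc);
 */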
static struct caam_request *to_caam_req(struct crypto_async_request *areq)
{
	switch (crypto_tfm_alg_type(areq->tfm)) {
	case CRYPTO_ALG_TYPE_SKCIPHER:
		return skcipher_request_ctx_dma(skcipher_request_cast(areq));
	case CRYPTO_ALG_TYPE_AEAD:
		return aead_request_ctx_dma(
			container_of(areq, struct aead_request, base));
	case CRYPTO_ALG_TYPE_AHASH:
		return ahash_request_ctx_dma(ahash_request_cast(areq));
	default:
		return ERR_PTR(-EINVAL);
	}
}
static void caam_unmap(struct device *dev, struct scatterlist *src,
		       struct scatterlist *dst, int src_nents,
		       int dst_nents, dma_addr_t iv_dma, int ivsize,
		       enum dma_data_direction iv_dir, dma_addr_t qm_sg_dma,
		       int qm_sg_bytes)
{
	if (dst != src) {
		if (src_nents)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);
		if (dst_nents)
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}

	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, iv_dir);

	if (qm_sg_dma)
		dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE);
}
static int aead_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
						 typeof(*alg), aead);
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct device *dev = ctx->dev;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct caam_flc *flc;
	u32 *desc;
	u32 ctx1_iv_off = 0;
	u32 *nonce = NULL;
	unsigned int data_len[2];
	u32 inl_mask;
	const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) ==
			       OP_ALG_AAI_CTR_MOD128);
	const bool is_rfc3686 = alg->caam.rfc3686;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;
	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes):
	 * CONTEXT1[255:128] = IV
	 */
	if (ctr_mode)
		ctx1_iv_off = 16;

	/* RFC3686 specific: CONTEXT1[255:128] = {NONCE, IV, COUNTER} */
	if (is_rfc3686) {
		ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
		nonce = (u32 *)((void *)ctx->key + ctx->adata.keylen_pad +
				ctx->cdata.keylen - CTR_RFC3686_NONCE_SIZE);
	}
	/*
	 * In case |user key| > |derived key|, using DKP<imm,imm> would result
	 * in invalid opcodes (last bytes of user key) in the resulting
	 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
	 * addresses are needed.
	 */
	ctx->adata.key_virt = ctx->key;
	ctx->adata.key_dma = ctx->key_dma;

	ctx->cdata.key_virt = ctx->key + ctx->adata.keylen_pad;
	ctx->cdata.key_dma = ctx->key_dma + ctx->adata.keylen_pad;

	data_len[0] = ctx->adata.keylen_pad;
	data_len[1] = ctx->cdata.keylen;
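	/*
	 * Resulting ctx->key layout (sketch; sizes depend on the chosen
	 * algorithms):
	 *
	 *	| split auth key (padded)  | encryption key   |
	 *	|<--- adata.keylen_pad --->|<- cdata.keylen ->|
	 *
	 * data_len[] mirrors these two lengths for the inline/pointer
	 * decision below.
	 */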
	/* aead_encrypt shared descriptor */
	if (desc_inline_query((alg->caam.geniv ? DESC_QI_AEAD_GIVENC_LEN :
			       DESC_QI_AEAD_ENC_LEN) +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);
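	/*
	 * Illustrative reading of inl_mask (assuming bit i corresponds to
	 * data_len[i], i.e. bit 0 is the auth key and bit 1 the cipher key):
	 * a set bit means the key fits inline in the shared descriptor,
	 * a cleared bit means it must be referenced through its DMA address.
	 *
	 *	inl_mask == 3: both keys inline
	 *	inl_mask == 2: auth key by pointer, cipher key inline
	 */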
	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;

	if (alg->caam.geniv)
		cnstr_shdsc_aead_givencap(desc, &ctx->cdata, &ctx->adata,
					  ivsize, ctx->authsize, is_rfc3686,
					  nonce, ctx1_iv_off, true,
					  priv->sec_attr.era);
	else
		cnstr_shdsc_aead_encap(desc, &ctx->cdata, &ctx->adata,
				       ivsize, ctx->authsize, is_rfc3686, nonce,
				       ctx1_iv_off, true, priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);
	/* aead_decrypt shared descriptor */
	if (desc_inline_query(DESC_QI_AEAD_DEC_LEN +
			      (is_rfc3686 ? DESC_AEAD_CTR_RFC3686_LEN : 0),
			      DESC_JOB_IO_LEN, data_len, &inl_mask,
			      ARRAY_SIZE(data_len)) < 0)
		return -EINVAL;

	ctx->adata.key_inline = !!(inl_mask & 1);
	ctx->cdata.key_inline = !!(inl_mask & 2);
	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_aead_decap(desc, &ctx->cdata, &ctx->adata,
			       ivsize, ctx->authsize, alg->caam.geniv,
			       is_rfc3686, nonce, ctx1_iv_off, true,
			       priv->sec_attr.era);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
static int aead_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);

	ctx->authsize = authsize;
	aead_set_sh_desc(authenc);

	return 0;
}
static int aead_setkey(struct crypto_aead *aead, const u8 *key,
		       unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	dev_dbg(dev, "keylen %d enckeylen %d authkeylen %d\n",
		keys.authkeylen + keys.enckeylen, keys.enckeylen,
		keys.authkeylen);
	print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);

	ctx->adata.keylen = keys.authkeylen;
	ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
					      OP_ALG_ALGSEL_MASK);

	if (ctx->adata.keylen_pad + keys.enckeylen > CAAM_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen);
	dma_sync_single_for_device(dev, ctx->key_dma, ctx->adata.keylen_pad +
				   keys.enckeylen, ctx->dir);
	print_hex_dump_debug("ctx.key@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key,
			     ctx->adata.keylen_pad + keys.enckeylen, 1);

	ctx->cdata.keylen = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_set_sh_desc(aead);
badkey:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}
static int des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
			    unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (err)
		goto out;

	err = -EINVAL;
	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
		goto out;

	err = crypto_des3_ede_verify_key(crypto_aead_tfm(aead), keys.enckey) ?:
	      aead_setkey(aead, key, keylen);
out:
	memzero_explicit(&keys, sizeof(keys));
	return err;
}
349 static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
352 struct crypto_aead *aead = crypto_aead_reqtfm(req);
353 struct caam_request *req_ctx = aead_request_ctx_dma(req);
354 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
355 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
356 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
357 struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead),
359 struct device *dev = ctx->dev;
360 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
361 GFP_KERNEL : GFP_ATOMIC;
362 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
363 int src_len, dst_len = 0;
364 struct aead_edesc *edesc;
365 dma_addr_t qm_sg_dma, iv_dma = 0;
367 unsigned int authsize = ctx->authsize;
368 int qm_sg_index = 0, qm_sg_nents = 0, qm_sg_bytes;
370 struct dpaa2_sg_entry *sg_table;
372 /* allocate space for base edesc, link tables and IV */
373 edesc = qi_cache_zalloc(GFP_DMA | flags);
374 if (unlikely(!edesc)) {
375 dev_err(dev, "could not allocate extended descriptor\n");
376 return ERR_PTR(-ENOMEM);
379 if (unlikely(req->dst != req->src)) {
380 src_len = req->assoclen + req->cryptlen;
381 dst_len = src_len + (encrypt ? authsize : (-authsize));
383 src_nents = sg_nents_for_len(req->src, src_len);
384 if (unlikely(src_nents < 0)) {
385 dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
387 qi_cache_free(edesc);
388 return ERR_PTR(src_nents);
391 dst_nents = sg_nents_for_len(req->dst, dst_len);
392 if (unlikely(dst_nents < 0)) {
393 dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
395 qi_cache_free(edesc);
396 return ERR_PTR(dst_nents);
400 mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
402 if (unlikely(!mapped_src_nents)) {
403 dev_err(dev, "unable to map source\n");
404 qi_cache_free(edesc);
405 return ERR_PTR(-ENOMEM);
408 mapped_src_nents = 0;
412 mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
414 if (unlikely(!mapped_dst_nents)) {
415 dev_err(dev, "unable to map destination\n");
416 dma_unmap_sg(dev, req->src, src_nents,
418 qi_cache_free(edesc);
419 return ERR_PTR(-ENOMEM);
422 mapped_dst_nents = 0;
425 src_len = req->assoclen + req->cryptlen +
426 (encrypt ? authsize : 0);
428 src_nents = sg_nents_for_len(req->src, src_len);
429 if (unlikely(src_nents < 0)) {
430 dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
432 qi_cache_free(edesc);
433 return ERR_PTR(src_nents);
436 mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
438 if (unlikely(!mapped_src_nents)) {
439 dev_err(dev, "unable to map source\n");
440 qi_cache_free(edesc);
441 return ERR_PTR(-ENOMEM);
445 if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv)
446 ivsize = crypto_aead_ivsize(aead);
	/*
	 * Create S/G table: req->assoclen, [IV,] req->src [, req->dst].
	 * Input is not contiguous.
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries. Logic:
	 * if (src != dst && output S/G)
	 *	pad output S/G, if needed
	 * else if (src == dst && S/G)
	 *	overlapping S/Gs; pad one of them
	 * else if (input S/G) ...
	 *	pad input S/G, if needed
	 */
	qm_sg_nents = 1 + !!ivsize + mapped_src_nents;
	if (mapped_dst_nents > 1)
		qm_sg_nents += pad_sg_nents(mapped_dst_nents);
	else if ((req->src == req->dst) && (mapped_src_nents > 1))
		qm_sg_nents = max(pad_sg_nents(qm_sg_nents),
				  1 + !!ivsize +
				  pad_sg_nents(mapped_src_nents));
	else
		qm_sg_nents = pad_sg_nents(qm_sg_nents);
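	/*
	 * Worked example (assuming pad_sg_nents() rounds up to a multiple of
	 * 4, the HW read burst): src == dst, ivsize = 16, mapped_src_nents = 3
	 * gives qm_sg_nents = 1 (assoclen) + 1 (IV) + 3 (src) = 5, padded to
	 * 8, so the engine can never read past the end of the table.
	 */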
	sg_table = &edesc->sgt[0];
	qm_sg_bytes = qm_sg_nents * sizeof(*sg_table);
	if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize >
		     CAAM_QI_MEMCACHE_SIZE)) {
		dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
			qm_sg_nents, ivsize);
		caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
			   0, DMA_NONE, 0, 0);
		qi_cache_free(edesc);
		return ERR_PTR(-ENOMEM);
	}
483 u8 *iv = (u8 *)(sg_table + qm_sg_nents);
485 /* Make sure IV is located in a DMAable area */
486 memcpy(iv, req->iv, ivsize);
488 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
489 if (dma_mapping_error(dev, iv_dma)) {
490 dev_err(dev, "unable to map IV\n");
491 caam_unmap(dev, req->src, req->dst, src_nents,
492 dst_nents, 0, 0, DMA_NONE, 0, 0);
493 qi_cache_free(edesc);
494 return ERR_PTR(-ENOMEM);
498 edesc->src_nents = src_nents;
499 edesc->dst_nents = dst_nents;
500 edesc->iv_dma = iv_dma;
	if ((alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK) ==
	    OP_ALG_ALGSEL_CHACHA20 && ivsize != CHACHAPOLY_IV_SIZE)
		/*
		 * The associated data comes already with the IV but we need
		 * to skip it when we authenticate or encrypt...
		 */
		edesc->assoclen = cpu_to_caam32(req->assoclen - ivsize);
	else
		edesc->assoclen = cpu_to_caam32(req->assoclen);
511 edesc->assoclen_dma = dma_map_single(dev, &edesc->assoclen, 4,
513 if (dma_mapping_error(dev, edesc->assoclen_dma)) {
514 dev_err(dev, "unable to map assoclen\n");
515 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
516 iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
517 qi_cache_free(edesc);
518 return ERR_PTR(-ENOMEM);
521 dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0);
524 dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0);
527 sg_to_qm_sg_last(req->src, src_len, sg_table + qm_sg_index, 0);
528 qm_sg_index += mapped_src_nents;
530 if (mapped_dst_nents > 1)
531 sg_to_qm_sg_last(req->dst, dst_len, sg_table + qm_sg_index, 0);
533 qm_sg_dma = dma_map_single(dev, sg_table, qm_sg_bytes, DMA_TO_DEVICE);
534 if (dma_mapping_error(dev, qm_sg_dma)) {
535 dev_err(dev, "unable to map S/G table\n");
536 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
537 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
538 iv_dma, ivsize, DMA_TO_DEVICE, 0, 0);
539 qi_cache_free(edesc);
540 return ERR_PTR(-ENOMEM);
543 edesc->qm_sg_dma = qm_sg_dma;
544 edesc->qm_sg_bytes = qm_sg_bytes;
546 out_len = req->assoclen + req->cryptlen +
547 (encrypt ? ctx->authsize : (-ctx->authsize));
548 in_len = 4 + ivsize + req->assoclen + req->cryptlen;
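	/*
	 * Worked example for the frame lengths (values are illustrative):
	 * AES-GCM encrypt with assoclen = 16, cryptlen = 64, ivsize = 12,
	 * authsize = 16:
	 *	in_len  = 4 + 12 + 16 + 64 = 96 bytes consumed
	 *	out_len = 16 + 64 + 16 = 96 bytes produced
	 * (the leading 4 bytes are the assoclen word mapped above)
	 */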
550 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
551 dpaa2_fl_set_final(in_fle, true);
552 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
553 dpaa2_fl_set_addr(in_fle, qm_sg_dma);
554 dpaa2_fl_set_len(in_fle, in_len);
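	/*
	 * The two frame list entries form a DPAA2 compound frame:
	 * req_ctx->fd_flt[0] is the output entry and req_ctx->fd_flt[1] the
	 * final input entry (see the in_fle/out_fle pointers at the top of
	 * this function). The engine reads the input through the S/G table
	 * just mapped and writes the result through the output entry set up
	 * below.
	 */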
	if (req->dst == req->src) {
		if (mapped_src_nents == 1) {
			dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
			dpaa2_fl_set_addr(out_fle, sg_dma_address(req->src));
		} else {
			dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
			dpaa2_fl_set_addr(out_fle, qm_sg_dma +
					  (1 + !!ivsize) * sizeof(*sg_table));
		}
	} else if (!mapped_dst_nents) {
		/*
		 * crypto engine requires the output entry to be present when
		 * "frame list" FD is used.
		 * Since engine does not support FMT=2'b11 (unused entry type),
		 * leaving out_fle zeroized is the best option.
		 */
		goto skip_out_fle;
	} else if (mapped_dst_nents == 1) {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(out_fle, sg_dma_address(req->dst));
	} else {
		dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(out_fle, qm_sg_dma + qm_sg_index *
				  sizeof(*sg_table));
	}

	dpaa2_fl_set_len(out_fle, out_len);

skip_out_fle:
	return edesc;
}
static int chachapoly_set_sh_desc(struct crypto_aead *aead)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	struct device *dev = ctx->dev;
	struct caam_flc *flc;
	u32 *desc;

	if (!ctx->cdata.keylen || !ctx->authsize)
		return 0;

	flc = &ctx->flc[ENCRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, true, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	flc = &ctx->flc[DECRYPT];
	desc = flc->sh_desc;
	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
			       ctx->authsize, false, true);
	flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
	dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
				   sizeof(flc->flc) + desc_bytes(desc),
				   ctx->dir);

	return 0;
}
static int chachapoly_setauthsize(struct crypto_aead *aead,
				  unsigned int authsize)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);

	if (authsize != POLY1305_DIGEST_SIZE)
		return -EINVAL;

	ctx->authsize = authsize;
	return chachapoly_set_sh_desc(aead);
}
static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
			     unsigned int keylen)
{
	struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;

	if (keylen != CHACHA_KEY_SIZE + saltlen)
		return -EINVAL;

	ctx->cdata.key_virt = key;
	ctx->cdata.keylen = keylen - saltlen;

	return chachapoly_set_sh_desc(aead);
}
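/*
 * Key material layout accepted above (illustrative): for rfc7539esp the
 * generic IV is 8 bytes, so saltlen = CHACHAPOLY_IV_SIZE - 8 = 4 and a
 * 36-byte input is split as
 *
 *	key = { CHACHA20_KEY (32 bytes) | SALT (4 bytes) }
 *
 * while plain rfc7539 uses the full 12-byte IV, saltlen = 0 and keylen = 32.
 */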
648 static int gcm_set_sh_desc(struct crypto_aead *aead)
650 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
651 struct device *dev = ctx->dev;
652 unsigned int ivsize = crypto_aead_ivsize(aead);
653 struct caam_flc *flc;
655 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
658 if (!ctx->cdata.keylen || !ctx->authsize)
	/*
	 * AES GCM encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
666 if (rem_bytes >= DESC_QI_GCM_ENC_LEN) {
667 ctx->cdata.key_inline = true;
668 ctx->cdata.key_virt = ctx->key;
670 ctx->cdata.key_inline = false;
671 ctx->cdata.key_dma = ctx->key_dma;
674 flc = &ctx->flc[ENCRYPT];
676 cnstr_shdsc_gcm_encap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
677 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
678 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
679 sizeof(flc->flc) + desc_bytes(desc),
	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
686 if (rem_bytes >= DESC_QI_GCM_DEC_LEN) {
687 ctx->cdata.key_inline = true;
688 ctx->cdata.key_virt = ctx->key;
690 ctx->cdata.key_inline = false;
691 ctx->cdata.key_dma = ctx->key_dma;
694 flc = &ctx->flc[DECRYPT];
696 cnstr_shdsc_gcm_decap(desc, &ctx->cdata, ivsize, ctx->authsize, true);
697 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
698 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
699 sizeof(flc->flc) + desc_bytes(desc),
705 static int gcm_setauthsize(struct crypto_aead *authenc, unsigned int authsize)
707 struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
710 err = crypto_gcm_check_authsize(authsize);
714 ctx->authsize = authsize;
715 gcm_set_sh_desc(authenc);
720 static int gcm_setkey(struct crypto_aead *aead,
721 const u8 *key, unsigned int keylen)
723 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
724 struct device *dev = ctx->dev;
727 ret = aes_check_keylen(keylen);
730 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
731 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
733 memcpy(ctx->key, key, keylen);
734 dma_sync_single_for_device(dev, ctx->key_dma, keylen, ctx->dir);
735 ctx->cdata.keylen = keylen;
737 return gcm_set_sh_desc(aead);
740 static int rfc4106_set_sh_desc(struct crypto_aead *aead)
742 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
743 struct device *dev = ctx->dev;
744 unsigned int ivsize = crypto_aead_ivsize(aead);
745 struct caam_flc *flc;
747 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
750 if (!ctx->cdata.keylen || !ctx->authsize)
753 ctx->cdata.key_virt = ctx->key;
	/*
	 * RFC4106 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
760 if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) {
761 ctx->cdata.key_inline = true;
763 ctx->cdata.key_inline = false;
764 ctx->cdata.key_dma = ctx->key_dma;
767 flc = &ctx->flc[ENCRYPT];
769 cnstr_shdsc_rfc4106_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
771 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
772 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
773 sizeof(flc->flc) + desc_bytes(desc),
	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
780 if (rem_bytes >= DESC_QI_RFC4106_DEC_LEN) {
781 ctx->cdata.key_inline = true;
783 ctx->cdata.key_inline = false;
784 ctx->cdata.key_dma = ctx->key_dma;
787 flc = &ctx->flc[DECRYPT];
789 cnstr_shdsc_rfc4106_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
791 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
792 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
793 sizeof(flc->flc) + desc_bytes(desc),
799 static int rfc4106_setauthsize(struct crypto_aead *authenc,
800 unsigned int authsize)
802 struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
805 err = crypto_rfc4106_check_authsize(authsize);
809 ctx->authsize = authsize;
810 rfc4106_set_sh_desc(authenc);
815 static int rfc4106_setkey(struct crypto_aead *aead,
816 const u8 *key, unsigned int keylen)
818 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
819 struct device *dev = ctx->dev;
822 ret = aes_check_keylen(keylen - 4);
826 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
827 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
835 dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
838 return rfc4106_set_sh_desc(aead);
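/*
 * Example rfc4106 key layout (illustrative): a 20-byte input carries an
 * AES-128 key followed by the 4-byte salt,
 *
 *	key = { AES_KEY (16 bytes) | SALT (4 bytes) }
 *
 * so ctx->cdata.keylen ends up as 16 and only the AES key proper is synced
 * to the device.
 */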
841 static int rfc4543_set_sh_desc(struct crypto_aead *aead)
843 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
844 struct device *dev = ctx->dev;
845 unsigned int ivsize = crypto_aead_ivsize(aead);
846 struct caam_flc *flc;
848 int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN -
851 if (!ctx->cdata.keylen || !ctx->authsize)
854 ctx->cdata.key_virt = ctx->key;
	/*
	 * RFC4543 encrypt shared descriptor
	 * Job Descriptor and Shared Descriptor
	 * must fit into the 64-word Descriptor h/w Buffer
	 */
861 if (rem_bytes >= DESC_QI_RFC4543_ENC_LEN) {
862 ctx->cdata.key_inline = true;
864 ctx->cdata.key_inline = false;
865 ctx->cdata.key_dma = ctx->key_dma;
868 flc = &ctx->flc[ENCRYPT];
870 cnstr_shdsc_rfc4543_encap(desc, &ctx->cdata, ivsize, ctx->authsize,
872 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
873 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
874 sizeof(flc->flc) + desc_bytes(desc),
	/*
	 * Job Descriptor and Shared Descriptors
	 * must all fit into the 64-word Descriptor h/w Buffer
	 */
881 if (rem_bytes >= DESC_QI_RFC4543_DEC_LEN) {
882 ctx->cdata.key_inline = true;
884 ctx->cdata.key_inline = false;
885 ctx->cdata.key_dma = ctx->key_dma;
888 flc = &ctx->flc[DECRYPT];
890 cnstr_shdsc_rfc4543_decap(desc, &ctx->cdata, ivsize, ctx->authsize,
892 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
893 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
894 sizeof(flc->flc) + desc_bytes(desc),
900 static int rfc4543_setauthsize(struct crypto_aead *authenc,
901 unsigned int authsize)
903 struct caam_ctx *ctx = crypto_aead_ctx_dma(authenc);
908 ctx->authsize = authsize;
909 rfc4543_set_sh_desc(authenc);
914 static int rfc4543_setkey(struct crypto_aead *aead,
915 const u8 *key, unsigned int keylen)
917 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
918 struct device *dev = ctx->dev;
921 ret = aes_check_keylen(keylen - 4);
925 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
926 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
	memcpy(ctx->key, key, keylen);

	/*
	 * The last four bytes of the key material are used as the salt value
	 * in the nonce. Update the AES key length.
	 */
	ctx->cdata.keylen = keylen - 4;
934 dma_sync_single_for_device(dev, ctx->key_dma, ctx->cdata.keylen,
937 return rfc4543_set_sh_desc(aead);
940 static int skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
941 unsigned int keylen, const u32 ctx1_iv_off)
943 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
944 struct caam_skcipher_alg *alg =
945 container_of(crypto_skcipher_alg(skcipher),
946 struct caam_skcipher_alg, skcipher);
947 struct device *dev = ctx->dev;
948 struct caam_flc *flc;
949 unsigned int ivsize = crypto_skcipher_ivsize(skcipher);
951 const bool is_rfc3686 = alg->caam.rfc3686;
953 print_hex_dump_debug("key in @" __stringify(__LINE__)": ",
954 DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1);
956 ctx->cdata.keylen = keylen;
957 ctx->cdata.key_virt = key;
958 ctx->cdata.key_inline = true;
960 /* skcipher_encrypt shared descriptor */
961 flc = &ctx->flc[ENCRYPT];
963 cnstr_shdsc_skcipher_encap(desc, &ctx->cdata, ivsize, is_rfc3686,
965 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
966 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
967 sizeof(flc->flc) + desc_bytes(desc),
970 /* skcipher_decrypt shared descriptor */
971 flc = &ctx->flc[DECRYPT];
973 cnstr_shdsc_skcipher_decap(desc, &ctx->cdata, ivsize, is_rfc3686,
975 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
976 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
977 sizeof(flc->flc) + desc_bytes(desc),
983 static int aes_skcipher_setkey(struct crypto_skcipher *skcipher,
984 const u8 *key, unsigned int keylen)
988 err = aes_check_keylen(keylen);
992 return skcipher_setkey(skcipher, key, keylen, 0);
static int rfc3686_skcipher_setkey(struct crypto_skcipher *skcipher,
				   const u8 *key, unsigned int keylen)
{
	u32 ctx1_iv_off;
	int err;

	/*
	 * RFC3686 specific:
	 *	| CONTEXT1[255:128] = {NONCE, IV, COUNTER}
	 *	| *key = {KEY, NONCE}
	 */
	ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE;
	keylen -= CTR_RFC3686_NONCE_SIZE;
1009 err = aes_check_keylen(keylen);
1013 return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
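/*
 * Example rfc3686 key layout (illustrative): a 20-byte input carries an
 * AES-128 key followed by the 4-byte nonce,
 *
 *	key = { AES_KEY (16 bytes) | NONCE (4 bytes) }
 *
 * keylen is trimmed to 16 before the AES key length check, while the nonce
 * stays at the end of the buffer for the shared descriptor to pick up.
 */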
1016 static int ctr_skcipher_setkey(struct crypto_skcipher *skcipher,
1017 const u8 *key, unsigned int keylen)
	/*
	 * AES-CTR needs to load IV in CONTEXT1 reg
	 * at an offset of 128 bits (16 bytes):
	 * CONTEXT1[255:128] = IV
	 */
	ctx1_iv_off = 16;
1029 err = aes_check_keylen(keylen);
1033 return skcipher_setkey(skcipher, key, keylen, ctx1_iv_off);
1036 static int chacha20_skcipher_setkey(struct crypto_skcipher *skcipher,
1037 const u8 *key, unsigned int keylen)
1039 if (keylen != CHACHA_KEY_SIZE)
1042 return skcipher_setkey(skcipher, key, keylen, 0);
1045 static int des_skcipher_setkey(struct crypto_skcipher *skcipher,
1046 const u8 *key, unsigned int keylen)
1048 return verify_skcipher_des_key(skcipher, key) ?:
1049 skcipher_setkey(skcipher, key, keylen, 0);
1052 static int des3_skcipher_setkey(struct crypto_skcipher *skcipher,
1053 const u8 *key, unsigned int keylen)
1055 return verify_skcipher_des3_key(skcipher, key) ?:
1056 skcipher_setkey(skcipher, key, keylen, 0);
1059 static int xts_skcipher_setkey(struct crypto_skcipher *skcipher, const u8 *key,
1060 unsigned int keylen)
1062 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
1063 struct device *dev = ctx->dev;
1064 struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
1065 struct caam_flc *flc;
	err = xts_verify_key(skcipher, key, keylen);
	if (err) {
		dev_dbg(dev, "key size mismatch\n");
		return err;
	}

	if (keylen != 2 * AES_KEYSIZE_128 && keylen != 2 * AES_KEYSIZE_256)
		ctx->xts_key_fallback = true;
	if (priv->sec_attr.era <= 8 || ctx->xts_key_fallback) {
		err = crypto_skcipher_setkey(ctx->fallback, key, keylen);
		if (err)
			return err;
	}
1084 ctx->cdata.keylen = keylen;
1085 ctx->cdata.key_virt = key;
1086 ctx->cdata.key_inline = true;
1088 /* xts_skcipher_encrypt shared descriptor */
1089 flc = &ctx->flc[ENCRYPT];
1090 desc = flc->sh_desc;
1091 cnstr_shdsc_xts_skcipher_encap(desc, &ctx->cdata);
1092 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
1093 dma_sync_single_for_device(dev, ctx->flc_dma[ENCRYPT],
1094 sizeof(flc->flc) + desc_bytes(desc),
1097 /* xts_skcipher_decrypt shared descriptor */
1098 flc = &ctx->flc[DECRYPT];
1099 desc = flc->sh_desc;
1100 cnstr_shdsc_xts_skcipher_decap(desc, &ctx->cdata);
1101 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
1102 dma_sync_single_for_device(dev, ctx->flc_dma[DECRYPT],
1103 sizeof(flc->flc) + desc_bytes(desc),
1109 static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req)
1111 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1112 struct caam_request *req_ctx = skcipher_request_ctx_dma(req);
1113 struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
1114 struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
1115 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
1116 struct device *dev = ctx->dev;
1117 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
1118 GFP_KERNEL : GFP_ATOMIC;
1119 int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0;
1120 struct skcipher_edesc *edesc;
1123 int ivsize = crypto_skcipher_ivsize(skcipher);
1124 int dst_sg_idx, qm_sg_ents, qm_sg_bytes;
1125 struct dpaa2_sg_entry *sg_table;
1127 src_nents = sg_nents_for_len(req->src, req->cryptlen);
1128 if (unlikely(src_nents < 0)) {
1129 dev_err(dev, "Insufficient bytes (%d) in src S/G\n",
1131 return ERR_PTR(src_nents);
1134 if (unlikely(req->dst != req->src)) {
1135 dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
1136 if (unlikely(dst_nents < 0)) {
1137 dev_err(dev, "Insufficient bytes (%d) in dst S/G\n",
1139 return ERR_PTR(dst_nents);
1142 mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
1144 if (unlikely(!mapped_src_nents)) {
1145 dev_err(dev, "unable to map source\n");
1146 return ERR_PTR(-ENOMEM);
1149 mapped_dst_nents = dma_map_sg(dev, req->dst, dst_nents,
1151 if (unlikely(!mapped_dst_nents)) {
1152 dev_err(dev, "unable to map destination\n");
1153 dma_unmap_sg(dev, req->src, src_nents, DMA_TO_DEVICE);
1154 return ERR_PTR(-ENOMEM);
1157 mapped_src_nents = dma_map_sg(dev, req->src, src_nents,
1159 if (unlikely(!mapped_src_nents)) {
1160 dev_err(dev, "unable to map source\n");
1161 return ERR_PTR(-ENOMEM);
1165 qm_sg_ents = 1 + mapped_src_nents;
1166 dst_sg_idx = qm_sg_ents;
	/*
	 * Input, output HW S/G tables: [IV, src][dst, IV]
	 * IV entries point to the same buffer
	 * If src == dst, S/G entries are reused (S/G tables overlap)
	 *
	 * HW reads 4 S/G entries at a time; make sure the reads don't go beyond
	 * the end of the table by allocating more S/G entries.
	 */
	if (req->src != req->dst)
		qm_sg_ents += pad_sg_nents(mapped_dst_nents + 1);
	else
		qm_sg_ents = 1 + pad_sg_nents(qm_sg_ents);
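	/*
	 * Worked example (assuming pad_sg_nents() rounds up to a multiple of
	 * 4, the HW read burst): src != dst, mapped_src_nents = 2,
	 * mapped_dst_nents = 2 gives an input table [IV, src0, src1] of
	 * 3 entries (dst_sg_idx = 3) plus an output table [dst0, dst1, IV]
	 * padded from 3 to 4 entries, i.e. qm_sg_ents = 7.
	 */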
1181 qm_sg_bytes = qm_sg_ents * sizeof(struct dpaa2_sg_entry);
1182 if (unlikely(offsetof(struct skcipher_edesc, sgt) + qm_sg_bytes +
1183 ivsize > CAAM_QI_MEMCACHE_SIZE)) {
1184 dev_err(dev, "No space for %d S/G entries and/or %dB IV\n",
1185 qm_sg_ents, ivsize);
1186 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1188 return ERR_PTR(-ENOMEM);
1191 /* allocate space for base edesc, link tables and IV */
1192 edesc = qi_cache_zalloc(GFP_DMA | flags);
1193 if (unlikely(!edesc)) {
1194 dev_err(dev, "could not allocate extended descriptor\n");
1195 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1197 return ERR_PTR(-ENOMEM);
1200 /* Make sure IV is located in a DMAable area */
1201 sg_table = &edesc->sgt[0];
1202 iv = (u8 *)(sg_table + qm_sg_ents);
1203 memcpy(iv, req->iv, ivsize);
1205 iv_dma = dma_map_single(dev, iv, ivsize, DMA_BIDIRECTIONAL);
1206 if (dma_mapping_error(dev, iv_dma)) {
1207 dev_err(dev, "unable to map IV\n");
1208 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents, 0,
1210 qi_cache_free(edesc);
1211 return ERR_PTR(-ENOMEM);
1214 edesc->src_nents = src_nents;
1215 edesc->dst_nents = dst_nents;
1216 edesc->iv_dma = iv_dma;
1217 edesc->qm_sg_bytes = qm_sg_bytes;
1219 dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0);
1220 sg_to_qm_sg(req->src, req->cryptlen, sg_table + 1, 0);
1222 if (req->src != req->dst)
1223 sg_to_qm_sg(req->dst, req->cryptlen, sg_table + dst_sg_idx, 0);
	dma_to_qm_sg_one(sg_table + dst_sg_idx + mapped_dst_nents, iv_dma,
			 ivsize, 0);
1228 edesc->qm_sg_dma = dma_map_single(dev, sg_table, edesc->qm_sg_bytes,
1230 if (dma_mapping_error(dev, edesc->qm_sg_dma)) {
1231 dev_err(dev, "unable to map S/G table\n");
1232 caam_unmap(dev, req->src, req->dst, src_nents, dst_nents,
1233 iv_dma, ivsize, DMA_BIDIRECTIONAL, 0, 0);
1234 qi_cache_free(edesc);
1235 return ERR_PTR(-ENOMEM);
1238 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
1239 dpaa2_fl_set_final(in_fle, true);
1240 dpaa2_fl_set_len(in_fle, req->cryptlen + ivsize);
1241 dpaa2_fl_set_len(out_fle, req->cryptlen + ivsize);
1243 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
1244 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
1246 dpaa2_fl_set_format(out_fle, dpaa2_fl_sg);
	if (req->src == req->dst)
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma +
				  sizeof(*sg_table));
	else
		dpaa2_fl_set_addr(out_fle, edesc->qm_sg_dma + dst_sg_idx *
				  sizeof(*sg_table));

	return edesc;
}
1258 static void aead_unmap(struct device *dev, struct aead_edesc *edesc,
1259 struct aead_request *req)
1261 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1262 int ivsize = crypto_aead_ivsize(aead);
1264 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1265 edesc->iv_dma, ivsize, DMA_TO_DEVICE, edesc->qm_sg_dma,
1266 edesc->qm_sg_bytes);
1267 dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE);
1270 static void skcipher_unmap(struct device *dev, struct skcipher_edesc *edesc,
1271 struct skcipher_request *req)
1273 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1274 int ivsize = crypto_skcipher_ivsize(skcipher);
1276 caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents,
1277 edesc->iv_dma, ivsize, DMA_BIDIRECTIONAL, edesc->qm_sg_dma,
1278 edesc->qm_sg_bytes);
1281 static void aead_encrypt_done(void *cbk_ctx, u32 status)
1283 struct crypto_async_request *areq = cbk_ctx;
1284 struct aead_request *req = container_of(areq, struct aead_request,
1286 struct caam_request *req_ctx = to_caam_req(areq);
1287 struct aead_edesc *edesc = req_ctx->edesc;
1288 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1289 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
1292 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1294 if (unlikely(status))
1295 ecode = caam_qi2_strstatus(ctx->dev, status);
1297 aead_unmap(ctx->dev, edesc, req);
1298 qi_cache_free(edesc);
1299 aead_request_complete(req, ecode);
1302 static void aead_decrypt_done(void *cbk_ctx, u32 status)
1304 struct crypto_async_request *areq = cbk_ctx;
1305 struct aead_request *req = container_of(areq, struct aead_request,
1307 struct caam_request *req_ctx = to_caam_req(areq);
1308 struct aead_edesc *edesc = req_ctx->edesc;
1309 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1310 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
1313 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1315 if (unlikely(status))
1316 ecode = caam_qi2_strstatus(ctx->dev, status);
1318 aead_unmap(ctx->dev, edesc, req);
1319 qi_cache_free(edesc);
1320 aead_request_complete(req, ecode);
1323 static int aead_encrypt(struct aead_request *req)
1325 struct aead_edesc *edesc;
1326 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1327 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
1328 struct caam_request *caam_req = aead_request_ctx_dma(req);
1331 /* allocate extended descriptor */
1332 edesc = aead_edesc_alloc(req, true);
1334 return PTR_ERR(edesc);
1336 caam_req->flc = &ctx->flc[ENCRYPT];
1337 caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1338 caam_req->cbk = aead_encrypt_done;
1339 caam_req->ctx = &req->base;
1340 caam_req->edesc = edesc;
1341 ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1342 if (ret != -EINPROGRESS &&
1343 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1344 aead_unmap(ctx->dev, edesc, req);
1345 qi_cache_free(edesc);
1351 static int aead_decrypt(struct aead_request *req)
1353 struct aead_edesc *edesc;
1354 struct crypto_aead *aead = crypto_aead_reqtfm(req);
1355 struct caam_ctx *ctx = crypto_aead_ctx_dma(aead);
1356 struct caam_request *caam_req = aead_request_ctx_dma(req);
1359 /* allocate extended descriptor */
1360 edesc = aead_edesc_alloc(req, false);
1362 return PTR_ERR(edesc);
1364 caam_req->flc = &ctx->flc[DECRYPT];
1365 caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1366 caam_req->cbk = aead_decrypt_done;
1367 caam_req->ctx = &req->base;
1368 caam_req->edesc = edesc;
1369 ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1370 if (ret != -EINPROGRESS &&
1371 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1372 aead_unmap(ctx->dev, edesc, req);
1373 qi_cache_free(edesc);
1379 static int ipsec_gcm_encrypt(struct aead_request *req)
1381 return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_encrypt(req);
1384 static int ipsec_gcm_decrypt(struct aead_request *req)
1386 return crypto_ipsec_check_assoclen(req->assoclen) ? : aead_decrypt(req);
1389 static void skcipher_encrypt_done(void *cbk_ctx, u32 status)
1391 struct crypto_async_request *areq = cbk_ctx;
1392 struct skcipher_request *req = skcipher_request_cast(areq);
1393 struct caam_request *req_ctx = to_caam_req(areq);
1394 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1395 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
1396 struct skcipher_edesc *edesc = req_ctx->edesc;
1398 int ivsize = crypto_skcipher_ivsize(skcipher);
1400 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1402 if (unlikely(status))
1403 ecode = caam_qi2_strstatus(ctx->dev, status);
1405 print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
1406 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1407 edesc->src_nents > 1 ? 100 : ivsize, 1);
1408 caam_dump_sg("dst @" __stringify(__LINE__)": ",
1409 DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1410 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1412 skcipher_unmap(ctx->dev, edesc, req);
	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (!ecode)
		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
		       ivsize);
1423 qi_cache_free(edesc);
1424 skcipher_request_complete(req, ecode);
1427 static void skcipher_decrypt_done(void *cbk_ctx, u32 status)
1429 struct crypto_async_request *areq = cbk_ctx;
1430 struct skcipher_request *req = skcipher_request_cast(areq);
1431 struct caam_request *req_ctx = to_caam_req(areq);
1432 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1433 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
1434 struct skcipher_edesc *edesc = req_ctx->edesc;
1436 int ivsize = crypto_skcipher_ivsize(skcipher);
1438 dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
1440 if (unlikely(status))
1441 ecode = caam_qi2_strstatus(ctx->dev, status);
1443 print_hex_dump_debug("dstiv @" __stringify(__LINE__)": ",
1444 DUMP_PREFIX_ADDRESS, 16, 4, req->iv,
1445 edesc->src_nents > 1 ? 100 : ivsize, 1);
1446 caam_dump_sg("dst @" __stringify(__LINE__)": ",
1447 DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
1448 edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
1450 skcipher_unmap(ctx->dev, edesc, req);
	/*
	 * The crypto API expects us to set the IV (req->iv) to the last
	 * ciphertext block (CBC mode) or last counter (CTR mode).
	 * This is used e.g. by the CTS mode.
	 */
	if (!ecode)
		memcpy(req->iv, (u8 *)&edesc->sgt[0] + edesc->qm_sg_bytes,
		       ivsize);
1461 qi_cache_free(edesc);
1462 skcipher_request_complete(req, ecode);
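/*
 * Returns true if the upper half of the XTS IV is non-zero, i.e. the sector
 * index does not fit the low 64 bits of the tweak (for the usual 16-byte IV
 * this loads bytes 8..15 as a u64). Callers below use this to fall back to
 * the software implementation on era <= 8 hardware (interpretation based on
 * the fallback conditions in skcipher_encrypt/skcipher_decrypt).
 */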
static inline bool xts_skcipher_ivsize(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	unsigned int ivsize = crypto_skcipher_ivsize(skcipher);

	return !!get_unaligned((u64 *)(req->iv + (ivsize / 2)));
}
1473 static int skcipher_encrypt(struct skcipher_request *req)
1475 struct skcipher_edesc *edesc;
1476 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1477 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
1478 struct caam_request *caam_req = skcipher_request_ctx_dma(req);
1479 struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
	/*
	 * XTS is expected to return an error even for input length = 0
	 * Note that the case input length < block size will be caught during
	 * HW offloading and return an error.
	 */
	if (!req->cryptlen && !ctx->fallback)
		return 0;
	if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
			      ctx->xts_key_fallback)) {
		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&caam_req->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);

		return crypto_skcipher_encrypt(&caam_req->fallback_req);
	}
1503 /* allocate extended descriptor */
1504 edesc = skcipher_edesc_alloc(req);
1506 return PTR_ERR(edesc);
1508 caam_req->flc = &ctx->flc[ENCRYPT];
1509 caam_req->flc_dma = ctx->flc_dma[ENCRYPT];
1510 caam_req->cbk = skcipher_encrypt_done;
1511 caam_req->ctx = &req->base;
1512 caam_req->edesc = edesc;
1513 ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1514 if (ret != -EINPROGRESS &&
1515 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1516 skcipher_unmap(ctx->dev, edesc, req);
1517 qi_cache_free(edesc);
1523 static int skcipher_decrypt(struct skcipher_request *req)
1525 struct skcipher_edesc *edesc;
1526 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
1527 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(skcipher);
1528 struct caam_request *caam_req = skcipher_request_ctx_dma(req);
1529 struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
	/*
	 * XTS is expected to return an error even for input length = 0
	 * Note that the case input length < block size will be caught during
	 * HW offloading and return an error.
	 */
	if (!req->cryptlen && !ctx->fallback)
		return 0;
	if (ctx->fallback && ((priv->sec_attr.era <= 8 && xts_skcipher_ivsize(req)) ||
			      ctx->xts_key_fallback)) {
		skcipher_request_set_tfm(&caam_req->fallback_req, ctx->fallback);
		skcipher_request_set_callback(&caam_req->fallback_req,
					      req->base.flags,
					      req->base.complete,
					      req->base.data);
		skcipher_request_set_crypt(&caam_req->fallback_req, req->src,
					   req->dst, req->cryptlen, req->iv);

		return crypto_skcipher_decrypt(&caam_req->fallback_req);
	}
1553 /* allocate extended descriptor */
1554 edesc = skcipher_edesc_alloc(req);
1556 return PTR_ERR(edesc);
1558 caam_req->flc = &ctx->flc[DECRYPT];
1559 caam_req->flc_dma = ctx->flc_dma[DECRYPT];
1560 caam_req->cbk = skcipher_decrypt_done;
1561 caam_req->ctx = &req->base;
1562 caam_req->edesc = edesc;
1563 ret = dpaa2_caam_enqueue(ctx->dev, caam_req);
1564 if (ret != -EINPROGRESS &&
1565 !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1566 skcipher_unmap(ctx->dev, edesc, req);
1567 qi_cache_free(edesc);
static int caam_cra_init(struct caam_ctx *ctx, struct caam_alg_entry *caam,
			 bool uses_dkp)
{
	dma_addr_t dma_addr;
	int i;

	/* copy descriptor header template value */
	ctx->cdata.algtype = OP_TYPE_CLASS1_ALG | caam->class1_alg_type;
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam->class2_alg_type;

	ctx->dev = caam->dev;
	ctx->dir = uses_dkp ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc,
					offsetof(struct caam_ctx, flc_dma),
					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->dev, dma_addr)) {
		dev_err(ctx->dev, "unable to map key, shared descriptors\n");
		return -ENOMEM;
	}

	for (i = 0; i < NUM_OP; i++)
		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);
	ctx->key_dma = dma_addr + NUM_OP * sizeof(ctx->flc[0]);

	return 0;
}
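/*
 * Resulting DMA layout (sketch): a single mapping spans the members of
 * struct caam_ctx declared before flc_dma, i.e. the flc[] array and key[]:
 *
 *	dma_addr -> | flc[0] | ... | flc[NUM_OP - 1] | key |
 *	              ^ flc_dma[0]                     ^ key_dma
 *
 * offsetof(struct caam_ctx, flc_dma) gives exactly the size of that region,
 * which is why it is used as the mapping length above.
 */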
1601 static int caam_cra_init_skcipher(struct crypto_skcipher *tfm)
1603 struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
1604 struct caam_skcipher_alg *caam_alg =
1605 container_of(alg, typeof(*caam_alg), skcipher);
1606 struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);
1607 u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
1610 if (alg_aai == OP_ALG_AAI_XTS) {
1611 const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
1612 struct crypto_skcipher *fallback;
1614 fallback = crypto_alloc_skcipher(tfm_name, 0,
1615 CRYPTO_ALG_NEED_FALLBACK);
1616 if (IS_ERR(fallback)) {
1617 dev_err(caam_alg->caam.dev,
1618 "Failed to allocate %s fallback: %ld\n",
1619 tfm_name, PTR_ERR(fallback));
1620 return PTR_ERR(fallback);
1623 ctx->fallback = fallback;
1624 crypto_skcipher_set_reqsize_dma(
1625 tfm, sizeof(struct caam_request) +
1626 crypto_skcipher_reqsize(fallback));
1628 crypto_skcipher_set_reqsize_dma(tfm,
1629 sizeof(struct caam_request));
1632 ret = caam_cra_init(ctx, &caam_alg->caam, false);
1633 if (ret && ctx->fallback)
1634 crypto_free_skcipher(ctx->fallback);
1639 static int caam_cra_init_aead(struct crypto_aead *tfm)
1641 struct aead_alg *alg = crypto_aead_alg(tfm);
	struct caam_aead_alg *caam_alg = container_of(alg, typeof(*caam_alg),
						      aead);
1645 crypto_aead_set_reqsize_dma(tfm, sizeof(struct caam_request));
1646 return caam_cra_init(crypto_aead_ctx_dma(tfm), &caam_alg->caam,
1647 !caam_alg->caam.nodkp);
static void caam_exit_common(struct caam_ctx *ctx)
{
	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0],
			       offsetof(struct caam_ctx, flc_dma), ctx->dir,
			       DMA_ATTR_SKIP_CPU_SYNC);
}

static void caam_cra_exit(struct crypto_skcipher *tfm)
{
	struct caam_ctx *ctx = crypto_skcipher_ctx_dma(tfm);

	if (ctx->fallback)
		crypto_free_skcipher(ctx->fallback);
	caam_exit_common(ctx);
}

static void caam_cra_exit_aead(struct crypto_aead *tfm)
{
	caam_exit_common(crypto_aead_ctx_dma(tfm));
}
1671 static struct caam_skcipher_alg driver_algs[] = {
1675 .cra_name = "cbc(aes)",
1676 .cra_driver_name = "cbc-aes-caam-qi2",
1677 .cra_blocksize = AES_BLOCK_SIZE,
1679 .setkey = aes_skcipher_setkey,
1680 .encrypt = skcipher_encrypt,
1681 .decrypt = skcipher_decrypt,
1682 .min_keysize = AES_MIN_KEY_SIZE,
1683 .max_keysize = AES_MAX_KEY_SIZE,
1684 .ivsize = AES_BLOCK_SIZE,
1686 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1691 .cra_name = "cbc(des3_ede)",
1692 .cra_driver_name = "cbc-3des-caam-qi2",
1693 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
1695 .setkey = des3_skcipher_setkey,
1696 .encrypt = skcipher_encrypt,
1697 .decrypt = skcipher_decrypt,
1698 .min_keysize = DES3_EDE_KEY_SIZE,
1699 .max_keysize = DES3_EDE_KEY_SIZE,
1700 .ivsize = DES3_EDE_BLOCK_SIZE,
1702 .caam.class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
1707 .cra_name = "cbc(des)",
1708 .cra_driver_name = "cbc-des-caam-qi2",
1709 .cra_blocksize = DES_BLOCK_SIZE,
1711 .setkey = des_skcipher_setkey,
1712 .encrypt = skcipher_encrypt,
1713 .decrypt = skcipher_decrypt,
1714 .min_keysize = DES_KEY_SIZE,
1715 .max_keysize = DES_KEY_SIZE,
1716 .ivsize = DES_BLOCK_SIZE,
1718 .caam.class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
1723 .cra_name = "ctr(aes)",
1724 .cra_driver_name = "ctr-aes-caam-qi2",
1727 .setkey = ctr_skcipher_setkey,
1728 .encrypt = skcipher_encrypt,
1729 .decrypt = skcipher_decrypt,
1730 .min_keysize = AES_MIN_KEY_SIZE,
1731 .max_keysize = AES_MAX_KEY_SIZE,
1732 .ivsize = AES_BLOCK_SIZE,
1733 .chunksize = AES_BLOCK_SIZE,
1735 .caam.class1_alg_type = OP_ALG_ALGSEL_AES |
1736 OP_ALG_AAI_CTR_MOD128,
1741 .cra_name = "rfc3686(ctr(aes))",
1742 .cra_driver_name = "rfc3686-ctr-aes-caam-qi2",
1745 .setkey = rfc3686_skcipher_setkey,
1746 .encrypt = skcipher_encrypt,
1747 .decrypt = skcipher_decrypt,
1748 .min_keysize = AES_MIN_KEY_SIZE +
1749 CTR_RFC3686_NONCE_SIZE,
1750 .max_keysize = AES_MAX_KEY_SIZE +
1751 CTR_RFC3686_NONCE_SIZE,
1752 .ivsize = CTR_RFC3686_IV_SIZE,
1753 .chunksize = AES_BLOCK_SIZE,
1756 .class1_alg_type = OP_ALG_ALGSEL_AES |
1757 OP_ALG_AAI_CTR_MOD128,
1764 .cra_name = "xts(aes)",
1765 .cra_driver_name = "xts-aes-caam-qi2",
1766 .cra_flags = CRYPTO_ALG_NEED_FALLBACK,
1767 .cra_blocksize = AES_BLOCK_SIZE,
1769 .setkey = xts_skcipher_setkey,
1770 .encrypt = skcipher_encrypt,
1771 .decrypt = skcipher_decrypt,
1772 .min_keysize = 2 * AES_MIN_KEY_SIZE,
1773 .max_keysize = 2 * AES_MAX_KEY_SIZE,
1774 .ivsize = AES_BLOCK_SIZE,
1776 .caam.class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XTS,
1781 .cra_name = "chacha20",
1782 .cra_driver_name = "chacha20-caam-qi2",
1785 .setkey = chacha20_skcipher_setkey,
1786 .encrypt = skcipher_encrypt,
1787 .decrypt = skcipher_decrypt,
1788 .min_keysize = CHACHA_KEY_SIZE,
1789 .max_keysize = CHACHA_KEY_SIZE,
1790 .ivsize = CHACHA_IV_SIZE,
1792 .caam.class1_alg_type = OP_ALG_ALGSEL_CHACHA20,
1796 static struct caam_aead_alg driver_aeads[] = {
1800 .cra_name = "rfc4106(gcm(aes))",
1801 .cra_driver_name = "rfc4106-gcm-aes-caam-qi2",
1804 .setkey = rfc4106_setkey,
1805 .setauthsize = rfc4106_setauthsize,
1806 .encrypt = ipsec_gcm_encrypt,
1807 .decrypt = ipsec_gcm_decrypt,
1809 .maxauthsize = AES_BLOCK_SIZE,
1812 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1819 .cra_name = "rfc4543(gcm(aes))",
1820 .cra_driver_name = "rfc4543-gcm-aes-caam-qi2",
1823 .setkey = rfc4543_setkey,
1824 .setauthsize = rfc4543_setauthsize,
1825 .encrypt = ipsec_gcm_encrypt,
1826 .decrypt = ipsec_gcm_decrypt,
1828 .maxauthsize = AES_BLOCK_SIZE,
1831 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1835 /* Galois Counter Mode */
1839 .cra_name = "gcm(aes)",
1840 .cra_driver_name = "gcm-aes-caam-qi2",
1843 .setkey = gcm_setkey,
1844 .setauthsize = gcm_setauthsize,
1845 .encrypt = aead_encrypt,
1846 .decrypt = aead_decrypt,
1848 .maxauthsize = AES_BLOCK_SIZE,
1851 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_GCM,
1855 /* single-pass ipsec_esp descriptor */
1859 .cra_name = "authenc(hmac(md5),cbc(aes))",
1860 .cra_driver_name = "authenc-hmac-md5-"
1862 .cra_blocksize = AES_BLOCK_SIZE,
1864 .setkey = aead_setkey,
1865 .setauthsize = aead_setauthsize,
1866 .encrypt = aead_encrypt,
1867 .decrypt = aead_decrypt,
1868 .ivsize = AES_BLOCK_SIZE,
1869 .maxauthsize = MD5_DIGEST_SIZE,
1872 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1873 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1874 OP_ALG_AAI_HMAC_PRECOMP,
1880 .cra_name = "echainiv(authenc(hmac(md5),"
1882 .cra_driver_name = "echainiv-authenc-hmac-md5-"
1884 .cra_blocksize = AES_BLOCK_SIZE,
1886 .setkey = aead_setkey,
1887 .setauthsize = aead_setauthsize,
1888 .encrypt = aead_encrypt,
1889 .decrypt = aead_decrypt,
1890 .ivsize = AES_BLOCK_SIZE,
1891 .maxauthsize = MD5_DIGEST_SIZE,
1894 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1895 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
1896 OP_ALG_AAI_HMAC_PRECOMP,
1903 .cra_name = "authenc(hmac(sha1),cbc(aes))",
1904 .cra_driver_name = "authenc-hmac-sha1-"
1906 .cra_blocksize = AES_BLOCK_SIZE,
1908 .setkey = aead_setkey,
1909 .setauthsize = aead_setauthsize,
1910 .encrypt = aead_encrypt,
1911 .decrypt = aead_decrypt,
1912 .ivsize = AES_BLOCK_SIZE,
1913 .maxauthsize = SHA1_DIGEST_SIZE,
1916 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1917 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1918 OP_ALG_AAI_HMAC_PRECOMP,
1924 .cra_name = "echainiv(authenc(hmac(sha1),"
1926 .cra_driver_name = "echainiv-authenc-"
1927 "hmac-sha1-cbc-aes-caam-qi2",
1928 .cra_blocksize = AES_BLOCK_SIZE,
1930 .setkey = aead_setkey,
1931 .setauthsize = aead_setauthsize,
1932 .encrypt = aead_encrypt,
1933 .decrypt = aead_decrypt,
1934 .ivsize = AES_BLOCK_SIZE,
1935 .maxauthsize = SHA1_DIGEST_SIZE,
1938 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1939 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
1940 OP_ALG_AAI_HMAC_PRECOMP,
1947 .cra_name = "authenc(hmac(sha224),cbc(aes))",
1948 .cra_driver_name = "authenc-hmac-sha224-"
1950 .cra_blocksize = AES_BLOCK_SIZE,
1952 .setkey = aead_setkey,
1953 .setauthsize = aead_setauthsize,
1954 .encrypt = aead_encrypt,
1955 .decrypt = aead_decrypt,
1956 .ivsize = AES_BLOCK_SIZE,
1957 .maxauthsize = SHA224_DIGEST_SIZE,
1960 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1961 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1962 OP_ALG_AAI_HMAC_PRECOMP,
1968 .cra_name = "echainiv(authenc(hmac(sha224),"
1970 .cra_driver_name = "echainiv-authenc-"
1971 "hmac-sha224-cbc-aes-caam-qi2",
1972 .cra_blocksize = AES_BLOCK_SIZE,
1974 .setkey = aead_setkey,
1975 .setauthsize = aead_setauthsize,
1976 .encrypt = aead_encrypt,
1977 .decrypt = aead_decrypt,
1978 .ivsize = AES_BLOCK_SIZE,
1979 .maxauthsize = SHA224_DIGEST_SIZE,
1982 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
1983 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
1984 OP_ALG_AAI_HMAC_PRECOMP,
1991 .cra_name = "authenc(hmac(sha256),cbc(aes))",
1992 .cra_driver_name = "authenc-hmac-sha256-"
1994 .cra_blocksize = AES_BLOCK_SIZE,
1996 .setkey = aead_setkey,
1997 .setauthsize = aead_setauthsize,
1998 .encrypt = aead_encrypt,
1999 .decrypt = aead_decrypt,
2000 .ivsize = AES_BLOCK_SIZE,
2001 .maxauthsize = SHA256_DIGEST_SIZE,
2004 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2005 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2006 OP_ALG_AAI_HMAC_PRECOMP,
2012 .cra_name = "echainiv(authenc(hmac(sha256),"
2014 .cra_driver_name = "echainiv-authenc-"
2015 "hmac-sha256-cbc-aes-"
2017 .cra_blocksize = AES_BLOCK_SIZE,
2019 .setkey = aead_setkey,
2020 .setauthsize = aead_setauthsize,
2021 .encrypt = aead_encrypt,
2022 .decrypt = aead_decrypt,
2023 .ivsize = AES_BLOCK_SIZE,
2024 .maxauthsize = SHA256_DIGEST_SIZE,
2027 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2028 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2029 OP_ALG_AAI_HMAC_PRECOMP,
2036 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2037 .cra_driver_name = "authenc-hmac-sha384-"
2039 .cra_blocksize = AES_BLOCK_SIZE,
2041 .setkey = aead_setkey,
2042 .setauthsize = aead_setauthsize,
2043 .encrypt = aead_encrypt,
2044 .decrypt = aead_decrypt,
2045 .ivsize = AES_BLOCK_SIZE,
2046 .maxauthsize = SHA384_DIGEST_SIZE,
2049 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2050 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2051 OP_ALG_AAI_HMAC_PRECOMP,
2057 .cra_name = "echainiv(authenc(hmac(sha384),"
2059 .cra_driver_name = "echainiv-authenc-"
2060 "hmac-sha384-cbc-aes-"
2062 .cra_blocksize = AES_BLOCK_SIZE,
2064 .setkey = aead_setkey,
2065 .setauthsize = aead_setauthsize,
2066 .encrypt = aead_encrypt,
2067 .decrypt = aead_decrypt,
2068 .ivsize = AES_BLOCK_SIZE,
2069 .maxauthsize = SHA384_DIGEST_SIZE,
2072 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2073 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2074 OP_ALG_AAI_HMAC_PRECOMP,
2081 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2082 .cra_driver_name = "authenc-hmac-sha512-"
2084 .cra_blocksize = AES_BLOCK_SIZE,
2086 .setkey = aead_setkey,
2087 .setauthsize = aead_setauthsize,
2088 .encrypt = aead_encrypt,
2089 .decrypt = aead_decrypt,
2090 .ivsize = AES_BLOCK_SIZE,
2091 .maxauthsize = SHA512_DIGEST_SIZE,
2094 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2095 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2096 OP_ALG_AAI_HMAC_PRECOMP,
2102 .cra_name = "echainiv(authenc(hmac(sha512),"
2104 .cra_driver_name = "echainiv-authenc-"
2105 "hmac-sha512-cbc-aes-"
2107 .cra_blocksize = AES_BLOCK_SIZE,
2109 .setkey = aead_setkey,
2110 .setauthsize = aead_setauthsize,
2111 .encrypt = aead_encrypt,
2112 .decrypt = aead_decrypt,
2113 .ivsize = AES_BLOCK_SIZE,
2114 .maxauthsize = SHA512_DIGEST_SIZE,
2117 .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
2118 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2119 OP_ALG_AAI_HMAC_PRECOMP,
2126 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2127 .cra_driver_name = "authenc-hmac-md5-"
2128 "cbc-des3_ede-caam-qi2",
2129 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2131 .setkey = des3_aead_setkey,
2132 .setauthsize = aead_setauthsize,
2133 .encrypt = aead_encrypt,
2134 .decrypt = aead_decrypt,
2135 .ivsize = DES3_EDE_BLOCK_SIZE,
2136 .maxauthsize = MD5_DIGEST_SIZE,
2139 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2140 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2141 OP_ALG_AAI_HMAC_PRECOMP,
2147 .cra_name = "echainiv(authenc(hmac(md5),"
2149 .cra_driver_name = "echainiv-authenc-hmac-md5-"
2150 "cbc-des3_ede-caam-qi2",
2151 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2153 .setkey = des3_aead_setkey,
2154 .setauthsize = aead_setauthsize,
2155 .encrypt = aead_encrypt,
2156 .decrypt = aead_decrypt,
2157 .ivsize = DES3_EDE_BLOCK_SIZE,
2158 .maxauthsize = MD5_DIGEST_SIZE,
2161 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2162 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2163 OP_ALG_AAI_HMAC_PRECOMP,
2170 .cra_name = "authenc(hmac(sha1),"
2172 .cra_driver_name = "authenc-hmac-sha1-"
2173 "cbc-des3_ede-caam-qi2",
2174 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2176 .setkey = des3_aead_setkey,
2177 .setauthsize = aead_setauthsize,
2178 .encrypt = aead_encrypt,
2179 .decrypt = aead_decrypt,
2180 .ivsize = DES3_EDE_BLOCK_SIZE,
2181 .maxauthsize = SHA1_DIGEST_SIZE,
2184 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2185 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2186 OP_ALG_AAI_HMAC_PRECOMP,
.cra_name = "echainiv(authenc(hmac(sha1),"
	    "cbc(des3_ede)))",
.cra_driver_name = "echainiv-authenc-"
		   "hmac-sha1-"
		   "cbc-des3_ede-caam-qi2",
2197 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2199 .setkey = des3_aead_setkey,
2200 .setauthsize = aead_setauthsize,
2201 .encrypt = aead_encrypt,
2202 .decrypt = aead_decrypt,
2203 .ivsize = DES3_EDE_BLOCK_SIZE,
2204 .maxauthsize = SHA1_DIGEST_SIZE,
2207 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2208 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2209 OP_ALG_AAI_HMAC_PRECOMP,
.cra_name = "authenc(hmac(sha224),"
	    "cbc(des3_ede))",
2218 .cra_driver_name = "authenc-hmac-sha224-"
2219 "cbc-des3_ede-caam-qi2",
2220 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2222 .setkey = des3_aead_setkey,
2223 .setauthsize = aead_setauthsize,
2224 .encrypt = aead_encrypt,
2225 .decrypt = aead_decrypt,
2226 .ivsize = DES3_EDE_BLOCK_SIZE,
2227 .maxauthsize = SHA224_DIGEST_SIZE,
2230 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2231 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2232 OP_ALG_AAI_HMAC_PRECOMP,
.cra_name = "echainiv(authenc(hmac(sha224),"
	    "cbc(des3_ede)))",
.cra_driver_name = "echainiv-authenc-"
		   "hmac-sha224-"
		   "cbc-des3_ede-caam-qi2",
2243 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2245 .setkey = des3_aead_setkey,
2246 .setauthsize = aead_setauthsize,
2247 .encrypt = aead_encrypt,
2248 .decrypt = aead_decrypt,
2249 .ivsize = DES3_EDE_BLOCK_SIZE,
2250 .maxauthsize = SHA224_DIGEST_SIZE,
2253 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2254 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2255 OP_ALG_AAI_HMAC_PRECOMP,
.cra_name = "authenc(hmac(sha256),"
	    "cbc(des3_ede))",
2264 .cra_driver_name = "authenc-hmac-sha256-"
2265 "cbc-des3_ede-caam-qi2",
2266 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2268 .setkey = des3_aead_setkey,
2269 .setauthsize = aead_setauthsize,
2270 .encrypt = aead_encrypt,
2271 .decrypt = aead_decrypt,
2272 .ivsize = DES3_EDE_BLOCK_SIZE,
2273 .maxauthsize = SHA256_DIGEST_SIZE,
2276 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2277 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2278 OP_ALG_AAI_HMAC_PRECOMP,
.cra_name = "echainiv(authenc(hmac(sha256),"
	    "cbc(des3_ede)))",
.cra_driver_name = "echainiv-authenc-"
		   "hmac-sha256-"
		   "cbc-des3_ede-caam-qi2",
2289 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2291 .setkey = des3_aead_setkey,
2292 .setauthsize = aead_setauthsize,
2293 .encrypt = aead_encrypt,
2294 .decrypt = aead_decrypt,
2295 .ivsize = DES3_EDE_BLOCK_SIZE,
2296 .maxauthsize = SHA256_DIGEST_SIZE,
2299 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2300 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2301 OP_ALG_AAI_HMAC_PRECOMP,
.cra_name = "authenc(hmac(sha384),"
	    "cbc(des3_ede))",
2310 .cra_driver_name = "authenc-hmac-sha384-"
2311 "cbc-des3_ede-caam-qi2",
2312 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2314 .setkey = des3_aead_setkey,
2315 .setauthsize = aead_setauthsize,
2316 .encrypt = aead_encrypt,
2317 .decrypt = aead_decrypt,
2318 .ivsize = DES3_EDE_BLOCK_SIZE,
2319 .maxauthsize = SHA384_DIGEST_SIZE,
2322 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2323 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2324 OP_ALG_AAI_HMAC_PRECOMP,
.cra_name = "echainiv(authenc(hmac(sha384),"
	    "cbc(des3_ede)))",
.cra_driver_name = "echainiv-authenc-"
		   "hmac-sha384-"
		   "cbc-des3_ede-caam-qi2",
2335 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2337 .setkey = des3_aead_setkey,
2338 .setauthsize = aead_setauthsize,
2339 .encrypt = aead_encrypt,
2340 .decrypt = aead_decrypt,
2341 .ivsize = DES3_EDE_BLOCK_SIZE,
2342 .maxauthsize = SHA384_DIGEST_SIZE,
2345 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2346 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2347 OP_ALG_AAI_HMAC_PRECOMP,
.cra_name = "authenc(hmac(sha512),"
	    "cbc(des3_ede))",
2356 .cra_driver_name = "authenc-hmac-sha512-"
2357 "cbc-des3_ede-caam-qi2",
2358 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2360 .setkey = des3_aead_setkey,
2361 .setauthsize = aead_setauthsize,
2362 .encrypt = aead_encrypt,
2363 .decrypt = aead_decrypt,
2364 .ivsize = DES3_EDE_BLOCK_SIZE,
2365 .maxauthsize = SHA512_DIGEST_SIZE,
2368 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2369 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2370 OP_ALG_AAI_HMAC_PRECOMP,
.cra_name = "echainiv(authenc(hmac(sha512),"
	    "cbc(des3_ede)))",
.cra_driver_name = "echainiv-authenc-"
		   "hmac-sha512-"
		   "cbc-des3_ede-caam-qi2",
2381 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2383 .setkey = des3_aead_setkey,
2384 .setauthsize = aead_setauthsize,
2385 .encrypt = aead_encrypt,
2386 .decrypt = aead_decrypt,
2387 .ivsize = DES3_EDE_BLOCK_SIZE,
2388 .maxauthsize = SHA512_DIGEST_SIZE,
2391 .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
2392 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2393 OP_ALG_AAI_HMAC_PRECOMP,
2400 .cra_name = "authenc(hmac(md5),cbc(des))",
.cra_driver_name = "authenc-hmac-md5-"
		   "cbc-des-caam-qi2",
2403 .cra_blocksize = DES_BLOCK_SIZE,
2405 .setkey = aead_setkey,
2406 .setauthsize = aead_setauthsize,
2407 .encrypt = aead_encrypt,
2408 .decrypt = aead_decrypt,
2409 .ivsize = DES_BLOCK_SIZE,
2410 .maxauthsize = MD5_DIGEST_SIZE,
2413 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2414 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2415 OP_ALG_AAI_HMAC_PRECOMP,
.cra_name = "echainiv(authenc(hmac(md5),"
	    "cbc(des)))",
.cra_driver_name = "echainiv-authenc-hmac-md5-"
		   "cbc-des-caam-qi2",
2425 .cra_blocksize = DES_BLOCK_SIZE,
2427 .setkey = aead_setkey,
2428 .setauthsize = aead_setauthsize,
2429 .encrypt = aead_encrypt,
2430 .decrypt = aead_decrypt,
2431 .ivsize = DES_BLOCK_SIZE,
2432 .maxauthsize = MD5_DIGEST_SIZE,
2435 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2436 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2437 OP_ALG_AAI_HMAC_PRECOMP,
2444 .cra_name = "authenc(hmac(sha1),cbc(des))",
.cra_driver_name = "authenc-hmac-sha1-"
		   "cbc-des-caam-qi2",
2447 .cra_blocksize = DES_BLOCK_SIZE,
2449 .setkey = aead_setkey,
2450 .setauthsize = aead_setauthsize,
2451 .encrypt = aead_encrypt,
2452 .decrypt = aead_decrypt,
2453 .ivsize = DES_BLOCK_SIZE,
2454 .maxauthsize = SHA1_DIGEST_SIZE,
2457 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2458 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2459 OP_ALG_AAI_HMAC_PRECOMP,
.cra_name = "echainiv(authenc(hmac(sha1),"
	    "cbc(des)))",
2467 .cra_driver_name = "echainiv-authenc-"
2468 "hmac-sha1-cbc-des-caam-qi2",
2469 .cra_blocksize = DES_BLOCK_SIZE,
2471 .setkey = aead_setkey,
2472 .setauthsize = aead_setauthsize,
2473 .encrypt = aead_encrypt,
2474 .decrypt = aead_decrypt,
2475 .ivsize = DES_BLOCK_SIZE,
2476 .maxauthsize = SHA1_DIGEST_SIZE,
2479 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2480 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2481 OP_ALG_AAI_HMAC_PRECOMP,
2488 .cra_name = "authenc(hmac(sha224),cbc(des))",
.cra_driver_name = "authenc-hmac-sha224-"
		   "cbc-des-caam-qi2",
2491 .cra_blocksize = DES_BLOCK_SIZE,
2493 .setkey = aead_setkey,
2494 .setauthsize = aead_setauthsize,
2495 .encrypt = aead_encrypt,
2496 .decrypt = aead_decrypt,
2497 .ivsize = DES_BLOCK_SIZE,
2498 .maxauthsize = SHA224_DIGEST_SIZE,
2501 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2502 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2503 OP_ALG_AAI_HMAC_PRECOMP,
.cra_name = "echainiv(authenc(hmac(sha224),"
	    "cbc(des)))",
.cra_driver_name = "echainiv-authenc-"
		   "hmac-sha224-cbc-des-"
		   "caam-qi2",
2514 .cra_blocksize = DES_BLOCK_SIZE,
2516 .setkey = aead_setkey,
2517 .setauthsize = aead_setauthsize,
2518 .encrypt = aead_encrypt,
2519 .decrypt = aead_decrypt,
2520 .ivsize = DES_BLOCK_SIZE,
2521 .maxauthsize = SHA224_DIGEST_SIZE,
2524 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2525 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2526 OP_ALG_AAI_HMAC_PRECOMP,
2533 .cra_name = "authenc(hmac(sha256),cbc(des))",
.cra_driver_name = "authenc-hmac-sha256-"
		   "cbc-des-caam-qi2",
2536 .cra_blocksize = DES_BLOCK_SIZE,
2538 .setkey = aead_setkey,
2539 .setauthsize = aead_setauthsize,
2540 .encrypt = aead_encrypt,
2541 .decrypt = aead_decrypt,
2542 .ivsize = DES_BLOCK_SIZE,
2543 .maxauthsize = SHA256_DIGEST_SIZE,
2546 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2547 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2548 OP_ALG_AAI_HMAC_PRECOMP,
.cra_name = "echainiv(authenc(hmac(sha256),"
	    "cbc(des)))",
.cra_driver_name = "echainiv-authenc-"
		   "hmac-sha256-cbc-des-"
		   "caam-qi2",
2559 .cra_blocksize = DES_BLOCK_SIZE,
2561 .setkey = aead_setkey,
2562 .setauthsize = aead_setauthsize,
2563 .encrypt = aead_encrypt,
2564 .decrypt = aead_decrypt,
2565 .ivsize = DES_BLOCK_SIZE,
2566 .maxauthsize = SHA256_DIGEST_SIZE,
2569 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2570 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2571 OP_ALG_AAI_HMAC_PRECOMP,
2578 .cra_name = "authenc(hmac(sha384),cbc(des))",
.cra_driver_name = "authenc-hmac-sha384-"
		   "cbc-des-caam-qi2",
2581 .cra_blocksize = DES_BLOCK_SIZE,
2583 .setkey = aead_setkey,
2584 .setauthsize = aead_setauthsize,
2585 .encrypt = aead_encrypt,
2586 .decrypt = aead_decrypt,
2587 .ivsize = DES_BLOCK_SIZE,
2588 .maxauthsize = SHA384_DIGEST_SIZE,
2591 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2592 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2593 OP_ALG_AAI_HMAC_PRECOMP,
.cra_name = "echainiv(authenc(hmac(sha384),"
	    "cbc(des)))",
.cra_driver_name = "echainiv-authenc-"
		   "hmac-sha384-cbc-des-"
		   "caam-qi2",
2604 .cra_blocksize = DES_BLOCK_SIZE,
2606 .setkey = aead_setkey,
2607 .setauthsize = aead_setauthsize,
2608 .encrypt = aead_encrypt,
2609 .decrypt = aead_decrypt,
2610 .ivsize = DES_BLOCK_SIZE,
2611 .maxauthsize = SHA384_DIGEST_SIZE,
2614 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2615 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2616 OP_ALG_AAI_HMAC_PRECOMP,
2623 .cra_name = "authenc(hmac(sha512),cbc(des))",
.cra_driver_name = "authenc-hmac-sha512-"
		   "cbc-des-caam-qi2",
2626 .cra_blocksize = DES_BLOCK_SIZE,
2628 .setkey = aead_setkey,
2629 .setauthsize = aead_setauthsize,
2630 .encrypt = aead_encrypt,
2631 .decrypt = aead_decrypt,
2632 .ivsize = DES_BLOCK_SIZE,
2633 .maxauthsize = SHA512_DIGEST_SIZE,
2636 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2637 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2638 OP_ALG_AAI_HMAC_PRECOMP,
.cra_name = "echainiv(authenc(hmac(sha512),"
	    "cbc(des)))",
.cra_driver_name = "echainiv-authenc-"
		   "hmac-sha512-cbc-des-"
		   "caam-qi2",
2649 .cra_blocksize = DES_BLOCK_SIZE,
2651 .setkey = aead_setkey,
2652 .setauthsize = aead_setauthsize,
2653 .encrypt = aead_encrypt,
2654 .decrypt = aead_decrypt,
2655 .ivsize = DES_BLOCK_SIZE,
2656 .maxauthsize = SHA512_DIGEST_SIZE,
2659 .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
2660 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2661 OP_ALG_AAI_HMAC_PRECOMP,
2668 .cra_name = "authenc(hmac(md5),"
2669 "rfc3686(ctr(aes)))",
2670 .cra_driver_name = "authenc-hmac-md5-"
2671 "rfc3686-ctr-aes-caam-qi2",
2674 .setkey = aead_setkey,
2675 .setauthsize = aead_setauthsize,
2676 .encrypt = aead_encrypt,
2677 .decrypt = aead_decrypt,
2678 .ivsize = CTR_RFC3686_IV_SIZE,
2679 .maxauthsize = MD5_DIGEST_SIZE,
2682 .class1_alg_type = OP_ALG_ALGSEL_AES |
2683 OP_ALG_AAI_CTR_MOD128,
2684 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2685 OP_ALG_AAI_HMAC_PRECOMP,
2692 .cra_name = "seqiv(authenc("
2693 "hmac(md5),rfc3686(ctr(aes))))",
2694 .cra_driver_name = "seqiv-authenc-hmac-md5-"
2695 "rfc3686-ctr-aes-caam-qi2",
2698 .setkey = aead_setkey,
2699 .setauthsize = aead_setauthsize,
2700 .encrypt = aead_encrypt,
2701 .decrypt = aead_decrypt,
2702 .ivsize = CTR_RFC3686_IV_SIZE,
2703 .maxauthsize = MD5_DIGEST_SIZE,
2706 .class1_alg_type = OP_ALG_ALGSEL_AES |
2707 OP_ALG_AAI_CTR_MOD128,
2708 .class2_alg_type = OP_ALG_ALGSEL_MD5 |
2709 OP_ALG_AAI_HMAC_PRECOMP,
2717 .cra_name = "authenc(hmac(sha1),"
2718 "rfc3686(ctr(aes)))",
2719 .cra_driver_name = "authenc-hmac-sha1-"
2720 "rfc3686-ctr-aes-caam-qi2",
2723 .setkey = aead_setkey,
2724 .setauthsize = aead_setauthsize,
2725 .encrypt = aead_encrypt,
2726 .decrypt = aead_decrypt,
2727 .ivsize = CTR_RFC3686_IV_SIZE,
2728 .maxauthsize = SHA1_DIGEST_SIZE,
2731 .class1_alg_type = OP_ALG_ALGSEL_AES |
2732 OP_ALG_AAI_CTR_MOD128,
2733 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2734 OP_ALG_AAI_HMAC_PRECOMP,
2741 .cra_name = "seqiv(authenc("
2742 "hmac(sha1),rfc3686(ctr(aes))))",
2743 .cra_driver_name = "seqiv-authenc-hmac-sha1-"
2744 "rfc3686-ctr-aes-caam-qi2",
2747 .setkey = aead_setkey,
2748 .setauthsize = aead_setauthsize,
2749 .encrypt = aead_encrypt,
2750 .decrypt = aead_decrypt,
2751 .ivsize = CTR_RFC3686_IV_SIZE,
2752 .maxauthsize = SHA1_DIGEST_SIZE,
2755 .class1_alg_type = OP_ALG_ALGSEL_AES |
2756 OP_ALG_AAI_CTR_MOD128,
2757 .class2_alg_type = OP_ALG_ALGSEL_SHA1 |
2758 OP_ALG_AAI_HMAC_PRECOMP,
2766 .cra_name = "authenc(hmac(sha224),"
2767 "rfc3686(ctr(aes)))",
2768 .cra_driver_name = "authenc-hmac-sha224-"
2769 "rfc3686-ctr-aes-caam-qi2",
2772 .setkey = aead_setkey,
2773 .setauthsize = aead_setauthsize,
2774 .encrypt = aead_encrypt,
2775 .decrypt = aead_decrypt,
2776 .ivsize = CTR_RFC3686_IV_SIZE,
2777 .maxauthsize = SHA224_DIGEST_SIZE,
2780 .class1_alg_type = OP_ALG_ALGSEL_AES |
2781 OP_ALG_AAI_CTR_MOD128,
2782 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2783 OP_ALG_AAI_HMAC_PRECOMP,
2790 .cra_name = "seqiv(authenc("
2791 "hmac(sha224),rfc3686(ctr(aes))))",
2792 .cra_driver_name = "seqiv-authenc-hmac-sha224-"
2793 "rfc3686-ctr-aes-caam-qi2",
2796 .setkey = aead_setkey,
2797 .setauthsize = aead_setauthsize,
2798 .encrypt = aead_encrypt,
2799 .decrypt = aead_decrypt,
2800 .ivsize = CTR_RFC3686_IV_SIZE,
2801 .maxauthsize = SHA224_DIGEST_SIZE,
2804 .class1_alg_type = OP_ALG_ALGSEL_AES |
2805 OP_ALG_AAI_CTR_MOD128,
2806 .class2_alg_type = OP_ALG_ALGSEL_SHA224 |
2807 OP_ALG_AAI_HMAC_PRECOMP,
2815 .cra_name = "authenc(hmac(sha256),"
2816 "rfc3686(ctr(aes)))",
2817 .cra_driver_name = "authenc-hmac-sha256-"
2818 "rfc3686-ctr-aes-caam-qi2",
2821 .setkey = aead_setkey,
2822 .setauthsize = aead_setauthsize,
2823 .encrypt = aead_encrypt,
2824 .decrypt = aead_decrypt,
2825 .ivsize = CTR_RFC3686_IV_SIZE,
2826 .maxauthsize = SHA256_DIGEST_SIZE,
2829 .class1_alg_type = OP_ALG_ALGSEL_AES |
2830 OP_ALG_AAI_CTR_MOD128,
2831 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2832 OP_ALG_AAI_HMAC_PRECOMP,
2839 .cra_name = "seqiv(authenc(hmac(sha256),"
2840 "rfc3686(ctr(aes))))",
2841 .cra_driver_name = "seqiv-authenc-hmac-sha256-"
2842 "rfc3686-ctr-aes-caam-qi2",
2845 .setkey = aead_setkey,
2846 .setauthsize = aead_setauthsize,
2847 .encrypt = aead_encrypt,
2848 .decrypt = aead_decrypt,
2849 .ivsize = CTR_RFC3686_IV_SIZE,
2850 .maxauthsize = SHA256_DIGEST_SIZE,
2853 .class1_alg_type = OP_ALG_ALGSEL_AES |
2854 OP_ALG_AAI_CTR_MOD128,
2855 .class2_alg_type = OP_ALG_ALGSEL_SHA256 |
2856 OP_ALG_AAI_HMAC_PRECOMP,
2864 .cra_name = "authenc(hmac(sha384),"
2865 "rfc3686(ctr(aes)))",
2866 .cra_driver_name = "authenc-hmac-sha384-"
2867 "rfc3686-ctr-aes-caam-qi2",
2870 .setkey = aead_setkey,
2871 .setauthsize = aead_setauthsize,
2872 .encrypt = aead_encrypt,
2873 .decrypt = aead_decrypt,
2874 .ivsize = CTR_RFC3686_IV_SIZE,
2875 .maxauthsize = SHA384_DIGEST_SIZE,
2878 .class1_alg_type = OP_ALG_ALGSEL_AES |
2879 OP_ALG_AAI_CTR_MOD128,
2880 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2881 OP_ALG_AAI_HMAC_PRECOMP,
2888 .cra_name = "seqiv(authenc(hmac(sha384),"
2889 "rfc3686(ctr(aes))))",
2890 .cra_driver_name = "seqiv-authenc-hmac-sha384-"
2891 "rfc3686-ctr-aes-caam-qi2",
2894 .setkey = aead_setkey,
2895 .setauthsize = aead_setauthsize,
2896 .encrypt = aead_encrypt,
2897 .decrypt = aead_decrypt,
2898 .ivsize = CTR_RFC3686_IV_SIZE,
2899 .maxauthsize = SHA384_DIGEST_SIZE,
2902 .class1_alg_type = OP_ALG_ALGSEL_AES |
2903 OP_ALG_AAI_CTR_MOD128,
2904 .class2_alg_type = OP_ALG_ALGSEL_SHA384 |
2905 OP_ALG_AAI_HMAC_PRECOMP,
2913 .cra_name = "rfc7539(chacha20,poly1305)",
.cra_driver_name = "rfc7539-chacha20-poly1305-"
		   "caam-qi2",
2918 .setkey = chachapoly_setkey,
2919 .setauthsize = chachapoly_setauthsize,
2920 .encrypt = aead_encrypt,
2921 .decrypt = aead_decrypt,
2922 .ivsize = CHACHAPOLY_IV_SIZE,
2923 .maxauthsize = POLY1305_DIGEST_SIZE,
.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
		   OP_ALG_AAI_AEAD,
.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
		   OP_ALG_AAI_AEAD,
.nodkp = true,
2936 .cra_name = "rfc7539esp(chacha20,poly1305)",
2937 .cra_driver_name = "rfc7539esp-chacha20-"
2938 "poly1305-caam-qi2",
2941 .setkey = chachapoly_setkey,
2942 .setauthsize = chachapoly_setauthsize,
2943 .encrypt = aead_encrypt,
2944 .decrypt = aead_decrypt,
.ivsize = 8,
.maxauthsize = POLY1305_DIGEST_SIZE,
.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
		   OP_ALG_AAI_AEAD,
.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
		   OP_ALG_AAI_AEAD,
.nodkp = true,
2959 .cra_name = "authenc(hmac(sha512),"
2960 "rfc3686(ctr(aes)))",
2961 .cra_driver_name = "authenc-hmac-sha512-"
2962 "rfc3686-ctr-aes-caam-qi2",
2965 .setkey = aead_setkey,
2966 .setauthsize = aead_setauthsize,
2967 .encrypt = aead_encrypt,
2968 .decrypt = aead_decrypt,
2969 .ivsize = CTR_RFC3686_IV_SIZE,
2970 .maxauthsize = SHA512_DIGEST_SIZE,
2973 .class1_alg_type = OP_ALG_ALGSEL_AES |
2974 OP_ALG_AAI_CTR_MOD128,
2975 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
2976 OP_ALG_AAI_HMAC_PRECOMP,
2983 .cra_name = "seqiv(authenc(hmac(sha512),"
2984 "rfc3686(ctr(aes))))",
2985 .cra_driver_name = "seqiv-authenc-hmac-sha512-"
2986 "rfc3686-ctr-aes-caam-qi2",
2989 .setkey = aead_setkey,
2990 .setauthsize = aead_setauthsize,
2991 .encrypt = aead_encrypt,
2992 .decrypt = aead_decrypt,
2993 .ivsize = CTR_RFC3686_IV_SIZE,
2994 .maxauthsize = SHA512_DIGEST_SIZE,
2997 .class1_alg_type = OP_ALG_ALGSEL_AES |
2998 OP_ALG_AAI_CTR_MOD128,
2999 .class2_alg_type = OP_ALG_ALGSEL_SHA512 |
3000 OP_ALG_AAI_HMAC_PRECOMP,
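/*
 * Helpers that fill in the crypto API boilerplate shared by all of the
 * template entries above: module owner, request priority, context size
 * (including DMA padding) and the init/exit hooks.
 */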
static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
{
	struct skcipher_alg *alg = &t_alg->skcipher;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
	alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
				CRYPTO_ALG_KERN_DRIVER_ONLY);

	alg->init = caam_cra_init_skcipher;
	alg->exit = caam_cra_exit;
}
static void caam_aead_alg_init(struct caam_aead_alg *t_alg)
{
	struct aead_alg *alg = &t_alg->aead;

	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CAAM_CRA_PRIORITY;
	alg->base.cra_ctxsize = sizeof(struct caam_ctx) + crypto_dma_padding();
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
			      CRYPTO_ALG_KERN_DRIVER_ONLY;

	alg->init = caam_cra_init_aead;
	alg->exit = caam_cra_exit_aead;
}
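/*
 * ahash support. As with the skcipher/AEAD code above, each hash session
 * keeps one Flow Context (shared descriptor) per operation type and
 * enqueues work on the DPSECI object; partial blocks are buffered in
 * software between update() calls.
 */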
3035 /* max hash key is max split key size */
3036 #define CAAM_MAX_HASH_KEY_SIZE (SHA512_DIGEST_SIZE * 2)
3038 #define CAAM_MAX_HASH_BLOCK_SIZE SHA512_BLOCK_SIZE
3040 /* caam context sizes for hashes: running digest + 8 */
3041 #define HASH_MSG_LEN 8
3042 #define MAX_CTX_LEN (HASH_MSG_LEN + SHA512_DIGEST_SIZE)
3053 * struct caam_hash_ctx - ahash per-session context
3054 * @flc: Flow Contexts array
3055 * @key: authentication key
3056 * @flc_dma: I/O virtual addresses of the Flow Contexts
3057 * @dev: dpseci device
3058 * @ctx_len: size of Context Register
3059 * @adata: hashing algorithm details
struct caam_hash_ctx {
	struct caam_flc flc[HASH_NUM_OP];
	u8 key[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	dma_addr_t flc_dma[HASH_NUM_OP];
	struct device *dev;
	int ctx_len;
	struct alginfo adata;
};
struct caam_hash_state {
	struct caam_request caam_req;
	dma_addr_t buf_dma;
	dma_addr_t ctx_dma;
	int ctx_dma_len;
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE] ____cacheline_aligned;
	int buflen;
	int next_buflen;
	u8 caam_ctx[MAX_CTX_LEN] ____cacheline_aligned;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
struct caam_export_state {
	u8 buf[CAAM_MAX_HASH_BLOCK_SIZE];
	u8 caam_ctx[MAX_CTX_LEN];
	int buflen;
	int (*update)(struct ahash_request *req);
	int (*final)(struct ahash_request *req);
	int (*finup)(struct ahash_request *req);
};
/* Map current buffer in state (if length > 0) and put it in link table */
static inline int buf_map_to_qm_sg(struct device *dev,
				   struct dpaa2_sg_entry *qm_sg,
				   struct caam_hash_state *state)
{
	int buflen = state->buflen;

	if (!buflen)
		return 0;

	state->buf_dma = dma_map_single(dev, state->buf, buflen,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dev, state->buf_dma)) {
		dev_err(dev, "unable to map buf\n");
		state->buf_dma = 0;
		return -ENOMEM;
	}

	dma_to_qm_sg_one(qm_sg, state->buf_dma, buflen, 0);

	return 0;
}
/* Map state->caam_ctx, and add it to link table */
static inline int ctx_map_to_qm_sg(struct device *dev,
				   struct caam_hash_state *state, int ctx_len,
				   struct dpaa2_sg_entry *qm_sg, u32 flag)
{
	state->ctx_dma_len = ctx_len;
	state->ctx_dma = dma_map_single(dev, state->caam_ctx, ctx_len, flag);
	if (dma_mapping_error(dev, state->ctx_dma)) {
		dev_err(dev, "unable to map ctx\n");
		state->ctx_dma = 0;
		return -ENOMEM;
	}

	dma_to_qm_sg_one(qm_sg, state->ctx_dma, ctx_len, 0);

	return 0;
}
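/*
 * Construct the four shared descriptors a hash session uses: UPDATE and
 * UPDATE_FIRST keep the running digest in the Context Register, FINALIZE
 * turns the stored running digest into the final digest, and DIGEST is
 * the one-shot init+final operation.
 */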
static int ahash_set_sh_desc(struct crypto_ahash *ahash)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	struct dpaa2_caam_priv *priv = dev_get_drvdata(ctx->dev);
	struct caam_flc *flc;
	u32 *desc;
3143 /* ahash_update shared descriptor */
3144 flc = &ctx->flc[UPDATE];
3145 desc = flc->sh_desc;
3146 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
3147 ctx->ctx_len, true, priv->sec_attr.era);
3148 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3149 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE],
3150 desc_bytes(desc), DMA_BIDIRECTIONAL);
	print_hex_dump_debug("ahash update shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);
3155 /* ahash_update_first shared descriptor */
3156 flc = &ctx->flc[UPDATE_FIRST];
3157 desc = flc->sh_desc;
3158 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
3159 ctx->ctx_len, false, priv->sec_attr.era);
3160 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3161 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[UPDATE_FIRST],
3162 desc_bytes(desc), DMA_BIDIRECTIONAL);
	print_hex_dump_debug("ahash update first shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);
3167 /* ahash_final shared descriptor */
3168 flc = &ctx->flc[FINALIZE];
3169 desc = flc->sh_desc;
3170 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
3171 ctx->ctx_len, true, priv->sec_attr.era);
3172 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3173 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[FINALIZE],
3174 desc_bytes(desc), DMA_BIDIRECTIONAL);
	print_hex_dump_debug("ahash final shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);
3179 /* ahash_digest shared descriptor */
3180 flc = &ctx->flc[DIGEST];
3181 desc = flc->sh_desc;
3182 cnstr_shdsc_ahash(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
3183 ctx->ctx_len, false, priv->sec_attr.era);
3184 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3185 dma_sync_single_for_device(ctx->dev, ctx->flc_dma[DIGEST],
3186 desc_bytes(desc), DMA_BIDIRECTIONAL);
	print_hex_dump_debug("ahash digest shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	return 0;
}
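/* Completion record for the synchronous key-digest operation below */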
struct split_key_sh_result {
	struct completion completion;
	int err;
	struct device *dev;
};
static void split_key_sh_done(void *cbk_ctx, u32 err)
{
	struct split_key_sh_result *res = cbk_ctx;

	dev_dbg(res->dev, "%s %d: err 0x%x\n", __func__, __LINE__, err);

	res->err = err ? caam_qi2_strstatus(res->dev, err) : 0;
	complete(&res->completion);
}
/* Digest hash size if it is too large */
static int hash_digest_key(struct caam_hash_ctx *ctx, u32 *keylen, u8 *key,
			   u32 digestsize)
{
	struct caam_request *req_ctx;
	u32 *desc;
	struct split_key_sh_result result;
	dma_addr_t key_dma;
	struct caam_flc *flc;
	dma_addr_t flc_dma;
	int ret = -ENOMEM;
	struct dpaa2_fl_entry *in_fle, *out_fle;

	req_ctx = kzalloc(sizeof(*req_ctx), GFP_KERNEL | GFP_DMA);
	if (!req_ctx)
		return -ENOMEM;

	in_fle = &req_ctx->fd_flt[1];
	out_fle = &req_ctx->fd_flt[0];
	flc = kzalloc(sizeof(*flc), GFP_KERNEL | GFP_DMA);
	if (!flc)
		goto err_flc;

	key_dma = dma_map_single(ctx->dev, key, *keylen, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(ctx->dev, key_dma)) {
		dev_err(ctx->dev, "unable to map key memory\n");
		goto err_key_dma;
	}
3240 desc = flc->sh_desc;
3242 init_sh_desc(desc, 0);
3244 /* descriptor to perform unkeyed hash on key_in */
3245 append_operation(desc, ctx->adata.algtype | OP_ALG_ENCRYPT |
3246 OP_ALG_AS_INITFINAL);
3247 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 |
3248 FIFOLD_TYPE_LAST2 | FIFOLD_TYPE_MSG);
3249 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
3250 LDST_SRCDST_BYTE_CONTEXT);
3252 flc->flc[1] = cpu_to_caam32(desc_len(desc)); /* SDL */
3253 flc_dma = dma_map_single(ctx->dev, flc, sizeof(flc->flc) +
3254 desc_bytes(desc), DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, flc_dma)) {
		dev_err(ctx->dev, "unable to map shared descriptor\n");
		goto err_flc_dma;
	}
3260 dpaa2_fl_set_final(in_fle, true);
3261 dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
3262 dpaa2_fl_set_addr(in_fle, key_dma);
3263 dpaa2_fl_set_len(in_fle, *keylen);
3264 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3265 dpaa2_fl_set_addr(out_fle, key_dma);
3266 dpaa2_fl_set_len(out_fle, digestsize);
3268 print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
3269 DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
	print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
			     1);

	result.err = 0;
	init_completion(&result.completion);
	result.dev = ctx->dev;

	req_ctx->flc = flc;
	req_ctx->flc_dma = flc_dma;
	req_ctx->cbk = split_key_sh_done;
	req_ctx->ctx = &result;
	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS) {
		/* in progress */
		wait_for_completion(&result.completion);
		ret = result.err;

		print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, key,
				     digestsize, 1);
	}

	dma_unmap_single(ctx->dev, flc_dma, sizeof(flc->flc) + desc_bytes(desc),
			 DMA_TO_DEVICE);
err_flc_dma:
	dma_unmap_single(ctx->dev, key_dma, *keylen, DMA_BIDIRECTIONAL);
err_key_dma:
	kfree(flc);
err_flc:
	kfree(req_ctx);

	*keylen = digestsize;

	return ret;
}
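/*
 * Keys longer than the algorithm block size cannot be used as-is for
 * HMAC, so ahash_setkey() first reduces them to digest size via
 * hash_digest_key(), per the standard HMAC key pre-processing.
 */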
static int ahash_setkey(struct crypto_ahash *ahash, const u8 *key,
			unsigned int keylen)
{
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	unsigned int blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	int ret;
	u8 *hashed_key = NULL;
3316 dev_dbg(ctx->dev, "keylen %d blocksize %d\n", keylen, blocksize);
	if (keylen > blocksize) {
		hashed_key = kmemdup(key, keylen, GFP_KERNEL | GFP_DMA);
		if (!hashed_key)
			return -ENOMEM;
		ret = hash_digest_key(ctx, &keylen, hashed_key, digestsize);
		if (ret)
			goto bad_free_key;
		key = hashed_key;
	}

	ctx->adata.keylen = keylen;
3329 ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype &
3330 OP_ALG_ALGSEL_MASK);
	if (ctx->adata.keylen_pad > CAAM_MAX_HASH_KEY_SIZE)
		goto bad_free_key;

	ctx->adata.key_virt = key;
3335 ctx->adata.key_inline = true;
3338 * In case |user key| > |derived key|, using DKP<imm,imm> would result
3339 * in invalid opcodes (last bytes of user key) in the resulting
3340 * descriptor. Use DKP<ptr,imm> instead => both virtual and dma key
3341 * addresses are needed.
	if (keylen > ctx->adata.keylen_pad) {
		memcpy(ctx->key, key, keylen);
		dma_sync_single_for_device(ctx->dev, ctx->adata.key_dma,
					   ctx->adata.keylen_pad,
					   DMA_TO_DEVICE);
	}

	ret = ahash_set_sh_desc(ahash);
	kfree(hashed_key);
	return ret;
bad_free_key:
	kfree(hashed_key);
	return -EINVAL;
}
static inline void ahash_unmap(struct device *dev, struct ahash_edesc *edesc,
			       struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	if (edesc->src_nents)
		dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE);

	if (edesc->qm_sg_bytes)
		dma_unmap_single(dev, edesc->qm_sg_dma, edesc->qm_sg_bytes,
				 DMA_TO_DEVICE);

	if (state->buf_dma) {
		dma_unmap_single(dev, state->buf_dma, state->buflen,
				 DMA_TO_DEVICE);
		state->buf_dma = 0;
	}
}

static inline void ahash_unmap_ctx(struct device *dev,
				   struct ahash_edesc *edesc,
				   struct ahash_request *req, u32 flag)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	if (state->ctx_dma) {
		dma_unmap_single(dev, state->ctx_dma, state->ctx_dma_len, flag);
		state->ctx_dma = 0;
	}

	ahash_unmap(dev, edesc, req);
}
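/*
 * Completion callbacks. They differ only in the DMA direction used when
 * unmapping the hash state and in whether the digest (req->result) or
 * the buffered trailing bytes must be copied back before completing.
 */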
static void ahash_done(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct ahash_edesc *edesc = state->caam_req.edesc;
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3403 if (unlikely(status))
3404 ecode = caam_qi2_strstatus(ctx->dev, status);
3406 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3407 memcpy(req->result, state->caam_ctx, digestsize);
3408 qi_cache_free(edesc);
	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     digestsize, 1);

	req->base.complete(&req->base, ecode);
}
static void ahash_done_bi(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct ahash_edesc *edesc = state->caam_req.edesc;
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3429 if (unlikely(status))
3430 ecode = caam_qi2_strstatus(ctx->dev, status);
3432 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3433 qi_cache_free(edesc);
3435 scatterwalk_map_and_copy(state->buf, req->src,
3436 req->nbytes - state->next_buflen,
3437 state->next_buflen, 0);
3438 state->buflen = state->next_buflen;
	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
			     state->buflen, 1);
	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
				     crypto_ahash_digestsize(ahash), 1);

	req->base.complete(&req->base, ecode);
}
static void ahash_done_ctx_src(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct ahash_edesc *edesc = state->caam_req.edesc;
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int digestsize = crypto_ahash_digestsize(ahash);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3468 if (unlikely(status))
3469 ecode = caam_qi2_strstatus(ctx->dev, status);
3471 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
3472 memcpy(req->result, state->caam_ctx, digestsize);
3473 qi_cache_free(edesc);
	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     digestsize, 1);

	req->base.complete(&req->base, ecode);
}
static void ahash_done_ctx_dst(void *cbk_ctx, u32 status)
{
	struct crypto_async_request *areq = cbk_ctx;
	struct ahash_request *req = ahash_request_cast(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct ahash_edesc *edesc = state->caam_req.edesc;
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	int ecode = 0;

	dev_dbg(ctx->dev, "%s %d: err 0x%x\n", __func__, __LINE__, status);
3494 if (unlikely(status))
3495 ecode = caam_qi2_strstatus(ctx->dev, status);
3497 ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
3498 qi_cache_free(edesc);
3500 scatterwalk_map_and_copy(state->buf, req->src,
3501 req->nbytes - state->next_buflen,
3502 state->next_buflen, 0);
3503 state->buflen = state->next_buflen;
	print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->buf,
			     state->buflen, 1);
	print_hex_dump_debug("ctx@" __stringify(__LINE__)": ",
			     DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
			     ctx->ctx_len, 1);
	if (req->result)
		print_hex_dump_debug("result@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, req->result,
				     crypto_ahash_digestsize(ahash), 1);

	req->base.complete(&req->base, ecode);
}
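/*
 * update() with a live hardware context: only a whole number of blocks
 * is sent to the engine; the trailing partial block stays in state->buf
 * until the next update()/finup()/final() call.
 */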
static int ahash_update_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	int src_nents, mapped_nents, qm_sg_bytes, qm_sg_src_index;
	struct ahash_edesc *edesc;
	int ret = 0;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		struct dpaa2_sg_entry *sg_table;
		int src_len = req->nbytes - *next_buflen;
		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(ctx->dev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}
		/* allocate space for base edesc and link tables */
		edesc = qi_cache_zalloc(GFP_DMA | flags);
		if (!edesc) {
			dma_unmap_sg(ctx->dev, req->src, src_nents,
				     DMA_TO_DEVICE);
			return -ENOMEM;
		}
3570 edesc->src_nents = src_nents;
3571 qm_sg_src_index = 1 + (*buflen ? 1 : 0);
		qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
			      sizeof(*sg_table);
3574 sg_table = &edesc->sgt[0];
		ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
				       DMA_BIDIRECTIONAL);
		if (ret)
			goto unmap_ctx;

		ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
		if (ret)
			goto unmap_ctx;

		if (mapped_nents) {
			sg_to_qm_sg_last(req->src, src_len,
					 sg_table + qm_sg_src_index, 0);
		} else {
			dpaa2_sg_set_final(sg_table + qm_sg_src_index - 1,
					   true);
		}
3593 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3594 qm_sg_bytes, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
			dev_err(ctx->dev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}
		edesc->qm_sg_bytes = qm_sg_bytes;
3602 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3603 dpaa2_fl_set_final(in_fle, true);
3604 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3605 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3606 dpaa2_fl_set_len(in_fle, ctx->ctx_len + to_hash);
3607 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3608 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3609 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
3611 req_ctx->flc = &ctx->flc[UPDATE];
3612 req_ctx->flc_dma = ctx->flc_dma[UPDATE];
3613 req_ctx->cbk = ahash_done_bi;
3614 req_ctx->ctx = &req->base;
3615 req_ctx->edesc = edesc;
		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
		if (ret != -EINPROGRESS &&
		    !(ret == -EBUSY &&
		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			goto unmap_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;
		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	qi_cache_free(edesc);
	return ret;
}
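/*
 * final() with a live hardware context: run the FINALIZE descriptor over
 * the saved running digest plus any buffered bytes to produce the digest.
 */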
static int ahash_final_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int buflen = state->buflen;
	int qm_sg_bytes;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	struct dpaa2_sg_entry *sg_table;
	int ret;
	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (!edesc)
		return -ENOMEM;
3661 qm_sg_bytes = pad_sg_nents(1 + (buflen ? 1 : 0)) * sizeof(*sg_table);
3662 sg_table = &edesc->sgt[0];
	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
			       DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
	if (ret)
		goto unmap_ctx;
3673 dpaa2_sg_set_final(sg_table + (buflen ? 1 : 0), true);
	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
		dev_err(ctx->dev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}
	edesc->qm_sg_bytes = qm_sg_bytes;
3684 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3685 dpaa2_fl_set_final(in_fle, true);
3686 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3687 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3688 dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen);
3689 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3690 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3691 dpaa2_fl_set_len(out_fle, digestsize);
3693 req_ctx->flc = &ctx->flc[FINALIZE];
3694 req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3695 req_ctx->cbk = ahash_done_ctx_src;
3696 req_ctx->ctx = &req->base;
3697 req_ctx->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS ||
	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return ret;

unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	qi_cache_free(edesc);
	return ret;
}
static int ahash_finup_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int buflen = state->buflen;
	int qm_sg_bytes, qm_sg_src_index;
	int src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	struct dpaa2_sg_entry *sg_table;
	int ret;
	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(ctx->dev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(ctx->dev, "unable to DMA map source\n");
			return -ENOMEM;
		}
	} else {
		mapped_nents = 0;
	}
	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (!edesc) {
		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
		return -ENOMEM;
	}
3752 edesc->src_nents = src_nents;
3753 qm_sg_src_index = 1 + (buflen ? 1 : 0);
	qm_sg_bytes = pad_sg_nents(qm_sg_src_index + mapped_nents) *
		      sizeof(*sg_table);
3756 sg_table = &edesc->sgt[0];
	ret = ctx_map_to_qm_sg(ctx->dev, state, ctx->ctx_len, sg_table,
			       DMA_BIDIRECTIONAL);
	if (ret)
		goto unmap_ctx;

	ret = buf_map_to_qm_sg(ctx->dev, sg_table + 1, state);
	if (ret)
		goto unmap_ctx;
3767 sg_to_qm_sg_last(req->src, req->nbytes, sg_table + qm_sg_src_index, 0);
	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
		dev_err(ctx->dev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap_ctx;
	}
	edesc->qm_sg_bytes = qm_sg_bytes;
3778 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
3779 dpaa2_fl_set_final(in_fle, true);
3780 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
3781 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
3782 dpaa2_fl_set_len(in_fle, ctx->ctx_len + buflen + req->nbytes);
3783 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3784 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3785 dpaa2_fl_set_len(out_fle, digestsize);
3787 req_ctx->flc = &ctx->flc[FINALIZE];
3788 req_ctx->flc_dma = ctx->flc_dma[FINALIZE];
3789 req_ctx->cbk = ahash_done_ctx_src;
3790 req_ctx->ctx = &req->base;
3791 req_ctx->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS ||
	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return ret;

unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_BIDIRECTIONAL);
	qi_cache_free(edesc);
	return ret;
}
static int ahash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int digestsize = crypto_ahash_digestsize(ahash);
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = -ENOMEM;

	state->buf_dma = 0;
	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(ctx->dev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(ctx->dev, "unable to map source for DMA\n");
			return ret;
		}
	} else {
		mapped_nents = 0;
	}
	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (!edesc) {
		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
		return ret;
	}
3845 edesc->src_nents = src_nents;
3846 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	if (mapped_nents > 1) {
		int qm_sg_bytes;
		struct dpaa2_sg_entry *sg_table = &edesc->sgt[0];
3852 qm_sg_bytes = pad_sg_nents(mapped_nents) * sizeof(*sg_table);
3853 sg_to_qm_sg_last(req->src, req->nbytes, sg_table, 0);
3854 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
3855 qm_sg_bytes, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
			dev_err(ctx->dev, "unable to map S/G table\n");
			goto unmap;
		}
		edesc->qm_sg_bytes = qm_sg_bytes;
		dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
		dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
	} else {
		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
	}
	state->ctx_dma_len = digestsize;
	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
		dev_err(ctx->dev, "unable to map ctx\n");
		state->ctx_dma = 0;
		goto unmap;
	}
3877 dpaa2_fl_set_final(in_fle, true);
3878 dpaa2_fl_set_len(in_fle, req->nbytes);
3879 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3880 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3881 dpaa2_fl_set_len(out_fle, digestsize);
3883 req_ctx->flc = &ctx->flc[DIGEST];
3884 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3885 req_ctx->cbk = ahash_done;
3886 req_ctx->ctx = &req->base;
3887 req_ctx->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS ||
	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return ret;

unmap:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	qi_cache_free(edesc);
	return ret;
}
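/*
 * final() when nothing has been sent to the engine yet: all data still
 * sits in state->buf, so a single DIGEST operation over that buffer is
 * enough.
 */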
static int ahash_final_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->buf;
	int buflen = state->buflen;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	int ret = -ENOMEM;
	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (!edesc)
		return ret;

	if (buflen) {
		state->buf_dma = dma_map_single(ctx->dev, buf, buflen,
						DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->dev, state->buf_dma)) {
			dev_err(ctx->dev, "unable to map src\n");
			goto unmap;
		}
	}
	state->ctx_dma_len = digestsize;
	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
		dev_err(ctx->dev, "unable to map ctx\n");
		state->ctx_dma = 0;
		goto unmap;
	}
	memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
	dpaa2_fl_set_final(in_fle, true);
	/*
	 * crypto engine requires the input entry to be present when
	 * "frame list" FD is used.
	 * Since engine does not support FMT=2'b11 (unused entry type), leaving
	 * in_fle zeroized (except for "Final" flag) is the best option.
	 */
	if (buflen) {
		dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
		dpaa2_fl_set_addr(in_fle, state->buf_dma);
		dpaa2_fl_set_len(in_fle, buflen);
	}
3951 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
3952 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
3953 dpaa2_fl_set_len(out_fle, digestsize);
3955 req_ctx->flc = &ctx->flc[DIGEST];
3956 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
3957 req_ctx->cbk = ahash_done;
3958 req_ctx->ctx = &req->base;
3959 req_ctx->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret == -EINPROGRESS ||
	    (ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		return ret;

unmap:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	qi_cache_free(edesc);
	return ret;
}
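/*
 * First update() that accumulates at least one full block: hash it with
 * the UPDATE_FIRST descriptor and switch the state machine over to the
 * *_ctx variants, which carry the running context from then on.
 */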
static int ahash_update_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int in_len = *buflen + req->nbytes, to_hash;
	int qm_sg_bytes, src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;
	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
	to_hash = in_len - *next_buflen;

	if (to_hash) {
		struct dpaa2_sg_entry *sg_table;
		int src_len = req->nbytes - *next_buflen;
		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(ctx->dev, "unable to DMA map source\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}
		/* allocate space for base edesc and link tables */
		edesc = qi_cache_zalloc(GFP_DMA | flags);
		if (!edesc) {
			dma_unmap_sg(ctx->dev, req->src, src_nents,
				     DMA_TO_DEVICE);
			return -ENOMEM;
		}
4022 edesc->src_nents = src_nents;
		qm_sg_bytes = pad_sg_nents(1 + mapped_nents) *
			      sizeof(*sg_table);
4025 sg_table = &edesc->sgt[0];
		ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
		if (ret)
			goto unmap_ctx;

		sg_to_qm_sg_last(req->src, src_len, sg_table + 1, 0);
4033 edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
4034 qm_sg_bytes, DMA_TO_DEVICE);
		if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
			dev_err(ctx->dev, "unable to map S/G table\n");
			ret = -ENOMEM;
			goto unmap_ctx;
		}
		edesc->qm_sg_bytes = qm_sg_bytes;
4042 state->ctx_dma_len = ctx->ctx_len;
4043 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4044 ctx->ctx_len, DMA_FROM_DEVICE);
		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
			dev_err(ctx->dev, "unable to map ctx\n");
			state->ctx_dma = 0;
			ret = -ENOMEM;
			goto unmap_ctx;
		}
4052 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4053 dpaa2_fl_set_final(in_fle, true);
4054 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4055 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4056 dpaa2_fl_set_len(in_fle, to_hash);
4057 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4058 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4059 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4061 req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4062 req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4063 req_ctx->cbk = ahash_done_ctx_dst;
4064 req_ctx->ctx = &req->base;
4065 req_ctx->edesc = edesc;
		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
		if (ret != -EINPROGRESS &&
		    !(ret == -EBUSY &&
		      req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			goto unmap_ctx;

		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		scatterwalk_map_and_copy(buf + *buflen, req->src, 0,
					 req->nbytes, 0);
		*buflen = *next_buflen;

		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
	qi_cache_free(edesc);
	return ret;
}
static int ahash_finup_no_ctx(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	int buflen = state->buflen;
	int qm_sg_bytes, src_nents, mapped_nents;
	int digestsize = crypto_ahash_digestsize(ahash);
	struct ahash_edesc *edesc;
	struct dpaa2_sg_entry *sg_table;
	int ret = -ENOMEM;
	src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (src_nents < 0) {
		dev_err(ctx->dev, "Invalid number of src SG.\n");
		return src_nents;
	}

	if (src_nents) {
		mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
					  DMA_TO_DEVICE);
		if (!mapped_nents) {
			dev_err(ctx->dev, "unable to DMA map source\n");
			return ret;
		}
	} else {
		mapped_nents = 0;
	}
	/* allocate space for base edesc and link tables */
	edesc = qi_cache_zalloc(GFP_DMA | flags);
	if (!edesc) {
		dma_unmap_sg(ctx->dev, req->src, src_nents, DMA_TO_DEVICE);
		return ret;
	}
4134 edesc->src_nents = src_nents;
4135 qm_sg_bytes = pad_sg_nents(2 + mapped_nents) * sizeof(*sg_table);
4136 sg_table = &edesc->sgt[0];
	ret = buf_map_to_qm_sg(ctx->dev, sg_table, state);
	if (ret)
		goto unmap;

	sg_to_qm_sg_last(req->src, req->nbytes, sg_table + 1, 0);
	edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table, qm_sg_bytes,
					  DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
		dev_err(ctx->dev, "unable to map S/G table\n");
		ret = -ENOMEM;
		goto unmap;
	}
	edesc->qm_sg_bytes = qm_sg_bytes;
	state->ctx_dma_len = digestsize;
	state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx, digestsize,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
		dev_err(ctx->dev, "unable to map ctx\n");
		state->ctx_dma = 0;
		ret = -ENOMEM;
		goto unmap;
	}
4163 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4164 dpaa2_fl_set_final(in_fle, true);
4165 dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
4166 dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
4167 dpaa2_fl_set_len(in_fle, buflen + req->nbytes);
4168 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4169 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4170 dpaa2_fl_set_len(out_fle, digestsize);
4172 req_ctx->flc = &ctx->flc[DIGEST];
4173 req_ctx->flc_dma = ctx->flc_dma[DIGEST];
4174 req_ctx->cbk = ahash_done;
4175 req_ctx->ctx = &req->base;
4176 req_ctx->edesc = edesc;
	ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
		goto unmap;

	return ret;
unmap:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_FROM_DEVICE);
	qi_cache_free(edesc);
	return ret;
}
static int ahash_update_first(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct caam_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_request *req_ctx = &state->caam_req;
	struct dpaa2_fl_entry *in_fle = &req_ctx->fd_flt[1];
	struct dpaa2_fl_entry *out_fle = &req_ctx->fd_flt[0];
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	u8 *buf = state->buf;
	int *buflen = &state->buflen;
	int *next_buflen = &state->next_buflen;
	int to_hash;
	int src_nents, mapped_nents;
	struct ahash_edesc *edesc;
	int ret = 0;
	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
				      1);
	to_hash = req->nbytes - *next_buflen;

	if (to_hash) {
		struct dpaa2_sg_entry *sg_table;
		int src_len = req->nbytes - *next_buflen;
		src_nents = sg_nents_for_len(req->src, src_len);
		if (src_nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return src_nents;
		}

		if (src_nents) {
			mapped_nents = dma_map_sg(ctx->dev, req->src, src_nents,
						  DMA_TO_DEVICE);
			if (!mapped_nents) {
				dev_err(ctx->dev, "unable to map source for DMA\n");
				return -ENOMEM;
			}
		} else {
			mapped_nents = 0;
		}
		/* allocate space for base edesc and link tables */
		edesc = qi_cache_zalloc(GFP_DMA | flags);
		if (!edesc) {
			dma_unmap_sg(ctx->dev, req->src, src_nents,
				     DMA_TO_DEVICE);
			return -ENOMEM;
		}
4240 edesc->src_nents = src_nents;
4241 sg_table = &edesc->sgt[0];
4243 memset(&req_ctx->fd_flt, 0, sizeof(req_ctx->fd_flt));
4244 dpaa2_fl_set_final(in_fle, true);
4245 dpaa2_fl_set_len(in_fle, to_hash);
		if (mapped_nents > 1) {
			int qm_sg_bytes;

			sg_to_qm_sg_last(req->src, src_len, sg_table, 0);
			qm_sg_bytes = pad_sg_nents(mapped_nents) *
				      sizeof(*sg_table);
			edesc->qm_sg_dma = dma_map_single(ctx->dev, sg_table,
							  qm_sg_bytes,
							  DMA_TO_DEVICE);
			if (dma_mapping_error(ctx->dev, edesc->qm_sg_dma)) {
				dev_err(ctx->dev, "unable to map S/G table\n");
				ret = -ENOMEM;
				goto unmap_ctx;
			}
			edesc->qm_sg_bytes = qm_sg_bytes;
			dpaa2_fl_set_format(in_fle, dpaa2_fl_sg);
			dpaa2_fl_set_addr(in_fle, edesc->qm_sg_dma);
		} else {
			dpaa2_fl_set_format(in_fle, dpaa2_fl_single);
			dpaa2_fl_set_addr(in_fle, sg_dma_address(req->src));
		}
4269 state->ctx_dma_len = ctx->ctx_len;
4270 state->ctx_dma = dma_map_single(ctx->dev, state->caam_ctx,
4271 ctx->ctx_len, DMA_FROM_DEVICE);
		if (dma_mapping_error(ctx->dev, state->ctx_dma)) {
			dev_err(ctx->dev, "unable to map ctx\n");
			state->ctx_dma = 0;
			ret = -ENOMEM;
			goto unmap_ctx;
		}
4279 dpaa2_fl_set_format(out_fle, dpaa2_fl_single);
4280 dpaa2_fl_set_addr(out_fle, state->ctx_dma);
4281 dpaa2_fl_set_len(out_fle, ctx->ctx_len);
4283 req_ctx->flc = &ctx->flc[UPDATE_FIRST];
4284 req_ctx->flc_dma = ctx->flc_dma[UPDATE_FIRST];
4285 req_ctx->cbk = ahash_done_ctx_dst;
4286 req_ctx->ctx = &req->base;
4287 req_ctx->edesc = edesc;
		ret = dpaa2_caam_enqueue(ctx->dev, req_ctx);
		if (ret != -EINPROGRESS &&
		    !(ret == -EBUSY && req->base.flags &
		      CRYPTO_TFM_REQ_MAY_BACKLOG))
			goto unmap_ctx;

		state->update = ahash_update_ctx;
		state->finup = ahash_finup_ctx;
		state->final = ahash_final_ctx;
	} else if (*next_buflen) {
		state->update = ahash_update_no_ctx;
		state->finup = ahash_finup_no_ctx;
		state->final = ahash_final_no_ctx;
		scatterwalk_map_and_copy(buf, req->src, 0,
					 *next_buflen, 0);
		*buflen = *next_buflen;
		print_hex_dump_debug("buf@" __stringify(__LINE__)": ",
				     DUMP_PREFIX_ADDRESS, 16, 4, buf,
				     *buflen, 1);
	}

	return ret;
unmap_ctx:
	ahash_unmap_ctx(ctx->dev, edesc, req, DMA_TO_DEVICE);
	qi_cache_free(edesc);
	return ret;
}
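/* finup() before any block was hashed degenerates to a one-shot digest */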
static int ahash_finup_first(struct ahash_request *req)
{
	return ahash_digest(req);
}
static int ahash_init(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	state->update = ahash_update_first;
	state->finup = ahash_finup_first;
	state->final = ahash_final_no_ctx;

	state->ctx_dma = 0;
	state->ctx_dma_len = 0;
	state->buf_dma = 0;
	state->buflen = 0;
	state->next_buflen = 0;

	return 0;
}
static int ahash_update(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->update(req);
}

static int ahash_finup(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->finup(req);
}

static int ahash_final(struct ahash_request *req)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);

	return state->final(req);
}
static int ahash_export(struct ahash_request *req, void *out)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	struct caam_export_state *export = out;
	u8 *buf = state->buf;
	int len = state->buflen;

	memcpy(export->buf, buf, len);
	memcpy(export->caam_ctx, state->caam_ctx, sizeof(export->caam_ctx));
	export->buflen = len;
	export->update = state->update;
	export->final = state->final;
	export->finup = state->finup;

	return 0;
}
static int ahash_import(struct ahash_request *req, const void *in)
{
	struct caam_hash_state *state = ahash_request_ctx_dma(req);
	const struct caam_export_state *export = in;

	memset(state, 0, sizeof(*state));
	memcpy(state->buf, export->buf, export->buflen);
	memcpy(state->caam_ctx, export->caam_ctx, sizeof(state->caam_ctx));
	state->buflen = export->buflen;
	state->update = export->update;
	state->final = export->final;
	state->finup = export->finup;

	return 0;
}
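/*
 * Each template below describes both the unkeyed hash and its hmac()
 * variant, hence the separate name/driver_name and hmac_name/
 * hmac_driver_name pairs.
 */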
struct caam_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	char hmac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	struct ahash_alg template_ahash;
	u32 alg_type;
};
/* ahash descriptors */
static struct caam_hash_template driver_hash[] = {
	{
		.name = "sha1",
		.driver_name = "sha1-caam-qi2",
		.hmac_name = "hmac(sha1)",
		.hmac_driver_name = "hmac-sha1-caam-qi2",
		.blocksize = SHA1_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA1,
	}, {
		.name = "sha224",
		.driver_name = "sha224-caam-qi2",
		.hmac_name = "hmac(sha224)",
		.hmac_driver_name = "hmac-sha224-caam-qi2",
		.blocksize = SHA224_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA224,
	}, {
		.name = "sha256",
		.driver_name = "sha256-caam-qi2",
		.hmac_name = "hmac(sha256)",
		.hmac_driver_name = "hmac-sha256-caam-qi2",
		.blocksize = SHA256_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA256,
	}, {
		.name = "sha384",
		.driver_name = "sha384-caam-qi2",
		.hmac_name = "hmac(sha384)",
		.hmac_driver_name = "hmac-sha384-caam-qi2",
		.blocksize = SHA384_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA384,
	}, {
		.name = "sha512",
		.driver_name = "sha512-caam-qi2",
		.hmac_name = "hmac(sha512)",
		.hmac_driver_name = "hmac-sha512-caam-qi2",
		.blocksize = SHA512_BLOCK_SIZE,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_SHA512,
	}, {
		.name = "md5",
		.driver_name = "md5-caam-qi2",
		.hmac_name = "hmac(md5)",
		.hmac_driver_name = "hmac-md5-caam-qi2",
		.blocksize = MD5_BLOCK_WORDS * 4,
		.template_ahash = {
			.init = ahash_init,
			.update = ahash_update,
			.final = ahash_final,
			.finup = ahash_finup,
			.digest = ahash_digest,
			.export = ahash_export,
			.import = ahash_import,
			.setkey = ahash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = sizeof(struct caam_export_state),
			},
		},
		.alg_type = OP_ALG_ALGSEL_MD5,
	}
};
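/*
 * Illustrative sketch (not compiled as part of this driver): the hmac_name
 * entries above are reachable through the usual crypto API lookup; with this
 * driver loaded and CAAM_CRA_PRIORITY winning the priority comparison,
 * "hmac(sha256)" resolves to hmac-sha256-caam-qi2. The key buffer is an
 * assumed caller-provided value.
 */
#if 0
	struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);

	if (!IS_ERR(tfm))
		crypto_ahash_setkey(tfm, key, keylen);	/* -> ahash_setkey() */
#endif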
struct caam_hash_alg {
	struct list_head entry;
	struct device *dev;
	int alg_type;
	struct ahash_alg ahash_alg;
};
static int caam_hash_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
	struct crypto_alg *base = tfm->__crt_alg;
	struct hash_alg_common *halg =
		 container_of(base, struct hash_alg_common, base);
	struct ahash_alg *alg =
		 container_of(halg, struct ahash_alg, halg);
	struct caam_hash_alg *caam_hash =
		 container_of(alg, struct caam_hash_alg, ahash_alg);
	struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
	/* Sizes for MDHA running digests: MD5, SHA1, 224, 256, 384, 512 */
	static const u8 runninglen[] = { HASH_MSG_LEN + MD5_DIGEST_SIZE,
					 HASH_MSG_LEN + SHA1_DIGEST_SIZE,
					 HASH_MSG_LEN + 32,
					 HASH_MSG_LEN + SHA256_DIGEST_SIZE,
					 HASH_MSG_LEN + 64,
					 HASH_MSG_LEN + SHA512_DIGEST_SIZE };
	dma_addr_t dma_addr;
	int i;

	ctx->dev = caam_hash->dev;

	if (alg->setkey) {
		ctx->adata.key_dma = dma_map_single_attrs(ctx->dev, ctx->key,
							  ARRAY_SIZE(ctx->key),
							  DMA_TO_DEVICE,
							  DMA_ATTR_SKIP_CPU_SYNC);
		if (dma_mapping_error(ctx->dev, ctx->adata.key_dma)) {
			dev_err(ctx->dev, "unable to map key\n");
			return -ENOMEM;
		}
	}

	dma_addr = dma_map_single_attrs(ctx->dev, ctx->flc, sizeof(ctx->flc),
					DMA_BIDIRECTIONAL,
					DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(ctx->dev, dma_addr)) {
		dev_err(ctx->dev, "unable to map shared descriptors\n");
		if (ctx->adata.key_dma)
			dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
					       ARRAY_SIZE(ctx->key),
					       DMA_TO_DEVICE,
					       DMA_ATTR_SKIP_CPU_SYNC);
		return -ENOMEM;
	}

	for (i = 0; i < HASH_NUM_OP; i++)
		ctx->flc_dma[i] = dma_addr + i * sizeof(ctx->flc[i]);

	/* copy descriptor header template value */
	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;

	ctx->ctx_len = runninglen[(ctx->adata.algtype &
				   OP_ALG_ALGSEL_SUBMASK) >>
				  OP_ALG_ALGSEL_SHIFT];

	crypto_ahash_set_reqsize_dma(ahash, sizeof(struct caam_hash_state));

	/*
	 * For keyed hash algorithms shared descriptors
	 * will be created later in setkey() callback
	 */
	return alg->setkey ? 0 : ahash_set_sh_desc(ahash);
}
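/*
 * Worked example for the runninglen[] lookup above, assuming the usual CAAM
 * ALGSEL encoding (MD5..SHA512 occupy consecutive sub-field values 0..5) and
 * HASH_MSG_LEN of 8: for SHA-256 the ALGSEL sub-field is 3, so
 * (algtype & OP_ALG_ALGSEL_SUBMASK) >> OP_ALG_ALGSEL_SHIFT == 3 and
 * ctx->ctx_len = HASH_MSG_LEN + SHA256_DIGEST_SIZE, i.e. the MDHA running
 * digest plus the 8-byte message length trailer.
 */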
static void caam_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct caam_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);

	dma_unmap_single_attrs(ctx->dev, ctx->flc_dma[0], sizeof(ctx->flc),
			       DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
	if (ctx->adata.key_dma)
		dma_unmap_single_attrs(ctx->dev, ctx->adata.key_dma,
				       ARRAY_SIZE(ctx->key), DMA_TO_DEVICE,
				       DMA_ATTR_SKIP_CPU_SYNC);
}
static struct caam_hash_alg *caam_hash_alloc(struct device *dev,
	struct caam_hash_template *template, bool keyed)
{
	struct caam_hash_alg *t_alg;
	struct ahash_alg *halg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->ahash_alg = template->template_ahash;
	halg = &t_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->hmac_driver_name);
	} else {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
	alg->cra_exit = caam_hash_cra_exit;
	alg->cra_ctxsize = sizeof(struct caam_hash_ctx) + crypto_dma_padding();
	alg->cra_priority = CAAM_CRA_PRIORITY;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;

	t_alg->alg_type = template->alg_type;
	t_alg->dev = dev;

	return t_alg;
}
static void dpaa2_caam_fqdan_cb(struct dpaa2_io_notification_ctx *nctx)
{
	struct dpaa2_caam_priv_per_cpu *ppriv;

	ppriv = container_of(nctx, struct dpaa2_caam_priv_per_cpu, nctx);
	napi_schedule_irqoff(&ppriv->napi);
}
static int __cold dpaa2_dpseci_dpio_setup(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct dpaa2_io_notification_ctx *nctx;
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err, i = 0, cpu;

	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		ppriv->priv = priv;
		nctx = &ppriv->nctx;
		nctx->is_cdan = 0;
		nctx->id = ppriv->rsp_fqid;
		nctx->desired_cpu = cpu;
		nctx->cb = dpaa2_caam_fqdan_cb;

		/* Register notification callbacks */
		ppriv->dpio = dpaa2_io_service_select(cpu);
		err = dpaa2_io_service_register(ppriv->dpio, nctx, dev);
		if (unlikely(err)) {
			dev_dbg(dev, "No affine DPIO for cpu %d\n", cpu);
			nctx->cb = NULL;
			/*
			 * If no affine DPIO for this core, there's probably
			 * none available for next cores either. Signal we want
			 * to retry later, in case the DPIO devices weren't
			 * probed yet.
			 */
			err = -EPROBE_DEFER;
			goto err;
		}

		ppriv->store = dpaa2_io_store_create(DPAA2_CAAM_STORE_SIZE,
						     dev);
		if (unlikely(!ppriv->store)) {
			dev_err(dev, "dpaa2_io_store_create() failed\n");
			err = -ENOMEM;
			goto err;
		}

		if (++i == priv->num_pairs)
			break;
	}

	return 0;

err:
	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		if (!ppriv->nctx.cb)
			break;
		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx, dev);
	}

	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		if (!ppriv->store)
			break;
		dpaa2_io_store_destroy(ppriv->store);
	}

	return err;
}
static void __cold dpaa2_dpseci_dpio_free(struct dpaa2_caam_priv *priv)
{
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int i = 0, cpu;

	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		dpaa2_io_service_deregister(ppriv->dpio, &ppriv->nctx,
					    priv->dev);
		dpaa2_io_store_destroy(ppriv->store);

		if (++i == priv->num_pairs)
			return;
	}
}
static int dpaa2_dpseci_bind(struct dpaa2_caam_priv *priv)
{
	struct dpseci_rx_queue_cfg rx_queue_cfg;
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err = 0, i = 0, cpu;

	/* Configure Rx queues */
	for_each_online_cpu(cpu) {
		ppriv = per_cpu_ptr(priv->ppriv, cpu);

		rx_queue_cfg.options = DPSECI_QUEUE_OPT_DEST |
				       DPSECI_QUEUE_OPT_USER_CTX;
		rx_queue_cfg.order_preservation_en = 0;
		rx_queue_cfg.dest_cfg.dest_type = DPSECI_DEST_DPIO;
		rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
		/*
		 * Rx priority (WQ) doesn't really matter, since we use
		 * pull mode, i.e. volatile dequeues from specific FQs
		 */
		rx_queue_cfg.dest_cfg.priority = 0;
		rx_queue_cfg.user_ctx = ppriv->nctx.qman64;

		err = dpseci_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
					  &rx_queue_cfg);
		if (err) {
			dev_err(dev, "dpseci_set_rx_queue() failed with err %d\n",
				err);
			return err;
		}

		if (++i == priv->num_pairs)
			break;
	}

	return err;
}
static void dpaa2_dpseci_congestion_free(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;

	if (!priv->cscn_mem)
		return;

	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
	kfree(priv->cscn_mem);
}
static void dpaa2_dpseci_free(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
	int err;

	if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
		err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
		if (err)
			dev_err(dev, "dpseci_reset() failed\n");
	}

	dpaa2_dpseci_congestion_free(priv);
	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
}
static void dpaa2_caam_process_fd(struct dpaa2_caam_priv *priv,
				  const struct dpaa2_fd *fd)
{
	struct caam_request *req;
	u32 fd_err;

	if (dpaa2_fd_get_format(fd) != dpaa2_fd_list) {
		dev_err(priv->dev, "Only Frame List FD format is supported!\n");
		return;
	}

	fd_err = dpaa2_fd_get_ctrl(fd) & FD_CTRL_ERR_MASK;
	if (unlikely(fd_err))
		dev_err_ratelimited(priv->dev, "FD error: %08x\n", fd_err);

	/*
	 * FD[ADDR] is guaranteed to be valid, irrespective of errors reported
	 * in FD[ERR] or FD[FRC].
	 */
	req = dpaa2_caam_iova_to_virt(priv, dpaa2_fd_get_addr(fd));
	dma_unmap_single(priv->dev, req->fd_flt_dma, sizeof(req->fd_flt),
			 DMA_BIDIRECTIONAL);
	req->cbk(req->ctx, dpaa2_fd_get_frc(fd));
}
static int dpaa2_caam_pull_fq(struct dpaa2_caam_priv_per_cpu *ppriv)
{
	int err;

	/* Retry while portal is busy */
	do {
		err = dpaa2_io_service_pull_fq(ppriv->dpio, ppriv->rsp_fqid,
					       ppriv->store);
	} while (err == -EBUSY);

	if (unlikely(err))
		dev_err(ppriv->priv->dev, "dpaa2_io_service_pull err %d\n",
			err);

	return err;
}
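/*
 * Note on the retry loop above (per the DPIO service semantics this driver
 * relies on): dpaa2_io_service_pull_fq() returns -EBUSY while the software
 * portal still has a previous volatile dequeue command in flight, so spinning
 * until the command is accepted is the intended usage; any other non-zero
 * value is a real error and is propagated to the NAPI poll loop below.
 */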
static int dpaa2_caam_store_consume(struct dpaa2_caam_priv_per_cpu *ppriv)
{
	struct dpaa2_dq *dq;
	int cleaned = 0, is_last;

	do {
		dq = dpaa2_io_store_next(ppriv->store, &is_last);
		if (unlikely(!dq)) {
			if (unlikely(!is_last)) {
				dev_dbg(ppriv->priv->dev,
					"FQ %d returned no valid frames\n",
					ppriv->rsp_fqid);
				/*
				 * MUST retry until we get some sort of
				 * valid response token (be it "empty dequeue"
				 * or a valid frame).
				 */
				continue;
			}
			break;
		}

		/* Process FD */
		dpaa2_caam_process_fd(ppriv->priv, dpaa2_dq_fd(dq));
		cleaned++;
	} while (!is_last);

	return cleaned;
}
static int dpaa2_dpseci_poll(struct napi_struct *napi, int budget)
{
	struct dpaa2_caam_priv_per_cpu *ppriv;
	struct dpaa2_caam_priv *priv;
	int err, cleaned = 0, store_cleaned;

	ppriv = container_of(napi, struct dpaa2_caam_priv_per_cpu, napi);
	priv = ppriv->priv;

	if (unlikely(dpaa2_caam_pull_fq(ppriv)))
		return 0;

	do {
		store_cleaned = dpaa2_caam_store_consume(ppriv);
		cleaned += store_cleaned;

		if (store_cleaned == 0 ||
		    cleaned > budget - DPAA2_CAAM_STORE_SIZE)
			break;

		/* Try to dequeue some more */
		err = dpaa2_caam_pull_fq(ppriv);
		if (unlikely(err))
			break;
	} while (1);

	if (cleaned < budget) {
		napi_complete_done(napi, cleaned);
		err = dpaa2_io_service_rearm(ppriv->dpio, &ppriv->nctx);
		if (unlikely(err))
			dev_err(priv->dev, "Notification rearm failed: %d\n",
				err);
	}

	return cleaned;
}
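/*
 * Worked example for the budget check above, assuming the usual
 * DPAA2_CAAM_STORE_SIZE of 16 and a NAPI budget of 64: another volatile
 * dequeue is issued only while cleaned <= 48, so a full store of 16 more
 * frames can never push the total past the budget; napi_complete_done()
 * and the notification rearm then happen only on the cleaned < budget path.
 */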
static int dpaa2_dpseci_congestion_setup(struct dpaa2_caam_priv *priv,
					 u16 token)
{
	struct dpseci_congestion_notification_cfg cong_notif_cfg = { 0 };
	struct device *dev = priv->dev;
	int err;

	/*
	 * Congestion group feature supported starting with DPSECI API v5.1
	 * and only when object has been created with this capability.
	 */
	if ((DPSECI_VER(priv->major_ver, priv->minor_ver) < DPSECI_VER(5, 1)) ||
	    !(priv->dpseci_attr.options & DPSECI_OPT_HAS_CG))
		return 0;

	priv->cscn_mem = kzalloc(DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN,
				 GFP_KERNEL | GFP_DMA);
	if (!priv->cscn_mem)
		return -ENOMEM;

	priv->cscn_mem_aligned = PTR_ALIGN(priv->cscn_mem, DPAA2_CSCN_ALIGN);
	priv->cscn_dma = dma_map_single(dev, priv->cscn_mem_aligned,
					DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, priv->cscn_dma)) {
		dev_err(dev, "Error mapping CSCN memory area\n");
		err = -ENOMEM;
		goto err_dma_map;
	}

	cong_notif_cfg.units = DPSECI_CONGESTION_UNIT_BYTES;
	cong_notif_cfg.threshold_entry = DPAA2_SEC_CONG_ENTRY_THRESH;
	cong_notif_cfg.threshold_exit = DPAA2_SEC_CONG_EXIT_THRESH;
	cong_notif_cfg.message_ctx = (uintptr_t)priv;
	cong_notif_cfg.message_iova = priv->cscn_dma;
	cong_notif_cfg.notification_mode = DPSECI_CGN_MODE_WRITE_MEM_ON_ENTER |
					   DPSECI_CGN_MODE_WRITE_MEM_ON_EXIT |
					   DPSECI_CGN_MODE_COHERENT_WRITE;

	err = dpseci_set_congestion_notification(priv->mc_io, 0, token,
						 &cong_notif_cfg);
	if (err) {
		dev_err(dev, "dpseci_set_congestion_notification failed\n");
		goto err_set_cong;
	}

	return 0;

err_set_cong:
	dma_unmap_single(dev, priv->cscn_dma, DPAA2_CSCN_SIZE, DMA_FROM_DEVICE);
err_dma_map:
	kfree(priv->cscn_mem);

	return err;
}
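/*
 * Worked example for the CSCN buffer above: kzalloc() is asked for
 * DPAA2_CSCN_SIZE + DPAA2_CSCN_ALIGN bytes so that PTR_ALIGN() can round the
 * returned pointer up to the next DPAA2_CSCN_ALIGN boundary while still
 * leaving DPAA2_CSCN_SIZE usable bytes. With a hypothetical 16-byte
 * alignment requirement, a returned pointer of 0x...1008 would be rounded
 * up to 0x...1010, and only that aligned region is DMA-mapped.
 */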
static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
{
	struct device *dev = &ls_dev->dev;
	struct dpaa2_caam_priv *priv;
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err, cpu;
	u8 i;

	priv = dev_get_drvdata(dev);

	priv->dev = dev;
	priv->dpsec_id = ls_dev->obj_desc.id;

	/* Get a handle for the DPSECI this interface is associated with */
	err = dpseci_open(priv->mc_io, 0, priv->dpsec_id, &ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpseci_open() failed: %d\n", err);
		goto err_open;
	}

	err = dpseci_get_api_version(priv->mc_io, 0, &priv->major_ver,
				     &priv->minor_ver);
	if (err) {
		dev_err(dev, "dpseci_get_api_version() failed\n");
		goto err_get_vers;
	}

	dev_info(dev, "dpseci v%d.%d\n", priv->major_ver, priv->minor_ver);

	if (DPSECI_VER(priv->major_ver, priv->minor_ver) > DPSECI_VER(5, 3)) {
		err = dpseci_reset(priv->mc_io, 0, ls_dev->mc_handle);
		if (err) {
			dev_err(dev, "dpseci_reset() failed\n");
			goto err_get_vers;
		}
	}

	err = dpseci_get_attributes(priv->mc_io, 0, ls_dev->mc_handle,
				    &priv->dpseci_attr);
	if (err) {
		dev_err(dev, "dpseci_get_attributes() failed\n");
		goto err_get_vers;
	}

	err = dpseci_get_sec_attr(priv->mc_io, 0, ls_dev->mc_handle,
				  &priv->sec_attr);
	if (err) {
		dev_err(dev, "dpseci_get_sec_attr() failed\n");
		goto err_get_vers;
	}

	err = dpaa2_dpseci_congestion_setup(priv, ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "setup_congestion() failed\n");
		goto err_get_vers;
	}

	priv->num_pairs = min(priv->dpseci_attr.num_rx_queues,
			      priv->dpseci_attr.num_tx_queues);
	if (priv->num_pairs > num_online_cpus()) {
		dev_warn(dev, "%d queues won't be used\n",
			 priv->num_pairs - num_online_cpus());
		priv->num_pairs = num_online_cpus();
	}

	for (i = 0; i < priv->dpseci_attr.num_rx_queues; i++) {
		err = dpseci_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
					  &priv->rx_queue_attr[i]);
		if (err) {
			dev_err(dev, "dpseci_get_rx_queue() failed\n");
			goto err_get_rx_queue;
		}
	}

	for (i = 0; i < priv->dpseci_attr.num_tx_queues; i++) {
		err = dpseci_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, i,
					  &priv->tx_queue_attr[i]);
		if (err) {
			dev_err(dev, "dpseci_get_tx_queue() failed\n");
			goto err_get_rx_queue;
		}
	}

	i = 0;
	for_each_online_cpu(cpu) {
		u8 j;

		j = i % priv->num_pairs;

		ppriv = per_cpu_ptr(priv->ppriv, cpu);
		ppriv->req_fqid = priv->tx_queue_attr[j].fqid;

		/*
		 * Allow all cores to enqueue, while only some of them
		 * will take part in dequeuing.
		 */
		if (++i > priv->num_pairs)
			continue;

		ppriv->rsp_fqid = priv->rx_queue_attr[j].fqid;
		ppriv->prio = j;

		dev_dbg(dev, "pair %d: rx queue %d, tx queue %d\n", j,
			priv->rx_queue_attr[j].fqid,
			priv->tx_queue_attr[j].fqid);

		ppriv->net_dev.dev = *dev;
		INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
		netif_napi_add_tx_weight(&ppriv->net_dev, &ppriv->napi,
					 dpaa2_dpseci_poll,
					 DPAA2_CAAM_NAPI_WEIGHT);
	}

	return 0;

err_get_rx_queue:
	dpaa2_dpseci_congestion_free(priv);
err_get_vers:
	dpseci_close(priv->mc_io, 0, ls_dev->mc_handle);
err_open:
	return err;
}
static int dpaa2_dpseci_enable(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int i;

	for (i = 0; i < priv->num_pairs; i++) {
		ppriv = per_cpu_ptr(priv->ppriv, i);
		napi_enable(&ppriv->napi);
	}

	return dpseci_enable(priv->mc_io, 0, ls_dev->mc_handle);
}
static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
{
	struct device *dev = priv->dev;
	struct dpaa2_caam_priv_per_cpu *ppriv;
	struct fsl_mc_device *ls_dev = to_fsl_mc_device(dev);
	int i, err = 0, enabled;

	err = dpseci_disable(priv->mc_io, 0, ls_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpseci_disable() failed\n");
		return err;
	}

	err = dpseci_is_enabled(priv->mc_io, 0, ls_dev->mc_handle, &enabled);
	if (err) {
		dev_err(dev, "dpseci_is_enabled() failed\n");
		return err;
	}

	dev_dbg(dev, "disable: %s\n", enabled ? "false" : "true");

	for (i = 0; i < priv->num_pairs; i++) {
		ppriv = per_cpu_ptr(priv->ppriv, i);
		napi_disable(&ppriv->napi);
		netif_napi_del(&ppriv->napi);
	}

	return 0;
}
static struct list_head hash_list;
static int dpaa2_caam_probe(struct fsl_mc_device *dpseci_dev)
{
	struct device *dev;
	struct dpaa2_caam_priv *priv;
	int i, err = 0;
	bool registered = false;

	/*
	 * There is no way to get CAAM endianness - there is no direct register
	 * space access and MC f/w does not provide this attribute.
	 * All DPAA2-based SoCs have little endian CAAM, thus hard-code this
	 * property.
	 */
	caam_little_end = true;

	caam_imx = false;

	dev = &dpseci_dev->dev;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);

	priv->domain = iommu_get_domain_for_dev(dev);

	qi_cache = kmem_cache_create("dpaa2_caamqicache", CAAM_QI_MEMCACHE_SIZE,
				     0, SLAB_CACHE_DMA, NULL);
	if (!qi_cache) {
		dev_err(dev, "Can't allocate SEC cache\n");
		return -ENOMEM;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(49));
	if (err) {
		dev_err(dev, "dma_set_mask_and_coherent() failed\n");
		goto err_dma_mask;
	}

	/* Obtain a MC portal */
	err = fsl_mc_portal_allocate(dpseci_dev, 0, &priv->mc_io);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "MC portal allocation failed\n");

		goto err_dma_mask;
	}

	priv->ppriv = alloc_percpu(*priv->ppriv);
	if (!priv->ppriv) {
		dev_err(dev, "alloc_percpu() failed\n");
		err = -ENOMEM;
		goto err_alloc_ppriv;
	}

	/* DPSECI initialization */
	err = dpaa2_dpseci_setup(dpseci_dev);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_setup() failed\n");
		goto err_dpseci_setup;
	}

	/* DPIO */
	err = dpaa2_dpseci_dpio_setup(priv);
	if (err) {
		dev_err_probe(dev, err, "dpaa2_dpseci_dpio_setup() failed\n");
		goto err_dpio_setup;
	}

	/* DPSECI binding to DPIO */
	err = dpaa2_dpseci_bind(priv);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_bind() failed\n");
		goto err_bind;
	}

	/* DPSECI enable */
	err = dpaa2_dpseci_enable(priv);
	if (err) {
		dev_err(dev, "dpaa2_dpseci_enable() failed\n");
		goto err_bind;
	}

	dpaa2_dpseci_debugfs_init(priv);

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;
		u32 alg_sel = t_alg->caam.class1_alg_type & OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!priv->sec_attr.des_acc_num &&
		    (alg_sel == OP_ALG_ALGSEL_3DES ||
		     alg_sel == OP_ALG_ALGSEL_DES))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!priv->sec_attr.aes_acc_num &&
		    alg_sel == OP_ALG_ALGSEL_AES)
			continue;

		/* Skip CHACHA20 algorithms if not supported by device */
		if (alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
		    !priv->sec_attr.ccha_acc_num)
			continue;

		t_alg->caam.dev = dev;
		caam_skcipher_alg_init(t_alg);

		err = crypto_register_skcipher(&t_alg->skcipher);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->skcipher.base.cra_driver_name, err);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;
		u32 c1_alg_sel = t_alg->caam.class1_alg_type &
				 OP_ALG_ALGSEL_MASK;
		u32 c2_alg_sel = t_alg->caam.class2_alg_type &
				 OP_ALG_ALGSEL_MASK;

		/* Skip DES algorithms if not supported by device */
		if (!priv->sec_attr.des_acc_num &&
		    (c1_alg_sel == OP_ALG_ALGSEL_3DES ||
		     c1_alg_sel == OP_ALG_ALGSEL_DES))
			continue;

		/* Skip AES algorithms if not supported by device */
		if (!priv->sec_attr.aes_acc_num &&
		    c1_alg_sel == OP_ALG_ALGSEL_AES)
			continue;

		/* Skip CHACHA20 algorithms if not supported by device */
		if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 &&
		    !priv->sec_attr.ccha_acc_num)
			continue;

		/* Skip POLY1305 algorithms if not supported by device */
		if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 &&
		    !priv->sec_attr.ptha_acc_num)
			continue;

		/*
		 * Skip algorithms requiring message digests
		 * if MD not supported by device.
		 */
		if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
		    !priv->sec_attr.md_acc_num)
			continue;

		t_alg->caam.dev = dev;
		caam_aead_alg_init(t_alg);

		err = crypto_register_aead(&t_alg->aead);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->aead.base.cra_driver_name, err);
			continue;
		}

		t_alg->registered = true;
		registered = true;
	}
	if (registered)
		dev_info(dev, "algorithms registered in /proc/crypto\n");

	/* register hash algorithms the device supports */
	INIT_LIST_HEAD(&hash_list);

	/*
	 * Skip registration of any hashing algorithms if MD block
	 * is not present.
	 */
	if (!priv->sec_attr.md_acc_num)
		return 0;

	for (i = 0; i < ARRAY_SIZE(driver_hash); i++) {
		struct caam_hash_alg *t_alg;
		struct caam_hash_template *alg = driver_hash + i;

		/* register hmac version */
		t_alg = caam_hash_alloc(dev, alg, true);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(dev, "%s hash alg allocation failed: %d\n",
				 alg->hmac_driver_name, err);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name,
				 err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);

		/* register unkeyed version */
		t_alg = caam_hash_alloc(dev, alg, false);
		if (IS_ERR(t_alg)) {
			err = PTR_ERR(t_alg);
			dev_warn(dev, "%s alg allocation failed: %d\n",
				 alg->driver_name, err);
			continue;
		}

		err = crypto_register_ahash(&t_alg->ahash_alg);
		if (err) {
			dev_warn(dev, "%s alg registration failed: %d\n",
				 t_alg->ahash_alg.halg.base.cra_driver_name,
				 err);
			kfree(t_alg);
		} else
			list_add_tail(&t_alg->entry, &hash_list);
	}

	if (!list_empty(&hash_list))
		dev_info(dev, "hash algorithms registered in /proc/crypto\n");

	return err;

err_bind:
	dpaa2_dpseci_dpio_free(priv);
err_dpio_setup:
	dpaa2_dpseci_free(priv);
err_dpseci_setup:
	free_percpu(priv->ppriv);
err_alloc_ppriv:
	fsl_mc_portal_free(priv->mc_io);
err_dma_mask:
	kmem_cache_destroy(qi_cache);

	return err;
}
static int __cold dpaa2_caam_remove(struct fsl_mc_device *ls_dev)
{
	struct device *dev;
	struct dpaa2_caam_priv *priv;
	int i;

	dev = &ls_dev->dev;
	priv = dev_get_drvdata(dev);

	dpaa2_dpseci_debugfs_exit(priv);

	for (i = 0; i < ARRAY_SIZE(driver_aeads); i++) {
		struct caam_aead_alg *t_alg = driver_aeads + i;

		if (t_alg->registered)
			crypto_unregister_aead(&t_alg->aead);
	}

	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		struct caam_skcipher_alg *t_alg = driver_algs + i;

		if (t_alg->registered)
			crypto_unregister_skcipher(&t_alg->skcipher);
	}

	if (hash_list.next) {
		struct caam_hash_alg *t_hash_alg, *p;

		list_for_each_entry_safe(t_hash_alg, p, &hash_list, entry) {
			crypto_unregister_ahash(&t_hash_alg->ahash_alg);
			list_del(&t_hash_alg->entry);
			kfree(t_hash_alg);
		}
	}

	dpaa2_dpseci_disable(priv);
	dpaa2_dpseci_dpio_free(priv);
	dpaa2_dpseci_free(priv);
	free_percpu(priv->ppriv);
	fsl_mc_portal_free(priv->mc_io);
	kmem_cache_destroy(qi_cache);

	return 0;
}
int dpaa2_caam_enqueue(struct device *dev, struct caam_request *req)
{
	struct dpaa2_fd fd;
	struct dpaa2_caam_priv *priv = dev_get_drvdata(dev);
	struct dpaa2_caam_priv_per_cpu *ppriv;
	int err = 0, i;

	if (IS_ERR(req))
		return PTR_ERR(req);

	if (priv->cscn_mem) {
		dma_sync_single_for_cpu(priv->dev, priv->cscn_dma,
					DPAA2_CSCN_SIZE,
					DMA_FROM_DEVICE);
		if (unlikely(dpaa2_cscn_state_congested(priv->cscn_mem_aligned))) {
			dev_dbg_ratelimited(dev, "Dropping request\n");
			return -EBUSY;
		}
	}

	dpaa2_fl_set_flc(&req->fd_flt[1], req->flc_dma);

	req->fd_flt_dma = dma_map_single(dev, req->fd_flt, sizeof(req->fd_flt),
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, req->fd_flt_dma)) {
		dev_err(dev, "DMA mapping error for QI enqueue request\n");
		goto err_out;
	}

	memset(&fd, 0, sizeof(fd));
	dpaa2_fd_set_format(&fd, dpaa2_fd_list);
	dpaa2_fd_set_addr(&fd, req->fd_flt_dma);
	dpaa2_fd_set_len(&fd, dpaa2_fl_get_len(&req->fd_flt[1]));
	dpaa2_fd_set_flc(&fd, req->flc_dma);

	ppriv = raw_cpu_ptr(priv->ppriv);
	for (i = 0; i < (priv->dpseci_attr.num_tx_queues << 1); i++) {
		err = dpaa2_io_service_enqueue_fq(ppriv->dpio, ppriv->req_fqid,
						  &fd);
		if (err != -EBUSY)
			break;

		cpu_relax();
	}

	if (unlikely(err)) {
		dev_err_ratelimited(dev, "Error enqueuing frame: %d\n", err);
		goto err_out;
	}

	return -EINPROGRESS;

err_out:
	dma_unmap_single(dev, req->fd_flt_dma, sizeof(req->fd_flt),
			 DMA_BIDIRECTIONAL);
	return -EIO;
}
EXPORT_SYMBOL(dpaa2_caam_enqueue);
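/*
 * Illustrative caller sketch (not compiled as part of this driver), mirroring
 * how the algorithm entry points in this file use dpaa2_caam_enqueue();
 * "edesc" and "done_cbk" stand in for a caller's extended descriptor and
 * completion callback, and "areq" for the originating crypto request.
 */
#if 0
	req->flc = &ctx->flc[ENCRYPT];
	req->flc_dma = ctx->flc_dma[ENCRYPT];
	req->cbk = done_cbk;		/* invoked from dpaa2_caam_process_fd() */
	req->ctx = &areq->base;
	req->edesc = edesc;

	ret = dpaa2_caam_enqueue(ctx->dev, req);
	if (ret != -EINPROGRESS &&
	    !(ret == -EBUSY && areq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
		/* unmap the request and free edesc here */
	}
#endif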
static const struct fsl_mc_device_id dpaa2_caam_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpseci",
	},
	{ .vendor = 0x0 }
};
MODULE_DEVICE_TABLE(fslmc, dpaa2_caam_match_id_table);
static struct fsl_mc_driver dpaa2_caam_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
	},
	.probe = dpaa2_caam_probe,
	.remove = dpaa2_caam_remove,
	.match_id_table = dpaa2_caam_match_id_table
};
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 CAAM Driver");

module_fsl_mc_driver(dpaa2_caam_driver);