/*
 * Cipher algorithms supported by the CESA: DES, 3DES and AES.
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <crypto/aes.h>
#include <crypto/des.h>

#include "cesa.h"

struct mv_cesa_des_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES_KEY_SIZE];
};

struct mv_cesa_des3_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES3_EDE_KEY_SIZE];
};

struct mv_cesa_aes_ctx {
	struct mv_cesa_ctx base;
	struct crypto_aes_ctx aes;
};

struct mv_cesa_ablkcipher_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
	struct mv_cesa_sg_dma_iter dst;
};

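/*
 * Helpers used by the DMA path to initialise the iterator above and to
 * advance it to the next crypto operation, rewinding the per-operation
 * offsets of the source and destination scatterlist walkers.
 */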
static inline void
mv_cesa_ablkcipher_req_iter_init(struct mv_cesa_ablkcipher_dma_iter *iter,
				 struct ablkcipher_request *req)
{
	mv_cesa_req_dma_iter_init(&iter->base, req->nbytes);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
}

static inline bool
mv_cesa_ablkcipher_req_iter_next_op(struct mv_cesa_ablkcipher_dma_iter *iter)
{
	iter->src.op_offset = 0;
	iter->dst.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

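/*
 * Undo the scatterlist mappings set up by the DMA request init path: when
 * src and dst are the same list it was mapped once, bidirectionally,
 * otherwise each list is unmapped with its own direction.
 */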
static inline void
mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);

	if (req->dst != req->src) {
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_TO_DEVICE);
	} else {
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_BIDIRECTIONAL);
	}
	mv_cesa_dma_cleanup(&creq->req.dma);
}

static inline void mv_cesa_ablkcipher_cleanup(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_ablkcipher_dma_cleanup(req);
}

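/*
 * Standard (non-DMA) path: copy the next chunk of payload into the engine
 * SRAM, program the operation descriptor and kick the accelerator.
 */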
static void mv_cesa_ablkcipher_std_step(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;
	size_t len = min_t(size_t, req->nbytes - sreq->offset,
			   CESA_SA_SRAM_PAYLOAD_SIZE);

	len = sg_pcopy_to_buffer(req->src, creq->src_nents,
				 engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				 len, sreq->offset);

	sreq->size = len;
	mv_cesa_set_crypt_op_len(&sreq->op, len);

	/* FIXME: only update enc_len field */
	if (!sreq->skip_ctx) {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
		sreq->skip_ctx = true;
	} else {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op.desc));
	}

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

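/*
 * Copy the chunk processed by the engine back from SRAM into the
 * destination scatterlist and report -EINPROGRESS until the whole request
 * has been handled.
 */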
static int mv_cesa_ablkcipher_std_process(struct ablkcipher_request *req,
					  u32 status)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;
	size_t len;

	len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
				   engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				   sreq->size, sreq->offset);

	sreq->offset += len;
	if (sreq->offset < req->nbytes)
		return -EINPROGRESS;

	return 0;
}

static int mv_cesa_ablkcipher_process(struct crypto_async_request *req,
				      u32 status)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;
	int ret;

	if (creq->req.base.type == CESA_DMA_REQ)
		ret = mv_cesa_dma_process(&creq->req.dma, status);
	else
		ret = mv_cesa_ablkcipher_std_process(ablkreq, status);

	if (ret)
		return ret;

	/* Copy the updated IV back to the caller once the request is done. */
	memcpy_fromio(ablkreq->info,
		      engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
		      crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(ablkreq)));

	return 0;
}

static void mv_cesa_ablkcipher_step(struct crypto_async_request *req)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_dma_step(&creq->req.dma);
	else
		mv_cesa_ablkcipher_std_step(ablkreq);
}

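/*
 * DMA requests only need their TDMA descriptor chain bound to the engine
 * they have been assigned to before being started.
 */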
static inline void
mv_cesa_ablkcipher_dma_prepare(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_tdma_req *dreq = &creq->req.dma;

	mv_cesa_dma_prepare(dreq, dreq->base.engine);
}

static inline void
mv_cesa_ablkcipher_std_prepare(struct ablkcipher_request *req)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = sreq->base.engine;

	sreq->size = 0;
	sreq->offset = 0;
	mv_cesa_adjust_op(engine, &sreq->op);
	memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
}

static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req,
					      struct mv_cesa_engine *engine)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);

	creq->req.base.engine = engine;

	if (creq->req.base.type == CESA_DMA_REQ)
		mv_cesa_ablkcipher_dma_prepare(ablkreq);
	else
		mv_cesa_ablkcipher_std_prepare(ablkreq);
}

static inline void
mv_cesa_ablkcipher_req_cleanup(struct crypto_async_request *req)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);

	mv_cesa_ablkcipher_cleanup(ablkreq);
}

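/* Callbacks used by the CESA core to drive an ablkcipher request. */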
static const struct mv_cesa_req_ops mv_cesa_ablkcipher_req_ops = {
	.step = mv_cesa_ablkcipher_step,
	.process = mv_cesa_ablkcipher_process,
	.prepare = mv_cesa_ablkcipher_prepare,
	.cleanup = mv_cesa_ablkcipher_req_cleanup,
};

static int mv_cesa_ablkcipher_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ablkcipher_req_ops;
	tfm->crt_ablkcipher.reqsize = sizeof(struct mv_cesa_ablkcipher_req);

	return 0;
}

static int mv_cesa_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	int remaining, offset, ret, i;

	ret = crypto_aes_expand_key(&ctx->aes, key, len);
	if (ret) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	/*
	 * Mirror the tail of the expanded encryption schedule into key_dec;
	 * only needed for AES-192/256, where 'remaining' is non-zero.
	 */
	remaining = (ctx->aes.key_length - 16) / 4;
	offset = ctx->aes.key_length + 24 - remaining;
	for (i = 0; i < remaining; i++)
		ctx->aes.key_dec[4 + i] =
			cpu_to_le32(ctx->aes.key_enc[offset + i]);

	return 0;
}

static int mv_cesa_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 tmp[DES_EXPKEY_WORDS];
	int ret;

	if (len != DES_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	/* des_ekey() returns 0 for weak keys; reject them if requested. */
	ret = des_ekey(tmp, key);
	if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}

	memcpy(ctx->key, key, DES_KEY_SIZE);

	return 0;
}

static int mv_cesa_des3_ede_setkey(struct crypto_ablkcipher *cipher,
				   const u8 *key, unsigned int len)
{
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
	struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(tfm);

	if (len != DES3_EDE_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);

	return 0;
}

static int mv_cesa_ablkcipher_dma_req_init(struct ablkcipher_request *req,
					   const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_tdma_req *dreq = &creq->req.dma;
	struct mv_cesa_ablkcipher_dma_iter iter;
	struct mv_cesa_tdma_chain chain;
	bool skip_ctx = false;
	int ret;

	dreq->base.type = CESA_DMA_REQ;
	dreq->chain.first = NULL;
	dreq->chain.last = NULL;

	if (req->src != req->dst) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret)
			return -ENOMEM;

		ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents,
				 DMA_FROM_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err_unmap_src;
		}
	} else {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_BIDIRECTIONAL);
		if (!ret)
			return -ENOMEM;
	}

	mv_cesa_tdma_desc_iter_init(&chain);
	mv_cesa_ablkcipher_req_iter_init(&iter, req);

	do {
		struct mv_cesa_op_ctx *op;

		op = mv_cesa_dma_add_op(&chain, op_templ, skip_ctx, flags);
		if (IS_ERR(op)) {
			ret = PTR_ERR(op);
			goto err_free_tdma;
		}
		skip_ctx = true;

		mv_cesa_set_crypt_op_len(op, iter.base.op_len);

		/* Add input transfers */
		ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base,
						   &iter.src, flags);
		if (ret)
			goto err_free_tdma;

		/* Add dummy desc to launch the crypto operation */
		ret = mv_cesa_dma_add_dummy_launch(&chain, flags);
		if (ret)
			goto err_free_tdma;

		/* Add output transfers */
		ret = mv_cesa_dma_add_op_transfers(&chain, &iter.base,
						   &iter.dst, flags);
		if (ret)
			goto err_free_tdma;
	} while (mv_cesa_ablkcipher_req_iter_next_op(&iter));

	dreq->chain = chain;

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(dreq);
	if (req->dst != req->src)
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);

err_unmap_src:
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
		     req->dst != req->src ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

	return ret;
}

static inline int
mv_cesa_ablkcipher_std_req_init(struct ablkcipher_request *req,
				const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct mv_cesa_ablkcipher_std_req *sreq = &creq->req.std;

	sreq->base.type = CESA_STD_REQ;
	sreq->op = *op_templ;
	sreq->skip_ctx = false;

	return 0;
}

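/*
 * Validate the request and pick a transfer mode: TDMA when the engine has a
 * TDMA unit, otherwise the CPU-driven copy-through-SRAM (standard) path.
 */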
static int mv_cesa_ablkcipher_req_init(struct ablkcipher_request *req,
				       struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	unsigned int blksize = crypto_ablkcipher_blocksize(tfm);
	int ret;

	if (!IS_ALIGNED(req->nbytes, blksize))
		return -EINVAL;

	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG");
		return creq->src_nents;
	}

	creq->dst_nents = sg_nents_for_len(req->dst, req->nbytes);
	if (creq->dst_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of dst SG");
		return creq->dst_nents;
	}

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
			      CESA_SA_DESC_CFG_OP_MSK);

	/* TODO: add a threshold for DMA usage */
	if (cesa_dev->caps->has_tdma)
		ret = mv_cesa_ablkcipher_dma_req_init(req, tmpl);
	else
		ret = mv_cesa_ablkcipher_std_req_init(req, tmpl);

	return ret;
}

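/*
 * Common helper for all DES requests: program the DES key and algorithm,
 * then queue the request on the engine.
 */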
static int mv_cesa_des_op(struct ablkcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	int ret;

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE);

	ret = mv_cesa_ablkcipher_req_init(req, tmpl);
	if (ret)
		return ret;

	ret = mv_cesa_queue_req(&req->base);
	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ablkcipher_cleanup(req);

	return ret;
}

static int mv_cesa_ecb_des_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des_op(req, &tmpl);
}

static int mv_cesa_ecb_des_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des_op(req, &tmpl);
}

struct crypto_alg mv_cesa_ecb_des_alg = {
	.cra_name = "ecb(des)",
	.cra_driver_name = "mv-ecb-des",
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.setkey = mv_cesa_des_setkey,
			.encrypt = mv_cesa_ecb_des_encrypt,
			.decrypt = mv_cesa_ecb_des_decrypt,
		},
	},
};

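/*
 * mv_cesa_ecb_des_alg and the other crypto_alg instances in this file are
 * deliberately non-static: the CESA core is expected to register them with
 * the crypto API (typically when a compatible engine is probed).
 */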
static int mv_cesa_cbc_des_op(struct ablkcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);

	memcpy(tmpl->ctx.blkcipher.iv, req->info, DES_BLOCK_SIZE);

	return mv_cesa_des_op(req, tmpl);
}

static int mv_cesa_cbc_des_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

static int mv_cesa_cbc_des_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

struct crypto_alg mv_cesa_cbc_des_alg = {
	.cra_name = "cbc(des)",
	.cra_driver_name = "mv-cbc-des",
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES_KEY_SIZE,
			.max_keysize = DES_KEY_SIZE,
			.ivsize = DES_BLOCK_SIZE,
			.setkey = mv_cesa_des_setkey,
			.encrypt = mv_cesa_cbc_des_encrypt,
			.decrypt = mv_cesa_cbc_des_decrypt,
		},
	},
};

static int mv_cesa_des3_op(struct ablkcipher_request *req,
			   struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	int ret;

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE);

	ret = mv_cesa_ablkcipher_req_init(req, tmpl);
	if (ret)
		return ret;

	ret = mv_cesa_queue_req(&req->base);
	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ablkcipher_cleanup(req);

	return ret;
}

static int mv_cesa_ecb_des3_ede_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des3_op(req, &tmpl);
}

static int mv_cesa_ecb_des3_ede_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des3_op(req, &tmpl);
}

struct crypto_alg mv_cesa_ecb_des3_ede_alg = {
	.cra_name = "ecb(des3_ede)",
	.cra_driver_name = "mv-ecb-des3-ede",
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.setkey = mv_cesa_des3_ede_setkey,
			.encrypt = mv_cesa_ecb_des3_ede_encrypt,
			.decrypt = mv_cesa_ecb_des3_ede_decrypt,
		},
	},
};

static int mv_cesa_cbc_des3_op(struct ablkcipher_request *req,
			       struct mv_cesa_op_ctx *tmpl)
{
	memcpy(tmpl->ctx.blkcipher.iv, req->info, DES3_EDE_BLOCK_SIZE);

	return mv_cesa_des3_op(req, tmpl);
}

static int mv_cesa_cbc_des3_ede_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

static int mv_cesa_cbc_des3_ede_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

struct crypto_alg mv_cesa_cbc_des3_ede_alg = {
	.cra_name = "cbc(des3_ede)",
	.cra_driver_name = "mv-cbc-des3-ede",
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = DES3_EDE_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.setkey = mv_cesa_des3_ede_setkey,
			.encrypt = mv_cesa_cbc_des3_ede_encrypt,
			.decrypt = mv_cesa_cbc_des3_ede_decrypt,
		},
	},
};

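/*
 * Common helper for all AES requests: load the appropriate (encryption or
 * decryption) key schedule, encode the AES key length in the descriptor
 * config, then queue the request on the engine.
 */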
static int mv_cesa_aes_op(struct ablkcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	int ret, i;
	u32 *key;
	u32 cfg;

	cfg = CESA_SA_DESC_CFG_CRYPTM_AES;

	if (mv_cesa_get_op_cfg(tmpl) & CESA_SA_DESC_CFG_DIR_DEC)
		key = ctx->aes.key_dec;
	else
		key = ctx->aes.key_enc;

	for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++)
		tmpl->ctx.blkcipher.key[i] = cpu_to_le32(key[i]);

	if (ctx->aes.key_length == 24)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_192;
	else if (ctx->aes.key_length == 32)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_256;

	mv_cesa_update_op_cfg(tmpl, cfg,
			      CESA_SA_DESC_CFG_CRYPTM_MSK |
			      CESA_SA_DESC_CFG_AES_LEN_MSK);

	ret = mv_cesa_ablkcipher_req_init(req, tmpl);
	if (ret)
		return ret;

	ret = mv_cesa_queue_req(&req->base);
	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ablkcipher_cleanup(req);

	return ret;
}

static int mv_cesa_ecb_aes_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_aes_op(req, &tmpl);
}

static int mv_cesa_ecb_aes_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_aes_op(req, &tmpl);
}

struct crypto_alg mv_cesa_ecb_aes_alg = {
	.cra_name = "ecb(aes)",
	.cra_driver_name = "mv-ecb-aes",
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.setkey = mv_cesa_aes_setkey,
			.encrypt = mv_cesa_ecb_aes_encrypt,
			.decrypt = mv_cesa_ecb_aes_decrypt,
		},
	},
};

static int mv_cesa_cbc_aes_op(struct ablkcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);

	memcpy(tmpl->ctx.blkcipher.iv, req->info, AES_BLOCK_SIZE);

	return mv_cesa_aes_op(req, tmpl);
}

static int mv_cesa_cbc_aes_encrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

static int mv_cesa_cbc_aes_decrypt(struct ablkcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

struct crypto_alg mv_cesa_cbc_aes_alg = {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "mv-cbc-aes",
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
		     CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = mv_cesa_ablkcipher_cra_init,
	.cra_u = {
		.ablkcipher = {
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
			.setkey = mv_cesa_aes_setkey,
			.encrypt = mv_cesa_cbc_aes_encrypt,
			.decrypt = mv_cesa_cbc_aes_decrypt,
		},
	},
};