/*
 * Cipher algorithms supported by the CESA: DES, 3DES and AES.
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include <crypto/aes.h>
#include <crypto/des.h>

#include "cesa.h"

struct mv_cesa_des_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES_KEY_SIZE];
};

struct mv_cesa_des3_ctx {
	struct mv_cesa_ctx base;
	u8 key[DES3_EDE_KEY_SIZE];
};

struct mv_cesa_aes_ctx {
	struct mv_cesa_ctx base;
	struct crypto_aes_ctx aes;
};

struct mv_cesa_skcipher_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
	struct mv_cesa_sg_dma_iter dst;
};

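/*
 * The iterators below walk the src/dst scatterlists in
 * CESA_SA_SRAM_PAYLOAD_SIZE chunks, one crypto operation per chunk.
 */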
static inline void
mv_cesa_skcipher_req_iter_init(struct mv_cesa_skcipher_dma_iter *iter,
			       struct skcipher_request *req)
{
	mv_cesa_req_dma_iter_init(&iter->base, req->cryptlen);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE);
}

static inline bool
mv_cesa_skcipher_req_iter_next_op(struct mv_cesa_skcipher_dma_iter *iter)
{
	iter->src.op_offset = 0;
	iter->dst.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}

static inline void
mv_cesa_skcipher_dma_cleanup(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);

	if (req->dst != req->src) {
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_TO_DEVICE);
	} else {
		dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
			     DMA_BIDIRECTIONAL);
	}
	mv_cesa_dma_cleanup(&creq->base);
}

static inline void mv_cesa_skcipher_cleanup(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_skcipher_dma_cleanup(req);
}

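/*
 * Standard (PIO) mode: copy the next chunk of input data into the
 * engine SRAM, update the operation descriptor and start the engine.
 */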
static void mv_cesa_skcipher_std_step(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
	struct mv_cesa_engine *engine = creq->base.engine;
	size_t len = min_t(size_t, req->cryptlen - sreq->offset,
			   CESA_SA_SRAM_PAYLOAD_SIZE);

	mv_cesa_adjust_op(engine, &sreq->op);
	memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));

	len = sg_pcopy_to_buffer(req->src, creq->src_nents,
				 engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				 len, sreq->offset);

	sreq->size = len;
	mv_cesa_set_crypt_op_len(&sreq->op, len);

	/* FIXME: only update enc_len field */
	if (!sreq->skip_ctx) {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op));
		sreq->skip_ctx = true;
	} else {
		memcpy_toio(engine->sram, &sreq->op, sizeof(sreq->op.desc));
	}

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	BUG_ON(readl(engine->regs + CESA_SA_CMD) &
	       CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

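/* Copy the chunk processed by the engine back from SRAM into req->dst. */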
static int mv_cesa_skcipher_std_process(struct skcipher_request *req,
					u32 status)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
	struct mv_cesa_engine *engine = creq->base.engine;
	size_t len;

	len = sg_pcopy_from_buffer(req->dst, creq->dst_nents,
				   engine->sram + CESA_SA_DATA_SRAM_OFFSET,
				   sreq->size, sreq->offset);

	sreq->offset += len;
	if (sreq->offset < req->cryptlen)
		return -EINPROGRESS;

	return 0;
}

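/*
 * ->process() runs from the interrupt path: it returns -EINPROGRESS as
 * long as more chunks remain to be processed.
 */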
static int mv_cesa_skcipher_process(struct crypto_async_request *req,
				    u32 status)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
	struct mv_cesa_req *basereq = &creq->base;

	if (mv_cesa_req_get_type(basereq) == CESA_STD_REQ)
		return mv_cesa_skcipher_std_process(skreq, status);

	return mv_cesa_dma_process(basereq, status);
}

static void mv_cesa_skcipher_step(struct crypto_async_request *req)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_dma_step(&creq->base);
	else
		mv_cesa_skcipher_std_step(skreq);
}

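/* Prepare the request for the engine it has just been assigned to. */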
static inline void
mv_cesa_skcipher_dma_prepare(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}

static inline void
mv_cesa_skcipher_std_prepare(struct skcipher_request *req)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;

	sreq->size = 0;
	sreq->offset = 0;
}

static inline void mv_cesa_skcipher_prepare(struct crypto_async_request *req,
					    struct mv_cesa_engine *engine)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);

	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_skcipher_dma_prepare(skreq);
	else
		mv_cesa_skcipher_std_prepare(skreq);
}

static inline void
mv_cesa_skcipher_req_cleanup(struct crypto_async_request *req)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);

	mv_cesa_skcipher_cleanup(skreq);
}

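/*
 * Once a request completes, the output IV of the last operation is
 * propagated back to req->iv so that a follow-up CBC request can be
 * chained to this one.
 */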
static void
mv_cesa_skcipher_complete(struct crypto_async_request *req)
{
	struct skcipher_request *skreq = skcipher_request_cast(req);
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(skreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int ivsize;

	atomic_sub(skreq->cryptlen, &engine->load);
	ivsize = crypto_skcipher_ivsize(crypto_skcipher_reqtfm(skreq));

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ) {
		struct mv_cesa_req *basereq;

		basereq = &creq->base;
		memcpy(skreq->iv, basereq->chain.last->op->ctx.blkcipher.iv,
		       ivsize);
	} else {
		memcpy_fromio(skreq->iv,
			      engine->sram + CESA_SA_CRYPT_IV_SRAM_OFFSET,
			      ivsize);
	}
}

static const struct mv_cesa_req_ops mv_cesa_skcipher_req_ops = {
	.step = mv_cesa_skcipher_step,
	.process = mv_cesa_skcipher_process,
	.cleanup = mv_cesa_skcipher_req_cleanup,
	.complete = mv_cesa_skcipher_complete,
};

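/* Zeroize the whole transform context (cipher keys included) on exit. */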
static void mv_cesa_skcipher_cra_exit(struct crypto_tfm *tfm)
{
	void *ctx = crypto_tfm_ctx(tfm);

	memzero_explicit(ctx, tfm->__crt_alg->cra_ctxsize);
}

static int mv_cesa_skcipher_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->ops = &mv_cesa_skcipher_req_ops;

	crypto_skcipher_set_reqsize(__crypto_skcipher_cast(tfm),
				    sizeof(struct mv_cesa_skcipher_req));

	return 0;
}

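/*
 * crypto_aes_expand_key() computes the software key schedules; the tail
 * of key_dec is then overwritten with the last words of the encryption
 * schedule, which is the layout the CESA engine expects for AES-192/256
 * decryption.
 */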
static int mv_cesa_aes_setkey(struct crypto_skcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(tfm);
	int remaining;
	int offset;
	int ret;
	int i;

	ret = crypto_aes_expand_key(&ctx->aes, key, len);
	if (ret) {
		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return ret;
	}

	remaining = (ctx->aes.key_length - 16) / 4;
	offset = ctx->aes.key_length + 24 - remaining;
	for (i = 0; i < remaining; i++)
		ctx->aes.key_dec[4 + i] =
			cpu_to_le32(ctx->aes.key_enc[offset + i]);

	return 0;
}

static int mv_cesa_des_setkey(struct crypto_skcipher *cipher, const u8 *key,
			      unsigned int len)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 tmp[DES_EXPKEY_WORDS];
	int ret;

	if (len != DES_KEY_SIZE) {
		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	/* des_ekey() returns 0 when the key is weak */
	ret = des_ekey(tmp, key);
	if (!ret && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
		return -EINVAL;
	}

	memcpy(ctx->key, key, DES_KEY_SIZE);

	return 0;
}

static int mv_cesa_des3_ede_setkey(struct crypto_skcipher *cipher,
				   const u8 *key, unsigned int len)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(tfm);

	if (len != DES3_EDE_KEY_SIZE) {
		crypto_skcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);

	return 0;
}

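/*
 * Build the TDMA descriptor chain for a request: map the scatterlists,
 * then, for each SRAM-sized chunk, queue the operation descriptor, the
 * input transfers, a dummy descriptor that triggers the engine, and the
 * output transfers.
 */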
static int mv_cesa_skcipher_dma_req_init(struct skcipher_request *req,
					 const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_skcipher_dma_iter iter;
	bool skip_ctx = false;
	int ret;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (req->src != req->dst) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret)
			return -ENOMEM;
		ret = dma_map_sg(cesa_dev->dev, req->dst, creq->dst_nents,
				 DMA_FROM_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err_unmap_src;
		}
	} else {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_BIDIRECTIONAL);
		if (!ret)
			return -ENOMEM;
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_skcipher_req_iter_init(&iter, req);

	do {
		struct mv_cesa_op_ctx *op;

		op = mv_cesa_dma_add_op(&basereq->chain, op_templ, skip_ctx,
					flags);
		if (IS_ERR(op)) {
			ret = PTR_ERR(op);
			goto err_free_tdma;
		}
		skip_ctx = true;

		mv_cesa_set_crypt_op_len(op, iter.base.op_len);

		/* Add input transfers */
		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
						   &iter.src, flags);
		if (ret)
			goto err_free_tdma;

		/* Add dummy desc to launch the crypto operation */
		ret = mv_cesa_dma_add_dummy_launch(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;

		/* Add output transfers */
		ret = mv_cesa_dma_add_op_transfers(&basereq->chain, &iter.base,
						   &iter.dst, flags);
		if (ret)
			goto err_free_tdma;
	} while (mv_cesa_skcipher_req_iter_next_op(&iter));

	/* Add output data for IV */
	ret = mv_cesa_dma_add_result_op(&basereq->chain,
					CESA_SA_CFG_SRAM_OFFSET,
					CESA_SA_DATA_SRAM_OFFSET,
					CESA_TDMA_SRC_IN_SRAM, flags);
	if (ret)
		goto err_free_tdma;

	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(basereq);
	if (req->dst != req->src)
		dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents,
			     DMA_FROM_DEVICE);

err_unmap_src:
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents,
		     req->dst != req->src ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL);

	return ret;
}

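/*
 * PIO mode needs no descriptor chain: the operation template is simply
 * stored and later copied to SRAM one chunk at a time.
 */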
static inline int
mv_cesa_skcipher_std_req_init(struct skcipher_request *req,
			      const struct mv_cesa_op_ctx *op_templ)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_skcipher_std_req *sreq = &creq->std;
	struct mv_cesa_req *basereq = &creq->base;

	sreq->op = *op_templ;
	sreq->skip_ctx = false;
	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	return 0;
}

static int mv_cesa_skcipher_req_init(struct skcipher_request *req,
				     struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	unsigned int blksize = crypto_skcipher_blocksize(tfm);
	int ret;

	if (!IS_ALIGNED(req->cryptlen, blksize))
		return -EINVAL;

	creq->src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG");
		return creq->src_nents;
	}
	creq->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);
	if (creq->dst_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of dst SG");
		return creq->dst_nents;
	}

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_OP_CRYPT_ONLY,
			      CESA_SA_DESC_CFG_OP_MSK);

	if (cesa_dev->caps->has_tdma)
		ret = mv_cesa_skcipher_dma_req_init(req, tmpl);
	else
		ret = mv_cesa_skcipher_std_req_init(req, tmpl);

	return ret;
}

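/*
 * Common entry point for all algorithms: initialise the request for the
 * chosen backend, pick the least loaded engine and queue the request.
 */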
static int mv_cesa_skcipher_queue_req(struct skcipher_request *req,
				      struct mv_cesa_op_ctx *tmpl)
{
	int ret;
	struct mv_cesa_skcipher_req *creq = skcipher_request_ctx(req);
	struct mv_cesa_engine *engine;

	ret = mv_cesa_skcipher_req_init(req, tmpl);
	if (ret)
		return ret;

	engine = mv_cesa_select_engine(req->cryptlen);
	mv_cesa_skcipher_prepare(&req->base, engine);

	ret = mv_cesa_queue_req(&req->base, &creq->base);

	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_skcipher_cleanup(req);

	return ret;
}

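/*
 * Per-algorithm handlers: each one fills an on-stack operation template
 * with the cipher, mode and direction bits before queueing the request.
 */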
static int mv_cesa_des_op(struct skcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES_KEY_SIZE);

	return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des_op(req, &tmpl);
}

static int mv_cesa_ecb_des_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_des_alg = {
	.setkey = mv_cesa_des_setkey,
	.encrypt = mv_cesa_ecb_des_encrypt,
	.decrypt = mv_cesa_ecb_des_decrypt,
	.min_keysize = DES_KEY_SIZE,
	.max_keysize = DES_KEY_SIZE,
	.base = {
		.cra_name = "ecb(des)",
		.cra_driver_name = "mv-ecb-des",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_cbc_des_op(struct skcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);

	memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES_BLOCK_SIZE);

	return mv_cesa_des_op(req, tmpl);
}

static int mv_cesa_cbc_des_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

static int mv_cesa_cbc_des_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_des_alg = {
	.setkey = mv_cesa_des_setkey,
	.encrypt = mv_cesa_cbc_des_encrypt,
	.decrypt = mv_cesa_cbc_des_decrypt,
	.min_keysize = DES_KEY_SIZE,
	.max_keysize = DES_KEY_SIZE,
	.ivsize = DES_BLOCK_SIZE,
	.base = {
		.cra_name = "cbc(des)",
		.cra_driver_name = "mv-cbc-des",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_des3_op(struct skcipher_request *req,
			   struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_des3_ctx *ctx = crypto_tfm_ctx(req->base.tfm);

	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTM_3DES,
			      CESA_SA_DESC_CFG_CRYPTM_MSK);

	memcpy(tmpl->ctx.blkcipher.key, ctx->key, DES3_EDE_KEY_SIZE);

	return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_des3_ede_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_des3_op(req, &tmpl);
}

static int mv_cesa_ecb_des3_ede_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_des3_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_des3_ede_alg = {
	.setkey = mv_cesa_des3_ede_setkey,
	.encrypt = mv_cesa_ecb_des3_ede_encrypt,
	.decrypt = mv_cesa_ecb_des3_ede_decrypt,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.base = {
		.cra_name = "ecb(des3_ede)",
		.cra_driver_name = "mv-ecb-des3-ede",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_cbc_des3_op(struct skcipher_request *req,
			       struct mv_cesa_op_ctx *tmpl)
{
	memcpy(tmpl->ctx.blkcipher.iv, req->iv, DES3_EDE_BLOCK_SIZE);

	return mv_cesa_des3_op(req, tmpl);
}

static int mv_cesa_cbc_des3_ede_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

static int mv_cesa_cbc_des3_ede_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_CBC |
			   CESA_SA_DESC_CFG_3DES_EDE |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_des3_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_des3_ede_alg = {
	.setkey = mv_cesa_des3_ede_setkey,
	.encrypt = mv_cesa_cbc_des3_ede_encrypt,
	.decrypt = mv_cesa_cbc_des3_ede_decrypt,
	.min_keysize = DES3_EDE_KEY_SIZE,
	.max_keysize = DES3_EDE_KEY_SIZE,
	.ivsize = DES3_EDE_BLOCK_SIZE,
	.base = {
		.cra_name = "cbc(des3_ede)",
		.cra_driver_name = "mv-cbc-des3-ede",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_des3_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

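/*
 * AES keys are loaded into the op template at request time rather than
 * at setkey time: the direction bit decides whether the encryption or
 * the decryption schedule is sent to the engine.
 */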
static int mv_cesa_aes_op(struct skcipher_request *req,
			  struct mv_cesa_op_ctx *tmpl)
{
	struct mv_cesa_aes_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	int i;
	u32 *key;
	u32 cfg;

	cfg = CESA_SA_DESC_CFG_CRYPTM_AES;

	if (mv_cesa_get_op_cfg(tmpl) & CESA_SA_DESC_CFG_DIR_DEC)
		key = ctx->aes.key_dec;
	else
		key = ctx->aes.key_enc;

	for (i = 0; i < ctx->aes.key_length / sizeof(u32); i++)
		tmpl->ctx.blkcipher.key[i] = cpu_to_le32(key[i]);

	if (ctx->aes.key_length == 24)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_192;
	else if (ctx->aes.key_length == 32)
		cfg |= CESA_SA_DESC_CFG_AES_LEN_256;

	mv_cesa_update_op_cfg(tmpl, cfg,
			      CESA_SA_DESC_CFG_CRYPTM_MSK |
			      CESA_SA_DESC_CFG_AES_LEN_MSK);

	return mv_cesa_skcipher_queue_req(req, tmpl);
}

static int mv_cesa_ecb_aes_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_aes_op(req, &tmpl);
}

static int mv_cesa_ecb_aes_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl,
			   CESA_SA_DESC_CFG_CRYPTCM_ECB |
			   CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_aes_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_ecb_aes_alg = {
	.setkey = mv_cesa_aes_setkey,
	.encrypt = mv_cesa_ecb_aes_encrypt,
	.decrypt = mv_cesa_ecb_aes_decrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.base = {
		.cra_name = "ecb(aes)",
		.cra_driver_name = "mv-ecb-aes",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};

static int mv_cesa_cbc_aes_op(struct skcipher_request *req,
			      struct mv_cesa_op_ctx *tmpl)
{
	mv_cesa_update_op_cfg(tmpl, CESA_SA_DESC_CFG_CRYPTCM_CBC,
			      CESA_SA_DESC_CFG_CRYPTCM_MSK);
	memcpy(tmpl->ctx.blkcipher.iv, req->iv, AES_BLOCK_SIZE);

	return mv_cesa_aes_op(req, tmpl);
}

static int mv_cesa_cbc_aes_encrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_ENC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

static int mv_cesa_cbc_aes_decrypt(struct skcipher_request *req)
{
	struct mv_cesa_op_ctx tmpl;

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_DIR_DEC);

	return mv_cesa_cbc_aes_op(req, &tmpl);
}

struct skcipher_alg mv_cesa_cbc_aes_alg = {
	.setkey = mv_cesa_aes_setkey,
	.encrypt = mv_cesa_cbc_aes_encrypt,
	.decrypt = mv_cesa_cbc_aes_decrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
	.base = {
		.cra_name = "cbc(aes)",
		.cra_driver_name = "mv-cbc-aes",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mv_cesa_aes_ctx),
		.cra_alignmask = 0,
		.cra_module = THIS_MODULE,
		.cra_init = mv_cesa_skcipher_cra_init,
		.cra_exit = mv_cesa_skcipher_cra_exit,
	},
};