/*
 * Support for OMAP AES HW acceleration.
 *
 * Copyright (c) 2010 Nokia Corporation
 * Copyright (c) 2011 Texas Instruments Incorporated
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */
#define pr_fmt(fmt) "%20s: " fmt, __func__
#define prn(num) pr_debug(#num "=%d\n", num)
#define prx(num) pr_debug(#num "=%x\n", num)
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <crypto/scatterwalk.h>
#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/engine.h>
#include <crypto/internal/skcipher.h>
#include <crypto/internal/aead.h>

#include "omap-crypto.h"
#include "omap-aes.h"
/* keep registered devices data here */
static LIST_HEAD(dev_list);
static DEFINE_SPINLOCK(list_lock);

static int aes_fallback_sz = 200;
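
/*
 * Requests smaller than aes_fallback_sz bytes are handed to a software
 * fallback cipher in omap_aes_crypt() below, where the cost of setting up
 * the accelerator outweighs its benefit. The threshold is tunable at
 * runtime through the "fallback" sysfs attribute defined near the end of
 * this file; the 200-byte default is a tuning heuristic, not a hardware
 * limit.
 */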
#ifdef DEBUG
#define omap_aes_read(dd, offset)                                       \
({                                                                      \
        int _read_ret;                                                  \
        _read_ret = __raw_readl(dd->io_base + offset);                  \
        pr_debug("omap_aes_read(" #offset "=%#x)= %#x\n",               \
                 offset, _read_ret);                                    \
        _read_ret;                                                      \
})
#else
inline u32 omap_aes_read(struct omap_aes_dev *dd, u32 offset)
{
        return __raw_readl(dd->io_base + offset);
}
#endif
#ifdef DEBUG
#define omap_aes_write(dd, offset, value)                               \
        do {                                                            \
                pr_debug("omap_aes_write(" #offset "=%#x) value=%#x\n", \
                         offset, value);                                \
                __raw_writel(value, dd->io_base + offset);              \
        } while (0)
#else
inline void omap_aes_write(struct omap_aes_dev *dd, u32 offset,
                           u32 value)
{
        __raw_writel(value, dd->io_base + offset);
}
#endif
static inline void omap_aes_write_mask(struct omap_aes_dev *dd, u32 offset,
                                       u32 value, u32 mask)
{
        u32 val;

        val = omap_aes_read(dd, offset);
        val &= ~mask;
        val |= value & mask;
        omap_aes_write(dd, offset, val);
}
static void omap_aes_write_n(struct omap_aes_dev *dd, u32 offset,
                             u32 *value, int count)
{
        for (; count--; value++, offset += 4)
                omap_aes_write(dd, offset, *value);
}
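
/*
 * Example of how these helpers divide the work: omap_aes_write_ctrl()
 * below updates only the mode bits of the control register with
 *
 *      omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, AES_REG_CTRL_MASK);
 *
 * while a multi-word value such as a 16-byte IV is streamed in with
 * omap_aes_write_n(dd, AES_REG_IV(dd, 0), iv, 4).
 */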
static int omap_aes_hw_init(struct omap_aes_dev *dd)
{
        int err;

        if (!(dd->flags & FLAGS_INIT)) {
                dd->flags |= FLAGS_INIT;
                dd->err = 0;
        }

        err = pm_runtime_get_sync(dd->dev);
        if (err < 0) {
                dev_err(dd->dev, "failed to get sync: %d\n", err);
                return err;
        }

        return 0;
}
void omap_aes_clear_copy_flags(struct omap_aes_dev *dd)
{
        dd->flags &= ~(OMAP_CRYPTO_COPY_MASK << FLAGS_IN_DATA_ST_SHIFT);
        dd->flags &= ~(OMAP_CRYPTO_COPY_MASK << FLAGS_OUT_DATA_ST_SHIFT);
        dd->flags &= ~(OMAP_CRYPTO_COPY_MASK << FLAGS_ASSOC_DATA_ST_SHIFT);
}
int omap_aes_write_ctrl(struct omap_aes_dev *dd)
{
        struct omap_aes_reqctx *rctx;
        unsigned int key32;
        int i, err;
        u32 val;

        err = omap_aes_hw_init(dd);
        if (err)
                return err;

        key32 = dd->ctx->keylen / sizeof(u32);

        /* Reset the key registers: previous GCM HASH keys must not leak in */
        if (dd->flags & FLAGS_GCM)
                for (i = 0; i < 0x40; i = i + 4)
                        omap_aes_write(dd, i, 0x0);

        for (i = 0; i < key32; i++) {
                omap_aes_write(dd, AES_REG_KEY(dd, i),
                               __le32_to_cpu(dd->ctx->key[i]));
        }

        if ((dd->flags & (FLAGS_CBC | FLAGS_CTR)) && dd->req->info)
                omap_aes_write_n(dd, AES_REG_IV(dd, 0), dd->req->info, 4);

        if ((dd->flags & FLAGS_GCM) && dd->aead_req->iv) {
                rctx = aead_request_ctx(dd->aead_req);
                omap_aes_write_n(dd, AES_REG_IV(dd, 0), (u32 *)rctx->iv, 4);
        }

        val = FLD_VAL(((dd->ctx->keylen >> 3) - 1), 4, 3);
        if (dd->flags & FLAGS_CBC)
                val |= AES_REG_CTRL_CBC;

        if (dd->flags & (FLAGS_CTR | FLAGS_GCM))
                val |= AES_REG_CTRL_CTR | AES_REG_CTRL_CTR_WIDTH_128;

        if (dd->flags & FLAGS_GCM)
                val |= AES_REG_CTRL_GCM;

        if (dd->flags & FLAGS_ENCRYPT)
                val |= AES_REG_CTRL_DIRECTION;

        omap_aes_write_mask(dd, AES_REG_CTRL(dd), val, AES_REG_CTRL_MASK);

        return 0;
}
static void omap_aes_dma_trigger_omap2(struct omap_aes_dev *dd, int length)
{
        u32 mask, val;

        val = dd->pdata->dma_start;

        if (dd->dma_lch_out != NULL)
                val |= dd->pdata->dma_enable_out;
        if (dd->dma_lch_in != NULL)
                val |= dd->pdata->dma_enable_in;

        mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
               dd->pdata->dma_start;

        omap_aes_write_mask(dd, AES_REG_MASK(dd), val, mask);
}
static void omap_aes_dma_trigger_omap4(struct omap_aes_dev *dd, int length)
{
        omap_aes_write(dd, AES_REG_LENGTH_N(0), length);
        omap_aes_write(dd, AES_REG_LENGTH_N(1), 0);
        if (dd->flags & FLAGS_GCM)
                omap_aes_write(dd, AES_REG_A_LEN, dd->assoc_len);

        omap_aes_dma_trigger_omap2(dd, length);
}
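
/*
 * OMAP4-class IP takes explicit lengths: the payload length is written to
 * AES_REG_LENGTH_N and, for GCM, the additional authenticated data length
 * to AES_REG_A_LEN, before delegating to the OMAP2 path above to set the
 * common DMA-start bits.
 */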
static void omap_aes_dma_stop(struct omap_aes_dev *dd)
{
        u32 mask;

        mask = dd->pdata->dma_enable_out | dd->pdata->dma_enable_in |
               dd->pdata->dma_start;

        omap_aes_write_mask(dd, AES_REG_MASK(dd), 0, mask);
}
struct omap_aes_dev *omap_aes_find_dev(struct omap_aes_reqctx *rctx)
{
        struct omap_aes_dev *dd;

        spin_lock_bh(&list_lock);
        dd = list_first_entry(&dev_list, struct omap_aes_dev, list);
        list_move_tail(&dd->list, &dev_list);
        rctx->dd = dd;
        spin_unlock_bh(&list_lock);

        return dd;
}
static void omap_aes_dma_out_callback(void *data)
{
        struct omap_aes_dev *dd = data;

        /* dma_lch_out - completed */
        tasklet_schedule(&dd->done_task);
}
static int omap_aes_dma_init(struct omap_aes_dev *dd)
{
        int err;

        dd->dma_lch_out = NULL;
        dd->dma_lch_in = NULL;

        dd->dma_lch_in = dma_request_chan(dd->dev, "rx");
        if (IS_ERR(dd->dma_lch_in)) {
                dev_err(dd->dev, "Unable to request in DMA channel\n");
                return PTR_ERR(dd->dma_lch_in);
        }

        dd->dma_lch_out = dma_request_chan(dd->dev, "tx");
        if (IS_ERR(dd->dma_lch_out)) {
                dev_err(dd->dev, "Unable to request out DMA channel\n");
                err = PTR_ERR(dd->dma_lch_out);
                goto err_dma_out;
        }

        return 0;

err_dma_out:
        dma_release_channel(dd->dma_lch_in);
        return err;
}
static void omap_aes_dma_cleanup(struct omap_aes_dev *dd)
{
        if (dd->pio_only)
                return;

        dma_release_channel(dd->dma_lch_out);
        dma_release_channel(dd->dma_lch_in);
}
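
/*
 * The DMA path pairs two channels around the engine's data port: the "rx"
 * (in) channel pushes source data from the scatterlist into AES_REG_DATA_N
 * (DMA_MEM_TO_DEV) while the "tx" (out) channel drains results back to
 * memory (DMA_DEV_TO_MEM). Only the out channel carries a completion
 * callback; once the output has drained, the input is implicitly done.
 */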
static int omap_aes_crypt_dma(struct omap_aes_dev *dd,
                              struct scatterlist *in_sg,
                              struct scatterlist *out_sg,
                              int in_sg_len, int out_sg_len)
{
        struct dma_async_tx_descriptor *tx_in, *tx_out;
        struct dma_slave_config cfg;
        int ret;

        if (dd->pio_only) {
                scatterwalk_start(&dd->in_walk, dd->in_sg);
                scatterwalk_start(&dd->out_walk, dd->out_sg);
                /* Enable DATAIN interrupt and let it take care of the rest */
                omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
                return 0;
        }

        dma_sync_sg_for_device(dd->dev, dd->in_sg, in_sg_len, DMA_TO_DEVICE);

        memset(&cfg, 0, sizeof(cfg));
        cfg.src_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
        cfg.dst_addr = dd->phys_base + AES_REG_DATA_N(dd, 0);
        cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        cfg.src_maxburst = DST_MAXBURST;
        cfg.dst_maxburst = DST_MAXBURST;

        /* IN */
        ret = dmaengine_slave_config(dd->dma_lch_in, &cfg);
        if (ret) {
                dev_err(dd->dev, "can't configure IN dmaengine slave: %d\n",
                        ret);
                return ret;
        }

        tx_in = dmaengine_prep_slave_sg(dd->dma_lch_in, in_sg, in_sg_len,
                                        DMA_MEM_TO_DEV,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!tx_in) {
                dev_err(dd->dev, "IN prep_slave_sg() failed\n");
                return -EINVAL;
        }

        /* No callback necessary */
        tx_in->callback_param = dd;

        /* OUT */
        ret = dmaengine_slave_config(dd->dma_lch_out, &cfg);
        if (ret) {
                dev_err(dd->dev, "can't configure OUT dmaengine slave: %d\n",
                        ret);
                return ret;
        }

        tx_out = dmaengine_prep_slave_sg(dd->dma_lch_out, out_sg, out_sg_len,
                                         DMA_DEV_TO_MEM,
                                         DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!tx_out) {
                dev_err(dd->dev, "OUT prep_slave_sg() failed\n");
                return -EINVAL;
        }

        if (dd->flags & FLAGS_GCM)
                tx_out->callback = omap_aes_gcm_dma_out_callback;
        else
                tx_out->callback = omap_aes_dma_out_callback;
        tx_out->callback_param = dd;

        dmaengine_submit(tx_in);
        dmaengine_submit(tx_out);

        dma_async_issue_pending(dd->dma_lch_in);
        dma_async_issue_pending(dd->dma_lch_out);

        /* start DMA */
        dd->pdata->trigger(dd, dd->total);

        return 0;
}
int omap_aes_crypt_dma_start(struct omap_aes_dev *dd)
{
        int err;

        pr_debug("total: %d\n", dd->total);

        if (!dd->pio_only) {
                err = dma_map_sg(dd->dev, dd->in_sg, dd->in_sg_len,
                                 DMA_TO_DEVICE);
                if (!err) {
                        dev_err(dd->dev, "dma_map_sg() error\n");
                        return -EINVAL;
                }

                err = dma_map_sg(dd->dev, dd->out_sg, dd->out_sg_len,
                                 DMA_FROM_DEVICE);
                if (!err) {
                        dev_err(dd->dev, "dma_map_sg() error\n");
                        return -EINVAL;
                }
        }

        err = omap_aes_crypt_dma(dd, dd->in_sg, dd->out_sg, dd->in_sg_len,
                                 dd->out_sg_len);
        if (err && !dd->pio_only) {
                dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
                dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
                             DMA_FROM_DEVICE);
        }

        return err;
}
static void omap_aes_finish_req(struct omap_aes_dev *dd, int err)
{
        struct ablkcipher_request *req = dd->req;

        pr_debug("err: %d\n", err);

        crypto_finalize_ablkcipher_request(dd->engine, req, err);

        pm_runtime_mark_last_busy(dd->dev);
        pm_runtime_put_autosuspend(dd->dev);
}
int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
{
        pr_debug("total: %d\n", dd->total);

        omap_aes_dma_stop(dd);

        return 0;
}
static int omap_aes_handle_queue(struct omap_aes_dev *dd,
                                 struct ablkcipher_request *req)
{
        if (req)
                return crypto_transfer_ablkcipher_request_to_engine(dd->engine, req);

        return 0;
}
static int omap_aes_prepare_req(struct crypto_engine *engine,
                                void *areq)
{
        struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
        struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
                        crypto_ablkcipher_reqtfm(req));
        struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
        struct omap_aes_dev *dd = rctx->dd;
        int ret;
        u16 flags;

        if (!dd)
                return -ENODEV;

        /* assign new request to device */
        dd->req = req;
        dd->total = req->nbytes;
        dd->total_save = req->nbytes;
        dd->in_sg = req->src;
        dd->out_sg = req->dst;
        dd->orig_out = req->dst;

        flags = OMAP_CRYPTO_COPY_DATA;
        if (req->src == req->dst)
                flags |= OMAP_CRYPTO_FORCE_COPY;

        ret = omap_crypto_align_sg(&dd->in_sg, dd->total, AES_BLOCK_SIZE,
                                   dd->in_sgl, flags,
                                   FLAGS_IN_DATA_ST_SHIFT, &dd->flags);
        if (ret)
                return ret;

        ret = omap_crypto_align_sg(&dd->out_sg, dd->total, AES_BLOCK_SIZE,
                                   &dd->out_sgl, 0,
                                   FLAGS_OUT_DATA_ST_SHIFT, &dd->flags);
        if (ret)
                return ret;

        dd->in_sg_len = sg_nents_for_len(dd->in_sg, dd->total);
        if (dd->in_sg_len < 0)
                return dd->in_sg_len;

        dd->out_sg_len = sg_nents_for_len(dd->out_sg, dd->total);
        if (dd->out_sg_len < 0)
                return dd->out_sg_len;

        rctx->mode &= FLAGS_MODE_MASK;
        dd->flags = (dd->flags & ~FLAGS_MODE_MASK) | rctx->mode;

        dd->ctx = ctx;
        rctx->dd = dd;

        return omap_aes_write_ctrl(dd);
}
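
/*
 * omap_crypto_align_sg() may substitute a bounce buffer for misaligned or
 * in-place (src == dst) scatterlists. The copy flags it records in
 * dd->flags tell omap_aes_done_task() which buffers must be copied back to
 * the caller's scatterlist and cleaned up afterwards.
 */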
static int omap_aes_crypt_req(struct crypto_engine *engine,
                              void *areq)
{
        struct ablkcipher_request *req = container_of(areq, struct ablkcipher_request, base);
        struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
        struct omap_aes_dev *dd = rctx->dd;

        if (!dd)
                return -ENODEV;

        return omap_aes_crypt_dma_start(dd);
}
static void omap_aes_done_task(unsigned long data)
{
        struct omap_aes_dev *dd = (struct omap_aes_dev *)data;

        pr_debug("enter done_task\n");

        if (!dd->pio_only) {
                dma_sync_sg_for_device(dd->dev, dd->out_sg, dd->out_sg_len,
                                       DMA_FROM_DEVICE);
                dma_unmap_sg(dd->dev, dd->in_sg, dd->in_sg_len, DMA_TO_DEVICE);
                dma_unmap_sg(dd->dev, dd->out_sg, dd->out_sg_len,
                             DMA_FROM_DEVICE);
                omap_aes_crypt_dma_stop(dd);
        }

        omap_crypto_cleanup(dd->in_sgl, NULL, 0, dd->total_save,
                            FLAGS_IN_DATA_ST_SHIFT, dd->flags);

        omap_crypto_cleanup(&dd->out_sgl, dd->orig_out, 0, dd->total_save,
                            FLAGS_OUT_DATA_ST_SHIFT, dd->flags);

        omap_aes_finish_req(dd, 0);

        pr_debug("exit\n");
}
static int omap_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
{
        struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(
                        crypto_ablkcipher_reqtfm(req));
        struct omap_aes_reqctx *rctx = ablkcipher_request_ctx(req);
        struct omap_aes_dev *dd;
        int ret;

        pr_debug("nbytes: %d, enc: %d, cbc: %d\n", req->nbytes,
                 !!(mode & FLAGS_ENCRYPT),
                 !!(mode & FLAGS_CBC));

        if (req->nbytes < aes_fallback_sz) {
                SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->fallback);

                skcipher_request_set_sync_tfm(subreq, ctx->fallback);
                skcipher_request_set_callback(subreq, req->base.flags, NULL,
                                              NULL);
                skcipher_request_set_crypt(subreq, req->src, req->dst,
                                           req->nbytes, req->info);

                if (mode & FLAGS_ENCRYPT)
                        ret = crypto_skcipher_encrypt(subreq);
                else
                        ret = crypto_skcipher_decrypt(subreq);

                skcipher_request_zero(subreq);
                return ret;
        }

        dd = omap_aes_find_dev(rctx);
        if (!dd)
                return -ENODEV;

        rctx->mode = mode;

        return omap_aes_handle_queue(dd, req);
}
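
/*
 * Usage sketch (hypothetical caller, not part of this driver): the
 * accelerator is reached through the regular kernel crypto API, e.g.
 *
 *      struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *
 * Requests shorter than aes_fallback_sz are then served synchronously by
 * the software fallback above; everything else is queued to the crypto
 * engine.
 */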
/* ********************** ALG API ************************************ */
static int omap_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
                           unsigned int keylen)
{
        struct omap_aes_ctx *ctx = crypto_ablkcipher_ctx(tfm);
        int ret;

        if (keylen != AES_KEYSIZE_128 && keylen != AES_KEYSIZE_192 &&
            keylen != AES_KEYSIZE_256)
                return -EINVAL;

        pr_debug("enter, keylen: %d\n", keylen);

        memcpy(ctx->key, key, keylen);
        ctx->keylen = keylen;

        crypto_sync_skcipher_clear_flags(ctx->fallback, CRYPTO_TFM_REQ_MASK);
        crypto_sync_skcipher_set_flags(ctx->fallback, tfm->base.crt_flags &
                                                      CRYPTO_TFM_REQ_MASK);

        ret = crypto_sync_skcipher_setkey(ctx->fallback, key, keylen);

        return ret;
}
static int omap_aes_ecb_encrypt(struct ablkcipher_request *req)
{
        return omap_aes_crypt(req, FLAGS_ENCRYPT);
}

static int omap_aes_ecb_decrypt(struct ablkcipher_request *req)
{
        return omap_aes_crypt(req, 0);
}

static int omap_aes_cbc_encrypt(struct ablkcipher_request *req)
{
        return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
}

static int omap_aes_cbc_decrypt(struct ablkcipher_request *req)
{
        return omap_aes_crypt(req, FLAGS_CBC);
}

static int omap_aes_ctr_encrypt(struct ablkcipher_request *req)
{
        return omap_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CTR);
}

static int omap_aes_ctr_decrypt(struct ablkcipher_request *req)
{
        return omap_aes_crypt(req, FLAGS_CTR);
}
static int omap_aes_prepare_req(struct crypto_engine *engine,
                                void *req);
static int omap_aes_crypt_req(struct crypto_engine *engine,
                              void *req);
static int omap_aes_cra_init(struct crypto_tfm *tfm)
{
        const char *name = crypto_tfm_alg_name(tfm);
        struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_sync_skcipher *blk;

        blk = crypto_alloc_sync_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(blk))
                return PTR_ERR(blk);

        ctx->fallback = blk;

        tfm->crt_ablkcipher.reqsize = sizeof(struct omap_aes_reqctx);

        ctx->enginectx.op.prepare_request = omap_aes_prepare_req;
        ctx->enginectx.op.unprepare_request = NULL;
        ctx->enginectx.op.do_one_request = omap_aes_crypt_req;

        return 0;
}
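
/*
 * The crypto engine drives each queued request through these callbacks:
 * prepare_request (omap_aes_prepare_req) aligns the scatterlists and
 * programs the key, IV and control register, then do_one_request
 * (omap_aes_crypt_req) starts the actual transfer. Completion is reported
 * asynchronously from omap_aes_done_task().
 */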
static int omap_aes_gcm_cra_init(struct crypto_aead *tfm)
{
        struct omap_aes_dev *dd = NULL;
        struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);
        int err;

        /* Find AES device, currently picks the first device */
        spin_lock_bh(&list_lock);
        list_for_each_entry(dd, &dev_list, list) {
                break;
        }
        spin_unlock_bh(&list_lock);

        err = pm_runtime_get_sync(dd->dev);
        if (err < 0) {
                dev_err(dd->dev, "%s: failed to get_sync(%d)\n",
                        __func__, err);
                return err;
        }

        tfm->reqsize = sizeof(struct omap_aes_reqctx);
        ctx->ctr = crypto_alloc_skcipher("ecb(aes)", 0, 0);
        if (IS_ERR(ctx->ctr)) {
                pr_warn("could not load aes driver for encrypting IV\n");
                return PTR_ERR(ctx->ctr);
        }

        return 0;
}
static void omap_aes_cra_exit(struct crypto_tfm *tfm)
{
        struct omap_aes_ctx *ctx = crypto_tfm_ctx(tfm);

        if (ctx->fallback)
                crypto_free_sync_skcipher(ctx->fallback);

        ctx->fallback = NULL;
}
static void omap_aes_gcm_cra_exit(struct crypto_aead *tfm)
{
        struct omap_aes_ctx *ctx = crypto_aead_ctx(tfm);

        omap_aes_cra_exit(crypto_aead_tfm(tfm));

        if (ctx->ctr)
                crypto_free_skcipher(ctx->ctr);
}
/* ********************** ALGS ************************************ */
static struct crypto_alg algs_ecb_cbc[] = {
{
        .cra_name               = "ecb(aes)",
        .cra_driver_name        = "ecb-aes-omap",
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                  CRYPTO_ALG_KERN_DRIVER_ONLY |
                                  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct omap_aes_ctx),
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = omap_aes_cra_init,
        .cra_exit               = omap_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .setkey         = omap_aes_setkey,
                .encrypt        = omap_aes_ecb_encrypt,
                .decrypt        = omap_aes_ecb_decrypt,
        }
},
{
        .cra_name               = "cbc(aes)",
        .cra_driver_name        = "cbc-aes-omap",
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                  CRYPTO_ALG_KERN_DRIVER_ONLY |
                                  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct omap_aes_ctx),
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = omap_aes_cra_init,
        .cra_exit               = omap_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = omap_aes_setkey,
                .encrypt        = omap_aes_cbc_encrypt,
                .decrypt        = omap_aes_cbc_decrypt,
        }
},
};
static struct crypto_alg algs_ctr[] = {
{
        .cra_name               = "ctr(aes)",
        .cra_driver_name        = "ctr-aes-omap",
        .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER |
                                  CRYPTO_ALG_KERN_DRIVER_ONLY |
                                  CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
        .cra_blocksize          = AES_BLOCK_SIZE,
        .cra_ctxsize            = sizeof(struct omap_aes_ctx),
        .cra_type               = &crypto_ablkcipher_type,
        .cra_module             = THIS_MODULE,
        .cra_init               = omap_aes_cra_init,
        .cra_exit               = omap_aes_cra_exit,
        .cra_u.ablkcipher = {
                .min_keysize    = AES_MIN_KEY_SIZE,
                .max_keysize    = AES_MAX_KEY_SIZE,
                .ivsize         = AES_BLOCK_SIZE,
                .setkey         = omap_aes_setkey,
                .encrypt        = omap_aes_ctr_encrypt,
                .decrypt        = omap_aes_ctr_decrypt,
        }
},
};
static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc[] = {
        {
                .algs_list      = algs_ecb_cbc,
                .size           = ARRAY_SIZE(algs_ecb_cbc),
        },
};
static struct aead_alg algs_aead_gcm[] = {
{
        .base = {
                .cra_name               = "gcm(aes)",
                .cra_driver_name        = "gcm-aes-omap",
                .cra_flags              = CRYPTO_ALG_ASYNC |
                                          CRYPTO_ALG_KERN_DRIVER_ONLY,
                .cra_ctxsize            = sizeof(struct omap_aes_ctx),
                .cra_alignmask          = 0xf,
                .cra_module             = THIS_MODULE,
        },
        .init           = omap_aes_gcm_cra_init,
        .exit           = omap_aes_gcm_cra_exit,
        .ivsize         = GCM_AES_IV_SIZE,
        .maxauthsize    = AES_BLOCK_SIZE,
        .setkey         = omap_aes_gcm_setkey,
        .encrypt        = omap_aes_gcm_encrypt,
        .decrypt        = omap_aes_gcm_decrypt,
},
{
        .base = {
                .cra_name               = "rfc4106(gcm(aes))",
                .cra_driver_name        = "rfc4106-gcm-aes-omap",
                .cra_flags              = CRYPTO_ALG_ASYNC |
                                          CRYPTO_ALG_KERN_DRIVER_ONLY,
                .cra_ctxsize            = sizeof(struct omap_aes_ctx),
                .cra_alignmask          = 0xf,
                .cra_module             = THIS_MODULE,
        },
        .init           = omap_aes_gcm_cra_init,
        .exit           = omap_aes_gcm_cra_exit,
        .maxauthsize    = AES_BLOCK_SIZE,
        .ivsize         = GCM_RFC4106_IV_SIZE,
        .setkey         = omap_aes_4106gcm_setkey,
        .encrypt        = omap_aes_4106gcm_encrypt,
        .decrypt        = omap_aes_4106gcm_decrypt,
},
};
static struct omap_aes_aead_algs omap_aes_aead_info = {
        .algs_list      = algs_aead_gcm,
        .size           = ARRAY_SIZE(algs_aead_gcm),
};
static const struct omap_aes_pdata omap_aes_pdata_omap2 = {
        .algs_info      = omap_aes_algs_info_ecb_cbc,
        .algs_info_size = ARRAY_SIZE(omap_aes_algs_info_ecb_cbc),
        .trigger        = omap_aes_dma_trigger_omap2,
        .dma_enable_in  = BIT(2),
        .dma_enable_out = BIT(3),
};
static struct omap_aes_algs_info omap_aes_algs_info_ecb_cbc_ctr[] = {
        {
                .algs_list      = algs_ecb_cbc,
                .size           = ARRAY_SIZE(algs_ecb_cbc),
        },
        {
                .algs_list      = algs_ctr,
                .size           = ARRAY_SIZE(algs_ctr),
        },
};
static const struct omap_aes_pdata omap_aes_pdata_omap3 = {
        .algs_info      = omap_aes_algs_info_ecb_cbc_ctr,
        .algs_info_size = ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
        .trigger        = omap_aes_dma_trigger_omap2,
        .dma_enable_in  = BIT(2),
        .dma_enable_out = BIT(3),
};
static const struct omap_aes_pdata omap_aes_pdata_omap4 = {
        .algs_info      = omap_aes_algs_info_ecb_cbc_ctr,
        .algs_info_size = ARRAY_SIZE(omap_aes_algs_info_ecb_cbc_ctr),
        .aead_algs_info = &omap_aes_aead_info,
        .trigger        = omap_aes_dma_trigger_omap4,
        .irq_status_ofs = 0x8c,
        .irq_enable_ofs = 0x90,
        .dma_enable_in  = BIT(5),
        .dma_enable_out = BIT(6),
        .major_mask     = 0x0700,
        .minor_mask     = 0x003f,
};
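
/*
 * PIO fallback: when no DMA channels could be obtained, omap_aes_irq()
 * moves data one 16-byte block at a time. A DATA_IN interrupt means the
 * engine wants the next input block; DATA_OUT means a result block is
 * ready. The handler ping-pongs between the two until dd->total reaches
 * zero, then schedules the done tasklet.
 */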
static irqreturn_t omap_aes_irq(int irq, void *dev_id)
{
        struct omap_aes_dev *dd = dev_id;
        u32 status, i;
        u32 *src, *dst;

        status = omap_aes_read(dd, AES_REG_IRQ_STATUS(dd));
        if (status & AES_REG_IRQ_DATA_IN) {
                omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);

                BUG_ON(!dd->in_sg);
                BUG_ON(_calc_walked(in) > dd->in_sg->length);

                src = sg_virt(dd->in_sg) + _calc_walked(in);

                for (i = 0; i < AES_BLOCK_WORDS; i++) {
                        omap_aes_write(dd, AES_REG_DATA_N(dd, i), *src);

                        scatterwalk_advance(&dd->in_walk, 4);
                        if (dd->in_sg->length == _calc_walked(in)) {
                                dd->in_sg = sg_next(dd->in_sg);
                                if (dd->in_sg) {
                                        scatterwalk_start(&dd->in_walk,
                                                          dd->in_sg);
                                        src = sg_virt(dd->in_sg) +
                                              _calc_walked(in);
                                }
                        } else {
                                src++;
                        }
                }

                /* Clear IRQ status */
                status &= ~AES_REG_IRQ_DATA_IN;
                omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);

                /* Enable DATA_OUT interrupt */
                omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x4);

        } else if (status & AES_REG_IRQ_DATA_OUT) {
                omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x0);

                BUG_ON(!dd->out_sg);
                BUG_ON(_calc_walked(out) > dd->out_sg->length);

                dst = sg_virt(dd->out_sg) + _calc_walked(out);

                for (i = 0; i < AES_BLOCK_WORDS; i++) {
                        *dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i));
                        scatterwalk_advance(&dd->out_walk, 4);
                        if (dd->out_sg->length == _calc_walked(out)) {
                                dd->out_sg = sg_next(dd->out_sg);
                                if (dd->out_sg) {
                                        scatterwalk_start(&dd->out_walk,
                                                          dd->out_sg);
                                        dst = sg_virt(dd->out_sg) +
                                              _calc_walked(out);
                                }
                        } else {
                                dst++;
                        }
                }

                dd->total -= min_t(size_t, AES_BLOCK_SIZE, dd->total);

                /* Clear IRQ status */
                status &= ~AES_REG_IRQ_DATA_OUT;
                omap_aes_write(dd, AES_REG_IRQ_STATUS(dd), status);

                if (!dd->total)
                        /* All bytes read! */
                        tasklet_schedule(&dd->done_task);
                else
                        /* Enable DATA_IN interrupt for next block */
                        omap_aes_write(dd, AES_REG_IRQ_ENABLE(dd), 0x2);
        }

        return IRQ_HANDLED;
}
#ifdef CONFIG_OF
static const struct of_device_id omap_aes_of_match[] = {
        {
                .compatible     = "ti,omap2-aes",
                .data           = &omap_aes_pdata_omap2,
        },
        {
                .compatible     = "ti,omap3-aes",
                .data           = &omap_aes_pdata_omap3,
        },
        {
                .compatible     = "ti,omap4-aes",
                .data           = &omap_aes_pdata_omap4,
        },
        {},
};
MODULE_DEVICE_TABLE(of, omap_aes_of_match);
static int omap_aes_get_res_of(struct omap_aes_dev *dd,
                struct device *dev, struct resource *res)
{
        struct device_node *node = dev->of_node;
        int err = 0;

        dd->pdata = of_device_get_match_data(dev);
        if (!dd->pdata) {
                dev_err(dev, "no compatible OF match\n");
                err = -EINVAL;
                goto err;
        }

        err = of_address_to_resource(node, 0, res);
        if (err < 0) {
                dev_err(dev, "can't translate OF node address\n");
                err = -EINVAL;
                goto err;
        }

err:
        return err;
}
#else
static const struct of_device_id omap_aes_of_match[] = {
};

static int omap_aes_get_res_of(struct omap_aes_dev *dd,
                struct device *dev, struct resource *res)
{
        return -EINVAL;
}
#endif
static int omap_aes_get_res_pdev(struct omap_aes_dev *dd,
                struct platform_device *pdev, struct resource *res)
{
        struct device *dev = &pdev->dev;
        struct resource *r;
        int err = 0;

        /* Get the base address */
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!r) {
                dev_err(dev, "no MEM resource info\n");
                err = -ENODEV;
                goto err;
        }
        memcpy(res, r, sizeof(*res));

        /* Only OMAP2/3 can be non-DT */
        dd->pdata = &omap_aes_pdata_omap2;

err:
        return err;
}
static ssize_t fallback_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
{
        return sprintf(buf, "%d\n", aes_fallback_sz);
}
static ssize_t fallback_store(struct device *dev, struct device_attribute *attr,
                              const char *buf, size_t size)
{
        ssize_t status;
        long value;

        status = kstrtol(buf, 0, &value);
        if (status)
                return status;

        /* The HW accelerator only works with buffers larger than 9 bytes */
        if (value < 10) {
                dev_err(dev, "minimum fallback size is 10\n");
                return -EINVAL;
        }

        aes_fallback_sz = value;

        return size;
}
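
/*
 * Example (the sysfs path shown is an assumption; it depends on how the
 * platform device is named on a given board):
 *
 *      echo 512 > /sys/bus/platform/devices/.../fallback
 *
 * would route all requests below 512 bytes to the software fallback.
 */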
static ssize_t queue_len_show(struct device *dev, struct device_attribute *attr,
                              char *buf)
{
        struct omap_aes_dev *dd = dev_get_drvdata(dev);

        return sprintf(buf, "%d\n", dd->engine->queue.max_qlen);
}
static ssize_t queue_len_store(struct device *dev,
                               struct device_attribute *attr, const char *buf,
                               size_t size)
{
        struct omap_aes_dev *dd;
        ssize_t status;
        long value;
        unsigned long flags;

        status = kstrtol(buf, 0, &value);
        if (status)
                return status;

        if (value < 1)
                return -EINVAL;

        /*
         * Changing the queue size on the fly is safe: if the size becomes
         * smaller than the current size, the queue simply stops accepting
         * new entries until it has shrunk enough.
         */
        spin_lock_bh(&list_lock);
        list_for_each_entry(dd, &dev_list, list) {
                spin_lock_irqsave(&dd->lock, flags);
                dd->engine->queue.max_qlen = value;
                dd->aead_queue.base.max_qlen = value;
                spin_unlock_irqrestore(&dd->lock, flags);
        }
        spin_unlock_bh(&list_lock);

        return size;
}
static DEVICE_ATTR_RW(queue_len);
static DEVICE_ATTR_RW(fallback);

static struct attribute *omap_aes_attrs[] = {
        &dev_attr_queue_len.attr,
        &dev_attr_fallback.attr,
        NULL,
};

static struct attribute_group omap_aes_attr_group = {
        .attrs = omap_aes_attrs,
};
static int omap_aes_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct omap_aes_dev *dd;
        struct crypto_alg *algp;
        struct aead_alg *aalg;
        struct resource res;
        int err = -ENOMEM, i, j, irq = -1;
        u32 reg;

        dd = devm_kzalloc(dev, sizeof(struct omap_aes_dev), GFP_KERNEL);
        if (dd == NULL) {
                dev_err(dev, "unable to alloc data struct.\n");
                goto err_data;
        }
        dd->dev = dev;
        platform_set_drvdata(pdev, dd);

        aead_init_queue(&dd->aead_queue, OMAP_AES_QUEUE_LENGTH);

        err = (dev->of_node) ? omap_aes_get_res_of(dd, dev, &res) :
                               omap_aes_get_res_pdev(dd, pdev, &res);
        if (err)
                goto err_res;

        dd->io_base = devm_ioremap_resource(dev, &res);
        if (IS_ERR(dd->io_base)) {
                err = PTR_ERR(dd->io_base);
                goto err_res;
        }
        dd->phys_base = res.start;

        pm_runtime_use_autosuspend(dev);
        pm_runtime_set_autosuspend_delay(dev, DEFAULT_AUTOSUSPEND_DELAY);

        pm_runtime_enable(dev);
        err = pm_runtime_get_sync(dev);
        if (err < 0) {
                dev_err(dev, "%s: failed to get_sync(%d)\n",
                        __func__, err);
                goto err_res;
        }

        omap_aes_dma_stop(dd);

        reg = omap_aes_read(dd, AES_REG_REV(dd));

        pm_runtime_put_sync(dev);

        dev_info(dev, "OMAP AES hw accel rev: %u.%u\n",
                 (reg & dd->pdata->major_mask) >> dd->pdata->major_shift,
                 (reg & dd->pdata->minor_mask) >> dd->pdata->minor_shift);

        tasklet_init(&dd->done_task, omap_aes_done_task, (unsigned long)dd);

        err = omap_aes_dma_init(dd);
        if (err == -EPROBE_DEFER) {
                goto err_irq;
        } else if (err && AES_REG_IRQ_STATUS(dd) && AES_REG_IRQ_ENABLE(dd)) {
                dd->pio_only = 1;

                irq = platform_get_irq(pdev, 0);
                if (irq < 0) {
                        dev_err(dev, "can't get IRQ resource\n");
                        err = irq;
                        goto err_irq;
                }

                err = devm_request_irq(dev, irq, omap_aes_irq, 0,
                                       dev_name(dev), dd);
                if (err) {
                        dev_err(dev, "Unable to grab omap-aes IRQ\n");
                        goto err_irq;
                }
        }

        spin_lock_init(&dd->lock);

        INIT_LIST_HEAD(&dd->list);
        spin_lock(&list_lock);
        list_add_tail(&dd->list, &dev_list);
        spin_unlock(&list_lock);

        /* Initialize crypto engine */
        dd->engine = crypto_engine_alloc_init(dev, 1);
        if (!dd->engine) {
                err = -ENOMEM;
                goto err_engine;
        }

        err = crypto_engine_start(dd->engine);
        if (err)
                goto err_engine;

        for (i = 0; i < dd->pdata->algs_info_size; i++) {
                if (!dd->pdata->algs_info[i].registered) {
                        for (j = 0; j < dd->pdata->algs_info[i].size; j++) {
                                algp = &dd->pdata->algs_info[i].algs_list[j];

                                pr_debug("reg alg: %s\n", algp->cra_name);
                                INIT_LIST_HEAD(&algp->cra_list);

                                err = crypto_register_alg(algp);
                                if (err)
                                        goto err_algs;

                                dd->pdata->algs_info[i].registered++;
                        }
                }
        }

        if (dd->pdata->aead_algs_info &&
            !dd->pdata->aead_algs_info->registered) {
                for (i = 0; i < dd->pdata->aead_algs_info->size; i++) {
                        aalg = &dd->pdata->aead_algs_info->algs_list[i];
                        algp = &aalg->base;

                        pr_debug("reg alg: %s\n", algp->cra_name);
                        INIT_LIST_HEAD(&algp->cra_list);

                        err = crypto_register_aead(aalg);
                        if (err)
                                goto err_aead_algs;

                        dd->pdata->aead_algs_info->registered++;
                }
        }

        err = sysfs_create_group(&dev->kobj, &omap_aes_attr_group);
        if (err) {
                dev_err(dev, "could not create sysfs device attrs\n");
                goto err_aead_algs;
        }

        return 0;
err_aead_algs:
        for (i = dd->pdata->aead_algs_info->registered - 1; i >= 0; i--) {
                aalg = &dd->pdata->aead_algs_info->algs_list[i];
                crypto_unregister_aead(aalg);
        }
err_algs:
        for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
                for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
                        crypto_unregister_alg(
                                        &dd->pdata->algs_info[i].algs_list[j]);
err_engine:
        if (dd->engine)
                crypto_engine_exit(dd->engine);

        omap_aes_dma_cleanup(dd);
err_irq:
        tasklet_kill(&dd->done_task);
        pm_runtime_disable(dev);
err_res:
        dd = NULL;
err_data:
        dev_err(dev, "initialization failed.\n");
        return err;
}
static int omap_aes_remove(struct platform_device *pdev)
{
        struct omap_aes_dev *dd = platform_get_drvdata(pdev);
        struct aead_alg *aalg;
        int i, j;

        if (!dd)
                return -ENODEV;

        spin_lock(&list_lock);
        list_del(&dd->list);
        spin_unlock(&list_lock);

        for (i = dd->pdata->algs_info_size - 1; i >= 0; i--)
                for (j = dd->pdata->algs_info[i].registered - 1; j >= 0; j--)
                        crypto_unregister_alg(
                                        &dd->pdata->algs_info[i].algs_list[j]);

        for (i = dd->pdata->aead_algs_info->size - 1; i >= 0; i--) {
                aalg = &dd->pdata->aead_algs_info->algs_list[i];
                crypto_unregister_aead(aalg);
        }

        crypto_engine_exit(dd->engine);

        tasklet_kill(&dd->done_task);
        omap_aes_dma_cleanup(dd);
        pm_runtime_disable(dd->dev);
        dd = NULL;

        return 0;
}
#ifdef CONFIG_PM_SLEEP
static int omap_aes_suspend(struct device *dev)
{
        pm_runtime_put_sync(dev);
        return 0;
}

static int omap_aes_resume(struct device *dev)
{
        pm_runtime_get_sync(dev);
        return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(omap_aes_pm_ops, omap_aes_suspend, omap_aes_resume);
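
/*
 * System suspend simply drops the runtime PM reference (and resume takes
 * it back), leaving the device otherwise managed by runtime PM
 * autosuspend as configured in omap_aes_probe().
 */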
static struct platform_driver omap_aes_driver = {
        .probe  = omap_aes_probe,
        .remove = omap_aes_remove,
        .driver = {
                .name           = "omap-aes",
                .pm             = &omap_aes_pm_ops,
                .of_match_table = omap_aes_of_match,
        },
};

module_platform_driver(omap_aes_driver);
MODULE_DESCRIPTION("OMAP AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Dmitry Kasatkin");