4 * Support for SAHARA cryptographic accelerator.
7 * Copyright (c) 2013 Vista Silicon S.L.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as published
12 * by the Free Software Foundation.
14 * Based on omap-aes.c and tegra-aes.c
17 #include <crypto/algapi.h>
18 #include <crypto/aes.h>
19 #include <crypto/hash.h>
20 #include <crypto/internal/hash.h>
21 #include <crypto/scatterwalk.h>
22 #include <crypto/sha.h>
24 #include <linux/clk.h>
25 #include <linux/crypto.h>
26 #include <linux/interrupt.h>
28 #include <linux/irq.h>
29 #include <linux/kernel.h>
30 #include <linux/kthread.h>
31 #include <linux/module.h>
32 #include <linux/mutex.h>
34 #include <linux/of_device.h>
35 #include <linux/platform_device.h>
37 #define SHA_BUFFER_LEN PAGE_SIZE
38 #define SAHARA_MAX_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
40 #define SAHARA_NAME "sahara"
41 #define SAHARA_VERSION_3 3
42 #define SAHARA_VERSION_4 4
43 #define SAHARA_TIMEOUT_MS 1000
44 #define SAHARA_MAX_HW_DESC 2
45 #define SAHARA_MAX_HW_LINK 20
47 #define FLAGS_MODE_MASK 0x000f
48 #define FLAGS_ENCRYPT BIT(0)
49 #define FLAGS_CBC BIT(1)
50 #define FLAGS_NEW_KEY BIT(3)
52 #define SAHARA_HDR_BASE 0x00800000
53 #define SAHARA_HDR_SKHA_ALG_AES 0
54 #define SAHARA_HDR_SKHA_OP_ENC (1 << 2)
55 #define SAHARA_HDR_SKHA_MODE_ECB (0 << 3)
56 #define SAHARA_HDR_SKHA_MODE_CBC (1 << 3)
57 #define SAHARA_HDR_FORM_DATA (5 << 16)
58 #define SAHARA_HDR_FORM_KEY (8 << 16)
59 #define SAHARA_HDR_LLO (1 << 24)
60 #define SAHARA_HDR_CHA_SKHA (1 << 28)
61 #define SAHARA_HDR_CHA_MDHA (2 << 28)
62 #define SAHARA_HDR_PARITY_BIT (1 << 31)
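/* The header-building helpers below keep every descriptor header word at
 * odd bit parity, using this bit to correct the parity when needed.
 */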
64 #define SAHARA_HDR_MDHA_SET_MODE_MD_KEY 0x20880000
65 #define SAHARA_HDR_MDHA_SET_MODE_HASH 0x208D0000
66 #define SAHARA_HDR_MDHA_HASH 0xA0850000
67 #define SAHARA_HDR_MDHA_STORE_DIGEST 0x20820000
68 #define SAHARA_HDR_MDHA_ALG_SHA1 0
69 #define SAHARA_HDR_MDHA_ALG_MD5 1
70 #define SAHARA_HDR_MDHA_ALG_SHA256 2
71 #define SAHARA_HDR_MDHA_ALG_SHA224 3
72 #define SAHARA_HDR_MDHA_PDATA (1 << 2)
73 #define SAHARA_HDR_MDHA_HMAC (1 << 3)
74 #define SAHARA_HDR_MDHA_INIT (1 << 5)
75 #define SAHARA_HDR_MDHA_IPAD (1 << 6)
76 #define SAHARA_HDR_MDHA_OPAD (1 << 7)
77 #define SAHARA_HDR_MDHA_SWAP (1 << 8)
78 #define SAHARA_HDR_MDHA_MAC_FULL (1 << 9)
79 #define SAHARA_HDR_MDHA_SSL (1 << 10)
81 /* SAHARA can only process one request at a time */
82 #define SAHARA_QUEUE_LENGTH 1
84 #define SAHARA_REG_VERSION 0x00
85 #define SAHARA_REG_DAR 0x04
86 #define SAHARA_REG_CONTROL 0x08
87 #define SAHARA_CONTROL_SET_THROTTLE(x) (((x) & 0xff) << 24)
88 #define SAHARA_CONTROL_SET_MAXBURST(x) (((x) & 0xff) << 16)
89 #define SAHARA_CONTROL_RNG_AUTORSD (1 << 7)
90 #define SAHARA_CONTROL_ENABLE_INT (1 << 4)
91 #define SAHARA_REG_CMD 0x0C
92 #define SAHARA_CMD_RESET (1 << 0)
93 #define SAHARA_CMD_CLEAR_INT (1 << 8)
94 #define SAHARA_CMD_CLEAR_ERR (1 << 9)
95 #define SAHARA_CMD_SINGLE_STEP (1 << 10)
96 #define SAHARA_CMD_MODE_BATCH (1 << 16)
97 #define SAHARA_CMD_MODE_DEBUG (1 << 18)
98 #define SAHARA_REG_STATUS 0x10
99 #define SAHARA_STATUS_GET_STATE(x) ((x) & 0x7)
100 #define SAHARA_STATE_IDLE 0
101 #define SAHARA_STATE_BUSY 1
102 #define SAHARA_STATE_ERR 2
103 #define SAHARA_STATE_FAULT 3
104 #define SAHARA_STATE_COMPLETE 4
105 #define SAHARA_STATE_COMP_FLAG (1 << 2)
106 #define SAHARA_STATUS_DAR_FULL (1 << 3)
107 #define SAHARA_STATUS_ERROR (1 << 4)
108 #define SAHARA_STATUS_SECURE (1 << 5)
109 #define SAHARA_STATUS_FAIL (1 << 6)
110 #define SAHARA_STATUS_INIT (1 << 7)
111 #define SAHARA_STATUS_RNG_RESEED (1 << 8)
112 #define SAHARA_STATUS_ACTIVE_RNG (1 << 9)
113 #define SAHARA_STATUS_ACTIVE_MDHA (1 << 10)
114 #define SAHARA_STATUS_ACTIVE_SKHA (1 << 11)
115 #define SAHARA_STATUS_MODE_BATCH (1 << 16)
116 #define SAHARA_STATUS_MODE_DEDICATED (1 << 17)
117 #define SAHARA_STATUS_MODE_DEBUG (1 << 18)
118 #define SAHARA_STATUS_GET_ISTATE(x) (((x) >> 24) & 0xff)
119 #define SAHARA_REG_ERRSTATUS 0x14
120 #define SAHARA_ERRSTATUS_GET_SOURCE(x) ((x) & 0xf)
121 #define SAHARA_ERRSOURCE_CHA 14
122 #define SAHARA_ERRSOURCE_DMA 15
123 #define SAHARA_ERRSTATUS_DMA_DIR (1 << 8)
124 #define SAHARA_ERRSTATUS_GET_DMASZ(x) (((x) >> 9) & 0x3)
125 #define SAHARA_ERRSTATUS_GET_DMASRC(x) (((x) >> 13) & 0x7)
126 #define SAHARA_ERRSTATUS_GET_CHASRC(x) (((x) >> 16) & 0xfff)
127 #define SAHARA_ERRSTATUS_GET_CHAERR(x) (((x) >> 28) & 0x3)
128 #define SAHARA_REG_FADDR 0x18
129 #define SAHARA_REG_CDAR 0x1C
130 #define SAHARA_REG_IDAR 0x20
132 struct sahara_hw_desc {
141 struct sahara_hw_link {
150 /* AES-specific context */
152 u8 key[AES_KEYSIZE_128];
153 struct crypto_ablkcipher *fallback;
155 /* SHA-specific context */
156 struct crypto_shash *shash_fallback;
159 struct sahara_aes_reqctx {
164 * struct sahara_sha_reqctx - private data per request
165 * @buf: holds data for requests smaller than block_size
166 * @rembuf: used to prepare one block_size-aligned request
167 * @context: hw-specific context for request. Digest is extracted from this
168 * @mode: specifies what type of hw-descriptor needs to be built
169 * @digest_size: length of digest for this request
170 * @context_size: length of hw-context for this request.
171 * Always digest_size + 4
172 * @buf_cnt: number of bytes saved in buf
173 * @sg_in_idx: hw link index where the input data links start
174 * @in_sg: scatterlist for input data
175 * @in_sg_chain: scatterlists for chained input data
176 * @total: total number of bytes for transfer
177 * @last: is this the last block
178 * @first: is this the first block
179 * @active: inside a transfer
181 struct sahara_sha_reqctx {
182 u8 buf[SAHARA_MAX_SHA_BLOCK_SIZE];
183 u8 rembuf[SAHARA_MAX_SHA_BLOCK_SIZE];
184 u8 context[SHA256_DIGEST_SIZE + 4];
187 unsigned int digest_size;
188 unsigned int context_size;
189 unsigned int buf_cnt;
190 unsigned int sg_in_idx;
191 struct scatterlist *in_sg;
192 struct scatterlist in_sg_chain[2];
200 struct device *device;
201 unsigned int version;
202 void __iomem *regs_base;
205 struct mutex queue_mutex;
206 struct task_struct *kthread;
207 struct completion dma_completion;
209 struct sahara_ctx *ctx;
211 struct crypto_queue queue;
214 struct sahara_hw_desc *hw_desc[SAHARA_MAX_HW_DESC];
215 dma_addr_t hw_phys_desc[SAHARA_MAX_HW_DESC];
218 dma_addr_t key_phys_base;
221 dma_addr_t iv_phys_base;
224 dma_addr_t context_phys_base;
226 struct sahara_hw_link *hw_link[SAHARA_MAX_HW_LINK];
227 dma_addr_t hw_phys_link[SAHARA_MAX_HW_LINK];
230 struct scatterlist *in_sg;
232 struct scatterlist *out_sg;
238 static struct sahara_dev *dev_ptr;
240 static inline void sahara_write(struct sahara_dev *dev, u32 data, u32 reg)
242 writel(data, dev->regs_base + reg);
245 static inline unsigned int sahara_read(struct sahara_dev *dev, u32 reg)
247 return readl(dev->regs_base + reg);
250 static u32 sahara_aes_key_hdr(struct sahara_dev *dev)
252 u32 hdr = SAHARA_HDR_BASE | SAHARA_HDR_SKHA_ALG_AES |
253 SAHARA_HDR_FORM_KEY | SAHARA_HDR_LLO |
254 SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
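	/* The base header above already has odd parity; each mode bit ORed
	 * in below flips it, so the parity bit is toggled back every time.
	 */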
256 if (dev->flags & FLAGS_CBC) {
257 hdr |= SAHARA_HDR_SKHA_MODE_CBC;
258 hdr ^= SAHARA_HDR_PARITY_BIT;
261 if (dev->flags & FLAGS_ENCRYPT) {
262 hdr |= SAHARA_HDR_SKHA_OP_ENC;
263 hdr ^= SAHARA_HDR_PARITY_BIT;
269 static u32 sahara_aes_data_link_hdr(struct sahara_dev *dev)
271 return SAHARA_HDR_BASE | SAHARA_HDR_FORM_DATA |
272 SAHARA_HDR_CHA_SKHA | SAHARA_HDR_PARITY_BIT;
275 static const char *sahara_err_src[16] = {
278 "Descriptor length error",
279 "Descriptor length or pointer error",
281 "Link pointer error",
282 "Input buffer error",
283 "Output buffer error",
284 "Output buffer starvation",
285 "Internal state fault",
286 "General descriptor problem",
288 "Descriptor address error",
289 "Link address error",
294 static const char *sahara_err_dmasize[4] = {
296 "Half-word transfer",
301 static const char *sahara_err_dmasrc[8] = {
304 "Internal IP bus error",
306 "DMA crosses 256 byte boundary",
312 static const char *sahara_cha_errsrc[12] = {
313 "Input buffer non-empty",
318 "Write during processing",
319 "CTX read during processing",
321 "Input buffer disabled/underflow",
322 "Output buffer disabled/overflow",
323 "DES key parity error",
327 static const char *sahara_cha_err[4] = { "No error", "SKHA", "MDHA", "RNG" };
329 static void sahara_decode_error(struct sahara_dev *dev, unsigned int error)
331 u8 source = SAHARA_ERRSTATUS_GET_SOURCE(error);
332 u16 chasrc = ffs(SAHARA_ERRSTATUS_GET_CHASRC(error));
334 dev_err(dev->device, "%s: Error Register = 0x%08x\n", __func__, error);
336 dev_err(dev->device, " - %s.\n", sahara_err_src[source]);
338 if (source == SAHARA_ERRSOURCE_DMA) {
339 if (error & SAHARA_ERRSTATUS_DMA_DIR)
340 dev_err(dev->device, " * DMA read.\n");
342 dev_err(dev->device, " * DMA write.\n");
344 dev_err(dev->device, " * %s.\n",
345 sahara_err_dmasize[SAHARA_ERRSTATUS_GET_DMASZ(error)]);
346 dev_err(dev->device, " * %s.\n",
347 sahara_err_dmasrc[SAHARA_ERRSTATUS_GET_DMASRC(error)]);
348 } else if (source == SAHARA_ERRSOURCE_CHA) {
349 dev_err(dev->device, " * %s.\n",
350 sahara_cha_errsrc[chasrc]);
351 dev_err(dev->device, " * %s.\n",
352 sahara_cha_err[SAHARA_ERRSTATUS_GET_CHAERR(error)]);
354 dev_err(dev->device, "\n");
357 static const char *sahara_state[4] = { "Idle", "Busy", "Error", "HW Fault" };
359 static void sahara_decode_status(struct sahara_dev *dev, unsigned int status)
363 if (!IS_ENABLED(DEBUG))
366 state = SAHARA_STATUS_GET_STATE(status);
368 dev_dbg(dev->device, "%s: Status Register = 0x%08x\n",
371 dev_dbg(dev->device, " - State = %d:\n", state);
372 if (state & SAHARA_STATE_COMP_FLAG)
373 dev_dbg(dev->device, " * Descriptor completed. IRQ pending.\n");
375 dev_dbg(dev->device, " * %s.\n",
376 sahara_state[state & ~SAHARA_STATE_COMP_FLAG]);
378 if (status & SAHARA_STATUS_DAR_FULL)
379 dev_dbg(dev->device, " - DAR Full.\n");
380 if (status & SAHARA_STATUS_ERROR)
381 dev_dbg(dev->device, " - Error.\n");
382 if (status & SAHARA_STATUS_SECURE)
383 dev_dbg(dev->device, " - Secure.\n");
384 if (status & SAHARA_STATUS_FAIL)
385 dev_dbg(dev->device, " - Fail.\n");
386 if (status & SAHARA_STATUS_RNG_RESEED)
387 dev_dbg(dev->device, " - RNG Reseed Request.\n");
388 if (status & SAHARA_STATUS_ACTIVE_RNG)
389 dev_dbg(dev->device, " - RNG Active.\n");
390 if (status & SAHARA_STATUS_ACTIVE_MDHA)
391 dev_dbg(dev->device, " - MDHA Active.\n");
392 if (status & SAHARA_STATUS_ACTIVE_SKHA)
393 dev_dbg(dev->device, " - SKHA Active.\n");
395 if (status & SAHARA_STATUS_MODE_BATCH)
396 dev_dbg(dev->device, " - Batch Mode.\n");
397 else if (status & SAHARA_STATUS_MODE_DEDICATED)
398 dev_dbg(dev->device, " - Dedicated Mode.\n");
399 else if (status & SAHARA_STATUS_MODE_DEBUG)
400 dev_dbg(dev->device, " - Debug Mode.\n");
402 dev_dbg(dev->device, " - Internal state = 0x%02x\n",
403 SAHARA_STATUS_GET_ISTATE(status));
405 dev_dbg(dev->device, "Current DAR: 0x%08x\n",
406 sahara_read(dev, SAHARA_REG_CDAR));
407 dev_dbg(dev->device, "Initial DAR: 0x%08x\n\n",
408 sahara_read(dev, SAHARA_REG_IDAR));
411 static void sahara_dump_descriptors(struct sahara_dev *dev)
415 if (!IS_ENABLED(DEBUG))
418 for (i = 0; i < SAHARA_MAX_HW_DESC; i++) {
419 dev_dbg(dev->device, "Descriptor (%d) (%pad):\n",
420 i, &dev->hw_phys_desc[i]);
421 dev_dbg(dev->device, "\thdr = 0x%08x\n", dev->hw_desc[i]->hdr);
422 dev_dbg(dev->device, "\tlen1 = %u\n", dev->hw_desc[i]->len1);
423 dev_dbg(dev->device, "\tp1 = 0x%08x\n", dev->hw_desc[i]->p1);
424 dev_dbg(dev->device, "\tlen2 = %u\n", dev->hw_desc[i]->len2);
425 dev_dbg(dev->device, "\tp2 = 0x%08x\n", dev->hw_desc[i]->p2);
426 dev_dbg(dev->device, "\tnext = 0x%08x\n",
427 dev->hw_desc[i]->next);
429 dev_dbg(dev->device, "\n");
432 static void sahara_dump_links(struct sahara_dev *dev)
436 if (!IS_ENABLED(DEBUG))
439 for (i = 0; i < SAHARA_MAX_HW_LINK; i++) {
440 dev_dbg(dev->device, "Link (%d) (%pad):\n",
441 i, &dev->hw_phys_link[i]);
442 dev_dbg(dev->device, "\tlen = %u\n", dev->hw_link[i]->len);
443 dev_dbg(dev->device, "\tp = 0x%08x\n", dev->hw_link[i]->p);
444 dev_dbg(dev->device, "\tnext = 0x%08x\n",
445 dev->hw_link[i]->next);
447 dev_dbg(dev->device, "\n");
450 static int sahara_hw_descriptor_create(struct sahara_dev *dev)
452 struct sahara_ctx *ctx = dev->ctx;
453 struct scatterlist *sg;
458 /* Copy new key if necessary */
459 if (ctx->flags & FLAGS_NEW_KEY) {
460 memcpy(dev->key_base, ctx->key, ctx->keylen);
461 ctx->flags &= ~FLAGS_NEW_KEY;
463 if (dev->flags & FLAGS_CBC) {
464 dev->hw_desc[idx]->len1 = AES_BLOCK_SIZE;
465 dev->hw_desc[idx]->p1 = dev->iv_phys_base;
467 dev->hw_desc[idx]->len1 = 0;
468 dev->hw_desc[idx]->p1 = 0;
470 dev->hw_desc[idx]->len2 = ctx->keylen;
471 dev->hw_desc[idx]->p2 = dev->key_phys_base;
472 dev->hw_desc[idx]->next = dev->hw_phys_desc[1];
474 dev->hw_desc[idx]->hdr = sahara_aes_key_hdr(dev);
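	/* hw_desc[0], built above, loads the key (and the IV for CBC);
	 * hw_desc[1], filled in below, describes the actual data transfer.
	 */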
479 dev->nb_in_sg = sg_nents_for_len(dev->in_sg, dev->total);
480 if (dev->nb_in_sg < 0) {
481 dev_err(dev->device, "Invalid number of src SG.\n");
482 return dev->nb_in_sg;
484 dev->nb_out_sg = sg_nents_for_len(dev->out_sg, dev->total);
485 if (dev->nb_out_sg < 0) {
486 dev_err(dev->device, "Invalid number of dst SG.\n");
487 return dev->nb_out_sg;
489 if ((dev->nb_in_sg + dev->nb_out_sg) > SAHARA_MAX_HW_LINK) {
490 dev_err(dev->device, "not enough hw links (%d)\n",
491 dev->nb_in_sg + dev->nb_out_sg);
495 ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
497 if (ret != dev->nb_in_sg) {
498 dev_err(dev->device, "couldn't map in sg\n");
501 ret = dma_map_sg(dev->device, dev->out_sg, dev->nb_out_sg,
503 if (ret != dev->nb_out_sg) {
504 dev_err(dev->device, "couldn't map out sg\n");
508 /* Create input links */
509 dev->hw_desc[idx]->p1 = dev->hw_phys_link[0];
511 for (i = 0; i < dev->nb_in_sg; i++) {
512 dev->hw_link[i]->len = sg->length;
513 dev->hw_link[i]->p = sg->dma_address;
514 if (i == (dev->nb_in_sg - 1)) {
515 dev->hw_link[i]->next = 0;
517 dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
522 /* Create output links */
523 dev->hw_desc[idx]->p2 = dev->hw_phys_link[i];
525 for (j = i; j < dev->nb_out_sg + i; j++) {
526 dev->hw_link[j]->len = sg->length;
527 dev->hw_link[j]->p = sg->dma_address;
528 if (j == (dev->nb_out_sg + i - 1)) {
529 dev->hw_link[j]->next = 0;
531 dev->hw_link[j]->next = dev->hw_phys_link[j + 1];
536 /* Fill remaining fields of hw_desc[1] */
537 dev->hw_desc[idx]->hdr = sahara_aes_data_link_hdr(dev);
538 dev->hw_desc[idx]->len1 = dev->total;
539 dev->hw_desc[idx]->len2 = dev->total;
540 dev->hw_desc[idx]->next = 0;
542 sahara_dump_descriptors(dev);
543 sahara_dump_links(dev);
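	/* Writing the first descriptor's DMA address to DAR hands the chain
	 * to the accelerator; the IRQ handler signals dev->dma_completion
	 * when the hardware finishes.
	 */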
545 sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
550 dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
553 dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
559 static int sahara_aes_process(struct ablkcipher_request *req)
561 struct sahara_dev *dev = dev_ptr;
562 struct sahara_ctx *ctx;
563 struct sahara_aes_reqctx *rctx;
565 unsigned long timeout;
567 /* Request is ready to be dispatched by the device */
569 "dispatch request (nbytes=%d, src=%p, dst=%p)\n",
570 req->nbytes, req->src, req->dst);
572 /* assign new request to device */
573 dev->total = req->nbytes;
574 dev->in_sg = req->src;
575 dev->out_sg = req->dst;
577 rctx = ablkcipher_request_ctx(req);
578 ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req));
579 rctx->mode &= FLAGS_MODE_MASK;
580 dev->flags = (dev->flags & ~FLAGS_MODE_MASK) | rctx->mode;
582 if ((dev->flags & FLAGS_CBC) && req->info)
583 memcpy(dev->iv_base, req->info, AES_BLOCK_SIZE);
585 /* assign new context to device */
588 reinit_completion(&dev->dma_completion);
590 ret = sahara_hw_descriptor_create(dev);
594 timeout = wait_for_completion_timeout(&dev->dma_completion,
595 msecs_to_jiffies(SAHARA_TIMEOUT_MS));
597 dev_err(dev->device, "AES timeout\n");
601 dma_unmap_sg(dev->device, dev->out_sg, dev->nb_out_sg,
603 dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
609 static int sahara_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
612 struct sahara_ctx *ctx = crypto_ablkcipher_ctx(tfm);
615 ctx->keylen = keylen;
617 /* SAHARA only supports 128-bit keys */
618 if (keylen == AES_KEYSIZE_128) {
619 memcpy(ctx->key, key, keylen);
620 ctx->flags |= FLAGS_NEW_KEY;
624 if (keylen != AES_KEYSIZE_128 &&
625 keylen != AES_KEYSIZE_192 && keylen != AES_KEYSIZE_256)
629 * The requested key size is not supported by HW, do a fallback.
631 ctx->fallback->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
632 ctx->fallback->base.crt_flags |=
633 (tfm->base.crt_flags & CRYPTO_TFM_REQ_MASK);
635 ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
637 struct crypto_tfm *tfm_aux = crypto_ablkcipher_tfm(tfm);
639 tfm_aux->crt_flags &= ~CRYPTO_TFM_RES_MASK;
640 tfm_aux->crt_flags |=
641 (ctx->fallback->base.crt_flags & CRYPTO_TFM_RES_MASK);
646 static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
648 struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req);
649 struct sahara_dev *dev = dev_ptr;
652 dev_dbg(dev->device, "nbytes: %d, enc: %d, cbc: %d\n",
653 req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC));
655 if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) {
657 "request size is not exact amount of AES blocks\n");
663 mutex_lock(&dev->queue_mutex);
664 err = ablkcipher_enqueue_request(&dev->queue, req);
665 mutex_unlock(&dev->queue_mutex);
667 wake_up_process(dev->kthread);
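/* The entry points below hand 192/256-bit keys to the software fallback,
 * since the hardware only handles AES-128.
 */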
672 static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req)
674 struct crypto_tfm *tfm =
675 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
676 struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
677 crypto_ablkcipher_reqtfm(req));
680 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
681 ablkcipher_request_set_tfm(req, ctx->fallback);
682 err = crypto_ablkcipher_encrypt(req);
683 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
687 return sahara_aes_crypt(req, FLAGS_ENCRYPT);
690 static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req)
692 struct crypto_tfm *tfm =
693 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
694 struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
695 crypto_ablkcipher_reqtfm(req));
698 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
699 ablkcipher_request_set_tfm(req, ctx->fallback);
700 err = crypto_ablkcipher_decrypt(req);
701 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
705 return sahara_aes_crypt(req, 0);
708 static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req)
710 struct crypto_tfm *tfm =
711 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
712 struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
713 crypto_ablkcipher_reqtfm(req));
716 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
717 ablkcipher_request_set_tfm(req, ctx->fallback);
718 err = crypto_ablkcipher_encrypt(req);
719 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
723 return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC);
726 static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req)
728 struct crypto_tfm *tfm =
729 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
730 struct sahara_ctx *ctx = crypto_ablkcipher_ctx(
731 crypto_ablkcipher_reqtfm(req));
734 if (unlikely(ctx->keylen != AES_KEYSIZE_128)) {
735 ablkcipher_request_set_tfm(req, ctx->fallback);
736 err = crypto_ablkcipher_decrypt(req);
737 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
741 return sahara_aes_crypt(req, FLAGS_CBC);
744 static int sahara_aes_cra_init(struct crypto_tfm *tfm)
746 const char *name = crypto_tfm_alg_name(tfm);
747 struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
749 ctx->fallback = crypto_alloc_ablkcipher(name, 0,
750 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
751 if (IS_ERR(ctx->fallback)) {
752 pr_err("Error allocating fallback algo %s\n", name);
753 return PTR_ERR(ctx->fallback);
756 tfm->crt_ablkcipher.reqsize = sizeof(struct sahara_aes_reqctx);
761 static void sahara_aes_cra_exit(struct crypto_tfm *tfm)
763 struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
766 crypto_free_ablkcipher(ctx->fallback);
767 ctx->fallback = NULL;
770 static u32 sahara_sha_init_hdr(struct sahara_dev *dev,
771 struct sahara_sha_reqctx *rctx)
778 hdr |= SAHARA_HDR_MDHA_SET_MODE_HASH;
779 hdr |= SAHARA_HDR_MDHA_INIT;
781 hdr |= SAHARA_HDR_MDHA_SET_MODE_MD_KEY;
785 hdr |= SAHARA_HDR_MDHA_PDATA;
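	/* Keep the header word at odd bit parity. */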
787 if (hweight_long(hdr) % 2 == 0)
788 hdr |= SAHARA_HDR_PARITY_BIT;
793 static int sahara_sha_hw_links_create(struct sahara_dev *dev,
794 struct sahara_sha_reqctx *rctx,
797 struct scatterlist *sg;
801 dev->in_sg = rctx->in_sg;
803 dev->nb_in_sg = sg_nents_for_len(dev->in_sg, rctx->total);
804 if (dev->nb_in_sg < 0) {
805 dev_err(dev->device, "Invalid number of src SG.\n");
806 return dev->nb_in_sg;
808 if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
809 dev_err(dev->device, "not enough hw links (%d)\n",
810 dev->nb_in_sg);
815 ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg, DMA_TO_DEVICE);
819 for (i = start; i < dev->nb_in_sg + start; i++) {
820 dev->hw_link[i]->len = sg->length;
821 dev->hw_link[i]->p = sg->dma_address;
822 if (i == (dev->nb_in_sg + start - 1)) {
823 dev->hw_link[i]->next = 0;
825 dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
833 static int sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev,
834 struct sahara_sha_reqctx *rctx,
835 struct ahash_request *req,
842 /* Create initial descriptor: #8 */
843 dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
845 /* Create hash descriptor: #10. Must follow #6. */
846 dev->hw_desc[index]->hdr = SAHARA_HDR_MDHA_HASH;
848 dev->hw_desc[index]->len1 = rctx->total;
849 if (dev->hw_desc[index]->len1 == 0) {
850 /* if len1 is 0, p1 must be 0, too */
851 dev->hw_desc[index]->p1 = 0;
854 /* Create input links */
855 dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
856 i = sahara_sha_hw_links_create(dev, rctx, index);
858 rctx->sg_in_idx = index;
863 dev->hw_desc[index]->p2 = dev->hw_phys_link[i];
865 /* Save the context for the next operation */
866 result_len = rctx->context_size;
867 dev->hw_link[i]->p = dev->context_phys_base;
869 dev->hw_link[i]->len = result_len;
870 dev->hw_desc[index]->len2 = result_len;
872 dev->hw_link[i]->next = 0;
878 * Load descriptor aka #6
880 * To load a previously saved context back to the MDHA unit
886 static int sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev,
887 struct sahara_sha_reqctx *rctx,
888 struct ahash_request *req,
891 dev->hw_desc[index]->hdr = sahara_sha_init_hdr(dev, rctx);
893 dev->hw_desc[index]->len1 = rctx->context_size;
894 dev->hw_desc[index]->p1 = dev->hw_phys_link[index];
895 dev->hw_desc[index]->len2 = 0;
896 dev->hw_desc[index]->p2 = 0;
898 dev->hw_link[index]->len = rctx->context_size;
899 dev->hw_link[index]->p = dev->context_phys_base;
900 dev->hw_link[index]->next = 0;
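/* Walk the scatterlist and trim it so that it covers no more than nbytes of
 * data (length-recalculation helper for the prepared SHA request).
 */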
905 static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
907 if (!sg || !sg->length)
910 while (nbytes && sg) {
911 if (nbytes <= sg->length) {
916 nbytes -= sg->length;
923 static int sahara_sha_prepare_request(struct ahash_request *req)
925 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
926 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
927 unsigned int hash_later;
928 unsigned int block_size;
931 block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
933 /* append bytes from previous operation */
934 len = rctx->buf_cnt + req->nbytes;
936 /* only the last transfer can be padded in hardware */
937 if (!rctx->last && (len < block_size)) {
938 /* too little data, save for next operation */
939 scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src,
941 rctx->buf_cnt += req->nbytes;
946 /* add data from previous operation first */
948 memcpy(rctx->rembuf, rctx->buf, rctx->buf_cnt);
950 /* data must always be a multiple of block_size */
951 hash_later = rctx->last ? 0 : len & (block_size - 1);
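	/* Example: with a 64-byte block size, 10 buffered bytes and a
	 * 100-byte update give len = 110, so hash_later = 46; one full
	 * 64-byte block goes to the hardware now and 46 bytes are kept in
	 * buf for the next call.
	 */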
953 unsigned int offset = req->nbytes - hash_later;
954 /* Save remaining bytes for later use */
955 scatterwalk_map_and_copy(rctx->buf, req->src, offset,
959 /* nbytes should now be multiple of blocksize */
960 req->nbytes = req->nbytes - hash_later;
962 sahara_walk_and_recalc(req->src, req->nbytes);
964 /* have data from previous operation and current */
965 if (rctx->buf_cnt && req->nbytes) {
966 sg_init_table(rctx->in_sg_chain, 2);
967 sg_set_buf(rctx->in_sg_chain, rctx->rembuf, rctx->buf_cnt);
969 sg_chain(rctx->in_sg_chain, 2, req->src);
971 rctx->total = req->nbytes + rctx->buf_cnt;
972 rctx->in_sg = rctx->in_sg_chain;
974 req->src = rctx->in_sg_chain;
975 /* only data from previous operation */
976 } else if (rctx->buf_cnt) {
978 rctx->in_sg = req->src;
980 rctx->in_sg = rctx->in_sg_chain;
981 /* buf was copied into rembuf above */
982 sg_init_one(rctx->in_sg, rctx->rembuf, rctx->buf_cnt);
983 rctx->total = rctx->buf_cnt;
984 /* no data from previous operation */
986 rctx->in_sg = req->src;
987 rctx->total = req->nbytes;
988 req->src = rctx->in_sg;
991 /* on next call, we only have the remaining data in the buffer */
992 rctx->buf_cnt = hash_later;
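/* Run one hash operation on the device: the first block uses a single data
 * descriptor; later blocks chain a context-load descriptor in front of the
 * data descriptor so the MDHA unit resumes from the saved context.
 */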
997 static int sahara_sha_process(struct ahash_request *req)
999 struct sahara_dev *dev = dev_ptr;
1000 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1002 unsigned long timeout;
1004 ret = sahara_sha_prepare_request(req);
1009 sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0);
1010 dev->hw_desc[0]->next = 0;
1013 memcpy(dev->context_base, rctx->context, rctx->context_size);
1015 sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0);
1016 dev->hw_desc[0]->next = dev->hw_phys_desc[1];
1017 sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1);
1018 dev->hw_desc[1]->next = 0;
1021 sahara_dump_descriptors(dev);
1022 sahara_dump_links(dev);
1024 reinit_completion(&dev->dma_completion);
1026 sahara_write(dev, dev->hw_phys_desc[0], SAHARA_REG_DAR);
1028 timeout = wait_for_completion_timeout(&dev->dma_completion,
1029 msecs_to_jiffies(SAHARA_TIMEOUT_MS));
1031 dev_err(dev->device, "SHA timeout\n");
1035 if (rctx->sg_in_idx)
1036 dma_unmap_sg(dev->device, dev->in_sg, dev->nb_in_sg,
1039 memcpy(rctx->context, dev->context_base, rctx->context_size);
1042 memcpy(req->result, rctx->context, rctx->digest_size);
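/* Worker thread: woken whenever a request is enqueued, it dequeues one
 * request at a time, dispatches it to the AES or SHA path and signals
 * completion back to the crypto API.
 */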
1047 static int sahara_queue_manage(void *data)
1049 struct sahara_dev *dev = (struct sahara_dev *)data;
1050 struct crypto_async_request *async_req;
1051 struct crypto_async_request *backlog;
1055 __set_current_state(TASK_INTERRUPTIBLE);
1057 mutex_lock(&dev->queue_mutex);
1058 backlog = crypto_get_backlog(&dev->queue);
1059 async_req = crypto_dequeue_request(&dev->queue);
1060 mutex_unlock(&dev->queue_mutex);
1063 backlog->complete(backlog, -EINPROGRESS);
1066 if (crypto_tfm_alg_type(async_req->tfm) ==
1067 CRYPTO_ALG_TYPE_AHASH) {
1068 struct ahash_request *req =
1069 ahash_request_cast(async_req);
1071 ret = sahara_sha_process(req);
1073 struct ablkcipher_request *req =
1074 ablkcipher_request_cast(async_req);
1076 ret = sahara_aes_process(req);
1079 async_req->complete(async_req, ret);
1085 } while (!kthread_should_stop());
1090 static int sahara_sha_enqueue(struct ahash_request *req, int last)
1092 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1093 struct sahara_dev *dev = dev_ptr;
1096 if (!req->nbytes && !last)
1099 mutex_lock(&rctx->mutex);
1102 if (!rctx->active) {
1107 mutex_lock(&dev->queue_mutex);
1108 ret = crypto_enqueue_request(&dev->queue, &req->base);
1109 mutex_unlock(&dev->queue_mutex);
1111 wake_up_process(dev->kthread);
1112 mutex_unlock(&rctx->mutex);
1117 static int sahara_sha_init(struct ahash_request *req)
1119 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1120 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1122 memset(rctx, 0, sizeof(*rctx));
1124 switch (crypto_ahash_digestsize(tfm)) {
1125 case SHA1_DIGEST_SIZE:
1126 rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA1;
1127 rctx->digest_size = SHA1_DIGEST_SIZE;
1129 case SHA256_DIGEST_SIZE:
1130 rctx->mode |= SAHARA_HDR_MDHA_ALG_SHA256;
1131 rctx->digest_size = SHA256_DIGEST_SIZE;
1137 rctx->context_size = rctx->digest_size + 4;
1140 mutex_init(&rctx->mutex);
1145 static int sahara_sha_update(struct ahash_request *req)
1147 return sahara_sha_enqueue(req, 0);
1150 static int sahara_sha_final(struct ahash_request *req)
1153 return sahara_sha_enqueue(req, 1);
1156 static int sahara_sha_finup(struct ahash_request *req)
1158 return sahara_sha_enqueue(req, 1);
1161 static int sahara_sha_digest(struct ahash_request *req)
1163 sahara_sha_init(req);
1165 return sahara_sha_finup(req);
1168 static int sahara_sha_export(struct ahash_request *req, void *out)
1170 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1171 struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
1172 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1174 memcpy(out, ctx, sizeof(struct sahara_ctx));
1175 memcpy(out + sizeof(struct sahara_ctx), rctx,
1176 sizeof(struct sahara_sha_reqctx));
1181 static int sahara_sha_import(struct ahash_request *req, const void *in)
1183 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1184 struct sahara_ctx *ctx = crypto_ahash_ctx(ahash);
1185 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req);
1187 memcpy(ctx, in, sizeof(struct sahara_ctx));
1188 memcpy(rctx, in + sizeof(struct sahara_ctx),
1189 sizeof(struct sahara_sha_reqctx));
1194 static int sahara_sha_cra_init(struct crypto_tfm *tfm)
1196 const char *name = crypto_tfm_alg_name(tfm);
1197 struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
1199 ctx->shash_fallback = crypto_alloc_shash(name, 0,
1200 CRYPTO_ALG_NEED_FALLBACK);
1201 if (IS_ERR(ctx->shash_fallback)) {
1202 pr_err("Error allocating fallback algo %s\n", name);
1203 return PTR_ERR(ctx->shash_fallback);
1205 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1206 sizeof(struct sahara_sha_reqctx) +
1207 SHA_BUFFER_LEN + SHA256_BLOCK_SIZE);
1212 static void sahara_sha_cra_exit(struct crypto_tfm *tfm)
1214 struct sahara_ctx *ctx = crypto_tfm_ctx(tfm);
1216 crypto_free_shash(ctx->shash_fallback);
1217 ctx->shash_fallback = NULL;
1220 static struct crypto_alg aes_algs[] = {
1222 .cra_name = "ecb(aes)",
1223 .cra_driver_name = "sahara-ecb-aes",
1224 .cra_priority = 300,
1225 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1226 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1227 .cra_blocksize = AES_BLOCK_SIZE,
1228 .cra_ctxsize = sizeof(struct sahara_ctx),
1229 .cra_alignmask = 0x0,
1230 .cra_type = &crypto_ablkcipher_type,
1231 .cra_module = THIS_MODULE,
1232 .cra_init = sahara_aes_cra_init,
1233 .cra_exit = sahara_aes_cra_exit,
1234 .cra_u.ablkcipher = {
1235 .min_keysize = AES_MIN_KEY_SIZE,
1236 .max_keysize = AES_MAX_KEY_SIZE,
1237 .setkey = sahara_aes_setkey,
1238 .encrypt = sahara_aes_ecb_encrypt,
1239 .decrypt = sahara_aes_ecb_decrypt,
1242 .cra_name = "cbc(aes)",
1243 .cra_driver_name = "sahara-cbc-aes",
1244 .cra_priority = 300,
1245 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1246 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1247 .cra_blocksize = AES_BLOCK_SIZE,
1248 .cra_ctxsize = sizeof(struct sahara_ctx),
1249 .cra_alignmask = 0x0,
1250 .cra_type = &crypto_ablkcipher_type,
1251 .cra_module = THIS_MODULE,
1252 .cra_init = sahara_aes_cra_init,
1253 .cra_exit = sahara_aes_cra_exit,
1254 .cra_u.ablkcipher = {
1255 .min_keysize = AES_MIN_KEY_SIZE,
1256 .max_keysize = AES_MAX_KEY_SIZE,
1257 .ivsize = AES_BLOCK_SIZE,
1258 .setkey = sahara_aes_setkey,
1259 .encrypt = sahara_aes_cbc_encrypt,
1260 .decrypt = sahara_aes_cbc_decrypt,
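/*
 * Illustrative use (not part of this driver): a kernel client obtains these
 * ciphers through the generic API, e.g. crypto_alloc_ablkcipher("cbc(aes)",
 * 0, 0); with the priority of 300 above, the core prefers "sahara-cbc-aes"
 * over lower-priority software implementations when this driver is loaded.
 */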
1265 static struct ahash_alg sha_v3_algs[] = {
1267 .init = sahara_sha_init,
1268 .update = sahara_sha_update,
1269 .final = sahara_sha_final,
1270 .finup = sahara_sha_finup,
1271 .digest = sahara_sha_digest,
1272 .export = sahara_sha_export,
1273 .import = sahara_sha_import,
1274 .halg.digestsize = SHA1_DIGEST_SIZE,
1277 .cra_driver_name = "sahara-sha1",
1278 .cra_priority = 300,
1279 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1281 CRYPTO_ALG_NEED_FALLBACK,
1282 .cra_blocksize = SHA1_BLOCK_SIZE,
1283 .cra_ctxsize = sizeof(struct sahara_ctx),
1285 .cra_module = THIS_MODULE,
1286 .cra_init = sahara_sha_cra_init,
1287 .cra_exit = sahara_sha_cra_exit,
1292 static struct ahash_alg sha_v4_algs[] = {
1294 .init = sahara_sha_init,
1295 .update = sahara_sha_update,
1296 .final = sahara_sha_final,
1297 .finup = sahara_sha_finup,
1298 .digest = sahara_sha_digest,
1299 .export = sahara_sha_export,
1300 .import = sahara_sha_import,
1301 .halg.digestsize = SHA256_DIGEST_SIZE,
1303 .cra_name = "sha256",
1304 .cra_driver_name = "sahara-sha256",
1305 .cra_priority = 300,
1306 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
1308 CRYPTO_ALG_NEED_FALLBACK,
1309 .cra_blocksize = SHA256_BLOCK_SIZE,
1310 .cra_ctxsize = sizeof(struct sahara_ctx),
1312 .cra_module = THIS_MODULE,
1313 .cra_init = sahara_sha_cra_init,
1314 .cra_exit = sahara_sha_cra_exit,
1319 static irqreturn_t sahara_irq_handler(int irq, void *data)
1321 struct sahara_dev *dev = (struct sahara_dev *)data;
1322 unsigned int stat = sahara_read(dev, SAHARA_REG_STATUS);
1323 unsigned int err = sahara_read(dev, SAHARA_REG_ERRSTATUS);
1325 sahara_write(dev, SAHARA_CMD_CLEAR_INT | SAHARA_CMD_CLEAR_ERR,
1328 sahara_decode_status(dev, stat);
1330 if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_BUSY) {
1332 } else if (SAHARA_STATUS_GET_STATE(stat) == SAHARA_STATE_COMPLETE) {
1335 sahara_decode_error(dev, err);
1336 dev->error = -EINVAL;
1339 complete(&dev->dma_completion);
1345 static int sahara_register_algs(struct sahara_dev *dev)
1348 unsigned int i, j, k, l;
1350 for (i = 0; i < ARRAY_SIZE(aes_algs); i++) {
1351 INIT_LIST_HEAD(&aes_algs[i].cra_list);
1352 err = crypto_register_alg(&aes_algs[i]);
1357 for (k = 0; k < ARRAY_SIZE(sha_v3_algs); k++) {
1358 err = crypto_register_ahash(&sha_v3_algs[k]);
1360 goto err_sha_v3_algs;
1363 if (dev->version > SAHARA_VERSION_3)
1364 for (l = 0; l < ARRAY_SIZE(sha_v4_algs); l++) {
1365 err = crypto_register_ahash(&sha_v4_algs[l]);
1367 goto err_sha_v4_algs;
1373 for (j = 0; j < l; j++)
1374 crypto_unregister_ahash(&sha_v4_algs[j]);
1377 for (j = 0; j < k; j++)
1378 crypto_unregister_ahash(&sha_v3_algs[j]);
1381 for (j = 0; j < i; j++)
1382 crypto_unregister_alg(&aes_algs[j]);
1387 static void sahara_unregister_algs(struct sahara_dev *dev)
1391 for (i = 0; i < ARRAY_SIZE(aes_algs); i++)
1392 crypto_unregister_alg(&aes_algs[i]);
1394 for (i = 0; i < ARRAY_SIZE(sha_v3_algs); i++)
1395 crypto_unregister_ahash(&sha_v3_algs[i]);
1397 if (dev->version > SAHARA_VERSION_3)
1398 for (i = 0; i < ARRAY_SIZE(sha_v4_algs); i++)
1399 crypto_unregister_ahash(&sha_v4_algs[i]);
1402 static struct platform_device_id sahara_platform_ids[] = {
1403 { .name = "sahara-imx27" },
1406 MODULE_DEVICE_TABLE(platform, sahara_platform_ids);
1408 static struct of_device_id sahara_dt_ids[] = {
1409 { .compatible = "fsl,imx53-sahara" },
1410 { .compatible = "fsl,imx27-sahara" },
1413 MODULE_DEVICE_TABLE(of, sahara_dt_ids);
1415 static int sahara_probe(struct platform_device *pdev)
1417 struct sahara_dev *dev;
1418 struct resource *res;
1424 dev = devm_kzalloc(&pdev->dev, sizeof(struct sahara_dev), GFP_KERNEL);
1426 dev_err(&pdev->dev, "unable to alloc data struct.\n");
1430 dev->device = &pdev->dev;
1431 platform_set_drvdata(pdev, dev);
1433 /* Get the base address */
1434 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1435 dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
1436 if (IS_ERR(dev->regs_base))
1437 return PTR_ERR(dev->regs_base);
1440 irq = platform_get_irq(pdev, 0);
1442 dev_err(&pdev->dev, "failed to get irq resource\n");
1446 err = devm_request_irq(&pdev->dev, irq, sahara_irq_handler,
1447 0, dev_name(&pdev->dev), dev);
1449 dev_err(&pdev->dev, "failed to request irq\n");
1454 dev->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1455 if (IS_ERR(dev->clk_ipg)) {
1456 dev_err(&pdev->dev, "Could not get ipg clock\n");
1457 return PTR_ERR(dev->clk_ipg);
1460 dev->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
1461 if (IS_ERR(dev->clk_ahb)) {
1462 dev_err(&pdev->dev, "Could not get ahb clock\n");
1463 return PTR_ERR(dev->clk_ahb);
1466 /* Allocate HW descriptors */
1467 dev->hw_desc[0] = dmam_alloc_coherent(&pdev->dev,
1468 SAHARA_MAX_HW_DESC * sizeof(struct sahara_hw_desc),
1469 &dev->hw_phys_desc[0], GFP_KERNEL);
1470 if (!dev->hw_desc[0]) {
1471 dev_err(&pdev->dev, "Could not allocate hw descriptors\n");
1474 dev->hw_desc[1] = dev->hw_desc[0] + 1;
1475 dev->hw_phys_desc[1] = dev->hw_phys_desc[0] +
1476 sizeof(struct sahara_hw_desc);
1478 /* Allocate space for iv and key */
1479 dev->key_base = dmam_alloc_coherent(&pdev->dev, 2 * AES_KEYSIZE_128,
1480 &dev->key_phys_base, GFP_KERNEL);
1481 if (!dev->key_base) {
1482 dev_err(&pdev->dev, "Could not allocate memory for key\n");
1485 dev->iv_base = dev->key_base + AES_KEYSIZE_128;
1486 dev->iv_phys_base = dev->key_phys_base + AES_KEYSIZE_128;
1488 /* Allocate space for context: largest digest + message length field */
1489 dev->context_base = dmam_alloc_coherent(&pdev->dev,
1490 SHA256_DIGEST_SIZE + 4,
1491 &dev->context_phys_base, GFP_KERNEL);
1492 if (!dev->context_base) {
1493 dev_err(&pdev->dev, "Could not allocate memory for MDHA context\n");
1497 /* Allocate space for HW links */
1498 dev->hw_link[0] = dmam_alloc_coherent(&pdev->dev,
1499 SAHARA_MAX_HW_LINK * sizeof(struct sahara_hw_link),
1500 &dev->hw_phys_link[0], GFP_KERNEL);
1501 if (!dev->hw_link[0]) {
1502 dev_err(&pdev->dev, "Could not allocate hw links\n");
1505 for (i = 1; i < SAHARA_MAX_HW_LINK; i++) {
1506 dev->hw_phys_link[i] = dev->hw_phys_link[i - 1] +
1507 sizeof(struct sahara_hw_link);
1508 dev->hw_link[i] = dev->hw_link[i - 1] + 1;
1511 crypto_init_queue(&dev->queue, SAHARA_QUEUE_LENGTH);
1513 spin_lock_init(&dev->lock);
1514 mutex_init(&dev->queue_mutex);
1518 dev->kthread = kthread_run(sahara_queue_manage, dev, "sahara_crypto");
1519 if (IS_ERR(dev->kthread)) {
1520 return PTR_ERR(dev->kthread);
1523 init_completion(&dev->dma_completion);
1525 err = clk_prepare_enable(dev->clk_ipg);
1528 err = clk_prepare_enable(dev->clk_ahb);
1530 goto clk_ipg_disable;
1532 version = sahara_read(dev, SAHARA_REG_VERSION);
1533 if (of_device_is_compatible(pdev->dev.of_node, "fsl,imx27-sahara")) {
1534 if (version != SAHARA_VERSION_3)
1536 } else if (of_device_is_compatible(pdev->dev.of_node,
1537 "fsl,imx53-sahara")) {
1538 if (((version >> 8) & 0xff) != SAHARA_VERSION_4)
1540 version = (version >> 8) & 0xff;
1542 if (err == -ENODEV) {
1543 dev_err(&pdev->dev, "SAHARA version %d not supported\n",
1548 dev->version = version;
1550 sahara_write(dev, SAHARA_CMD_RESET | SAHARA_CMD_MODE_BATCH,
1552 sahara_write(dev, SAHARA_CONTROL_SET_THROTTLE(0) |
1553 SAHARA_CONTROL_SET_MAXBURST(8) |
1554 SAHARA_CONTROL_RNG_AUTORSD |
1555 SAHARA_CONTROL_ENABLE_INT,
1556 SAHARA_REG_CONTROL);
1558 err = sahara_register_algs(dev);
1562 dev_info(&pdev->dev, "SAHARA version %d initialized\n", version);
1567 kthread_stop(dev->kthread);
1569 clk_disable_unprepare(dev->clk_ahb);
1571 clk_disable_unprepare(dev->clk_ipg);
1576 static int sahara_remove(struct platform_device *pdev)
1578 struct sahara_dev *dev = platform_get_drvdata(pdev);
1580 kthread_stop(dev->kthread);
1582 sahara_unregister_algs(dev);
1584 clk_disable_unprepare(dev->clk_ipg);
1585 clk_disable_unprepare(dev->clk_ahb);
1592 static struct platform_driver sahara_driver = {
1593 .probe = sahara_probe,
1594 .remove = sahara_remove,
1596 .name = SAHARA_NAME,
1597 .of_match_table = sahara_dt_ids,
1599 .id_table = sahara_platform_ids,
1602 module_platform_driver(sahara_driver);
1604 MODULE_LICENSE("GPL");
1607 MODULE_DESCRIPTION("SAHARA2 HW crypto accelerator");