// SPDX-License-Identifier: GPL-2.0-only
 * Intel IXP4xx NPE-C crypto driver

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/module.h>

#include <crypto/ctr.h>
#include <crypto/internal/des.h>
#include <crypto/aes.h>
#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>

#include <linux/soc/ixp4xx/npe.h>
#include <linux/soc/ixp4xx/qmgr.h>
/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
#define NPE_CTX_LEN 80
#define AES_BLOCK128 16

#define NPE_OP_HASH_VERIFY   0x01
#define NPE_OP_CCM_ENABLE    0x04
#define NPE_OP_CRYPT_ENABLE  0x08
#define NPE_OP_HASH_ENABLE   0x10
#define NPE_OP_NOT_IN_PLACE  0x20
#define NPE_OP_HMAC_DISABLE  0x40
#define NPE_OP_CRYPT_ENCRYPT 0x80

#define NPE_OP_CCM_GEN_MIC   0xcc
#define NPE_OP_HASH_GEN_ICV  0x50
#define NPE_OP_ENC_GEN_KEY   0xc9

#define MOD_ECB     0x0000
#define MOD_CTR     0x1000
#define MOD_CBC_ENC 0x2000
#define MOD_CBC_DEC 0x3000
#define MOD_CCM_ENC 0x4000
#define MOD_CCM_DEC 0x5000

#define CIPH_DECR   0x0000
#define CIPH_ENCR   0x0400

#define MOD_DES     0x0000
#define MOD_TDEA2   0x0100
#define MOD_3DES    0x0200
#define MOD_AES     0x0800
#define MOD_AES128  (0x0800 | KEYLEN_128)
#define MOD_AES192  (0x0900 | KEYLEN_192)
#define MOD_AES256  (0x0a00 | KEYLEN_256)

#define NPE_ID      2  /* NPE C */
/* Extra descriptors, used for key-setup (registration) requests that
 * arrive while the first NPE_QLEN crypt_ctl are busy
 */
#define NPE_QLEN_TOTAL 64
#define CTL_FLAG_UNUSED		0x0000
#define CTL_FLAG_USED		0x1000
#define CTL_FLAG_PERFORM_ABLK	0x0001
#define CTL_FLAG_GEN_ICV	0x0002
#define CTL_FLAG_GEN_REVAES	0x0004
#define CTL_FLAG_PERFORM_AEAD	0x0008
#define CTL_FLAG_MASK		0x000f

#define HMAC_PAD_BLOCKLEN	SHA1_BLOCK_SIZE

#define MD5_DIGEST_SIZE		16
	dma_addr_t phys_addr;
	struct buffer_desc *next;
	enum dma_data_direction dir;
	u8 mode;		/* NPE_OP_* operation mode */
	u8 mode;		/* NPE_OP_* operation mode */
	u8 iv[MAX_IVLEN];	/* IV for CBC mode or CTR IV for CTR mode */
	dma_addr_t icv_rev_aes;	/* icv or rev aes */
	u16 auth_offs;		/* Authentication start offset */
	u16 auth_len;		/* Authentication data length */
	u16 crypt_offs;		/* Cryption start offset */
	u16 crypt_len;		/* Cryption data length */
	u16 auth_len;		/* Authentication data length */
	u16 auth_offs;		/* Authentication start offset */
	u16 crypt_len;		/* Cryption data length */
	u16 crypt_offs;		/* Cryption start offset */
	u32 aadAddr;		/* Additional Auth Data Addr for CCM mode */
	u32 crypto_ctx;		/* NPE Crypto Param structure address */
	/* Used by Host: 4*4 bytes */
		struct ablkcipher_request *ablk_req;
		struct aead_request *aead_req;
		struct crypto_tfm *tfm;
	struct buffer_desc *regist_buf;

	struct buffer_desc *src;
	struct buffer_desc *dst;

	struct buffer_desc *src;
	struct buffer_desc *dst;
	struct scatterlist ivlist;
	/* used when the hmac is not on one sg entry */
struct ix_hash_algo {

	unsigned char *npe_ctx;
	dma_addr_t npe_ctx_phys;
	struct ix_sa_dir encrypt;
	struct ix_sa_dir decrypt;
	u8 authkey[MAX_KEYLEN];
	u8 enckey[MAX_KEYLEN];
	u8 nonce[CTR_RFC3686_NONCE_SIZE];
	atomic_t configuring;
	struct completion completion;
	struct crypto_alg crypto;
	const struct ix_hash_algo *hash;

struct ixp_aead_alg {
	struct aead_alg crypto;
	const struct ix_hash_algo *hash;
static const struct ix_hash_algo hash_alg_md5 = {
	.cfgword	= 0xAA010004,
	.icv		= "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
			  "\xFE\xDC\xBA\x98\x76\x54\x32\x10",

static const struct ix_hash_algo hash_alg_sha1 = {
	.cfgword	= 0x00000005,
	.icv		= "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
			  "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
static struct npe *npe_c;
static struct dma_pool *buffer_pool;
static struct dma_pool *ctx_pool;

static struct crypt_ctl *crypt_virt;
static dma_addr_t crypt_phys;

static int support_aes = 1;

#define DRIVER_NAME "ixp4xx_crypto"

static struct platform_device *pdev;
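/* Convert between the virtual and DMA addresses of an entry in the
 * coherent crypt_ctl descriptor array; the queue manager carries only
 * the physical address of a descriptor.
 */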
static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
	return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);

static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
	return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_enc;

static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_dec;

static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->hash;
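/* Lazily allocate the shared descriptor array the first time a request
 * needs it; GFP_ATOMIC because the caller holds desc_lock.
 */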
static int setup_crypt_desc(void)
	struct device *dev = &pdev->dev;

	BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
	/* get_crypt_desc_emerg() indexes descriptors up to NPE_QLEN_TOTAL,
	 * so the array must cover all of them, not just the first NPE_QLEN;
	 * this also keeps the size consistent with release_ixp_crypto().
	 */
	crypt_virt = dma_alloc_coherent(dev,
					NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
					&crypt_phys, GFP_ATOMIC);
static spinlock_t desc_lock;
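/* Hand out the next unused descriptor from the first NPE_QLEN entries,
 * allocating the descriptor array on first use.
 */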
static struct crypt_ctl *get_crypt_desc(void)
	spin_lock_irqsave(&desc_lock, flags);

	if (unlikely(!crypt_virt))
	if (unlikely(!crypt_virt)) {
		spin_unlock_irqrestore(&desc_lock, flags);
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN)
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&desc_lock, flags);
		return crypt_virt + i;
	spin_unlock_irqrestore(&desc_lock, flags);
static spinlock_t emerg_lock;
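/* Fall back to the reserved descriptors between NPE_QLEN and
 * NPE_QLEN_TOTAL; used for key-setup requests that must make progress
 * even when the regular descriptors are all busy.
 */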
static struct crypt_ctl *get_crypt_desc_emerg(void)
	static int idx = NPE_QLEN;
	struct crypt_ctl *desc;

	desc = get_crypt_desc();
	if (unlikely(!crypt_virt))

	spin_lock_irqsave(&emerg_lock, flags);
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN_TOTAL)
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&emerg_lock, flags);
		return crypt_virt + i;
	spin_unlock_irqrestore(&emerg_lock, flags);
static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
	struct buffer_desc *buf1;

	phys1 = buf->phys_next;
	/* unmap the data buffer itself (phys_addr), not the link to the
	 * next descriptor
	 */
	dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
	dma_pool_free(buffer_pool, buf, phys);
static struct tasklet_struct crypto_done_tasklet;
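/* The NPE wrote the ICV into a pool bounce buffer because it did not
 * fit into the final scatterlist entry; for encryption, copy it to its
 * real place in req->dst, then release the bounce buffer.
 */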
static void finish_scattered_hmac(struct crypt_ctl *crypt)
	struct aead_request *req = crypt->data.aead_req;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	int decryptlen = req->assoclen + req->cryptlen - authsize;

	if (req_ctx->encrypt) {
		scatterwalk_map_and_copy(req_ctx->hmac_virt,
					 req->dst, decryptlen, authsize, 1);
	dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
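/* Completion handling for one descriptor returned by the NPE on the
 * receive queue; bit 0 of the physical address carries the NPE's
 * error (authentication failure) flag.
 */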
static void one_packet(dma_addr_t phys)
	struct device *dev = &pdev->dev;
	struct crypt_ctl *crypt;

	failed = phys & 0x1 ? -EBADMSG : 0;

	crypt = crypt_phys2virt(phys);

	switch (crypt->ctl_flags & CTL_FLAG_MASK) {
	case CTL_FLAG_PERFORM_AEAD: {
		struct aead_request *req = crypt->data.aead_req;
		struct aead_ctx *req_ctx = aead_request_ctx(req);

		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
		if (req_ctx->hmac_virt) {
			finish_scattered_hmac(crypt);

		req->base.complete(&req->base, failed);
	case CTL_FLAG_PERFORM_ABLK: {
		struct ablkcipher_request *req = crypt->data.ablk_req;
		struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);

			free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
		req->base.complete(&req->base, failed);
	case CTL_FLAG_GEN_ICV:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		dma_pool_free(ctx_pool, crypt->regist_ptr,
			      crypt->regist_buf->phys_addr);
		dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
	case CTL_FLAG_GEN_REVAES:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		*(u32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);

	crypt->ctl_flags = CTL_FLAG_UNUSED;
static void irqhandler(void *_unused)
	tasklet_schedule(&crypto_done_tasklet);

static void crypto_done_action(unsigned long arg)
		dma_addr_t phys = qmgr_get_entry(RECV_QID);

	tasklet_schedule(&crypto_done_tasklet);
static int init_ixp_crypto(struct device *dev)
	u32 msg[2] = { 0, 0 };

	if (!(~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
				     IXP4XX_FEATURE_AES |
				     IXP4XX_FEATURE_DES))) {
		printk(KERN_ERR "ixp_crypto: No HW crypto available\n");

	npe_c = npe_request(NPE_ID);

	if (!npe_running(npe_c)) {
		ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
		if (npe_send_message(npe_c, msg, "STATUS_MSG"))

		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))

	switch ((msg[1] >> 16) & 0xff) {
		printk(KERN_WARNING "Firmware of %s lacks AES support\n",
		printk(KERN_ERR "Firmware of %s lacks crypto support\n",
	/* buffer_pool will also be used to sometimes store the hmac,
	 * so ensure it is large enough
	 */
	BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
	buffer_pool = dma_pool_create("buffer", dev,
				      sizeof(struct buffer_desc), 32, 0);
	ctx_pool = dma_pool_create("context", dev,

	ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
				 "ixp_crypto:out", NULL);
	ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
				 "ixp_crypto:in", NULL);
		qmgr_release_queue(SEND_QID);

	qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
	tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);

	qmgr_enable_irq(RECV_QID);

	printk(KERN_ERR "%s not responding\n", npe_name(npe_c));

	dma_pool_destroy(ctx_pool);
	dma_pool_destroy(buffer_pool);
static void release_ixp_crypto(struct device *dev)
	qmgr_disable_irq(RECV_QID);
	tasklet_kill(&crypto_done_tasklet);

	qmgr_release_queue(SEND_QID);
	qmgr_release_queue(RECV_QID);

	dma_pool_destroy(ctx_pool);
	dma_pool_destroy(buffer_pool);

		dma_free_coherent(dev,
				  NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
				  crypt_virt, crypt_phys);
static void reset_sa_dir(struct ix_sa_dir *dir)
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dir->npe_ctx_idx = 0;

static int init_sa_dir(struct ix_sa_dir *dir)
	dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);

static void free_sa_dir(struct ix_sa_dir *dir)
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
static int init_tfm(struct crypto_tfm *tfm)
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);

	atomic_set(&ctx->configuring, 0);
	ret = init_sa_dir(&ctx->encrypt);
	ret = init_sa_dir(&ctx->decrypt);
		free_sa_dir(&ctx->encrypt);

static int init_tfm_ablk(struct crypto_tfm *tfm)
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablk_ctx);
	return init_tfm(tfm);

static int init_tfm_aead(struct crypto_aead *tfm)
	crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
	return init_tfm(crypto_aead_tfm(tfm));
static void exit_tfm(struct crypto_tfm *tfm)
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);

	free_sa_dir(&ctx->encrypt);
	free_sa_dir(&ctx->decrypt);

static void exit_tfm_aead(struct crypto_aead *tfm)
	exit_tfm(crypto_aead_tfm(tfm));
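/* Precompute one HMAC chaining variable: hash a single block of
 * key XOR ipad/opad on the NPE and have the intermediate digest stored
 * at 'target' inside the NPE context, where it serves as the hash
 * initial value for subsequent requests.
 */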
static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
			      int init_len, u32 ctx_addr, const u8 *key,
			      int key_len)
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypt_ctl *crypt;
	struct buffer_desc *buf;
	dma_addr_t pad_phys, buf_phys;

	BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
	pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
	buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
		dma_pool_free(ctx_pool, pad, pad_phys);
	crypt = get_crypt_desc_emerg();
		dma_pool_free(ctx_pool, pad, pad_phys);
		dma_pool_free(buffer_pool, buf, buf_phys);

	memcpy(pad, key, key_len);
	memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
	for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) {

	crypt->data.tfm = tfm;
	crypt->regist_ptr = pad;
	crypt->regist_buf = buf;

	crypt->auth_offs = 0;
	crypt->auth_len = HMAC_PAD_BLOCKLEN;
	crypt->crypto_ctx = ctx_addr;
	crypt->src_buf = buf_phys;
	crypt->icv_rev_aes = target;
	crypt->mode = NPE_OP_HASH_GEN_ICV;
	crypt->init_len = init_len;
	crypt->ctl_flags |= CTL_FLAG_GEN_ICV;

	buf->buf_len = HMAC_PAD_BLOCKLEN;
	buf->phys_addr = pad_phys;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
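/* Write the hash configuration word and initial chaining values into
 * the NPE context, then kick off precomputation of the HMAC inner
 * (ipad) and outer (opad) pad digests.
 */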
static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
		      const u8 *key, int key_len, unsigned digest_len)
	u32 itarget, otarget, npe_ctx_addr;
	unsigned char *cinfo;
	int init_len, ret = 0;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	const struct ix_hash_algo *algo;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx + dir->npe_ctx_idx;

	/* write cfg word to cryptinfo */
	cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
	cfgword ^= 0xAA000000; /* change the "byte swap" flags */
	*(u32 *)cinfo = cpu_to_be32(cfgword);
	cinfo += sizeof(cfgword);

	/* write ICV to cryptinfo */
	memcpy(cinfo, algo->icv, digest_len);

	itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
		  + sizeof(algo->cfgword);
	otarget = itarget + digest_len;
	init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
	npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;

	dir->npe_ctx_idx += init_len;
	dir->npe_mode |= NPE_OP_HASH_ENABLE;

		dir->npe_mode |= NPE_OP_HASH_VERIFY;

	ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
				 init_len, npe_ctx_addr, key, key_len);
	return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
				  init_len, npe_ctx_addr, key, key_len);
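/* Ask the NPE to derive the reverse (decryption) AES key schedule from
 * the encryption key already stored in the decrypt context. The context
 * is temporarily switched to the encrypt direction; the completion
 * handler (CTL_FLAG_GEN_REVAES) flips it back to decrypt.
 */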
static int gen_rev_aes_key(struct crypto_tfm *tfm)
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ix_sa_dir *dir = &ctx->decrypt;

	crypt = get_crypt_desc_emerg();

	*(u32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);

	crypt->data.tfm = tfm;
	crypt->crypt_offs = 0;
	crypt->crypt_len = AES_BLOCK128;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
	crypt->mode = NPE_OP_ENC_GEN_KEY;
	crypt->init_len = dir->npe_ctx_idx;
	crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
			const u8 *key, int key_len)
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx;

		cipher_cfg = cipher_cfg_enc(tfm);
		dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
		cipher_cfg = cipher_cfg_dec(tfm);

	if (cipher_cfg & MOD_AES) {
		case 16: keylen_cfg = MOD_AES128; break;
		case 24: keylen_cfg = MOD_AES192; break;
		case 32: keylen_cfg = MOD_AES256; break;
			*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		cipher_cfg |= keylen_cfg;
		/* a weak or invalid DES key must fail the setkey call */
		int err = crypto_des_verify_key(tfm, key);

		if (err)
			return err;

	/* write cfg word to cryptinfo */
	*(u32 *)cinfo = cpu_to_be32(cipher_cfg);
	cinfo += sizeof(cipher_cfg);

	/* write cipher key to cryptinfo */
	memcpy(cinfo, key, key_len);
	/* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
	if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
		memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
		key_len = DES3_EDE_KEY_SIZE;

	dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
	dir->npe_mode |= NPE_OP_CRYPT_ENABLE;

	if ((cipher_cfg & MOD_AES) && !encrypt) {
		return gen_rev_aes_key(tfm);
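/* Walk the scatterlist, map each entry for DMA, and mirror it as a
 * chain of buffer_desc records the NPE can follow; returns the last
 * descriptor, or NULL on allocation failure.
 */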
static struct buffer_desc *chainup_buffers(struct device *dev,
					   struct scatterlist *sg,
					   unsigned nbytes,
					   struct buffer_desc *buf,
					   gfp_t flags,
					   enum dma_data_direction dir)
	for (; nbytes > 0; sg = sg_next(sg)) {
		unsigned len = min(nbytes, sg->length);
		struct buffer_desc *next_buf;
		dma_addr_t next_buf_phys;

		next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);

		sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
		buf->next = next_buf;
		buf->phys_next = next_buf_phys;

		buf->phys_addr = sg_dma_address(sg);
static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		       unsigned int key_len)
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u32 *flags = &tfm->base.crt_flags;

	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
	ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;

	ret = setup_cipher(&tfm->base, 0, key, key_len);
	ret = setup_cipher(&tfm->base, 1, key, key_len);

	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
		if (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;

	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
static int ablk_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int key_len)
	return verify_ablkcipher_des3_key(tfm, key) ?:
	       ablk_setkey(tfm, key, key_len);
static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			       unsigned int key_len)
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	/* the nonce is stored in bytes at end of key */
	if (key_len < CTR_RFC3686_NONCE_SIZE)

	memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	key_len -= CTR_RFC3686_NONCE_SIZE;
	return ablk_setkey(tfm, key, key_len);
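/* Submit one ablkcipher request to the NPE: build the source (and, for
 * out-of-place operation, destination) buffer chains, fill in a crypt
 * descriptor and queue it; completion arrives via the receive queue.
 */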
static int ablk_perform(struct ablkcipher_request *req, int encrypt)
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int nbytes = req->nbytes;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct buffer_desc src_hook;
	struct device *dev = &pdev->dev;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		      GFP_KERNEL : GFP_ATOMIC;

	if (qmgr_stat_full(SEND_QID))
	if (atomic_read(&ctx->configuring))

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;

	crypt = get_crypt_desc();

	crypt->data.ablk_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = 0;
	crypt->crypt_len = nbytes;

	BUG_ON(ivsize && !req->info);
	memcpy(crypt->iv, req->info, ivsize);
	if (req->src != req->dst) {
		struct buffer_desc dst_hook;

		crypt->mode |= NPE_OP_NOT_IN_PLACE;
		/* This was never tested by Intel
		 * for more than one dst buffer, I think. */
		if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
				     flags, DMA_FROM_DEVICE))
		src_direction = DMA_TO_DEVICE;
		req_ctx->dst = dst_hook.next;
		crypt->dst_buf = dst_hook.phys_next;

	if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
			     flags, src_direction))

	req_ctx->src = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));

	free_buf_chain(dev, req_ctx->src, crypt->src_buf);

	if (req->src != req->dst) {
		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);

	crypt->ctl_flags = CTL_FLAG_UNUSED;
static int ablk_encrypt(struct ablkcipher_request *req)
	return ablk_perform(req, 1);

static int ablk_decrypt(struct ablkcipher_request *req)
	return ablk_perform(req, 0);
static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u8 iv[CTR_RFC3686_BLOCK_SIZE];
	u8 *info = req->info;

	/* set up counter block */
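	/*
	 * The 16-byte counter block is laid out per RFC 3686:
	 *   nonce (4 bytes) | per-request IV (8 bytes) | counter (4 bytes),
	 * with the big-endian counter starting at 1.
	 */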
	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);

	/* initialize counter portion of counter block */
	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =

	ret = ablk_perform(req, 1);
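/* Submit one AEAD request to the NPE. The hash covers assoclen +
 * cryptlen bytes from offset 0; the cipher covers eff_cryptlen bytes
 * starting at cryptoffset. If the ICV does not lie contiguously in the
 * final scatterlist entry, it is bounced through a pool buffer
 * (req_ctx->hmac_virt).
 */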
static int aead_perform(struct aead_request *req, int encrypt,
			int cryptoffset, int eff_cryptlen, u8 *iv)
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned ivsize = crypto_aead_ivsize(tfm);
	unsigned authsize = crypto_aead_authsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int cryptlen;
	struct buffer_desc *buf, src_hook;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct device *dev = &pdev->dev;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		      GFP_KERNEL : GFP_ATOMIC;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	unsigned int lastlen;

	if (qmgr_stat_full(SEND_QID))
	if (atomic_read(&ctx->configuring))

		dir = &ctx->encrypt;
		cryptlen = req->cryptlen;
		dir = &ctx->decrypt;
		/* req->cryptlen includes the authsize when decrypting */
		cryptlen = req->cryptlen - authsize;
		eff_cryptlen -= authsize;

	crypt = get_crypt_desc();

	crypt->data.aead_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = cryptoffset;
	crypt->crypt_len = eff_cryptlen;

	crypt->auth_offs = 0;
	crypt->auth_len = req->assoclen + cryptlen;
	BUG_ON(ivsize && !req->iv);
	memcpy(crypt->iv, req->iv, ivsize);

	buf = chainup_buffers(dev, req->src, crypt->auth_len,
			      &src_hook, flags, src_direction);
	req_ctx->src = src_hook.next;
	crypt->src_buf = src_hook.phys_next;

	lastlen = buf->buf_len;
	if (lastlen >= authsize)
		crypt->icv_rev_aes = buf->phys_addr +
				     buf->buf_len - authsize;

	req_ctx->dst = NULL;

	if (req->src != req->dst) {
		struct buffer_desc dst_hook;

		crypt->mode |= NPE_OP_NOT_IN_PLACE;
		src_direction = DMA_TO_DEVICE;

		buf = chainup_buffers(dev, req->dst, crypt->auth_len,
				      &dst_hook, flags, DMA_FROM_DEVICE);
		req_ctx->dst = dst_hook.next;
		crypt->dst_buf = dst_hook.phys_next;

		lastlen = buf->buf_len;
		if (lastlen >= authsize)
			crypt->icv_rev_aes = buf->phys_addr +
					     buf->buf_len - authsize;
	if (unlikely(lastlen < authsize)) {
		/* The authsize bytes of the ICV are scattered across sg
		 * entries; bounce them through a safely DMA-able buffer.
		 */
		req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
						    &crypt->icv_rev_aes);
		if (unlikely(!req_ctx->hmac_virt))

			scatterwalk_map_and_copy(req_ctx->hmac_virt,
						 req->src, cryptlen,
						 authsize, 0);
		req_ctx->encrypt = encrypt;
	} else {
		req_ctx->hmac_virt = NULL;

	crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;

	free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);

	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
	crypt->ctl_flags = CTL_FLAG_UNUSED;
static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	u32 *flags = &tfm->base.crt_flags;
	unsigned digest_len = crypto_aead_maxauthsize(tfm);

	if (!ctx->enckey_len && !ctx->authkey_len)

	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
	ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
	ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
			 ctx->authkey_len, digest_len);
	ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
			 ctx->authkey_len, digest_len);

	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
		if (*flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS) {
			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;

	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
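/* The NPE cfgword encodes the ICV length as authsize/4 (see
 * setup_auth()), so only multiples of 4, up to the hash digest size,
 * can be programmed.
 */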
static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
	int max = crypto_aead_maxauthsize(tfm) >> 2;

	if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))

	return aead_setup(tfm, authsize);
static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
		       unsigned int keylen)
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)

	if (keys.authkeylen > sizeof(ctx->authkey))

	if (keys.enckeylen > sizeof(ctx->enckey))

	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
	ctx->authkey_len = keys.authkeylen;
	ctx->enckey_len = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_setup(tfm, crypto_aead_authsize(tfm));

	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			    unsigned int keylen)
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys keys;

	err = crypto_authenc_extractkeys(&keys, key, keylen);

	if (keys.authkeylen > sizeof(ctx->authkey))

	err = verify_aead_des3_key(tfm, keys.enckey, keys.enckeylen);

	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
	ctx->authkey_len = keys.authkeylen;
	ctx->enckey_len = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_setup(tfm, crypto_aead_authsize(tfm));

	memzero_explicit(&keys, sizeof(keys));
static int aead_encrypt(struct aead_request *req)
	return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);

static int aead_decrypt(struct aead_request *req)
	return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
static struct ixp_alg ixp4xx_algos[] = {
		.cra_name	= "cbc(des)",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			.ivsize		= DES_BLOCK_SIZE,

	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,

		.cra_name	= "ecb(des)",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,

	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,

		.cra_name	= "cbc(des3_ede)",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES3_EDE_KEY_SIZE,
			.max_keysize	= DES3_EDE_KEY_SIZE,
			.ivsize		= DES3_EDE_BLOCK_SIZE,
			.setkey		= ablk_des3_setkey,

	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,

		.cra_name	= "ecb(des3_ede)",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES3_EDE_KEY_SIZE,
			.max_keysize	= DES3_EDE_KEY_SIZE,
			.setkey		= ablk_des3_setkey,

	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,

		.cra_name	= "cbc(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,

	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,

		.cra_name	= "ecb(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,

	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,

		.cra_name	= "ctr(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,

	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
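	/* CTR mode runs the block cipher in the encrypt direction for
	 * both encryption and decryption, hence CIPH_ENCR in cfg_dec.
	 */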
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,

		.cra_name	= "rfc3686(ctr(aes))",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ablk_rfc3686_setkey,
			.encrypt	= ablk_rfc3686_crypt,
			.decrypt	= ablk_rfc3686_crypt }

	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
static struct ixp_aead_alg ixp4xx_aeads[] = {
			.cra_name	= "authenc(hmac(md5),cbc(des))",
			.cra_blocksize	= DES_BLOCK_SIZE,
		.ivsize		= DES_BLOCK_SIZE,
		.maxauthsize	= MD5_DIGEST_SIZE,
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,

			.cra_name	= "authenc(hmac(md5),cbc(des3_ede))",
			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.maxauthsize	= MD5_DIGEST_SIZE,
		.setkey		= des3_aead_setkey,
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,

			.cra_name	= "authenc(hmac(sha1),cbc(des))",
			.cra_blocksize	= DES_BLOCK_SIZE,
		.ivsize		= DES_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,

			.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
		.setkey		= des3_aead_setkey,
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,

			.cra_name	= "authenc(hmac(md5),cbc(aes))",
			.cra_blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.maxauthsize	= MD5_DIGEST_SIZE,
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,

			.cra_name	= "authenc(hmac(sha1),cbc(aes))",
			.cra_blocksize	= AES_BLOCK_SIZE,
		.ivsize		= AES_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
#define IXP_POSTFIX "-ixp4xx"

static const struct platform_device_info ixp_dev_info __initdata = {
	.name		= DRIVER_NAME,
	.dma_mask	= DMA_BIT_MASK(32),
static int __init ixp_module_init(void)
	int num = ARRAY_SIZE(ixp4xx_algos);

	pdev = platform_device_register_full(&ixp_dev_info);
		return PTR_ERR(pdev);

	spin_lock_init(&desc_lock);
	spin_lock_init(&emerg_lock);

	err = init_ixp_crypto(&pdev->dev);
		platform_device_unregister(pdev);

	for (i = 0; i < num; i++) {
		struct crypto_alg *cra = &ixp4xx_algos[i].crypto;

		if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s"IXP_POSTFIX, cra->cra_name) >=
		    CRYPTO_MAX_ALG_NAME)

		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {

		cra->cra_type = &crypto_ablkcipher_type;
		cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				 CRYPTO_ALG_KERN_DRIVER_ONLY |
		if (!cra->cra_ablkcipher.setkey)
			cra->cra_ablkcipher.setkey = ablk_setkey;
		if (!cra->cra_ablkcipher.encrypt)
			cra->cra_ablkcipher.encrypt = ablk_encrypt;
		if (!cra->cra_ablkcipher.decrypt)
			cra->cra_ablkcipher.decrypt = ablk_decrypt;
		cra->cra_init = init_tfm_ablk;

		cra->cra_ctxsize = sizeof(struct ixp_ctx);
		cra->cra_module = THIS_MODULE;
		cra->cra_alignmask = 3;
		cra->cra_priority = 300;
		cra->cra_exit = exit_tfm;
		if (crypto_register_alg(cra))
			printk(KERN_ERR "Failed to register '%s'\n",
			ixp4xx_algos[i].registered = 1;

	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
		struct aead_alg *cra = &ixp4xx_aeads[i].crypto;

		if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s"IXP_POSTFIX, cra->base.cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
		if (!support_aes && (ixp4xx_aeads[i].cfg_enc & MOD_AES))
		cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |

		cra->setkey = cra->setkey ?: aead_setkey;
		cra->setauthsize = aead_setauthsize;
		cra->encrypt = aead_encrypt;
		cra->decrypt = aead_decrypt;
		cra->init = init_tfm_aead;
		cra->exit = exit_tfm_aead;

		cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
		cra->base.cra_module = THIS_MODULE;
		cra->base.cra_alignmask = 3;
		cra->base.cra_priority = 300;

		if (crypto_register_aead(cra))
			printk(KERN_ERR "Failed to register '%s'\n",
			       cra->base.cra_driver_name);
			ixp4xx_aeads[i].registered = 1;
static void __exit ixp_module_exit(void)
	int num = ARRAY_SIZE(ixp4xx_algos);

	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
		if (ixp4xx_aeads[i].registered)
			crypto_unregister_aead(&ixp4xx_aeads[i].crypto);

	for (i = 0; i < num; i++) {
		if (ixp4xx_algos[i].registered)
			crypto_unregister_alg(&ixp4xx_algos[i].crypto);

	release_ixp_crypto(&pdev->dev);
	platform_device_unregister(pdev);
module_init(ixp_module_init);
module_exit(ixp_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("IXP4xx hardware crypto");