// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>

#include "skcipher.h"

#define CRYPTO_ALG_TYPE_SKCIPHER_MASK	0x0000000e

enum {
	SKCIPHER_WALK_PHYS = 1 << 0,
	SKCIPHER_WALK_SLOW = 1 << 1,
	SKCIPHER_WALK_COPY = 1 << 2,
	SKCIPHER_WALK_DIFF = 1 << 3,
	SKCIPHER_WALK_SLEEP = 1 << 4,
};

struct skcipher_walk_buffer {
	struct list_head entry;
	struct scatter_walk dst;
	unsigned int len;
	u8 *data;
	u8 buffer[];
};

static const struct crypto_type crypto_skcipher_type;

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)((unsigned long)(start + len - 1) & PAGE_MASK);

	return max(start, end_page);
}
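
/*
 * Illustrative worked example (added commentary, not from the original file):
 * with PAGE_SIZE == 4096, a buffer at page offset 4090 and len == 16 would
 * straddle a boundary; end_page then points at the start of the following
 * page and that boundary is returned, so the 16 bytes live entirely in the
 * second page.  At page offset 4000 instead, end_page falls before start and
 * start itself is returned unchanged.
 */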

static inline struct skcipher_alg *__crypto_skcipher_alg(
	struct crypto_alg *alg)
{
	return container_of(alg, struct skcipher_alg, base);
}

static inline struct crypto_istat_cipher *skcipher_get_stat(
	struct skcipher_alg *alg)
{
	return skcipher_get_stat_common(&alg->co);
}

static inline int crypto_skcipher_errstat(struct skcipher_alg *alg, int err)
{
	struct crypto_istat_cipher *istat = skcipher_get_stat(alg);

	if (!IS_ENABLED(CONFIG_CRYPTO_STATS))
		return err;

	if (err && err != -EINPROGRESS && err != -EBUSY)
		atomic64_inc(&istat->err_cnt);

	return err;
}

static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = skcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize,
			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
	return 0;
}
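
/*
 * Added commentary (not from the original file): skcipher_walk_done() is
 * called by the walk user after each step.  @err is either a negative errno
 * or the number of bytes from the previous walk->nbytes that were left
 * unprocessed; the function writes back any bounce buffers, advances both
 * scatterlist walkers and, while data remains, sets up the next chunk via
 * skcipher_walk_next().
 */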

int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
	unsigned int n = walk->nbytes;
	unsigned int nbytes = 0;

	if (!n)
		goto finish;

	if (likely(err >= 0)) {
		n -= err;
		nbytes = walk->total - n;
	}

	if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
				    SKCIPHER_WALK_SLOW |
				    SKCIPHER_WALK_COPY |
				    SKCIPHER_WALK_DIFF)))) {
unmap_src:
		skcipher_unmap_src(walk);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		skcipher_unmap_dst(walk);
		goto unmap_src;
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		skcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		skcipher_unmap_dst(walk);
	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
		if (err > 0) {
			/*
			 * Didn't process all bytes.  Either the algorithm is
			 * broken, or this was the last step and it turned out
			 * the message wasn't evenly divisible into blocks but
			 * the algorithm requires it.
			 */
			err = -EINVAL;
			nbytes = 0;
		} else
			n = skcipher_done_slow(walk, n);
	}

	if (err > 0)
		err = 0;

	walk->total = nbytes;
	walk->nbytes = 0;

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	if (nbytes) {
		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
		return skcipher_walk_next(walk);
	}

finish:
	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		goto out;

	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);

void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
	struct skcipher_walk_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		u8 *data;

		if (err)
			goto done;

		data = p->data;
		if (!data) {
			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
			data = skcipher_get_spot(data, walk->stride);
		}

		scatterwalk_copychunks(data, &p->dst, p->len, 1);

		if (offset_in_page(p->data) + p->len + walk->stride >
		    PAGE_SIZE)
			free_page((unsigned long)p->data);

done:
		list_del(&p->entry);
		kfree(p);
	}

	if (!err && walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
				 struct skcipher_walk_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	bool phys = walk->flags & SKCIPHER_WALK_PHYS;
	unsigned alignmask = walk->alignmask;
	struct skcipher_walk_buffer *p;
	unsigned a;
	unsigned n;
	u8 *buffer;
	void *v;

	if (!phys) {
		if (!walk->buffer)
			walk->buffer = walk->page;
		buffer = walk->buffer;
		if (buffer)
			goto ok;
	}

	/* Start with the minimum alignment of kmalloc. */
	a = crypto_tfm_ctx_alignment() - 1;
	n = bsize;

	if (phys) {
		/* Calculate the minimum alignment of p->buffer. */
		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
		n += sizeof(*p);
	}

	/* Minimum size to align p->buffer by alignmask. */
	n += alignmask & ~a;

	/* Minimum size to ensure p->buffer does not straddle a page. */
	n += (bsize - 1) & ~(alignmask | a);

	v = kzalloc(n, skcipher_walk_gfp(walk));
	if (!v)
		return skcipher_walk_done(walk, -ENOMEM);

	if (phys) {
		p = v;
		p->len = bsize;
		skcipher_queue_write(walk, p);
		buffer = p->buffer;
	} else {
		walk->buffer = v;
		buffer = v;
	}

ok:
	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = walk->dst.virt.addr;

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}

static int skcipher_next_copy(struct skcipher_walk *walk)
{
	struct skcipher_walk_buffer *p;
	u8 *tmp = walk->page;

	skcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	skcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	if (!(walk->flags & SKCIPHER_WALK_PHYS))
		return 0;

	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
	if (!p)
		return -ENOMEM;

	p->data = walk->page;
	p->len = walk->nbytes;
	skcipher_queue_write(walk, p);

	if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
	    PAGE_SIZE)
		walk->page = NULL;
	else
		walk->page += walk->nbytes;

	return 0;
}

static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & SKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	skcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		skcipher_map_dst(walk);
	}

	return 0;
}

static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
			 SKCIPHER_WALK_DIFF);

	n = walk->total;
	bsize = min(walk->stride, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		err = skcipher_next_slow(walk, bsize);
		goto set_phys_lowmem;
	}

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}

		walk->nbytes = min_t(unsigned, n,
				     PAGE_SIZE - offset_in_page(walk->page));
		walk->flags |= SKCIPHER_WALK_COPY;
		err = skcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return skcipher_next_fast(walk);

set_phys_lowmem:
	if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned a = crypto_tfm_ctx_alignment() - 1;
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned bs = walk->stride;
	unsigned aligned_bs;
	unsigned size;
	u8 *iv;

	aligned_bs = ALIGN(bs, alignmask + 1);

	/* Minimum size to align buffer by alignmask. */
	size = alignmask & ~a;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		size += ivsize;
	else {
		size += aligned_bs + ivsize;

		/* Minimum size to ensure buffer does not straddle a page. */
		size += (bs - 1) & ~(alignmask | a);
	}

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
	iv = skcipher_get_spot(iv, bs) + aligned_bs;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_hardirq()))
		return -EDEADLK;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);

		if (err)
			return err;
	}

	walk->page = NULL;

	return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
				  struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	walk->total = req->cryptlen;
	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	walk->flags &= ~SKCIPHER_WALK_SLEEP;
	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		       SKCIPHER_WALK_SLEEP : 0;

	walk->blocksize = crypto_skcipher_blocksize(tfm);
	walk->ivsize = crypto_skcipher_ivsize(tfm);
	walk->alignmask = crypto_skcipher_alignmask(tfm);

	if (alg->co.base.cra_type != &crypto_skcipher_type)
		walk->stride = alg->co.chunksize;
	else
		walk->stride = alg->walksize;

	return skcipher_walk_first(walk);
}

int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req, bool atomic)
{
	int err;

	might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	err = skcipher_walk_skcipher(walk, req);

	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);
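
/*
 * Illustrative sketch (added, not part of the original file): the typical
 * shape of a driver's ->encrypt() handler built on the virtual-address walk.
 * example_crypt_chunk() is a hypothetical helper standing in for the real
 * cipher core; a block-mode driver would also round nbytes down to a multiple
 * of its block size and pass the remainder back to skcipher_walk_done().
 *
 *	static int example_encrypt(struct skcipher_request *req)
 *	{
 *		struct skcipher_walk walk;
 *		int err;
 *
 *		err = skcipher_walk_virt(&walk, req, false);
 *
 *		while (walk.nbytes) {
 *			example_crypt_chunk(walk.dst.virt.addr,
 *					    walk.src.virt.addr,
 *					    walk.nbytes, walk.iv);
 *			err = skcipher_walk_done(&walk, 0);
 *		}
 *
 *		return err;
 *	}
 */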

int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req)
{
	walk->flags |= SKCIPHER_WALK_PHYS;

	INIT_LIST_HEAD(&walk->buffers);

	return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

static int skcipher_walk_aead_common(struct skcipher_walk *walk,
				     struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int err;

	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

	scatterwalk_done(&walk->in, 0, walk->total);
	scatterwalk_done(&walk->out, 0, walk->total);

	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		walk->flags |= SKCIPHER_WALK_SLEEP;
	else
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	walk->blocksize = crypto_aead_blocksize(tfm);
	walk->stride = crypto_aead_chunksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);

	err = skcipher_walk_first(walk);

	if (atomic)
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	return err;
}

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->total = req->cryptlen - crypto_aead_authsize(tfm);

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);
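
/*
 * Illustrative sketch (added, not part of the original file): an AEAD
 * implementation walks only the plaintext/ciphertext region.  The associated
 * data has already been skipped by skcipher_walk_aead_common(), and on
 * decryption the authentication tag is excluded from walk->total.
 *
 *	struct skcipher_walk walk;
 *	int err;
 *
 *	err = skcipher_walk_aead_encrypt(&walk, req, false);
 *	while (walk.nbytes) {
 *		// transform walk.nbytes bytes from src to dst here
 *		err = skcipher_walk_done(&walk, 0);
 *	}
 */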

static void skcipher_set_needkey(struct crypto_skcipher *tfm)
{
	if (crypto_skcipher_max_keysize(tfm) != 0)
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;
}

int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	int err;

	if (cipher->co.base.cra_type != &crypto_skcipher_type) {
		struct crypto_lskcipher **ctx = crypto_skcipher_ctx(tfm);

		crypto_lskcipher_clear_flags(*ctx, CRYPTO_TFM_REQ_MASK);
		crypto_lskcipher_set_flags(*ctx,
					   crypto_skcipher_get_flags(tfm) &
					   CRYPTO_TFM_REQ_MASK);
		err = crypto_lskcipher_setkey(*ctx, key, keylen);
		goto out;
	}

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize)
		return -EINVAL;

	if ((unsigned long)key & alignmask)
		err = skcipher_setkey_unaligned(tfm, key, keylen);
	else
		err = cipher->setkey(tfm, key, keylen);

out:
	if (err) {
		skcipher_set_needkey(tfm);
		return err;
	}

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_setkey);

int crypto_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	int ret;

	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
		struct crypto_istat_cipher *istat = skcipher_get_stat(alg);

		atomic64_inc(&istat->encrypt_cnt);
		atomic64_add(req->cryptlen, &istat->encrypt_tlen);
	}

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else if (alg->co.base.cra_type != &crypto_skcipher_type)
		ret = crypto_lskcipher_encrypt_sg(req);
	else
		ret = alg->encrypt(req);

	return crypto_skcipher_errstat(alg, ret);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);

int crypto_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
	int ret;

	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
		struct crypto_istat_cipher *istat = skcipher_get_stat(alg);

		atomic64_inc(&istat->decrypt_cnt);
		atomic64_add(req->cryptlen, &istat->decrypt_tlen);
	}

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		ret = -ENOKEY;
	else if (alg->co.base.cra_type != &crypto_skcipher_type)
		ret = crypto_lskcipher_decrypt_sg(req);
	else
		ret = alg->decrypt(req);

	return crypto_skcipher_errstat(alg, ret);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);
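
/*
 * Illustrative sketch (added, not part of the original file): how a kernel
 * user drives this API for a one-shot, possibly asynchronous request.
 * key, keylen, buf and len are assumed to come from the caller, and len must
 * suit the chosen mode (e.g. a multiple of the block size for "cbc(aes)").
 * crypto_wait_req() turns an -EINPROGRESS/-EBUSY completion into the final
 * status.
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	struct scatterlist sg;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	u8 iv[16];
 *	int err;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	err = crypto_skcipher_setkey(tfm, key, keylen);
 *	if (err)
 *		goto out_free_tfm;
 *
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	if (!req) {
 *		err = -ENOMEM;
 *		goto out_free_tfm;
 *	}
 *
 *	get_random_bytes(iv, sizeof(iv));
 *	sg_init_one(&sg, buf, len);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
 *					   CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, &sg, &sg, len, iv);
 *
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *
 *	skcipher_request_free(req);
 * out_free_tfm:
 *	crypto_free_skcipher(tfm);
 *	return err;
 */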

static int crypto_lskcipher_export(struct skcipher_request *req, void *out)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	u8 *ivs = skcipher_request_ctx(req);

	ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1);

	memcpy(out, ivs + crypto_skcipher_ivsize(tfm),
	       crypto_skcipher_statesize(tfm));

	return 0;
}

static int crypto_lskcipher_import(struct skcipher_request *req, const void *in)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	u8 *ivs = skcipher_request_ctx(req);

	ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1);

	memcpy(ivs + crypto_skcipher_ivsize(tfm), in,
	       crypto_skcipher_statesize(tfm));

	return 0;
}

static int skcipher_noexport(struct skcipher_request *req, void *out)
{
	return 0;
}

static int skcipher_noimport(struct skcipher_request *req, const void *in)
{
	return 0;
}

int crypto_skcipher_export(struct skcipher_request *req, void *out)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_export(req, out);
	return alg->export(req, out);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_export);

int crypto_skcipher_import(struct skcipher_request *req, const void *in)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_import(req, in);
	return alg->import(req, in);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_import);

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	skcipher_set_needkey(skcipher);

	if (tfm->__crt_alg->cra_type != &crypto_skcipher_type) {
		unsigned am = crypto_skcipher_alignmask(skcipher);
		unsigned reqsize;

		reqsize = am & ~(crypto_tfm_ctx_alignment() - 1);
		reqsize += crypto_skcipher_ivsize(skcipher);
		reqsize += crypto_skcipher_statesize(skcipher);
		crypto_skcipher_set_reqsize(skcipher, reqsize);

		return crypto_init_lskcipher_ops_sg(tfm);
	}

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_skcipher_type)
		return sizeof(struct crypto_lskcipher *);

	return crypto_alg_extsize(alg);
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", skcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", skcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", skcipher->ivsize);
	seq_printf(m, "chunksize    : %u\n", skcipher->chunksize);
	seq_printf(m, "walksize     : %u\n", skcipher->walksize);
	seq_printf(m, "statesize    : %u\n", skcipher->statesize);
}

static int __maybe_unused crypto_skcipher_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
	struct crypto_report_blkcipher rblkcipher;

	memset(&rblkcipher, 0, sizeof(rblkcipher));

	strscpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type));
	strscpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = skcipher->min_keysize;
	rblkcipher.max_keysize = skcipher->max_keysize;
	rblkcipher.ivsize = skcipher->ivsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		       sizeof(rblkcipher), &rblkcipher);
}

static int __maybe_unused crypto_skcipher_report_stat(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);
	struct crypto_istat_cipher *istat;
	struct crypto_stat_cipher rcipher;

	istat = skcipher_get_stat(skcipher);

	memset(&rcipher, 0, sizeof(rcipher));

	strscpy(rcipher.type, "cipher", sizeof(rcipher.type));

	rcipher.stat_encrypt_cnt = atomic64_read(&istat->encrypt_cnt);
	rcipher.stat_encrypt_tlen = atomic64_read(&istat->encrypt_tlen);
	rcipher.stat_decrypt_cnt = atomic64_read(&istat->decrypt_cnt);
	rcipher.stat_decrypt_tlen = atomic64_read(&istat->decrypt_tlen);
	rcipher.stat_err_cnt = atomic64_read(&istat->err_cnt);

	return nla_put(skb, CRYPTOCFGA_STAT_CIPHER, sizeof(rcipher), &rcipher);
}

static const struct crypto_type crypto_skcipher_type = {
	.extsize = crypto_skcipher_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.free = crypto_skcipher_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_skcipher_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_skcipher_report,
#endif
#ifdef CONFIG_CRYPTO_STATS
	.report_stat = crypto_skcipher_report_stat,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_SKCIPHER_MASK,
	.type = CRYPTO_ALG_TYPE_SKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
			 struct crypto_instance *inst,
			 const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_skcipher_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_skcipher);

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
				const char *alg_name, u32 type, u32 mask)
{
	struct crypto_skcipher *tfm;

	/* Only sync algorithms allowed. */
	mask |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE;

	tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);

	/*
	 * Make sure we do not allocate something that might get used with
	 * an on-stack request: check the request size.
	 */
	if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
				    MAX_SYNC_SKCIPHER_REQSIZE)) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	return (struct crypto_sync_skcipher *)tfm;
}
EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);
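
/*
 * Illustrative sketch (added, not part of the original file): a sync tfm is
 * meant to be paired with an on-stack request, which is what the reqsize
 * check above makes safe.  key, buf and len are assumed to come from the
 * caller, and the all-zero IV is only a placeholder for whatever the chosen
 * mode requires.
 *
 *	struct crypto_sync_skcipher *tfm;
 *	struct scatterlist sg;
 *	u8 iv[16];
 *	int err;
 *
 *	tfm = crypto_alloc_sync_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	err = crypto_sync_skcipher_setkey(tfm, key, 16);
 *	if (!err) {
 *		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
 *
 *		memset(iv, 0, sizeof(iv));
 *		sg_init_one(&sg, buf, len);
 *		skcipher_request_set_sync_tfm(req, tfm);
 *		skcipher_request_set_callback(req, 0, NULL, NULL);
 *		skcipher_request_set_crypt(req, &sg, &sg, len, iv);
 *		err = crypto_skcipher_encrypt(req);
 *	}
 *
 *	crypto_free_sync_skcipher(tfm);
 *	return err;
 */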

int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher);

int skcipher_prepare_alg_common(struct skcipher_alg_common *alg)
{
	struct crypto_istat_cipher *istat = skcipher_get_stat_common(alg);
	struct crypto_alg *base = &alg->base;

	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
	    alg->statesize > PAGE_SIZE / 2 ||
	    (alg->ivsize + alg->statesize) > PAGE_SIZE / 2)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;

	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;

	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
		memset(istat, 0, sizeof(*istat));

	return 0;
}

static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg_common(&alg->co);
	if (err)
		return err;

	if (alg->walksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->walksize)
		alg->walksize = alg->chunksize;

	if (!alg->statesize) {
		alg->import = skcipher_noimport;
		alg->export = skcipher_noexport;
	} else if (!(alg->import && alg->export))
		return -EINVAL;

	base->cra_type = &crypto_skcipher_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);
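
/*
 * Illustrative sketch (added, not part of the original file): how an
 * algorithm module typically registers an array of skcipher_algs.  All of
 * the example_* names and the sizes shown are hypothetical.
 *
 *	static struct skcipher_alg example_algs[] = { {
 *		.base.cra_name		= "ecb(example)",
 *		.base.cra_driver_name	= "ecb-example-generic",
 *		.base.cra_priority	= 100,
 *		.base.cra_blocksize	= 16,
 *		.base.cra_ctxsize	= sizeof(struct example_ctx),
 *		.base.cra_module	= THIS_MODULE,
 *		.min_keysize		= 16,
 *		.max_keysize		= 32,
 *		.setkey			= example_setkey,
 *		.encrypt		= example_encrypt,
 *		.decrypt		= example_decrypt,
 *	} };
 *
 *	static int __init example_init(void)
 *	{
 *		return crypto_register_skciphers(example_algs,
 *						 ARRAY_SIZE(example_algs));
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		crypto_unregister_skciphers(example_algs,
 *					    ARRAY_SIZE(example_algs));
 *	}
 */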

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int keylen)
{
	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);

	crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	return crypto_cipher_setkey(cipher, key, keylen);
}

static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->cipher = cipher;
	return 0;
}

static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_cipher(ctx->cipher);
}

static void skcipher_free_instance_simple(struct skcipher_instance *inst)
{
	crypto_drop_cipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

/**
 * skcipher_alloc_instance_simple - allocate instance of simple block cipher mode
 *
 * Allocate an skcipher_instance for a simple block cipher mode of operation,
 * e.g. cbc or ecb.  The instance context will have just a single crypto_spawn,
 * that for the underlying cipher.  The {min,max}_keysize, ivsize, blocksize,
 * alignmask, and priority are set from the underlying cipher but can be
 * overridden if needed.  The tfm context defaults to skcipher_ctx_simple, and
 * default ->setkey(), ->init(), and ->exit() methods are installed.
 *
 * @tmpl: the template being instantiated
 * @tb: the template parameters
 *
 * Return: a pointer to the new instance, or an ERR_PTR().  The caller still
 *	   needs to register the instance.
 */
struct skcipher_instance *skcipher_alloc_instance_simple(
	struct crypto_template *tmpl, struct rtattr **tb)
{
	u32 mask;
	struct skcipher_instance *inst;
	struct crypto_cipher_spawn *spawn;
	struct crypto_alg *cipher_alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return ERR_PTR(err);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);
	spawn = skcipher_instance_ctx(inst);

	err = crypto_grab_cipher(spawn, skcipher_crypto_instance(inst),
				 crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	cipher_alg = crypto_spawn_cipher_alg(spawn);

	err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
				  cipher_alg);
	if (err)
		goto err_free_inst;

	inst->free = skcipher_free_instance_simple;

	/* Default algorithm properties, can be overridden */
	inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize;
	inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask;
	inst->alg.base.cra_priority = cipher_alg->cra_priority;
	inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
	inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
	inst->alg.ivsize = cipher_alg->cra_blocksize;

	/* Use skcipher_ctx_simple by default, can be overridden */
	inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple);
	inst->alg.setkey = skcipher_setkey_simple;
	inst->alg.init = skcipher_init_tfm_simple;
	inst->alg.exit = skcipher_exit_tfm_simple;

	return inst;

err_free_inst:
	skcipher_free_instance_simple(inst);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);
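
/*
 * Illustrative sketch (added, not part of the original file): a template's
 * ->create() hook built on the helper above, in the style of the generic
 * ECB/CBC templates.  example_encrypt/example_decrypt are hypothetical
 * request handlers supplied by the template.
 *
 *	static int example_create(struct crypto_template *tmpl,
 *				  struct rtattr **tb)
 *	{
 *		struct skcipher_instance *inst;
 *		int err;
 *
 *		inst = skcipher_alloc_instance_simple(tmpl, tb);
 *		if (IS_ERR(inst))
 *			return PTR_ERR(inst);
 *
 *		inst->alg.ivsize = 0;	// e.g. ECB takes no IV
 *		inst->alg.encrypt = example_encrypt;
 *		inst->alg.decrypt = example_decrypt;
 *
 *		err = skcipher_register_instance(tmpl, inst);
 *		if (err)
 *			inst->free(inst);
 *		return err;
 *	}
 */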

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);