// SPDX-License-Identifier: GPL-2.0+
/*
 * s390 implementation of the AES Cipher Algorithm.
 *
 * Copyright IBM Corp. 2005, 2017
 *
 * Derived from "crypto/aes_generic.c"
 */

#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/ghash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fips.h>
#include <linux/string.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>

static u8 *ctrblk;
static DEFINE_SPINLOCK(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
		    kma_functions;

struct s390_aes_ctx {
	u8 key[AES_MAX_KEY_SIZE];
	int key_len;
	unsigned long fc;
	union {
		struct crypto_sync_skcipher *blk;
		struct crypto_cipher *cip;
	} fallback;
};

struct s390_xts_ctx {
	u8 key[32];
	u8 pcc_key[32];
	int key_len;
	unsigned long fc;
	struct crypto_sync_skcipher *fallback;
};

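/*
 * The GCM code below walks the source and destination scatterlists by
 * hand. Whenever a scatterlist entry ends in the middle of an AES
 * block, up to AES_BLOCK_SIZE bytes are gathered in the buf[] member
 * below, so that the CPACF KMA instruction always operates on a
 * contiguous chunk of at least the requested minimum size.
 */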
struct gcm_sg_walk {
	struct scatter_walk walk;
	unsigned int walk_bytes;
	u8 *walk_ptr;
	unsigned int walk_bytes_remain;
	u8 buf[AES_BLOCK_SIZE];
	unsigned int buf_bytes;
	u8 *ptr;
	unsigned int nbytes;
};

static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
			       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	int ret;

	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
					       CRYPTO_TFM_REQ_MASK);

	ret = crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
	if (ret) {
		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
		tfm->crt_flags |= (sctx->fallback.cip->base.crt_flags &
				   CRYPTO_TFM_RES_MASK);
	}
	return ret;
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_cip(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

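/*
 * Note: every setkey helper in this file follows the pattern above: if
 * the machine lacks the CPACF function code for the requested key
 * length, ->fc stays zero and the key is handed to the software
 * fallback tfm allocated in the cra_init callback. The encrypt/decrypt
 * paths then test ->fc to decide between hardware and fallback.
 */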
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
}

static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc | CPACF_DECRYPT,
		 &sctx->key, out, in, AES_BLOCK_SIZE);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
						 CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(sctx->fallback.cip)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.cip);
	}
	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(sctx->fallback.cip);
	sctx->fallback.cip = NULL;
}

static struct crypto_alg aes_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-s390",
	.cra_priority		= 300,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct s390_aes_ctx),
	.cra_module		= THIS_MODULE,
	.cra_init		= fallback_init_cip,
	.cra_exit		= fallback_exit_cip,
	.cra_u			= {
		.cipher = {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= aes_encrypt,
			.cia_decrypt		= aes_decrypt,
		}
	}
};

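/*
 * A minimal usage sketch (illustrative only, not part of this driver;
 * key/in/out are hypothetical caller buffers): with this module loaded,
 * the generic "aes" name resolves to "aes-s390" on machines that offer
 * the KM-AES function codes.
 *
 *	struct crypto_cipher *tfm = crypto_alloc_cipher("aes", 0, 0);
 *
 *	if (!IS_ERR(tfm)) {
 *		crypto_cipher_setkey(tfm, key, AES_KEYSIZE_128);
 *		crypto_cipher_encrypt_one(tfm, out, in);
 *		crypto_free_cipher(tfm);
 *	}
 */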
static int setkey_fallback_blk(struct crypto_tfm *tfm, const u8 *key,
			       unsigned int len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned int ret;

	crypto_sync_skcipher_clear_flags(sctx->fallback.blk,
					 CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(sctx->fallback.blk, tfm->crt_flags &
						      CRYPTO_TFM_REQ_MASK);

	ret = crypto_sync_skcipher_setkey(sctx->fallback.blk, key, len);

	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->crt_flags |= crypto_sync_skcipher_get_flags(sctx->fallback.blk) &
			  CRYPTO_TFM_RES_MASK;

	return ret;
}

static int fallback_blk_dec(struct blkcipher_desc *desc,
			    struct scatterlist *dst, struct scatterlist *src,
			    unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);

	skcipher_request_set_sync_tfm(req, sctx->fallback.blk);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_decrypt(req);

	skcipher_request_zero(req);
	return ret;
}

static int fallback_blk_enc(struct blkcipher_desc *desc,
			    struct scatterlist *dst, struct scatterlist *src,
			    unsigned int nbytes)
{
	unsigned int ret;
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(tfm);
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, sctx->fallback.blk);

	skcipher_request_set_sync_tfm(req, sctx->fallback.blk);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_encrypt(req);
	return ret;
}

static int ecb_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_blk(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static int ecb_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			 struct blkcipher_walk *walk)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int nbytes, n;
	int ret;

	ret = blkcipher_walk_virt(desc, walk);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(sctx->fc | modifier, sctx->key,
			 walk->dst.virt.addr, walk->src.virt.addr, n);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}

	return ret;
}

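/*
 * Note: unlike a per-block software loop, the cpacf_km() call above
 * hands all complete blocks that the walk currently maps to a single
 * KM operation; blkcipher_walk_done() is told how many bytes are left
 * so the walk advances in block-sized strides.
 */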
static int ecb_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_aes_crypt(desc, 0, &walk);
}

static int ecb_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ecb_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static int fallback_init_blk(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.blk = crypto_alloc_sync_skcipher(name, 0,
						   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(sctx->fallback.blk)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.blk);
	}
	return 0;
}

static void fallback_exit_blk(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_sync_skcipher(sctx->fallback.blk);
}

static struct crypto_alg ecb_aes_alg = {
	.cra_name		= "ecb(aes)",
	.cra_driver_name	= "ecb-aes-s390",
	.cra_priority		= 401,	/* combo: aes + ecb + 1 */
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct s390_aes_ctx),
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= fallback_init_blk,
	.cra_exit		= fallback_exit_blk,
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.setkey		= ecb_aes_set_key,
			.encrypt	= ecb_aes_encrypt,
			.decrypt	= ecb_aes_decrypt,
		}
	}
};

static int cbc_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMC_AES_128 :
	     (key_len == 24) ? CPACF_KMC_AES_192 :
	     (key_len == 32) ? CPACF_KMC_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_blk(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static int cbc_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			 struct blkcipher_walk *walk)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int nbytes, n;
	int ret;
	struct {
		u8 iv[AES_BLOCK_SIZE];
		u8 key[AES_MAX_KEY_SIZE];
	} param;

	ret = blkcipher_walk_virt(desc, walk);
	memcpy(param.iv, walk->iv, AES_BLOCK_SIZE);
	memcpy(param.key, sctx->key, sctx->key_len);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_kmc(sctx->fc | modifier, &param,
			  walk->dst.virt.addr, walk->src.virt.addr, n);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}
	memcpy(walk->iv, param.iv, AES_BLOCK_SIZE);
	return ret;
}

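/*
 * Note: the KMC parameter block starts with the chaining value, which
 * the hardware updates as it chains from block to block. Copying
 * param.iv back into walk->iv after the loop keeps the crypto API's
 * notion of the IV in sync for chained requests.
 */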
static int cbc_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return cbc_aes_crypt(desc, 0, &walk);
}

static int cbc_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return cbc_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static struct crypto_alg cbc_aes_alg = {
	.cra_name		= "cbc(aes)",
	.cra_driver_name	= "cbc-aes-s390",
	.cra_priority		= 402,	/* ecb-aes-s390 + 1 */
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct s390_aes_ctx),
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= fallback_init_blk,
	.cra_exit		= fallback_exit_blk,
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= cbc_aes_set_key,
			.encrypt	= cbc_aes_encrypt,
			.decrypt	= cbc_aes_decrypt,
		}
	}
};

static int xts_fallback_setkey(struct crypto_tfm *tfm, const u8 *key,
			       unsigned int len)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
	unsigned int ret;

	crypto_sync_skcipher_clear_flags(xts_ctx->fallback,
					 CRYPTO_TFM_REQ_MASK);
	crypto_sync_skcipher_set_flags(xts_ctx->fallback, tfm->crt_flags &
						     CRYPTO_TFM_REQ_MASK);

	ret = crypto_sync_skcipher_setkey(xts_ctx->fallback, key, len);

	tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
	tfm->crt_flags |= crypto_sync_skcipher_get_flags(xts_ctx->fallback) &
			  CRYPTO_TFM_RES_MASK;

	return ret;
}

static int xts_fallback_decrypt(struct blkcipher_desc *desc,
				struct scatterlist *dst, struct scatterlist *src,
				unsigned int nbytes)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
	unsigned int ret;

	skcipher_request_set_sync_tfm(req, xts_ctx->fallback);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_decrypt(req);

	skcipher_request_zero(req);
	return ret;
}

static int xts_fallback_encrypt(struct blkcipher_desc *desc,
				struct scatterlist *dst, struct scatterlist *src,
				unsigned int nbytes)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(tfm);
	SYNC_SKCIPHER_REQUEST_ON_STACK(req, xts_ctx->fallback);
	unsigned int ret;

	skcipher_request_set_sync_tfm(req, xts_ctx->fallback);
	skcipher_request_set_callback(req, desc->flags, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);

	ret = crypto_skcipher_encrypt(req);

	skcipher_request_zero(req);
	return ret;
}

static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);
	unsigned long fc;
	int err;

	err = xts_check_key(tfm, in_key, key_len);
	if (err)
		return err;

	/* In fips mode only 128 bit or 256 bit keys are valid */
	if (fips_enabled && key_len != 32 && key_len != 64) {
		tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/* Pick the correct function code based on the key length */
	fc = (key_len == 32) ? CPACF_KM_XTS_128 :
	     (key_len == 64) ? CPACF_KM_XTS_256 : 0;

	/* Check if the function code is available */
	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!xts_ctx->fc)
		return xts_fallback_setkey(tfm, in_key, key_len);

	/* Split the XTS key into the two subkeys */
	key_len = key_len / 2;
	xts_ctx->key_len = key_len;
	memcpy(xts_ctx->key, in_key, key_len);
	memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
	return 0;
}

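/*
 * The XTS tweak is not computed in software. In xts_aes_crypt() below,
 * cpacf_pcc() (PERFORM CRYPTOGRAPHIC COMPUTATION) derives the initial
 * tweak from pcc_key and the IV, and the result seeds the KM parameter
 * block. The "key_len & 0x10" offset selects the parameter block
 * layout for the key size: a 16-byte (AES-128) key is stored at
 * offset 16, a 32-byte (AES-256) key at offset 0.
 */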
static int xts_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			 struct blkcipher_walk *walk)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int offset, nbytes, n;
	int ret;
	struct {
		u8 key[32];
		u8 tweak[16];
		u8 block[16];
		u8 bit[16];
		u8 xts[16];
	} pcc_param;
	struct {
		u8 key[32];
		u8 init[16];
	} xts_param;

	ret = blkcipher_walk_virt(desc, walk);
	offset = xts_ctx->key_len & 0x10;
	memset(pcc_param.block, 0, sizeof(pcc_param.block));
	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
	memcpy(pcc_param.tweak, walk->iv, sizeof(pcc_param.tweak));
	memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
	cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);

	memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
	memcpy(xts_param.init, pcc_param.xts, 16);

	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
			 walk->dst.virt.addr, walk->src.virt.addr, n);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}
	return ret;
}

static int xts_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!xts_ctx->fc))
		return xts_fallback_encrypt(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return xts_aes_crypt(desc, 0, &walk);
}

static int xts_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!xts_ctx->fc))
		return xts_fallback_decrypt(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return xts_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static int xts_fallback_init(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

	xts_ctx->fallback = crypto_alloc_sync_skcipher(name, 0,
						   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(xts_ctx->fallback)) {
		pr_err("Allocating XTS fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(xts_ctx->fallback);
	}
	return 0;
}

static void xts_fallback_exit(struct crypto_tfm *tfm)
{
	struct s390_xts_ctx *xts_ctx = crypto_tfm_ctx(tfm);

	crypto_free_sync_skcipher(xts_ctx->fallback);
}

static struct crypto_alg xts_aes_alg = {
	.cra_name		= "xts(aes)",
	.cra_driver_name	= "xts-aes-s390",
	.cra_priority		= 402,	/* ecb-aes-s390 + 1 */
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct s390_xts_ctx),
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= xts_fallback_init,
	.cra_exit		= xts_fallback_exit,
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= 2 * AES_MIN_KEY_SIZE,
			.max_keysize	= 2 * AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= xts_aes_set_key,
			.encrypt	= xts_aes_encrypt,
			.decrypt	= xts_aes_decrypt,
		}
	}
};

static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
	     (key_len == 24) ? CPACF_KMCTR_AES_192 :
	     (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_blk(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
	unsigned int i, n;

	/* only use complete blocks, max. PAGE_SIZE */
	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
		ctrptr += AES_BLOCK_SIZE;
	}
	return n;
}

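/*
 * Example: for iv = ...01 and nbytes = 64, __ctrblk_init() fills the
 * page with the four consecutive counter blocks ...01, ...02, ...03,
 * ...04 and returns 64, so a single KMCTR invocation can process all
 * four blocks at once instead of one block per call.
 */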
static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
			 struct blkcipher_walk *walk)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	u8 buf[AES_BLOCK_SIZE], *ctrptr;
	unsigned int n, nbytes;
	int ret, locked;

	locked = spin_trylock(&ctrblk_lock);

	ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		n = AES_BLOCK_SIZE;
		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
			n = __ctrblk_init(ctrblk, walk->iv, nbytes);
		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk->iv;
		cpacf_kmctr(sctx->fc | modifier, sctx->key,
			    walk->dst.virt.addr, walk->src.virt.addr,
			    n, ctrptr);
		if (ctrptr == ctrblk)
			memcpy(walk->iv, ctrptr + n - AES_BLOCK_SIZE,
			       AES_BLOCK_SIZE);
		crypto_inc(walk->iv, AES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, nbytes - n);
	}
	if (locked)
		spin_unlock(&ctrblk_lock);
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		cpacf_kmctr(sctx->fc | modifier, sctx->key,
			    buf, walk->src.virt.addr,
			    AES_BLOCK_SIZE, walk->iv);
		memcpy(walk->dst.virt.addr, buf, nbytes);
		crypto_inc(walk->iv, AES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, 0);
	}

	return ret;
}

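/*
 * Note: ctrblk is a single page shared by all CTR requests of this
 * driver, hence the spin_trylock() above: if another context already
 * holds ctrblk_lock, the loop simply degrades to one counter block per
 * KMCTR call, using walk->iv directly instead of the prebuilt counter
 * page, rather than blocking.
 */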
static int ctr_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_enc(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ctr_aes_crypt(desc, 0, &walk);
}

static int ctr_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;

	if (unlikely(!sctx->fc))
		return fallback_blk_dec(desc, dst, src, nbytes);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return ctr_aes_crypt(desc, CPACF_DECRYPT, &walk);
}

static struct crypto_alg ctr_aes_alg = {
	.cra_name		= "ctr(aes)",
	.cra_driver_name	= "ctr-aes-s390",
	.cra_priority		= 402,	/* ecb-aes-s390 + 1 */
	.cra_flags		= CRYPTO_ALG_TYPE_BLKCIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		= 1,
	.cra_ctxsize		= sizeof(struct s390_aes_ctx),
	.cra_type		= &crypto_blkcipher_type,
	.cra_module		= THIS_MODULE,
	.cra_init		= fallback_init_blk,
	.cra_exit		= fallback_exit_blk,
	.cra_u			= {
		.blkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.setkey		= ctr_aes_set_key,
			.encrypt	= ctr_aes_encrypt,
			.decrypt	= ctr_aes_decrypt,
		}
	}
};

static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->fc = CPACF_KMA_GCM_AES_128;
		break;
	case AES_KEYSIZE_192:
		ctx->fc = CPACF_KMA_GCM_AES_192;
		break;
	case AES_KEYSIZE_256:
		ctx->fc = CPACF_KMA_GCM_AES_256;
		break;
	default:
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_len = keylen;
	return 0;
}

static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	/* accept the tag lengths permitted for GCM */
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void gcm_sg_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
			      unsigned int len)
{
	memset(gw, 0, sizeof(*gw));
	gw->walk_bytes_remain = len;
	scatterwalk_start(&gw->walk, sg);
}

static int gcm_sg_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	int n;

	/* minbytesneeded <= AES_BLOCK_SIZE */
	if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
		gw->ptr = gw->buf;
		gw->nbytes = gw->buf_bytes;
		goto out;
	}

	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
	if (!gw->walk_bytes) {
		scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
						   gw->walk_bytes_remain);
	}
	gw->walk_ptr = scatterwalk_map(&gw->walk);

	if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	while (1) {
		n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
		memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
		gw->buf_bytes += n;
		gw->walk_bytes_remain -= n;
		scatterwalk_unmap(&gw->walk);
		scatterwalk_advance(&gw->walk, n);
		scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);

		if (gw->buf_bytes >= minbytesneeded) {
			gw->ptr = gw->buf;
			gw->nbytes = gw->buf_bytes;
			goto out;
		}

		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
						   gw->walk_bytes_remain);
		if (!gw->walk_bytes) {
			scatterwalk_start(&gw->walk, sg_next(gw->walk.sg));
			gw->walk_bytes = scatterwalk_clamp(&gw->walk,
							gw->walk_bytes_remain);
		}
		gw->walk_ptr = scatterwalk_map(&gw->walk);
	}

out:
	return gw->nbytes;
}

static void gcm_sg_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	int n;

	if (gw->ptr == NULL)
		return;

	if (gw->ptr == gw->buf) {
		n = gw->buf_bytes - bytesdone;
		if (n > 0) {
			memmove(gw->buf, gw->buf + bytesdone, n);
			gw->buf_bytes = n;
		} else
			gw->buf_bytes = 0;
	} else {
		gw->walk_bytes_remain -= bytesdone;
		scatterwalk_unmap(&gw->walk);
		scatterwalk_advance(&gw->walk, bytesdone);
		scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
	}
}

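/*
 * KMA parameter block note (see the struct in gcm_aes_crypt() below):
 * the aead_alg definition pins .ivsize to 12 bytes, so the initial
 * counter J0 is IV || 0^31 || 1 exactly as specified by NIST SP
 * 800-38D. The memcpy of req->iv followed by storing a u32 of 1 right
 * behind it builds precisely that value (s390 is big-endian).
 */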
static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int ivsize = crypto_aead_ivsize(tfm);
	unsigned int taglen = crypto_aead_authsize(tfm);
	unsigned int aadlen = req->assoclen;
	unsigned int pclen = req->cryptlen;
	int ret = 0;

	unsigned int len, in_bytes, out_bytes,
		     min_bytes, bytes, aad_bytes, pc_bytes;
	struct gcm_sg_walk gw_in, gw_out;
	u8 tag[GHASH_DIGEST_SIZE];

	struct {
		u32 _[3];		/* reserved */
		u32 cv;			/* Counter Value */
		u8 t[GHASH_DIGEST_SIZE];/* Tag */
		u8 h[AES_BLOCK_SIZE];	/* Hash-subkey */
		u64 taadl;		/* Total AAD Length */
		u64 tpcl;		/* Total Plain-/Cipher-text Length */
		u8 j0[GHASH_BLOCK_SIZE];/* initial counter value */
		u8 k[AES_MAX_KEY_SIZE];	/* Key */
	} param;

	/*
	 * encrypt
	 *   req->src: aad||plaintext
	 *   req->dst: aad||ciphertext||tag
	 * decrypt
	 *   req->src: aad||ciphertext||tag
	 *   req->dst: aad||plaintext, return 0 or -EBADMSG
	 * aad, plaintext and ciphertext may be empty.
	 */
	if (flags & CPACF_DECRYPT)
		pclen -= taglen;
	len = aadlen + pclen;

	memset(&param, 0, sizeof(param));
	param.cv = 1;
	param.taadl = aadlen * 8;
	param.tpcl = pclen * 8;
	memcpy(param.j0, req->iv, ivsize);
	*(u32 *)(param.j0 + ivsize) = 1;
	memcpy(param.k, ctx->key, ctx->key_len);

	gcm_sg_walk_start(&gw_in, req->src, len);
	gcm_sg_walk_start(&gw_out, req->dst, len);

	do {
		min_bytes = min_t(unsigned int,
				  aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
		in_bytes = gcm_sg_walk_go(&gw_in, min_bytes);
		out_bytes = gcm_sg_walk_go(&gw_out, min_bytes);
		bytes = min(in_bytes, out_bytes);

		if (aadlen + pclen <= bytes) {
			aad_bytes = aadlen;
			pc_bytes = pclen;
			flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
		} else {
			if (aadlen <= bytes) {
				aad_bytes = aadlen;
				pc_bytes = (bytes - aadlen) &
					   ~(AES_BLOCK_SIZE - 1);
				flags |= CPACF_KMA_LAAD;
			} else {
				aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
				pc_bytes = 0;
			}
		}

		memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);

		cpacf_kma(ctx->fc | flags, &param,
			  gw_out.ptr + aad_bytes,
			  gw_in.ptr + aad_bytes, pc_bytes,
			  gw_in.ptr, aad_bytes);

		gcm_sg_walk_done(&gw_in, aad_bytes + pc_bytes);
		gcm_sg_walk_done(&gw_out, aad_bytes + pc_bytes);
		aadlen -= aad_bytes;
		pclen -= pc_bytes;
	} while (aadlen + pclen > 0);

	if (flags & CPACF_DECRYPT) {
		scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
		if (crypto_memneq(tag, param.t, taglen))
			ret = -EBADMSG;
	} else
		scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);

	memzero_explicit(&param, sizeof(param));
	return ret;
}

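/*
 * Note on the tag check above: crypto_memneq() compares the computed
 * and received tags in constant time, so a failed decrypt leaks no
 * timing information about how many tag bytes matched. The parameter
 * block, which holds the key, is wiped with memzero_explicit() on both
 * the success and failure paths.
 */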
static int gcm_aes_encrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_ENCRYPT);
}

static int gcm_aes_decrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_DECRYPT);
}

static struct aead_alg gcm_aes_aead = {
	.setkey			= gcm_aes_setkey,
	.setauthsize		= gcm_aes_setauthsize,
	.encrypt		= gcm_aes_encrypt,
	.decrypt		= gcm_aes_decrypt,

	.ivsize			= GHASH_BLOCK_SIZE - sizeof(u32),
	.maxauthsize		= GHASH_DIGEST_SIZE,
	.chunksize		= AES_BLOCK_SIZE,

	.base			= {
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct s390_aes_ctx),
		.cra_priority		= 900,
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "gcm-aes-s390",
		.cra_module		= THIS_MODULE,
	},
};

static struct crypto_alg *aes_s390_algs_ptr[5];
static int aes_s390_algs_num;
static struct aead_alg *aes_s390_aead_alg;

static int aes_s390_register_alg(struct crypto_alg *alg)
{
	int ret;

	ret = crypto_register_alg(alg);
	if (!ret)
		aes_s390_algs_ptr[aes_s390_algs_num++] = alg;
	return ret;
}

static void aes_s390_fini(void)
{
	while (aes_s390_algs_num--)
		crypto_unregister_alg(aes_s390_algs_ptr[aes_s390_algs_num]);
	if (ctrblk)
		free_page((unsigned long) ctrblk);

	if (aes_s390_aead_alg)
		crypto_unregister_aead(aes_s390_aead_alg);
}

static int __init aes_s390_init(void)
{
	int ret;

	/* Query available functions for KM, KMC, KMCTR and KMA */
	cpacf_query(CPACF_KM, &km_functions);
	cpacf_query(CPACF_KMC, &kmc_functions);
	cpacf_query(CPACF_KMCTR, &kmctr_functions);
	cpacf_query(CPACF_KMA, &kma_functions);

	if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
		ret = aes_s390_register_alg(&aes_alg);
		if (ret)
			goto out_err;
		ret = aes_s390_register_alg(&ecb_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
		ret = aes_s390_register_alg(&cbc_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
		ret = aes_s390_register_alg(&xts_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			ret = -ENOMEM;
			goto out_err;
		}
		ret = aes_s390_register_alg(&ctr_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
		ret = crypto_register_aead(&gcm_aes_aead);
		if (ret)
			goto out_err;
		aes_s390_aead_alg = &gcm_aes_aead;
	}

	return 0;
out_err:
	aes_s390_fini();
	return ret;
}

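/*
 * Note: module_cpu_feature_match(MSA, ...) below ties both module
 * autoloading and initialization to the message-security-assist CPU
 * facility; on machines without MSA, aes_s390_init() is never run, and
 * the "aes-all" alias still lets the crypto layer pull this module in
 * when a generic AES request is made.
 */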
module_cpu_feature_match(MSA, aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS_CRYPTO("aes-all");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");