// SPDX-License-Identifier: GPL-2.0+
/*
 * Cryptographic API.
 *
 * s390 implementation of the AES Cipher Algorithm.
 *
 * Copyright IBM Corp. 2005, 2017
 *
 * Derived from "crypto/aes_generic.c"
 */

#define KMSG_COMPONENT "aes_s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/ghash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fips.h>
#include <linux/string.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>

static u8 *ctrblk;
static DEFINE_MUTEX(ctrblk_lock);

static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
		    kma_functions;

struct s390_aes_ctx {
	u8 key[AES_MAX_KEY_SIZE];
	int key_len;
	unsigned long fc;
	union {
		struct crypto_skcipher *skcipher;
		struct crypto_cipher *cip;
	} fallback;
};

struct s390_xts_ctx {
	union {
		u8 keys[64];
		struct {
			u8 key[32];
			u8 pcc_key[32];
		};
	};
	int key_len;
	unsigned long fc;
	struct crypto_skcipher *fallback;
};

struct gcm_sg_walk {
	struct scatter_walk walk;
	unsigned int walk_bytes;
	u8 *walk_ptr;
	unsigned int walk_bytes_remain;
	u8 buf[AES_BLOCK_SIZE];
	unsigned int buf_bytes;
	u8 *ptr;
	unsigned int nbytes;
};

static int setkey_fallback_cip(struct crypto_tfm *tfm, const u8 *in_key,
			       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	sctx->fallback.cip->base.crt_flags |= (tfm->crt_flags &
					       CRYPTO_TFM_REQ_MASK);

	return crypto_cipher_setkey(sctx->fallback.cip, in_key, key_len);
}

static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_cip(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static void crypto_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_encrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc, &sctx->key, out, in, AES_BLOCK_SIZE);
}

static void crypto_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	if (unlikely(!sctx->fc)) {
		crypto_cipher_decrypt_one(sctx->fallback.cip, out, in);
		return;
	}
	cpacf_km(sctx->fc | CPACF_DECRYPT,
		 &sctx->key, out, in, AES_BLOCK_SIZE);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = tfm->__crt_alg->cra_name;
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	sctx->fallback.cip = crypto_alloc_cipher(name, 0,
						 CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(sctx->fallback.cip)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct s390_aes_ctx *sctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(sctx->fallback.cip);
	sctx->fallback.cip = NULL;
}

static struct crypto_alg aes_alg = {
	.cra_name		=	"aes",
	.cra_driver_name	=	"aes-s390",
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER |
					CRYPTO_ALG_NEED_FALLBACK,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct s390_aes_ctx),
	.cra_module		=	THIS_MODULE,
	.cra_init		=	fallback_init_cip,
	.cra_exit		=	fallback_exit_cip,
	.cra_u			=	{
		.cipher = {
			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
			.cia_setkey		=	aes_set_key,
			.cia_encrypt		=	crypto_aes_encrypt,
			.cia_decrypt		=	crypto_aes_decrypt,
		}
	}
};

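/*
 * Usage sketch (illustrative, not part of this driver): consumers reach
 * this implementation transparently through the crypto API by name;
 * "key", "dst" and "src" below are placeholders.
 *
 *	struct crypto_cipher *tfm = crypto_alloc_cipher("aes", 0, 0);
 *
 *	if (!IS_ERR(tfm)) {
 *		crypto_cipher_setkey(tfm, key, AES_KEYSIZE_128);
 *		crypto_cipher_encrypt_one(tfm, dst, src); // one 16-byte block
 *		crypto_free_cipher(tfm);
 *	}
 *
 * If the KM facility lacks the function code for the given key length,
 * aes_set_key() leaves sctx->fc zero and the calls above are served by
 * the software fallback allocated in fallback_init_cip().
 */
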
static int setkey_fallback_skcipher(struct crypto_skcipher *tfm, const u8 *key,
				    unsigned int len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_clear_flags(sctx->fallback.skcipher,
				    CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(sctx->fallback.skcipher,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(sctx->fallback.skcipher, key, len);
}

static int fallback_skcipher_crypt(struct s390_aes_ctx *sctx,
				   struct skcipher_request *req,
				   unsigned long modifier)
{
	struct skcipher_request *subreq = skcipher_request_ctx(req);

	*subreq = *req;
	skcipher_request_set_tfm(subreq, sctx->fallback.skcipher);
	return (modifier & CPACF_DECRYPT) ?
		crypto_skcipher_decrypt(subreq) :
		crypto_skcipher_encrypt(subreq);
}

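/*
 * Note on the fallback pattern above: the outer request is copied into
 * subrequest memory that fallback_init_skcipher() reserved via
 * crypto_skcipher_set_reqsize(), and only the tfm pointer is swapped.
 * The caller's src/dst scatterlists, IV and completion callback are
 * reused unchanged by the fallback tfm.
 */
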
static int ecb_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KM_AES_128 :
	     (key_len == 24) ? CPACF_KM_AES_192 :
	     (key_len == 32) ? CPACF_KM_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static int ecb_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n;
	int ret;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, modifier);

	ret = skcipher_walk_virt(&walk, req, false);
	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(sctx->fc | modifier, sctx->key,
			 walk.dst.virt.addr, walk.src.virt.addr, n);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	return ret;
}

static int ecb_aes_encrypt(struct skcipher_request *req)
{
	return ecb_aes_crypt(req, 0);
}

static int ecb_aes_decrypt(struct skcipher_request *req)
{
	return ecb_aes_crypt(req, CPACF_DECRYPT);
}

static int fallback_init_skcipher(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	sctx->fallback.skcipher = crypto_alloc_skcipher(name, 0,
				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);

	if (IS_ERR(sctx->fallback.skcipher)) {
		pr_err("Allocating AES fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(sctx->fallback.skcipher);
	}

	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
				    crypto_skcipher_reqsize(sctx->fallback.skcipher));
	return 0;
}

static void fallback_exit_skcipher(struct crypto_skcipher *tfm)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(sctx->fallback.skcipher);
}

static struct skcipher_alg ecb_aes_alg = {
	.base.cra_name		=	"ecb(aes)",
	.base.cra_driver_name	=	"ecb-aes-s390",
	.base.cra_priority	=	401,	/* combo: aes + ecb + 1 */
	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	=	AES_BLOCK_SIZE,
	.base.cra_ctxsize	=	sizeof(struct s390_aes_ctx),
	.base.cra_module	=	THIS_MODULE,
	.init			=	fallback_init_skcipher,
	.exit			=	fallback_exit_skcipher,
	.min_keysize		=	AES_MIN_KEY_SIZE,
	.max_keysize		=	AES_MAX_KEY_SIZE,
	.setkey			=	ecb_aes_set_key,
	.encrypt		=	ecb_aes_encrypt,
	.decrypt		=	ecb_aes_decrypt,
};

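/*
 * Usage sketch (illustrative, not part of this driver): the skcipher
 * algorithms registered here are requested by cra_name, e.g.:
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *
 *	tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_256);
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, len, NULL);
 *	crypto_skcipher_encrypt(req);
 *
 * Error handling is omitted; "key", "src_sg", "dst_sg" and "len" are
 * placeholders. ECB takes no IV, hence the NULL iv argument.
 */
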
static int cbc_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMC_AES_128 :
	     (key_len == 24) ? CPACF_KMC_AES_192 :
	     (key_len == 32) ? CPACF_KMC_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmc_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static int cbc_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes, n;
	int ret;
	struct {
		u8 iv[AES_BLOCK_SIZE];
		u8 key[AES_MAX_KEY_SIZE];
	} param;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, modifier);

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;
	memcpy(param.iv, walk.iv, AES_BLOCK_SIZE);
	memcpy(param.key, sctx->key, sctx->key_len);
	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_kmc(sctx->fc | modifier, &param,
			  walk.dst.virt.addr, walk.src.virt.addr, n);
		memcpy(walk.iv, param.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	memzero_explicit(&param, sizeof(param));
	return ret;
}

static int cbc_aes_encrypt(struct skcipher_request *req)
{
	return cbc_aes_crypt(req, 0);
}

static int cbc_aes_decrypt(struct skcipher_request *req)
{
	return cbc_aes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg cbc_aes_alg = {
	.base.cra_name		=	"cbc(aes)",
	.base.cra_driver_name	=	"cbc-aes-s390",
	.base.cra_priority	=	402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	=	AES_BLOCK_SIZE,
	.base.cra_ctxsize	=	sizeof(struct s390_aes_ctx),
	.base.cra_module	=	THIS_MODULE,
	.init			=	fallback_init_skcipher,
	.exit			=	fallback_exit_skcipher,
	.min_keysize		=	AES_MIN_KEY_SIZE,
	.max_keysize		=	AES_MAX_KEY_SIZE,
	.ivsize			=	AES_BLOCK_SIZE,
	.setkey			=	cbc_aes_set_key,
	.encrypt		=	cbc_aes_encrypt,
	.decrypt		=	cbc_aes_decrypt,
};

static int xts_fallback_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       unsigned int len)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_clear_flags(xts_ctx->fallback, CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(xts_ctx->fallback,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(xts_ctx->fallback, key, len);
}

static int xts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;
	int err;

	err = xts_fallback_setkey(tfm, in_key, key_len);
	if (err)
		return err;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 32) ? CPACF_KM_XTS_128 :
	     (key_len == 64) ? CPACF_KM_XTS_256 : 0;

	/* Check if the function code is available */
	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!xts_ctx->fc)
		return 0;

	/* Split the XTS key into the two subkeys */
	key_len = key_len / 2;
	xts_ctx->key_len = key_len;
	memcpy(xts_ctx->key, in_key, key_len);
	memcpy(xts_ctx->pcc_key, in_key + key_len, key_len);
	return 0;
}

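/*
 * XTS uses two independent AES keys: the first encrypts the data
 * units, the second encrypts the tweak. xts_aes_crypt() below hands
 * the second subkey to the PCC (Perform Cryptographic Computation)
 * instruction, which precomputes the first tweak value; KM then
 * consumes it through the "init" field of its parameter block. For
 * example, a 64-byte xts(aes) key splits into two 32-byte subkeys for
 * the XTS-AES-256 function code.
 */
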
static int xts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int offset, nbytes, n;
	int ret;
	struct {
		u8 key[32];
		u8 tweak[16];
		u8 block[16];
		u8 bit[16];
		u8 xts[16];
	} pcc_param;
	struct {
		u8 key[32];
		u8 init[16];
	} xts_param;

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
		struct skcipher_request *subreq = skcipher_request_ctx(req);

		*subreq = *req;
		skcipher_request_set_tfm(subreq, xts_ctx->fallback);
		return (modifier & CPACF_DECRYPT) ?
			crypto_skcipher_decrypt(subreq) :
			crypto_skcipher_encrypt(subreq);
	}

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;
	offset = xts_ctx->key_len & 0x10;
	memset(pcc_param.block, 0, sizeof(pcc_param.block));
	memset(pcc_param.bit, 0, sizeof(pcc_param.bit));
	memset(pcc_param.xts, 0, sizeof(pcc_param.xts));
	memcpy(pcc_param.tweak, walk.iv, sizeof(pcc_param.tweak));
	memcpy(pcc_param.key + offset, xts_ctx->pcc_key, xts_ctx->key_len);
	cpacf_pcc(xts_ctx->fc, pcc_param.key + offset);

	memcpy(xts_param.key + offset, xts_ctx->key, xts_ctx->key_len);
	memcpy(xts_param.init, pcc_param.xts, 16);

	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(xts_ctx->fc | modifier, xts_param.key + offset,
			 walk.dst.virt.addr, walk.src.virt.addr, n);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	memzero_explicit(&pcc_param, sizeof(pcc_param));
	memzero_explicit(&xts_param, sizeof(xts_param));
	return ret;
}

static int xts_aes_encrypt(struct skcipher_request *req)
{
	return xts_aes_crypt(req, 0);
}

static int xts_aes_decrypt(struct skcipher_request *req)
{
	return xts_aes_crypt(req, CPACF_DECRYPT);
}

static int xts_fallback_init(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	xts_ctx->fallback = crypto_alloc_skcipher(name, 0,
				CRYPTO_ALG_NEED_FALLBACK | CRYPTO_ALG_ASYNC);

	if (IS_ERR(xts_ctx->fallback)) {
		pr_err("Allocating XTS fallback algorithm %s failed\n",
		       name);
		return PTR_ERR(xts_ctx->fallback);
	}
	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
				    crypto_skcipher_reqsize(xts_ctx->fallback));
	return 0;
}

static void xts_fallback_exit(struct crypto_skcipher *tfm)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(xts_ctx->fallback);
}

static struct skcipher_alg xts_aes_alg = {
	.base.cra_name		=	"xts(aes)",
	.base.cra_driver_name	=	"xts-aes-s390",
	.base.cra_priority	=	402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	=	AES_BLOCK_SIZE,
	.base.cra_ctxsize	=	sizeof(struct s390_xts_ctx),
	.base.cra_module	=	THIS_MODULE,
	.init			=	xts_fallback_init,
	.exit			=	xts_fallback_exit,
	.min_keysize		=	2 * AES_MIN_KEY_SIZE,
	.max_keysize		=	2 * AES_MAX_KEY_SIZE,
	.ivsize			=	AES_BLOCK_SIZE,
	.setkey			=	xts_aes_set_key,
	.encrypt		=	xts_aes_encrypt,
	.decrypt		=	xts_aes_decrypt,
};

static int fullxts_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			       unsigned int key_len)
{
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;
	int err;

	err = xts_fallback_setkey(tfm, in_key, key_len);
	if (err)
		return err;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 32) ? CPACF_KM_XTS_128_FULL :
	     (key_len == 64) ? CPACF_KM_XTS_256_FULL : 0;

	/* Check if the function code is available */
	xts_ctx->fc = (fc && cpacf_test_func(&km_functions, fc)) ? fc : 0;
	if (!xts_ctx->fc)
		return 0;

	/* Store double-key */
	memcpy(xts_ctx->keys, in_key, key_len);
	xts_ctx->key_len = key_len;
	return 0;
}

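/*
 * Unlike the PCC-assisted path above, the full-XTS function codes
 * (CPACF_KM_XTS_{128,256}_FULL) take the complete double-key plus the
 * tweak in a single KM parameter block and derive the per-block tweaks
 * internally, so fullxts_aes_crypt() needs no separate PCC step.
 */
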
static int fullxts_aes_crypt(struct skcipher_request *req, unsigned long modifier)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_xts_ctx *xts_ctx = crypto_skcipher_ctx(tfm);
	unsigned int offset, nbytes, n;
	struct skcipher_walk walk;
	int ret;
	struct {
		__u8 key[64];
		__u8 tweak[16];
		__u8 nap[16];
	} fxts_param = {
		.nap = {0},
	};

	if (req->cryptlen < AES_BLOCK_SIZE)
		return -EINVAL;

	if (unlikely(!xts_ctx->fc || (req->cryptlen % AES_BLOCK_SIZE) != 0)) {
		struct skcipher_request *subreq = skcipher_request_ctx(req);

		*subreq = *req;
		skcipher_request_set_tfm(subreq, xts_ctx->fallback);
		return (modifier & CPACF_DECRYPT) ?
			crypto_skcipher_decrypt(subreq) :
			crypto_skcipher_encrypt(subreq);
	}

	ret = skcipher_walk_virt(&walk, req, false);
	if (ret)
		return ret;

	offset = xts_ctx->key_len & 0x20;
	memcpy(fxts_param.key + offset, xts_ctx->keys, xts_ctx->key_len);
	memcpy(fxts_param.tweak, req->iv, AES_BLOCK_SIZE);
	fxts_param.nap[0] = 0x01; /* initial alpha power (1, little-endian) */

	while ((nbytes = walk.nbytes) != 0) {
		/* only use complete blocks */
		n = nbytes & ~(AES_BLOCK_SIZE - 1);
		cpacf_km(xts_ctx->fc | modifier, fxts_param.key + offset,
			 walk.dst.virt.addr, walk.src.virt.addr, n);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	memzero_explicit(&fxts_param, sizeof(fxts_param));
	return ret;
}

static int fullxts_aes_encrypt(struct skcipher_request *req)
{
	return fullxts_aes_crypt(req, 0);
}

static int fullxts_aes_decrypt(struct skcipher_request *req)
{
	return fullxts_aes_crypt(req, CPACF_DECRYPT);
}

static struct skcipher_alg fullxts_aes_alg = {
	.base.cra_name		=	"xts(aes)",
	.base.cra_driver_name	=	"full-xts-aes-s390",
	.base.cra_priority	=	403,	/* aes-xts-s390 + 1 */
	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	=	AES_BLOCK_SIZE,
	.base.cra_ctxsize	=	sizeof(struct s390_xts_ctx),
	.base.cra_module	=	THIS_MODULE,
	.init			=	xts_fallback_init,
	.exit			=	xts_fallback_exit,
	.min_keysize		=	2 * AES_MIN_KEY_SIZE,
	.max_keysize		=	2 * AES_MAX_KEY_SIZE,
	.ivsize			=	AES_BLOCK_SIZE,
	.setkey			=	fullxts_aes_set_key,
	.encrypt		=	fullxts_aes_encrypt,
	.decrypt		=	fullxts_aes_decrypt,
};

static int ctr_aes_set_key(struct crypto_skcipher *tfm, const u8 *in_key,
			   unsigned int key_len)
{
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	unsigned long fc;

	/* Pick the correct function code based on the key length */
	fc = (key_len == 16) ? CPACF_KMCTR_AES_128 :
	     (key_len == 24) ? CPACF_KMCTR_AES_192 :
	     (key_len == 32) ? CPACF_KMCTR_AES_256 : 0;

	/* Check if the function code is available */
	sctx->fc = (fc && cpacf_test_func(&kmctr_functions, fc)) ? fc : 0;
	if (!sctx->fc)
		return setkey_fallback_skcipher(tfm, in_key, key_len);

	sctx->key_len = key_len;
	memcpy(sctx->key, in_key, key_len);
	return 0;
}

static unsigned int __ctrblk_init(u8 *ctrptr, u8 *iv, unsigned int nbytes)
{
	unsigned int i, n;

	/* only use complete blocks, max. PAGE_SIZE */
	memcpy(ctrptr, iv, AES_BLOCK_SIZE);
	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
	for (i = (n / AES_BLOCK_SIZE) - 1; i > 0; i--) {
		memcpy(ctrptr + AES_BLOCK_SIZE, ctrptr, AES_BLOCK_SIZE);
		crypto_inc(ctrptr + AES_BLOCK_SIZE, AES_BLOCK_SIZE);
		ctrptr += AES_BLOCK_SIZE;
	}
	return n;
}

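/*
 * Example: with nbytes = 64 and AES_BLOCK_SIZE = 16, __ctrblk_init()
 * fills ctrptr with the four consecutive counter blocks
 *
 *	iv, iv+1, iv+2, iv+3
 *
 * (big-endian increment via crypto_inc()) and returns n = 64, so one
 * KMCTR invocation can process all four blocks.
 */
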
static int ctr_aes_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s390_aes_ctx *sctx = crypto_skcipher_ctx(tfm);
	u8 buf[AES_BLOCK_SIZE], *ctrptr;
	struct skcipher_walk walk;
	unsigned int n, nbytes;
	int ret, locked;

	if (unlikely(!sctx->fc))
		return fallback_skcipher_crypt(sctx, req, 0);

	locked = mutex_trylock(&ctrblk_lock);

	ret = skcipher_walk_virt(&walk, req, false);
	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
		n = AES_BLOCK_SIZE;

		if (nbytes >= 2*AES_BLOCK_SIZE && locked)
			n = __ctrblk_init(ctrblk, walk.iv, nbytes);
		ctrptr = (n > AES_BLOCK_SIZE) ? ctrblk : walk.iv;
		cpacf_kmctr(sctx->fc, sctx->key, walk.dst.virt.addr,
			    walk.src.virt.addr, n, ctrptr);
		if (ctrptr == ctrblk)
			memcpy(walk.iv, ctrptr + n - AES_BLOCK_SIZE,
			       AES_BLOCK_SIZE);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, nbytes - n);
	}
	if (locked)
		mutex_unlock(&ctrblk_lock);
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
	if (nbytes) {
		memset(buf, 0, AES_BLOCK_SIZE);
		memcpy(buf, walk.src.virt.addr, nbytes);
		cpacf_kmctr(sctx->fc, sctx->key, buf, buf,
			    AES_BLOCK_SIZE, walk.iv);
		memcpy(walk.dst.virt.addr, buf, nbytes);
		crypto_inc(walk.iv, AES_BLOCK_SIZE);
		ret = skcipher_walk_done(&walk, 0);
	}

	return ret;
}

static struct skcipher_alg ctr_aes_alg = {
	.base.cra_name		=	"ctr(aes)",
	.base.cra_driver_name	=	"ctr-aes-s390",
	.base.cra_priority	=	402,	/* ecb-aes-s390 + 1 */
	.base.cra_flags		=	CRYPTO_ALG_NEED_FALLBACK,
	.base.cra_blocksize	=	1,
	.base.cra_ctxsize	=	sizeof(struct s390_aes_ctx),
	.base.cra_module	=	THIS_MODULE,
	.init			=	fallback_init_skcipher,
	.exit			=	fallback_exit_skcipher,
	.min_keysize		=	AES_MIN_KEY_SIZE,
	.max_keysize		=	AES_MAX_KEY_SIZE,
	.ivsize			=	AES_BLOCK_SIZE,
	.setkey			=	ctr_aes_set_key,
	.encrypt		=	ctr_aes_crypt,
	.decrypt		=	ctr_aes_crypt,
	.chunksize		=	AES_BLOCK_SIZE,
};

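/*
 * Usage sketch (illustrative): from the API's point of view CTR mode
 * is a stream cipher (cra_blocksize = 1), so arbitrary lengths work.
 * Allocation follows the ecb(aes) sketch above; only the final setup
 * differs, with a 16-byte initial counter block as the IV:
 *
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, 23, iv);
 *	crypto_skcipher_encrypt(req);
 *
 * "src_sg", "dst_sg" and "iv" are placeholders.
 */
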
static int gcm_aes_setkey(struct crypto_aead *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);

	switch (keylen) {
	case AES_KEYSIZE_128:
		ctx->fc = CPACF_KMA_GCM_AES_128;
		break;
	case AES_KEYSIZE_192:
		ctx->fc = CPACF_KMA_GCM_AES_192;
		break;
	case AES_KEYSIZE_256:
		ctx->fc = CPACF_KMA_GCM_AES_256;
		break;
	default:
		return -EINVAL;
	}

	memcpy(ctx->key, key, keylen);
	ctx->key_len = keylen;
	return 0;
}

static int gcm_aes_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void gcm_walk_start(struct gcm_sg_walk *gw, struct scatterlist *sg,
			   unsigned int len)
{
	memset(gw, 0, sizeof(*gw));
	gw->walk_bytes_remain = len;
	scatterwalk_start(&gw->walk, sg);
}

static inline unsigned int _gcm_sg_clamp_and_map(struct gcm_sg_walk *gw)
{
	struct scatterlist *nextsg;

	gw->walk_bytes = scatterwalk_clamp(&gw->walk, gw->walk_bytes_remain);
	while (!gw->walk_bytes) {
		nextsg = sg_next(gw->walk.sg);
		if (!nextsg)
			return 0;
		scatterwalk_start(&gw->walk, nextsg);
		gw->walk_bytes = scatterwalk_clamp(&gw->walk,
						   gw->walk_bytes_remain);
	}
	gw->walk_ptr = scatterwalk_map(&gw->walk);
	return gw->walk_bytes;
}

static inline void _gcm_sg_unmap_and_advance(struct gcm_sg_walk *gw,
					     unsigned int nbytes)
{
	gw->walk_bytes_remain -= nbytes;
	scatterwalk_unmap(gw->walk_ptr);
	scatterwalk_advance(&gw->walk, nbytes);
	scatterwalk_done(&gw->walk, 0, gw->walk_bytes_remain);
	gw->walk_ptr = NULL;
}

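/*
 * The walk helpers below implement a simple contract: *_walk_go()
 * returns a virtually contiguous window of at least "minbytesneeded"
 * bytes (gw->ptr, gw->nbytes), bouncing through gw->buf whenever a
 * scatterlist entry is shorter than that; *_walk_done() consumes
 * "bytesdone" bytes, either by advancing the scatterwalk or by
 * shifting/flushing the bounce buffer.
 */
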
static int gcm_in_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	int n;

	if (gw->buf_bytes && gw->buf_bytes >= minbytesneeded) {
		gw->ptr = gw->buf;
		gw->nbytes = gw->buf_bytes;
		goto out;
	}

	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!gw->buf_bytes && gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	while (1) {
		n = min(gw->walk_bytes, AES_BLOCK_SIZE - gw->buf_bytes);
		memcpy(gw->buf + gw->buf_bytes, gw->walk_ptr, n);
		gw->buf_bytes += n;
		_gcm_sg_unmap_and_advance(gw, n);
		if (gw->buf_bytes >= minbytesneeded) {
			gw->ptr = gw->buf;
			gw->nbytes = gw->buf_bytes;
			goto out;
		}
		if (!_gcm_sg_clamp_and_map(gw)) {
			gw->ptr = NULL;
			gw->nbytes = 0;
			goto out;
		}
	}

out:
	return gw->nbytes;
}

static int gcm_out_walk_go(struct gcm_sg_walk *gw, unsigned int minbytesneeded)
{
	if (gw->walk_bytes_remain == 0) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (!_gcm_sg_clamp_and_map(gw)) {
		gw->ptr = NULL;
		gw->nbytes = 0;
		goto out;
	}

	if (gw->walk_bytes >= minbytesneeded) {
		gw->ptr = gw->walk_ptr;
		gw->nbytes = gw->walk_bytes;
		goto out;
	}

	scatterwalk_unmap(gw->walk_ptr);
	gw->walk_ptr = NULL;

	gw->ptr = gw->buf;
	gw->nbytes = sizeof(gw->buf);

out:
	return gw->nbytes;
}

static int gcm_in_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		int n = gw->buf_bytes - bytesdone;

		if (n > 0) {
			memmove(gw->buf, gw->buf + bytesdone, n);
			gw->buf_bytes = n;
		} else
			gw->buf_bytes = 0;
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone);

	return bytesdone;
}

static int gcm_out_walk_done(struct gcm_sg_walk *gw, unsigned int bytesdone)
{
	int i, n;

	if (gw->ptr == NULL)
		return 0;

	if (gw->ptr == gw->buf) {
		for (i = 0; i < bytesdone; i += n) {
			if (!_gcm_sg_clamp_and_map(gw))
				return i;
			n = min(gw->walk_bytes, bytesdone - i);
			memcpy(gw->walk_ptr, gw->buf + i, n);
			_gcm_sg_unmap_and_advance(gw, n);
		}
	} else
		_gcm_sg_unmap_and_advance(gw, bytesdone);

	return bytesdone;
}

static int gcm_aes_crypt(struct aead_request *req, unsigned int flags)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct s390_aes_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int ivsize = crypto_aead_ivsize(tfm);
	unsigned int taglen = crypto_aead_authsize(tfm);
	unsigned int aadlen = req->assoclen;
	unsigned int pclen = req->cryptlen;
	int ret = 0;

	unsigned int n, len, in_bytes, out_bytes,
		     min_bytes, bytes, aad_bytes, pc_bytes;
	struct gcm_sg_walk gw_in, gw_out;
	u8 tag[GHASH_DIGEST_SIZE];

	struct {
		u32 _[3];		/* reserved */
		u32 cv;			/* Counter Value */
		u8 t[GHASH_DIGEST_SIZE];/* Tag */
		u8 h[AES_BLOCK_SIZE];	/* Hash-subkey */
		u64 taadl;		/* Total AAD Length */
		u64 tpcl;		/* Total Plain-/Cipher-text Length */
		u8 j0[GHASH_BLOCK_SIZE];/* initial counter value */
		u8 k[AES_MAX_KEY_SIZE];	/* Key */
	} param;

	/*
	 * encrypt
	 *   req->src: aad||plaintext
	 *   req->dst: aad||ciphertext||tag
	 * decrypt
	 *   req->src: aad||ciphertext||tag
	 *   req->dst: aad||plaintext, return 0 or -EBADMSG
	 * aad, plaintext and ciphertext may be empty.
	 */
	if (flags & CPACF_DECRYPT)
		pclen -= taglen;
	len = aadlen + pclen;

	memset(&param, 0, sizeof(param));
	param.cv = 1;
	param.taadl = aadlen * 8;
	param.tpcl = pclen * 8;
	memcpy(param.j0, req->iv, ivsize);
	*(u32 *)(param.j0 + ivsize) = 1;
	memcpy(param.k, ctx->key, ctx->key_len);

	gcm_walk_start(&gw_in, req->src, len);
	gcm_walk_start(&gw_out, req->dst, len);

	do {
		min_bytes = min_t(unsigned int,
				  aadlen > 0 ? aadlen : pclen, AES_BLOCK_SIZE);
		in_bytes = gcm_in_walk_go(&gw_in, min_bytes);
		out_bytes = gcm_out_walk_go(&gw_out, min_bytes);
		bytes = min(in_bytes, out_bytes);

		if (aadlen + pclen <= bytes) {
			/* last round of the kma loop */
			aad_bytes = aadlen;
			pc_bytes = pclen;
			flags |= CPACF_KMA_LAAD | CPACF_KMA_LPC;
		} else {
			if (aadlen <= bytes) {
				aad_bytes = aadlen;
				pc_bytes = (bytes - aadlen) &
					   ~(AES_BLOCK_SIZE - 1);
				flags |= CPACF_KMA_LAAD;
			} else {
				aad_bytes = bytes & ~(AES_BLOCK_SIZE - 1);
				pc_bytes = 0;
			}
		}

		if (aad_bytes > 0)
			memcpy(gw_out.ptr, gw_in.ptr, aad_bytes);

		cpacf_kma(ctx->fc | flags, &param,
			  gw_out.ptr + aad_bytes,
			  gw_in.ptr + aad_bytes, pc_bytes,
			  gw_in.ptr, aad_bytes);

		n = aad_bytes + pc_bytes;
		if (gcm_in_walk_done(&gw_in, n) != n)
			return -ENOMEM;
		if (gcm_out_walk_done(&gw_out, n) != n)
			return -ENOMEM;
		aadlen -= aad_bytes;
		pclen -= pc_bytes;
	} while (aadlen + pclen > 0);

	if (flags & CPACF_DECRYPT) {
		scatterwalk_map_and_copy(tag, req->src, len, taglen, 0);
		if (crypto_memneq(tag, param.t, taglen))
			ret = -EBADMSG;
	} else
		scatterwalk_map_and_copy(param.t, req->dst, len, taglen, 1);

	memzero_explicit(&param, sizeof(param));
	return ret;
}

static int gcm_aes_encrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_ENCRYPT);
}

static int gcm_aes_decrypt(struct aead_request *req)
{
	return gcm_aes_crypt(req, CPACF_DECRYPT);
}

static struct aead_alg gcm_aes_aead = {
	.setkey			= gcm_aes_setkey,
	.setauthsize		= gcm_aes_setauthsize,
	.encrypt		= gcm_aes_encrypt,
	.decrypt		= gcm_aes_decrypt,

	.ivsize			= GHASH_BLOCK_SIZE - sizeof(u32),
	.maxauthsize		= GHASH_DIGEST_SIZE,
	.chunksize		= AES_BLOCK_SIZE,

	.base = {
		.cra_blocksize		= 1,
		.cra_ctxsize		= sizeof(struct s390_aes_ctx),
		.cra_priority		= 900,
		.cra_name		= "gcm(aes)",
		.cra_driver_name	= "gcm-aes-s390",
		.cra_module		= THIS_MODULE,
	},
};

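/*
 * Usage sketch (illustrative, not part of this driver):
 *
 *	struct crypto_aead *tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *
 *	crypto_aead_setkey(tfm, key, AES_KEYSIZE_128);
 *	crypto_aead_setauthsize(tfm, 16);
 *	req = aead_request_alloc(tfm, GFP_KERNEL);
 *	aead_request_set_ad(req, assoclen);
 *	aead_request_set_crypt(req, src_sg, dst_sg, ptlen, iv);
 *	crypto_aead_encrypt(req);
 *
 * "key", "src_sg", "dst_sg", "assoclen", "ptlen" and the 12-byte "iv"
 * are placeholders; on encrypt, dst must leave room for the tag behind
 * the ciphertext, as described in gcm_aes_crypt().
 */
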
static struct crypto_alg *aes_s390_alg;
static struct skcipher_alg *aes_s390_skcipher_algs[5];
static int aes_s390_skciphers_num;
static struct aead_alg *aes_s390_aead_alg;

static int aes_s390_register_skcipher(struct skcipher_alg *alg)
{
	int ret;

	ret = crypto_register_skcipher(alg);
	if (!ret)
		aes_s390_skcipher_algs[aes_s390_skciphers_num++] = alg;
	return ret;
}

static void aes_s390_fini(void)
{
	if (aes_s390_alg)
		crypto_unregister_alg(aes_s390_alg);
	while (aes_s390_skciphers_num--)
		crypto_unregister_skcipher(aes_s390_skcipher_algs[aes_s390_skciphers_num]);
	if (ctrblk)
		free_page((unsigned long) ctrblk);

	if (aes_s390_aead_alg)
		crypto_unregister_aead(aes_s390_aead_alg);
}

static int __init aes_s390_init(void)
{
	int ret;

	/* Query available functions for KM, KMC, KMCTR and KMA */
	cpacf_query(CPACF_KM, &km_functions);
	cpacf_query(CPACF_KMC, &kmc_functions);
	cpacf_query(CPACF_KMCTR, &kmctr_functions);
	cpacf_query(CPACF_KMA, &kma_functions);

	if (cpacf_test_func(&km_functions, CPACF_KM_AES_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_192) ||
	    cpacf_test_func(&km_functions, CPACF_KM_AES_256)) {
		ret = crypto_register_alg(&aes_alg);
		if (ret)
			goto out_err;
		aes_s390_alg = &aes_alg;
		ret = aes_s390_register_skcipher(&ecb_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmc_functions, CPACF_KMC_AES_128) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_192) ||
	    cpacf_test_func(&kmc_functions, CPACF_KMC_AES_256)) {
		ret = aes_s390_register_skcipher(&cbc_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128_FULL) ||
	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256_FULL)) {
		ret = aes_s390_register_skcipher(&fullxts_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&km_functions, CPACF_KM_XTS_128) ||
	    cpacf_test_func(&km_functions, CPACF_KM_XTS_256)) {
		ret = aes_s390_register_skcipher(&xts_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_128) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_192) ||
	    cpacf_test_func(&kmctr_functions, CPACF_KMCTR_AES_256)) {
		ctrblk = (u8 *) __get_free_page(GFP_KERNEL);
		if (!ctrblk) {
			ret = -ENOMEM;
			goto out_err;
		}
		ret = aes_s390_register_skcipher(&ctr_aes_alg);
		if (ret)
			goto out_err;
	}

	if (cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_128) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_192) ||
	    cpacf_test_func(&kma_functions, CPACF_KMA_GCM_AES_256)) {
		ret = crypto_register_aead(&gcm_aes_aead);
		if (ret)
			goto out_err;
		aes_s390_aead_alg = &gcm_aes_aead;
	}

	return 0;
out_err:
	aes_s390_fini();
	return ret;
}

module_cpu_feature_match(S390_CPU_FEATURE_MSA, aes_s390_init);
module_exit(aes_s390_fini);

MODULE_ALIAS_CRYPTO("aes-all");

MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS("CRYPTO_INTERNAL");