// SPDX-License-Identifier: GPL-2.0-only
/*
 * AES GCM routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2012 International Business Machines Inc.
 */

#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/gcm.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
#include <linux/types.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"
static int gcm_aes_nx_set_key(struct crypto_aead *tfm,
			      const u8 *in_key,
			      unsigned int key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	switch (key_len) {
	case AES_KEYSIZE_128:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_128);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	case AES_KEYSIZE_192:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_192);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_192);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_192];
		break;
	case AES_KEYSIZE_256:
		NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_256);
		NX_CPB_SET_KEY_SIZE(csbcpb_aead, NX_KS_AES_256);
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_256];
		break;
	default:
		return -EINVAL;
	}

	/* The GCM CPB drives en/decryption; the GCA CPB hashes the AAD. */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	memcpy(csbcpb->cpb.aes_gcm.key, in_key, key_len);

	csbcpb_aead->cpb.hdr.mode = NX_MODE_AES_GCA;
	memcpy(csbcpb_aead->cpb.aes_gca.key, in_key, key_len);

	return 0;
}
static int gcm4106_aes_nx_set_key(struct crypto_aead *tfm,
				  const u8 *in_key,
				  unsigned int key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_aead_ctx(tfm);
	char *nonce = nx_ctx->priv.gcm.nonce;
	int rc;

	/* The last four bytes of the key blob are the RFC 4106 salt. */
	if (key_len < 4)
		return -EINVAL;
	key_len -= 4;

	rc = gcm_aes_nx_set_key(tfm, in_key, key_len);
	if (rc)
		return rc;

	memcpy(nonce, in_key + key_len, 4);
	return 0;
}
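/*
 * Illustrative note (RFC 4106, not driver-specific): the key blob handed to
 * setkey is the AES key followed by a 4-byte salt, e.g. a 20-byte blob for
 * AES-128:
 *
 *   byte  0..15  AES-128 key  ->  csbcpb->cpb.aes_gcm.key
 *   byte 16..19  nonce/salt   ->  nx_ctx->priv.gcm.nonce
 */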
static int gcm4106_aes_nx_setauthsize(struct crypto_aead *tfm,
				      unsigned int authsize)
{
	/* Only ICV lengths of 8, 12 and 16 octets are defined for RFC 4106. */
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int nx_gca(struct nx_crypto_ctx *nx_ctx,
		  struct aead_request *req,
		  u8 *out,
		  unsigned int assoclen)
{
	int rc;
	struct nx_csbcpb *csbcpb_aead = nx_ctx->csbcpb_aead;
	struct scatter_walk walk;
	struct nx_sg *nx_sg = nx_ctx->in_sg;
	unsigned int nbytes = assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

	/* AAD of one block or less is copied straight through; no GCA pass
	 * is needed. */
	if (nbytes <= AES_BLOCK_SIZE) {
		scatterwalk_start(&walk, req->src);
		scatterwalk_copychunks(out, &walk, nbytes, SCATTERWALK_FROM_SG);
		scatterwalk_done(&walk, SCATTERWALK_FROM_SG, 0);
		return 0;
	}
	NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);
	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->src, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb_aead) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb_aead) &= ~NX_FDM_INTERMEDIATE;

		/*
		 * (head - end) is negative on purpose: the vio_pfo_op
		 * convention marks a scatter/gather-list operand with a
		 * negative byte length.
		 */
		nx_ctx->op_aead.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op_aead,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			return rc;

		/* Chain this pass's partial tag into the next one. */
		memcpy(csbcpb_aead->cpb.aes_gca.in_pat,
		       csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);
		NX_CPB_FDM(csbcpb_aead) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

	memcpy(out, csbcpb_aead->cpb.aes_gca.out_pat, AES_BLOCK_SIZE);

	return rc;
}
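/*
 * Worked example of the chunking bounds in nx_gca() above, with
 * hypothetical limits: if NX_PAGE_SIZE were 4096 and max_sg_len worked out
 * to 32 entries, each pass would cover at most
 * min(databytelen, 4096 * (32 - 1)) = 126976 bytes of AAD, so roughly
 * 300 KiB of associated data would be hashed in three chained GCA calls.
 */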
static int gmac(struct aead_request *req, const u8 *iv, unsigned int assoclen)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *nx_sg;
	unsigned int nbytes = assoclen;
	unsigned int processed = 0, to_process;
	unsigned int max_sg_len;

	/* Set GMAC mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GMAC;

	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;

	/* page_limit: number of sg entries that fit on one page */
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	/* Copy IV */
	memcpy(csbcpb->cpb.aes_gcm.iv_or_cnt, iv, AES_BLOCK_SIZE);
	do {
		/*
		 * to_process: the data chunk to process in this update.
		 * This value is bound by sg list limits.
		 */
		to_process = min_t(u64, nbytes - processed,
				   nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));

		nx_sg = nx_walk_and_build(nx_ctx->in_sg, max_sg_len,
					  req->src, processed, &to_process);

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		nx_ctx->op.inlen = (nx_ctx->in_sg - nx_sg)
					* sizeof(struct nx_sg);

		/* GMAC has no payload: only the AAD length is non-zero. */
		csbcpb->cpb.aes_gcm.bit_length_data = 0;
		csbcpb->cpb.aes_gcm.bit_length_aad = 8 * nbytes;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
		       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
		       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(assoclen, &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);

out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;
	return rc;
}
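/*
 * Note: GMAC is GCM restricted to authentication only (empty ciphertext,
 * per NIST SP 800-38D), which is why gmac() pins bit_length_data to 0 and
 * lets only bit_length_aad contribute to the tag.
 */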
static int gcm_empty(struct aead_request *req, const u8 *iv, int enc)
{
	int rc;
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	char out[AES_BLOCK_SIZE];
	struct nx_sg *in_sg, *out_sg;
	int len;

	/* For scenarios where the input message is zero length, AES CTR mode
	 * may be used. Set the source data to be a single block (16B) of all
	 * zeros, and set the input IV value to be the same as the GMAC IV
	 * value. - nx_wb 4.8.1.3 */

	/* Change to ECB mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
	memcpy(csbcpb->cpb.aes_ecb.key, csbcpb->cpb.aes_gcm.key,
	       sizeof(csbcpb->cpb.aes_ecb.key));
	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
	len = AES_BLOCK_SIZE;

	/* Encrypt the counter/IV */
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) iv,
				 &len, nx_ctx->ap->sglen);
	if (len != AES_BLOCK_SIZE)
		return -EINVAL;

	len = sizeof(out);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) out, &len,
				  nx_ctx->ap->sglen);
	if (len != sizeof(out))
		return -EINVAL;

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

	/* Copy out the auth tag */
	memcpy(csbcpb->cpb.aes_gcm.out_pat_or_mac, out,
	       crypto_aead_authsize(crypto_aead_reqtfm(req)));
out:
	/* Restore GCM mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_GCM;

	/*
	 * The ECB key occupies the same CPB region as the GCM AAD and
	 * counter, so it's safe to just fill it with zeroes.
	 */
	memset(csbcpb->cpb.aes_ecb.key, 0, sizeof(csbcpb->cpb.aes_ecb.key));

	return rc;
}
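/*
 * Why a single ECB encryption suffices in gcm_empty(): with no plaintext
 * and no AAD, GHASH runs over an empty input and yields all zeroes, so the
 * GCM tag collapses to E_K(J0), the block cipher applied once to the
 * initial counter block. Encrypting the IV/counter in ECB mode computes
 * exactly that.
 */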
static int gcm_aes_nx_crypt(struct aead_request *req, int enc,
			    unsigned int assoclen)
{
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	unsigned int nbytes = req->cryptlen;
	unsigned int processed = 0, to_process;
	unsigned long irq_flags;
	int rc = -EINVAL;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	/* initialize the counter */
	*(u32 *)&rctx->iv[NX_GCM_CTR_OFFSET] = 1;
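	/*
	 * rctx->iv now holds J0 from the GCM spec: the caller's 96-bit IV
	 * in bytes 0..11 and the 32-bit block counter in bytes 12..15,
	 * pre-set to 1 (i.e. IV || 0^31 || 1).
	 */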
	if (nbytes == 0) {
		if (assoclen == 0)
			rc = gcm_empty(req, rctx->iv, enc);
		else
			rc = gmac(req, rctx->iv, assoclen);
		if (rc)
			goto out;
		else
			goto mac;
	}

	/* Process associated data */
	csbcpb->cpb.aes_gcm.bit_length_aad = assoclen * 8;
	if (assoclen) {
		rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad,
			    assoclen);
		if (rc)
			goto out;
	}
	/* Set flags for encryption */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_CONTINUATION;
	if (enc) {
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	} else {
		NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;
		/* The trailing tag is not payload; drop it from nbytes. */
		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
	}
	do {
		to_process = nbytes - processed;

		csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
		rc = nx_build_sg_lists(nx_ctx, rctx->iv, req->dst,
				       req->src, &to_process,
				       processed + req->assoclen,
				       csbcpb->cpb.aes_gcm.iv_or_cnt);
		if (rc)
			goto out;

		if ((to_process + processed) < nbytes)
			NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		else
			NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		/* Chain the counter and partial tag into the next pass. */
		memcpy(rctx->iv, csbcpb->cpb.aes_gcm.out_cnt, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_pat_or_aad,
		       csbcpb->cpb.aes_gcm.out_pat_or_mac, AES_BLOCK_SIZE);
		memcpy(csbcpb->cpb.aes_gcm.in_s0,
		       csbcpb->cpb.aes_gcm.out_s0, AES_BLOCK_SIZE);

		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		atomic_inc(&(nx_ctx->stats->aes_ops));
		atomic64_add(be32_to_cpu(csbcpb->csb.processed_byte_count),
			     &(nx_ctx->stats->aes_bytes));

		processed += to_process;
	} while (processed < nbytes);
mac:
	if (enc) {
		/* copy out the auth tag */
		scatterwalk_map_and_copy(
			csbcpb->cpb.aes_gcm.out_pat_or_mac,
			req->dst, req->assoclen + nbytes,
			crypto_aead_authsize(crypto_aead_reqtfm(req)),
			SCATTERWALK_TO_SG);
	} else {
		u8 *itag = nx_ctx->priv.gcm.iauth_tag;
		u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;

		scatterwalk_map_and_copy(
			itag, req->src, req->assoclen + nbytes,
			crypto_aead_authsize(crypto_aead_reqtfm(req)),
			SCATTERWALK_FROM_SG);
		/* Constant-time compare of received vs. computed tag. */
		rc = crypto_memneq(itag, otag,
				crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
		     -EBADMSG : 0;
	}
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
static int gcm_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;

	memcpy(iv, req->iv, GCM_AES_IV_SIZE);

	return gcm_aes_nx_crypt(req, 1, req->assoclen);
}
static int gcm_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;

	memcpy(iv, req->iv, GCM_AES_IV_SIZE);

	return gcm_aes_nx_crypt(req, 0, req->assoclen);
}
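/*
 * RFC 4106 IV construction (illustrative): the 16-byte counter block fed
 * to GCM is assembled from the 4-byte salt captured at setkey time, the
 * 8-byte per-request IV, and the 32-bit block counter:
 *
 *   | salt (4) | req->iv (8) | counter (4) |
 */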
static int gcm4106_aes_nx_encrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_aes_nx_crypt(req, 1, req->assoclen - 8);
}
static int gcm4106_aes_nx_decrypt(struct aead_request *req)
{
	struct nx_crypto_ctx *nx_ctx =
		crypto_aead_ctx(crypto_aead_reqtfm(req));
	struct nx_gcm_rctx *rctx = aead_request_ctx(req);
	char *iv = rctx->iv;
	char *nonce = nx_ctx->priv.gcm.nonce;

	memcpy(iv, nonce, NX_GCM4106_NONCE_LEN);
	memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8);

	if (req->assoclen < 8)
		return -EINVAL;

	return gcm_aes_nx_crypt(req, 0, req->assoclen - 8);
}
struct aead_alg nx_gcm_aes_alg = {
	.base = {
		.cra_name        = "gcm(aes)",
		.cra_driver_name = "gcm-aes-nx",
		.cra_priority    = 300,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_gcm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = GCM_AES_IV_SIZE,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = gcm_aes_nx_set_key,
	.encrypt     = gcm_aes_nx_encrypt,
	.decrypt     = gcm_aes_nx_decrypt,
};
struct aead_alg nx_gcm4106_aes_alg = {
	.base = {
		.cra_name        = "rfc4106(gcm(aes))",
		.cra_driver_name = "rfc4106-gcm-aes-nx",
		.cra_priority    = 300,
		.cra_blocksize   = 1,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_module      = THIS_MODULE,
	},
	.init        = nx_crypto_ctx_aes_gcm_init,
	.exit        = nx_crypto_ctx_aead_exit,
	.ivsize      = GCM_RFC4106_IV_SIZE,
	.maxauthsize = AES_BLOCK_SIZE,
	.setkey      = gcm4106_aes_nx_set_key,
	.setauthsize = gcm4106_aes_nx_setauthsize,
	.encrypt     = gcm4106_aes_nx_encrypt,
	.decrypt     = gcm4106_aes_nx_decrypt,
};
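
/*
 * Illustrative sketch, not part of this driver: how a kernel caller might
 * exercise the "gcm(aes)" transform registered above through the generic
 * AEAD API. The function name and the assumption that sg[] is laid out as
 * [AAD | plaintext | room for the 16-byte default tag] are hypothetical;
 * the crypto API calls themselves are the standard kernel interfaces.
 */
static int __maybe_unused gcm_aes_nx_example(struct scatterlist *sg,
					     unsigned int assoclen,
					     unsigned int ptlen,
					     const u8 *key, unsigned int keylen,
					     const u8 iv[GCM_AES_IV_SIZE])
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	DECLARE_CRYPTO_WAIT(wait);
	int rc;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = crypto_aead_setkey(tfm, key, keylen);
	if (rc)
		goto free_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		rc = -ENOMEM;
		goto free_tfm;
	}

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, assoclen);
	/* In-place: dst == src; the tag lands right after the ciphertext. */
	aead_request_set_crypt(req, sg, sg, ptlen, (u8 *)iv);

	rc = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
free_tfm:
	crypto_free_aead(tfm);
	return rc;
}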