// SPDX-License-Identifier: GPL-2.0-or-later
/* Algorithms supported by virtio crypto device
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 */

#include <linux/scatterlist.h>
#include <crypto/algapi.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <crypto/scatterwalk.h>
#include <linux/atomic.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"
struct virtio_crypto_skcipher_ctx {
	struct crypto_engine_ctx enginectx;
	struct virtio_crypto *vcrypto;
	struct crypto_skcipher *tfm;

	struct virtio_crypto_sym_session_info enc_sess_info;
	struct virtio_crypto_sym_session_info dec_sess_info;
};
struct virtio_crypto_sym_request {
	struct virtio_crypto_request base;

	/* Cipher or aead */
	uint32_t type;
	struct virtio_crypto_skcipher_ctx *skcipher_ctx;
	struct skcipher_request *skcipher_req;
	uint8_t *iv;
	/* Encryption? */
	bool encrypt;
};
struct virtio_crypto_algo {
	uint32_t algonum;
	uint32_t service;
	unsigned int active_devs;
	struct skcipher_alg algo;
};
/*
 * The algs_lock protects the below global virtio_crypto_algs array
 * (in particular each entry's active_devs count) and crypto
 * algorithm registration.
 */
static DEFINE_MUTEX(algs_lock);
static void virtio_crypto_skcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct skcipher_request *req,
	int err);
static void virtio_crypto_dataq_sym_callback
		(struct virtio_crypto_request *vc_req, int len)
{
	struct virtio_crypto_sym_request *vc_sym_req =
		container_of(vc_req, struct virtio_crypto_sym_request, base);
	struct skcipher_request *ablk_req;
	int error;

	/* Finish the encrypt or decrypt process */
	if (vc_sym_req->type == VIRTIO_CRYPTO_SYM_OP_CIPHER) {
		switch (vc_req->status) {
		case VIRTIO_CRYPTO_OK:
			error = 0;
			break;
		case VIRTIO_CRYPTO_INVSESS:
		case VIRTIO_CRYPTO_ERR:
			error = -EINVAL;
			break;
		case VIRTIO_CRYPTO_BADMSG:
			error = -EBADMSG;
			break;
		default:
			error = -EIO;
			break;
		}
		ablk_req = vc_sym_req->skcipher_req;
		virtio_crypto_skcipher_finalize_req(vc_sym_req,
						    ablk_req, error);
	}
}
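
/*
 * Sum the byte length of an entire scatterlist; used below to
 * size-check the destination buffer before building a data request.
 */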
static u64 virtio_crypto_alg_sg_nents_length(struct scatterlist *sg)
{
	u64 total = 0;

	for (total = 0; sg; sg = sg_next(sg))
		total += sg->length;

	return total;
}
static int
virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
{
	switch (key_len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		*alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
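
/*
 * Create one symmetric-cipher session on the device.  The request is
 * made up of three scatterlist entries on the control virtqueue: the
 * control header (out), the raw key (out) and the device-written input
 * block carrying the status and the new session id (in).  Completion
 * is polled because the hypervisor handles control requests
 * synchronously.
 */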
static int virtio_crypto_alg_skcipher_init_session(
		struct virtio_crypto_skcipher_ctx *ctx,
		uint32_t alg, const uint8_t *key,
		unsigned int keylen,
		int encrypt)
{
	struct scatterlist outhdr, key_sg, inhdr, *sgs[3];
	unsigned int tmp;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int op = encrypt ? VIRTIO_CRYPTO_OP_ENCRYPT : VIRTIO_CRYPTO_OP_DECRYPT;
	int err;
	unsigned int num_out = 0, num_in = 0;

	/*
	 * Avoid DMA from the stack: use a dynamically-allocated
	 * buffer for the key.
	 */
	uint8_t *cipher_key = kmemdup(key, keylen, GFP_ATOMIC);

	if (!cipher_key)
		return -ENOMEM;
	spin_lock(&vcrypto->ctrl_lock);
	/* Pad ctrl header */
	vcrypto->ctrl.header.opcode =
		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_CREATE_SESSION);
	vcrypto->ctrl.header.algo = cpu_to_le32(alg);
	/* Set the default dataqueue id to 0 */
	vcrypto->ctrl.header.queue_id = 0;

	vcrypto->input.status = cpu_to_le32(VIRTIO_CRYPTO_ERR);
	/* Pad cipher's parameters */
	vcrypto->ctrl.u.sym_create_session.op_type =
		cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.algo =
		vcrypto->ctrl.header.algo;
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.keylen =
		cpu_to_le32(keylen);
	vcrypto->ctrl.u.sym_create_session.u.cipher.para.op =
		cpu_to_le32(op);

	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
	sgs[num_out++] = &outhdr;

	/* Set key */
	sg_init_one(&key_sg, cipher_key, keylen);
	sgs[num_out++] = &key_sg;

	/* Return status and session id back */
	sg_init_one(&inhdr, &vcrypto->input, sizeof(vcrypto->input));
	sgs[num_out + num_in++] = &inhdr;

	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
				num_in, vcrypto, GFP_ATOMIC);
	if (err < 0) {
		spin_unlock(&vcrypto->ctrl_lock);
		kfree_sensitive(cipher_key);
		return err;
	}
	virtqueue_kick(vcrypto->ctrl_vq);
	/*
	 * Trapping into the hypervisor, so the request should be
	 * handled immediately.
	 */
	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
	       !virtqueue_is_broken(vcrypto->ctrl_vq))
		cpu_relax();

	if (le32_to_cpu(vcrypto->input.status) != VIRTIO_CRYPTO_OK) {
		spin_unlock(&vcrypto->ctrl_lock);
		pr_err("virtio_crypto: Create session failed status: %u\n",
			le32_to_cpu(vcrypto->input.status));
		kfree_sensitive(cipher_key);
		return -EINVAL;
	}

	if (encrypt)
		ctx->enc_sess_info.session_id =
			le64_to_cpu(vcrypto->input.session_id);
	else
		ctx->dec_sess_info.session_id =
			le64_to_cpu(vcrypto->input.session_id);

	spin_unlock(&vcrypto->ctrl_lock);

	kfree_sensitive(cipher_key);
	return 0;
}
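
/*
 * Tear down one session on the device.  The destroy request needs only
 * two scatterlist entries: the control header carrying the session id
 * (out) and a status byte written back by the device (in).
 */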
static int virtio_crypto_alg_skcipher_close_session(
		struct virtio_crypto_skcipher_ctx *ctx,
		int encrypt)
{
	struct scatterlist outhdr, status_sg, *sgs[2];
	unsigned int tmp;
	struct virtio_crypto_destroy_session_req *destroy_session;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	int err;
	unsigned int num_out = 0, num_in = 0;
	spin_lock(&vcrypto->ctrl_lock);
	vcrypto->ctrl_status.status = VIRTIO_CRYPTO_ERR;
	/* Pad ctrl header */
	vcrypto->ctrl.header.opcode =
		cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DESTROY_SESSION);
	/* Set the default virtqueue id to 0 */
	vcrypto->ctrl.header.queue_id = 0;

	destroy_session = &vcrypto->ctrl.u.destroy_session;

	if (encrypt)
		destroy_session->session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
	else
		destroy_session->session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);

	sg_init_one(&outhdr, &vcrypto->ctrl, sizeof(vcrypto->ctrl));
	sgs[num_out++] = &outhdr;

	/* Return status and session id back */
	sg_init_one(&status_sg, &vcrypto->ctrl_status.status,
		sizeof(vcrypto->ctrl_status.status));
	sgs[num_out + num_in++] = &status_sg;

	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, num_out,
			num_in, vcrypto, GFP_ATOMIC);
	if (err < 0) {
		spin_unlock(&vcrypto->ctrl_lock);
		return err;
	}
	virtqueue_kick(vcrypto->ctrl_vq);
	while (!virtqueue_get_buf(vcrypto->ctrl_vq, &tmp) &&
	       !virtqueue_is_broken(vcrypto->ctrl_vq))
		cpu_relax();

	if (vcrypto->ctrl_status.status != VIRTIO_CRYPTO_OK) {
		spin_unlock(&vcrypto->ctrl_lock);
		pr_err("virtio_crypto: Close session failed status: %u, session_id: 0x%llx\n",
			vcrypto->ctrl_status.status,
			destroy_session->session_id);

		return -EINVAL;
	}

	spin_unlock(&vcrypto->ctrl_lock);

	return 0;
}
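
/*
 * The virtio crypto device keeps separate sessions for the two cipher
 * directions, so a single setkey() creates both an encryption and a
 * decryption session for the same key, and a failure on the second
 * unwinds the first.
 */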
static int virtio_crypto_alg_skcipher_init_sessions(
		struct virtio_crypto_skcipher_ctx *ctx,
		const uint8_t *key, unsigned int keylen)
{
	uint32_t alg;
	int ret;
	struct virtio_crypto *vcrypto = ctx->vcrypto;

	if (keylen > vcrypto->max_cipher_key_len) {
		pr_err("virtio_crypto: the key is too long\n");
		return -EINVAL;
	}

	if (virtio_crypto_alg_validate_key(keylen, &alg))
		return -EINVAL;

	/* Create encryption session */
	ret = virtio_crypto_alg_skcipher_init_session(ctx,
			alg, key, keylen, 1);
	if (ret)
		return ret;
	/* Create decryption session */
	ret = virtio_crypto_alg_skcipher_init_session(ctx,
			alg, key, keylen, 0);
	if (ret) {
		virtio_crypto_alg_skcipher_close_session(ctx, 1);
		return ret;
	}
	return 0;
}

/* Note: kernel crypto API implementation */
static int virtio_crypto_skcipher_setkey(struct crypto_skcipher *tfm,
					 const uint8_t *key,
					 unsigned int keylen)
{
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	uint32_t alg;
	int ret;

	ret = virtio_crypto_alg_validate_key(keylen, &alg);
	if (ret)
		return ret;

	if (!ctx->vcrypto) {
		/* New key */
		int node = virtio_crypto_get_current_node();
		struct virtio_crypto *vcrypto =
				      virtcrypto_get_dev_node(node,
				      VIRTIO_CRYPTO_SERVICE_CIPHER, alg);
		if (!vcrypto) {
			pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
			return -ENODEV;
		}

		ctx->vcrypto = vcrypto;
	} else {
		/* Rekeying: close the previously created sessions first */
		virtio_crypto_alg_skcipher_close_session(ctx, 1);
		virtio_crypto_alg_skcipher_close_session(ctx, 0);
	}

	ret = virtio_crypto_alg_skcipher_init_sessions(ctx, key, keylen);
	if (ret) {
		virtcrypto_dev_put(ctx->vcrypto);
		ctx->vcrypto = NULL;

		return ret;
	}

	return 0;
}
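
/*
 * Build and submit one data-plane request.  The descriptor chain is:
 * device-readable: the op header, the IV, then all source data sgs;
 * device-writable: all destination data sgs, then the one-byte status
 * the device uses to report the result.  That accounts for the "why 3"
 * comment below: outhdr + iv + status on top of the data scatterlists.
 */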
static int
__virtio_crypto_skcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
		struct skcipher_request *req,
		struct data_queue *data_vq)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct virtio_crypto_skcipher_ctx *ctx = vc_sym_req->skcipher_ctx;
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	unsigned int ivsize = crypto_skcipher_ivsize(tfm);
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	struct virtio_crypto_op_data_req *req_data;
	int src_nents, dst_nents;
	int err;
	unsigned long flags;
	struct scatterlist outhdr, iv_sg, status_sg, **sgs;
	u64 dst_len;
	unsigned int num_out = 0, num_in = 0;
	int sg_total;
	uint8_t *iv;
	struct scatterlist *sg;
	src_nents = sg_nents_for_len(req->src, req->cryptlen);
	if (src_nents < 0) {
		pr_err("Invalid number of src SG.\n");
		return src_nents;
	}

	dst_nents = sg_nents(req->dst);

	pr_debug("virtio_crypto: Number of sgs (src_nents: %d, dst_nents: %d)\n",
			src_nents, dst_nents);

	/* Why 3?  outhdr + iv + inhdr */
	sg_total = src_nents + dst_nents + 3;
	sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_KERNEL,
				dev_to_node(&vcrypto->vdev->dev));
	if (!sgs)
		return -ENOMEM;

	req_data = kzalloc_node(sizeof(*req_data), GFP_KERNEL,
				dev_to_node(&vcrypto->vdev->dev));
	if (!req_data) {
		kfree(sgs);
		return -ENOMEM;
	}
	vc_req->req_data = req_data;
	vc_sym_req->type = VIRTIO_CRYPTO_SYM_OP_CIPHER;
	/* Head of operation */
	if (vc_sym_req->encrypt) {
		req_data->header.session_id =
			cpu_to_le64(ctx->enc_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_ENCRYPT);
	} else {
		req_data->header.session_id =
			cpu_to_le64(ctx->dec_sess_info.session_id);
		req_data->header.opcode =
			cpu_to_le32(VIRTIO_CRYPTO_CIPHER_DECRYPT);
	}
	req_data->u.sym_req.op_type = cpu_to_le32(VIRTIO_CRYPTO_SYM_OP_CIPHER);
	req_data->u.sym_req.u.cipher.para.iv_len = cpu_to_le32(ivsize);
	req_data->u.sym_req.u.cipher.para.src_data_len =
			cpu_to_le32(req->cryptlen);

	dst_len = virtio_crypto_alg_sg_nents_length(req->dst);
	if (unlikely(dst_len > U32_MAX)) {
		pr_err("virtio_crypto: The dst_len is beyond U32_MAX\n");
		err = -EINVAL;
		goto free;
	}

	dst_len = min_t(unsigned int, req->cryptlen, dst_len);
	pr_debug("virtio_crypto: src_len: %u, dst_len: %llu\n",
			req->cryptlen, dst_len);

	if (unlikely(req->cryptlen + dst_len + ivsize +
		sizeof(vc_req->status) > vcrypto->max_size)) {
		pr_err("virtio_crypto: The length is too big\n");
		err = -EINVAL;
		goto free;
	}

	req_data->u.sym_req.u.cipher.para.dst_data_len =
			cpu_to_le32((uint32_t)dst_len);
	/* Outhdr */
	sg_init_one(&outhdr, req_data, sizeof(*req_data));
	sgs[num_out++] = &outhdr;

	/* IV */

	/*
	 * Avoid DMA from the stack: use a dynamically-allocated
	 * buffer for the IV.
	 */
	iv = kzalloc_node(ivsize, GFP_ATOMIC,
				dev_to_node(&vcrypto->vdev->dev));
	if (!iv) {
		err = -ENOMEM;
		goto free;
	}
	memcpy(iv, req->iv, ivsize);
	if (!vc_sym_req->encrypt)
		scatterwalk_map_and_copy(req->iv, req->src,
					 req->cryptlen - AES_BLOCK_SIZE,
					 AES_BLOCK_SIZE, 0);

	sg_init_one(&iv_sg, iv, ivsize);
	sgs[num_out++] = &iv_sg;
	vc_sym_req->iv = iv;

	/* Source data */
	for (sg = req->src; src_nents; sg = sg_next(sg), src_nents--)
		sgs[num_out++] = sg;

	/* Destination data */
	for (sg = req->dst; sg; sg = sg_next(sg))
		sgs[num_out + num_in++] = sg;

	/* Status */
	sg_init_one(&status_sg, &vc_req->status, sizeof(vc_req->status));
	sgs[num_out + num_in++] = &status_sg;

	vc_req->sgs = sgs;

	spin_lock_irqsave(&data_vq->lock, flags);
	err = virtqueue_add_sgs(data_vq->vq, sgs, num_out,
				num_in, vc_req, GFP_ATOMIC);
	virtqueue_kick(data_vq->vq);
	spin_unlock_irqrestore(&data_vq->lock, flags);
	if (unlikely(err < 0))
		goto free_iv;

	return 0;

free_iv:
	kfree_sensitive(iv);
free:
	kfree_sensitive(req_data);
	kfree(sgs);
	return err;
}
static int virtio_crypto_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				skcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	if (!req->cryptlen)
		return 0;
	if (req->cryptlen % AES_BLOCK_SIZE)
		return -EINVAL;

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->skcipher_ctx = ctx;
	vc_sym_req->skcipher_req = req;
	vc_sym_req->encrypt = true;

	return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
}
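
/*
 * Note that encrypt()/decrypt() only validate the request and hand it
 * to the crypto engine; the engine later invokes
 * virtio_crypto_skcipher_crypt_req() to actually build and queue the
 * virtio request on the data virtqueue.
 */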
static int virtio_crypto_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(req);
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(atfm);
	struct virtio_crypto_sym_request *vc_sym_req =
				skcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct virtio_crypto *vcrypto = ctx->vcrypto;
	/* Use the first data virtqueue as default */
	struct data_queue *data_vq = &vcrypto->data_vq[0];

	if (!req->cryptlen)
		return 0;
	if (req->cryptlen % AES_BLOCK_SIZE)
		return -EINVAL;

	vc_req->dataq = data_vq;
	vc_req->alg_cb = virtio_crypto_dataq_sym_callback;
	vc_sym_req->skcipher_ctx = ctx;
	vc_sym_req->skcipher_req = req;
	vc_sym_req->encrypt = false;

	return crypto_transfer_skcipher_request_to_engine(data_vq->engine, req);
}
static int virtio_crypto_skcipher_init(struct crypto_skcipher *tfm)
{
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct virtio_crypto_sym_request));
	ctx->tfm = tfm;

	ctx->enginectx.op.do_one_request = virtio_crypto_skcipher_crypt_req;
	ctx->enginectx.op.prepare_request = NULL;
	ctx->enginectx.op.unprepare_request = NULL;
	return 0;
}
static void virtio_crypto_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct virtio_crypto_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (!ctx->vcrypto)
		return;

	virtio_crypto_alg_skcipher_close_session(ctx, 1);
	virtio_crypto_alg_skcipher_close_session(ctx, 0);
	virtcrypto_dev_put(ctx->vcrypto);
	ctx->vcrypto = NULL;
}
int virtio_crypto_skcipher_crypt_req(
	struct crypto_engine *engine, void *vreq)
{
	struct skcipher_request *req = container_of(vreq, struct skcipher_request, base);
	struct virtio_crypto_sym_request *vc_sym_req =
				skcipher_request_ctx(req);
	struct virtio_crypto_request *vc_req = &vc_sym_req->base;
	struct data_queue *data_vq = vc_req->dataq;
	int ret;

	ret = __virtio_crypto_skcipher_do_req(vc_sym_req, req, data_vq);
	if (ret < 0)
		return ret;

	virtqueue_kick(data_vq->vq);

	return 0;
}
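
/*
 * CBC chaining: the crypto API expects req->iv to hold the IV for a
 * follow-up request.  For decryption the last ciphertext block was
 * already saved before submission (the in-place case may overwrite
 * req->src); for encryption it is copied here from the freshly written
 * destination, once the device has completed the request.
 */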
static void virtio_crypto_skcipher_finalize_req(
	struct virtio_crypto_sym_request *vc_sym_req,
	struct skcipher_request *req,
	int err)
{
	if (vc_sym_req->encrypt)
		scatterwalk_map_and_copy(req->iv, req->dst,
					 req->cryptlen - AES_BLOCK_SIZE,
					 AES_BLOCK_SIZE, 0);
	kfree_sensitive(vc_sym_req->iv);
	virtcrypto_clear_request(&vc_sym_req->base);

	crypto_finalize_skcipher_request(vc_sym_req->base.dataq->engine,
					 req, err);
}
static struct virtio_crypto_algo virtio_crypto_algs[] = { {
	.algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
	.service = VIRTIO_CRYPTO_SERVICE_CIPHER,
	.algo = {
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "virtio_crypto_aes_cbc",
		.base.cra_priority	= 150,
		.base.cra_flags		= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_ALLOCATES_MEMORY,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct virtio_crypto_skcipher_ctx),
		.base.cra_module	= THIS_MODULE,
		.init			= virtio_crypto_skcipher_init,
		.exit			= virtio_crypto_skcipher_exit,
		.setkey			= virtio_crypto_skcipher_setkey,
		.decrypt		= virtio_crypto_skcipher_decrypt,
		.encrypt		= virtio_crypto_skcipher_encrypt,
		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.ivsize			= AES_BLOCK_SIZE,
	},
} };
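
/*
 * Illustration only (not part of this driver): a kernel consumer
 * reaches this driver through the generic skcipher API once the
 * algorithm above is registered, selected by priority or by the
 * "virtio_crypto_aes_cbc" driver name.  A minimal sketch, with error
 * handling and completion waiting (e.g. crypto_wait_req()) elided;
 * key/src_sg/dst_sg/len/iv/done_cb are placeholder names:
 *
 *	struct crypto_skcipher *tfm =
 *		crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	struct skcipher_request *req =
 *		skcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_256);
 *	skcipher_request_set_callback(req, 0, done_cb, NULL);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, len, iv);
 *	crypto_skcipher_encrypt(req);	(may return -EINPROGRESS)
 *
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */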
int virtio_crypto_algs_register(struct virtio_crypto *vcrypto)
{
	int ret = 0;
	int i = 0;

	mutex_lock(&algs_lock);

	for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {

		uint32_t service = virtio_crypto_algs[i].service;
		uint32_t algonum = virtio_crypto_algs[i].algonum;

		if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
			continue;

		if (virtio_crypto_algs[i].active_devs == 0) {
			ret = crypto_register_skcipher(&virtio_crypto_algs[i].algo);
			if (ret)
				goto unlock;
		}

		virtio_crypto_algs[i].active_devs++;
		dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
			 virtio_crypto_algs[i].algo.base.cra_name);
	}

unlock:
	mutex_unlock(&algs_lock);
	return ret;
}
void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto)
{
	int i = 0;

	mutex_lock(&algs_lock);

	for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {

		uint32_t service = virtio_crypto_algs[i].service;
		uint32_t algonum = virtio_crypto_algs[i].algonum;

		if (virtio_crypto_algs[i].active_devs == 0 ||
		    !virtcrypto_algo_is_supported(vcrypto, service, algonum))
			continue;

		if (virtio_crypto_algs[i].active_devs == 1)
			crypto_unregister_skcipher(&virtio_crypto_algs[i].algo);

		virtio_crypto_algs[i].active_devs--;
	}

	mutex_unlock(&algs_lock);
}