diff --git a/drivers/crypto/virtio/virtio_crypto_algs.c b/drivers/crypto/virtio/virtio_crypto_algs.c
index af6a908dfa7a98ea02e37613a456d5cde84734c0..2c573d1aaa64f2bd2f7c70088c6ddec7ad81f35b 100644
--- a/drivers/crypto/virtio/virtio_crypto_algs.c
+++ b/drivers/crypto/virtio/virtio_crypto_algs.c
@@ -49,12 +49,18 @@ struct virtio_crypto_sym_request {
        bool encrypt;
 };
 
+struct virtio_crypto_algo {
+       uint32_t algonum;
+       uint32_t service;
+       unsigned int active_devs;
+       struct crypto_alg algo;
+};
+
 /*
  * The algs_lock protects the below global virtio_crypto_algs
  * and crypto algorithm registration.
  */
 static DEFINE_MUTEX(algs_lock);
-static unsigned int virtio_crypto_active_devs;
 static void virtio_crypto_ablkcipher_finalize_req(
        struct virtio_crypto_sym_request *vc_sym_req,
        struct ablkcipher_request *req,
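Taken together, this hunk replaces the single global virtio_crypto_active_devs counter with a per-algorithm active_devs count carried in the new struct virtio_crypto_algo, still serialized by algs_lock. A minimal sketch of the intended refcounting pattern for one entry follows; register_one_algo() is a hypothetical helper used only for illustration, the driver open-codes this loop in virtio_crypto_algs_register() further down.

static int register_one_algo(struct virtio_crypto_algo *entry)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	/* Register with the crypto API only when the first capable device appears. */
	if (entry->active_devs == 0)
		ret = crypto_register_alg(&entry->algo);
	if (!ret)
		entry->active_devs++;	/* one reference per active virtio-crypto device */
	mutex_unlock(&algs_lock);
	return ret;
}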
@@ -312,15 +318,21 @@ static int virtio_crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
                                         unsigned int keylen)
 {
        struct virtio_crypto_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+       uint32_t alg;
        int ret;
 
+       ret = virtio_crypto_alg_validate_key(keylen, &alg);
+       if (ret)
+               return ret;
+
        if (!ctx->vcrypto) {
                /* New key */
                int node = virtio_crypto_get_current_node();
                struct virtio_crypto *vcrypto =
-                                     virtcrypto_get_dev_node(node);
+                                     virtcrypto_get_dev_node(node,
+                                     VIRTIO_CRYPTO_SERVICE_CIPHER, alg);
                if (!vcrypto) {
-                       pr_err("virtio_crypto: Could not find a virtio device in the system\n");
+                       pr_err("virtio_crypto: Could not find a virtio device in the system or unsupported algo\n");
                        return -ENODEV;
                }
 
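The reworked setkey path first maps the key length onto a virtio algorithm identifier via virtio_crypto_alg_validate_key(), and only then asks for a device on the current node that actually advertises VIRTIO_CRYPTO_SERVICE_CIPHER with that algorithm. A plausible sketch of the validation helper, assuming only the AES-CBC key sizes are accepted (the exact error handling in the driver may differ):

static int virtio_crypto_alg_validate_key(int key_len, uint32_t *alg)
{
	switch (key_len) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		/* All supported AES key sizes map to the single CBC algorithm ID. */
		*alg = VIRTIO_CRYPTO_CIPHER_AES_CBC;
		break;
	default:
		pr_err("virtio_crypto: Unsupported key length: %d\n", key_len);
		return -EINVAL;
	}
	return 0;
}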
@@ -371,12 +383,12 @@ __virtio_crypto_ablkcipher_do_req(struct virtio_crypto_sym_request *vc_sym_req,
 
        /* Why 3?  outhdr + iv + inhdr */
        sg_total = src_nents + dst_nents + 3;
-       sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_ATOMIC,
+       sgs = kcalloc_node(sg_total, sizeof(*sgs), GFP_KERNEL,
                                dev_to_node(&vcrypto->vdev->dev));
        if (!sgs)
                return -ENOMEM;
 
-       req_data = kzalloc_node(sizeof(*req_data), GFP_ATOMIC,
+       req_data = kzalloc_node(sizeof(*req_data), GFP_KERNEL,
                                dev_to_node(&vcrypto->vdev->dev));
        if (!req_data) {
                kfree(sgs);
@@ -571,57 +583,85 @@ static void virtio_crypto_ablkcipher_finalize_req(
        virtcrypto_clear_request(&vc_sym_req->base);
 }
 
-static struct crypto_alg virtio_crypto_algs[] = { {
-       .cra_name = "cbc(aes)",
-       .cra_driver_name = "virtio_crypto_aes_cbc",
-       .cra_priority = 150,
-       .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
-       .cra_blocksize = AES_BLOCK_SIZE,
-       .cra_ctxsize  = sizeof(struct virtio_crypto_ablkcipher_ctx),
-       .cra_alignmask = 0,
-       .cra_module = THIS_MODULE,
-       .cra_type = &crypto_ablkcipher_type,
-       .cra_init = virtio_crypto_ablkcipher_init,
-       .cra_exit = virtio_crypto_ablkcipher_exit,
-       .cra_u = {
-          .ablkcipher = {
-                       .setkey = virtio_crypto_ablkcipher_setkey,
-                       .decrypt = virtio_crypto_ablkcipher_decrypt,
-                       .encrypt = virtio_crypto_ablkcipher_encrypt,
-                       .min_keysize = AES_MIN_KEY_SIZE,
-                       .max_keysize = AES_MAX_KEY_SIZE,
-                       .ivsize = AES_BLOCK_SIZE,
+static struct virtio_crypto_algo virtio_crypto_algs[] = { {
+       .algonum = VIRTIO_CRYPTO_CIPHER_AES_CBC,
+       .service = VIRTIO_CRYPTO_SERVICE_CIPHER,
+       .algo = {
+               .cra_name = "cbc(aes)",
+               .cra_driver_name = "virtio_crypto_aes_cbc",
+               .cra_priority = 150,
+               .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+               .cra_blocksize = AES_BLOCK_SIZE,
+               .cra_ctxsize  = sizeof(struct virtio_crypto_ablkcipher_ctx),
+               .cra_alignmask = 0,
+               .cra_module = THIS_MODULE,
+               .cra_type = &crypto_ablkcipher_type,
+               .cra_init = virtio_crypto_ablkcipher_init,
+               .cra_exit = virtio_crypto_ablkcipher_exit,
+               .cra_u = {
+                       .ablkcipher = {
+                               .setkey = virtio_crypto_ablkcipher_setkey,
+                               .decrypt = virtio_crypto_ablkcipher_decrypt,
+                               .encrypt = virtio_crypto_ablkcipher_encrypt,
+                               .min_keysize = AES_MIN_KEY_SIZE,
+                               .max_keysize = AES_MAX_KEY_SIZE,
+                               .ivsize = AES_BLOCK_SIZE,
+                       },
                },
        },
 } };
 
-int virtio_crypto_algs_register(void)
+int virtio_crypto_algs_register(struct virtio_crypto *vcrypto)
 {
        int ret = 0;
+       int i = 0;
 
        mutex_lock(&algs_lock);
-       if (++virtio_crypto_active_devs != 1)
-               goto unlock;
 
-       ret = crypto_register_algs(virtio_crypto_algs,
-                       ARRAY_SIZE(virtio_crypto_algs));
-       if (ret)
-               virtio_crypto_active_devs--;
+       for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {
+
+               uint32_t service = virtio_crypto_algs[i].service;
+               uint32_t algonum = virtio_crypto_algs[i].algonum;
+
+               if (!virtcrypto_algo_is_supported(vcrypto, service, algonum))
+                       continue;
+
+               if (virtio_crypto_algs[i].active_devs == 0) {
+                       ret = crypto_register_alg(&virtio_crypto_algs[i].algo);
+                       if (ret)
+                               goto unlock;
+               }
+
+               virtio_crypto_algs[i].active_devs++;
+               dev_info(&vcrypto->vdev->dev, "Registered algo %s\n",
+                        virtio_crypto_algs[i].algo.cra_name);
+       }
 
 unlock:
        mutex_unlock(&algs_lock);
        return ret;
 }
 
-void virtio_crypto_algs_unregister(void)
+void virtio_crypto_algs_unregister(struct virtio_crypto *vcrypto)
 {
+       int i = 0;
+
        mutex_lock(&algs_lock);
-       if (--virtio_crypto_active_devs != 0)
-               goto unlock;
 
-       crypto_unregister_algs(virtio_crypto_algs,
-                       ARRAY_SIZE(virtio_crypto_algs));
+       for (i = 0; i < ARRAY_SIZE(virtio_crypto_algs); i++) {
+
+               uint32_t service = virtio_crypto_algs[i].service;
+               uint32_t algonum = virtio_crypto_algs[i].algonum;
+
+               if (virtio_crypto_algs[i].active_devs == 0 ||
+                   !virtcrypto_algo_is_supported(vcrypto, service, algonum))
+                       continue;
+
+               if (virtio_crypto_algs[i].active_devs == 1)
+                       crypto_unregister_alg(&virtio_crypto_algs[i].algo);
+
+               virtio_crypto_algs[i].active_devs--;
+       }
 
-unlock:
        mutex_unlock(&algs_lock);
 }
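Both loops gate registration and unregistration on virtcrypto_algo_is_supported(), so an algorithm is only exposed to the crypto API while at least one device advertising the matching (service, algorithm) pair is present. A hedged sketch of how such a capability check could look for the cipher service, reading bitmaps cached from the device's configuration space; the field names crypto_services, cipher_algo_l and cipher_algo_h follow the virtio-crypto config layout but are assumptions here.

/* Sketch only: returns true if the device advertises the given service and
 * algorithm.  Assumes the capability bitmaps were cached in struct
 * virtio_crypto at probe time.
 */
static bool example_algo_is_supported(struct virtio_crypto *vcrypto,
				      uint32_t service, uint32_t algo)
{
	uint32_t algo_mask;

	if (!(vcrypto->crypto_services & (1u << service)))
		return false;

	if (service != VIRTIO_CRYPTO_SERVICE_CIPHER)
		return false;	/* only the cipher service is sketched here */

	/* Cipher algorithm bits are split across a low and a high 32-bit word. */
	algo_mask = (algo < 32) ? vcrypto->cipher_algo_l : vcrypto->cipher_algo_h;

	return algo_mask & (1u << (algo & 31));
}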