// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) crypto API support
 *
 * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/ccp.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/akcipher.h>

#include "ccp-crypto.h"
MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support");
static unsigned int aes_disable;
module_param(aes_disable, uint, 0444);
MODULE_PARM_DESC(aes_disable, "Disable use of AES - any non-zero value");

static unsigned int sha_disable;
module_param(sha_disable, uint, 0444);
MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");

static unsigned int des3_disable;
module_param(des3_disable, uint, 0444);
MODULE_PARM_DESC(des3_disable, "Disable use of 3DES - any non-zero value");

static unsigned int rsa_disable;
module_param(rsa_disable, uint, 0444);
MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value");
/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
static LIST_HEAD(skcipher_algs);
static LIST_HEAD(aead_algs);
static LIST_HEAD(akcipher_algs);
/* For any tfm, requests for that tfm must be returned in the order
 * received.  With multiple queues available, the CCP can process more
 * than one cmd at a time.  Therefore we must maintain a cmd list to ensure
 * the proper ordering of requests on a given tfm.
 */
struct ccp_crypto_queue {
	struct list_head cmds;
	struct list_head *backlog;
	unsigned int cmd_count;
};
#define CCP_CRYPTO_MAX_QLEN	100

static struct ccp_crypto_queue req_queue;
static spinlock_t req_queue_lock;
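
/* Serialization example (illustrative): if requests A and B arrive for
 * the same tfm, A is submitted to the CCP while B is merely queued on
 * req_queue.cmds; B is only handed to the hardware from A's completion
 * path, so completions for a given tfm are delivered in arrival order
 * even though the CCP services multiple hardware queues in parallel.
 */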
struct ccp_crypto_cmd {
	struct list_head entry;

	struct ccp_cmd *cmd;

	/* Save the crypto_tfm and crypto_async_request addresses
	 * separately to avoid any reference to a possibly invalid
	 * crypto_async_request structure after invoking the request
	 * callback
	 */
	struct crypto_async_request *req;
	struct crypto_tfm *tfm;

	/* Used for held command processing to determine state */
	int ret;
};
struct ccp_crypto_cpu {
	struct work_struct work;
	struct completion completion;
	struct ccp_crypto_cmd *crypto_cmd;
	int err;
};
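
/* -EINPROGRESS and -EBUSY indicate the cmd was accepted (either running
 * or backlogged), so they count as success here; any other non-zero err
 * means the cmd was rejected.
 */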
static inline bool ccp_crypto_success(int err)
{
	if (err && (err != -EINPROGRESS) && (err != -EBUSY))
		return false;

	return true;
}
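
/* Remove a completed cmd from the queue and, under req_queue_lock,
 * determine what happens next: the return value is the next queued cmd
 * sharing the completing cmd's tfm (now eligible for submission), and
 * *backlog is set to the next backlogged cmd whose owner must be told
 * it has advanced from -EBUSY to -EINPROGRESS.
 */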
static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
	struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)
{
	struct ccp_crypto_cmd *held = NULL, *tmp;
	unsigned long flags;

	*backlog = NULL;

	spin_lock_irqsave(&req_queue_lock, flags);

	/* Held cmds will be after the current cmd in the queue so start
	 * searching for a cmd with a matching tfm for submission.
	 */
	tmp = crypto_cmd;
	list_for_each_entry_continue(tmp, &req_queue.cmds, entry) {
		if (crypto_cmd->tfm != tmp->tfm)
			continue;
		held = tmp;
		break;
	}

	/* Process the backlog:
	 *   Because cmds can be executed from any point in the cmd list
	 *   special precautions have to be taken when handling the backlog.
	 */
	if (req_queue.backlog != &req_queue.cmds) {
		/* Skip over this cmd if it is the next backlog cmd */
		if (req_queue.backlog == &crypto_cmd->entry)
			req_queue.backlog = crypto_cmd->entry.next;

		*backlog = container_of(req_queue.backlog,
					struct ccp_crypto_cmd, entry);
		req_queue.backlog = req_queue.backlog->next;

		/* Skip over this cmd if it is now the next backlog cmd */
		if (req_queue.backlog == &crypto_cmd->entry)
			req_queue.backlog = crypto_cmd->entry.next;
	}

	/* Remove the cmd entry from the list of cmds */
	req_queue.cmd_count--;
	list_del(&crypto_cmd->entry);

	spin_unlock_irqrestore(&req_queue_lock, flags);

	return held;
}
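
/* Callback invoked by the CCP driver for every cmd: possibly first with
 * -EINPROGRESS when a backlogged cmd starts executing, then once more
 * with the final status of the operation.
 */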
static void ccp_crypto_complete(void *data, int err)
{
	struct ccp_crypto_cmd *crypto_cmd = data;
	struct ccp_crypto_cmd *held, *next, *backlog;
	struct crypto_async_request *req = crypto_cmd->req;
	struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm);
	int ret;

	if (err == -EINPROGRESS) {
		/* Only propagate the -EINPROGRESS if necessary */
		if (crypto_cmd->ret == -EBUSY) {
			crypto_cmd->ret = -EINPROGRESS;
			req->complete(req, -EINPROGRESS);
		}

		return;
	}

	/* Operation has completed - update the queue before invoking
	 * the completion callbacks and retrieve the next cmd (cmd with
	 * a matching tfm) that can be submitted to the CCP.
	 */
	held = ccp_crypto_cmd_complete(crypto_cmd, &backlog);
	if (backlog) {
		backlog->ret = -EINPROGRESS;
		backlog->req->complete(backlog->req, -EINPROGRESS);
	}

	/* Transition the state from -EBUSY to -EINPROGRESS first */
	if (crypto_cmd->ret == -EBUSY)
		req->complete(req, -EINPROGRESS);

	/* Completion callbacks */
	ret = err;
	if (ctx->complete)
		ret = ctx->complete(req, ret);
	req->complete(req, ret);

	/* Submit the next cmd */
	while (held) {
		/* Since we have already queued the cmd, we must indicate that
		 * we can backlog so as not to "lose" this request.
		 */
		held->cmd->flags |= CCP_CMD_MAY_BACKLOG;
		ret = ccp_enqueue_cmd(held->cmd);
		if (ccp_crypto_success(ret))
			break;

		/* Error occurred, report it and get the next entry */
		ctx = crypto_tfm_ctx(held->req->tfm);
		if (ctx->complete)
			ret = ctx->complete(held->req, ret);
		held->req->complete(held->req, ret);

		next = ccp_crypto_cmd_complete(held, &backlog);
		if (backlog) {
			backlog->ret = -EINPROGRESS;
			backlog->req->complete(backlog->req, -EINPROGRESS);
		}

		kfree(held);
		held = next;
	}

	kfree(crypto_cmd);
}
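
/* Queue a cmd for submission. Returns -EINPROGRESS if the cmd was
 * queued (and possibly submitted), -EBUSY if it was backlogged because
 * the queue is full, or a negative error (e.g. -ENOSPC) if it was
 * rejected; in the error case the cmd is freed here.
 */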
static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
{
	struct ccp_crypto_cmd *active = NULL, *tmp;
	unsigned long flags;
	bool free_cmd = true;
	int ret;

	spin_lock_irqsave(&req_queue_lock, flags);

	/* Check if the cmd can/should be queued */
	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
		if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG)) {
			ret = -ENOSPC;
			goto e_lock;
		}
	}

	/* Look for an entry with the same tfm.  If there is a cmd
	 * with the same tfm in the list then the current cmd cannot
	 * be submitted to the CCP yet.
	 */
	list_for_each_entry(tmp, &req_queue.cmds, entry) {
		if (crypto_cmd->tfm != tmp->tfm)
			continue;
		active = tmp;
		break;
	}

	ret = -EINPROGRESS;
	if (!active) {
		ret = ccp_enqueue_cmd(crypto_cmd->cmd);
		if (!ccp_crypto_success(ret))
			goto e_lock;	/* Error, don't queue it */
	}

	if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
		ret = -EBUSY;
		if (req_queue.backlog == &req_queue.cmds)
			req_queue.backlog = &crypto_cmd->entry;
	}
	crypto_cmd->ret = ret;

	req_queue.cmd_count++;
	list_add_tail(&crypto_cmd->entry, &req_queue.cmds);

	/* The cmd is now owned by the queue; don't free it on exit */
	free_cmd = false;

e_lock:
	spin_unlock_irqrestore(&req_queue_lock, flags);

	if (free_cmd)
		kfree(crypto_cmd);

	return ret;
}

/**
 * ccp_crypto_enqueue_request - queue a crypto async request for processing
 *				by the CCP
 *
 * @req: crypto_async_request struct to be processed
 * @cmd: ccp_cmd struct to be sent to the CCP
 */
int ccp_crypto_enqueue_request(struct crypto_async_request *req,
			       struct ccp_cmd *cmd)
{
	struct ccp_crypto_cmd *crypto_cmd;
	gfp_t gfp;

	gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;

	crypto_cmd = kzalloc(sizeof(*crypto_cmd), gfp);
	if (!crypto_cmd)
		return -ENOMEM;

	/* The tfm pointer must be saved and not referenced from the
	 * crypto_async_request (req) pointer because it is used after
	 * the completion callback for the request and the req pointer
	 * might not be valid anymore.
	 */
	crypto_cmd->cmd = cmd;
	crypto_cmd->req = req;
	crypto_cmd->tfm = req->tfm;

	cmd->callback = ccp_crypto_complete;
	cmd->data = crypto_cmd;

	if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
		cmd->flags |= CCP_CMD_MAY_BACKLOG;
	else
		cmd->flags &= ~CCP_CMD_MAY_BACKLOG;

	return ccp_crypto_enqueue_cmd(crypto_cmd);
}
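
/* Typical caller (sketch, following the per-algorithm files such as
 * ccp-crypto-sha.c): build a ccp_cmd in the request context, then hand
 * it off together with the base request:
 *
 *	rctx->cmd.engine = CCP_ENGINE_SHA;
 *	...
 *	return ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
 *
 * A return of -EINPROGRESS or -EBUSY means the request was accepted.
 */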
struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
					    struct scatterlist *sg_add)
{
	struct scatterlist *sg, *sg_last = NULL;

	/* Find the first unused entry in the table */
	for (sg = table->sgl; sg; sg = sg_next(sg))
		if (!sg_page(sg))
			break;
	if (WARN_ON(!sg))
		return NULL;

	/* Copy the entries of sg_add into the free slots */
	for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) {
		sg_set_page(sg, sg_page(sg_add), sg_add->length,
			    sg_add->offset);
		sg_last = sg;
	}
	if (WARN_ON(sg_add))
		return NULL;

	return sg_last;
}
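
/* ccp_crypto_sg_table_add() is used by the hash implementations to
 * append a request's scatterlist entries to a preallocated sg_table
 * (for example to prepend saved context data to the message). It
 * returns the last entry written, so the caller can chain or terminate
 * the list, or NULL if the table has too few free slots.
 */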
static int ccp_register_algs(void)
{
	int ret;

	if (!aes_disable) {
		ret = ccp_register_aes_algs(&skcipher_algs);
		if (ret)
			return ret;

		ret = ccp_register_aes_cmac_algs(&hash_algs);
		if (ret)
			return ret;

		ret = ccp_register_aes_xts_algs(&skcipher_algs);
		if (ret)
			return ret;

		ret = ccp_register_aes_aeads(&aead_algs);
		if (ret)
			return ret;
	}

	if (!des3_disable) {
		ret = ccp_register_des3_algs(&skcipher_algs);
		if (ret)
			return ret;
	}

	if (!sha_disable) {
		ret = ccp_register_sha_algs(&hash_algs);
		if (ret)
			return ret;
	}

	if (!rsa_disable) {
		ret = ccp_register_rsa_algs(&akcipher_algs);
		if (ret)
			return ret;
	}

	return 0;
}
static void ccp_unregister_algs(void)
{
	struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
	struct ccp_crypto_skcipher_alg *ablk_alg, *ablk_tmp;
	struct ccp_crypto_aead *aead_alg, *aead_tmp;
	struct ccp_crypto_akcipher_alg *akc_alg, *akc_tmp;

	list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
		crypto_unregister_ahash(&ahash_alg->alg);
		list_del(&ahash_alg->entry);
		kfree(ahash_alg);
	}

	list_for_each_entry_safe(ablk_alg, ablk_tmp, &skcipher_algs, entry) {
		crypto_unregister_skcipher(&ablk_alg->alg);
		list_del(&ablk_alg->entry);
		kfree(ablk_alg);
	}

	list_for_each_entry_safe(aead_alg, aead_tmp, &aead_algs, entry) {
		crypto_unregister_aead(&aead_alg->alg);
		list_del(&aead_alg->entry);
		kfree(aead_alg);
	}

	list_for_each_entry_safe(akc_alg, akc_tmp, &akcipher_algs, entry) {
		crypto_unregister_akcipher(&akc_alg->alg);
		list_del(&akc_alg->entry);
		kfree(akc_alg);
	}
}
static int ccp_crypto_init(void)
{
	int ret;

	ret = ccp_present();
	if (ret) {
		pr_err("Cannot load: there are no available CCPs\n");
		return ret;
	}

	spin_lock_init(&req_queue_lock);
	INIT_LIST_HEAD(&req_queue.cmds);
	req_queue.backlog = &req_queue.cmds;
	req_queue.cmd_count = 0;

	ret = ccp_register_algs();
	if (ret)
		ccp_unregister_algs();

	return ret;
}
static void ccp_crypto_exit(void)
{
	ccp_unregister_algs();
}

module_init(ccp_crypto_init);
module_exit(ccp_crypto_exit);