drivers/crypto/ccp/ccp-crypto-main.c

/*
 * AMD Cryptographic Coprocessor (CCP) crypto API support
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/ccp.h>
#include <linux/scatterlist.h>
#include <crypto/internal/hash.h>

#include "ccp-crypto.h"

MODULE_AUTHOR("Tom Lendacky <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor crypto API support");

static unsigned int aes_disable;
module_param(aes_disable, uint, 0444);
MODULE_PARM_DESC(aes_disable, "Disable use of AES - any non-zero value");

static unsigned int sha_disable;
module_param(sha_disable, uint, 0444);
MODULE_PARM_DESC(sha_disable, "Disable use of SHA - any non-zero value");

static unsigned int des3_disable;
module_param(des3_disable, uint, 0444);
MODULE_PARM_DESC(des3_disable, "Disable use of 3DES - any non-zero value");

/* List heads for the supported algorithms */
static LIST_HEAD(hash_algs);
static LIST_HEAD(cipher_algs);
static LIST_HEAD(aead_algs);

/* For any tfm, requests for that tfm must be returned in the order
 * received.  With multiple queues available, the CCP can process more
 * than one cmd at a time.  Therefore we must maintain a cmd list to ensure
 * the proper ordering of requests on a given tfm.
 */
struct ccp_crypto_queue {
        struct list_head cmds;
        struct list_head *backlog;
        unsigned int cmd_count;
};

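/* Maximum number of cmds that may sit in the request queue at once.  Once
 * this limit is reached, new cmds are accepted only if they are allowed to
 * be backlogged (see ccp_crypto_enqueue_cmd() below).
 */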
#define CCP_CRYPTO_MAX_QLEN     100

static struct ccp_crypto_queue req_queue;
static spinlock_t req_queue_lock;

struct ccp_crypto_cmd {
        struct list_head entry;

        struct ccp_cmd *cmd;

        /* Save the crypto_tfm and crypto_async_request addresses
         * separately to avoid any reference to a possibly invalid
         * crypto_async_request structure after invoking the request
         * callback
         */
        struct crypto_async_request *req;
        struct crypto_tfm *tfm;

        /* Used for held command processing to determine state */
        int ret;
};

struct ccp_crypto_cpu {
        struct work_struct work;
        struct completion completion;
        struct ccp_crypto_cmd *crypto_cmd;
        int err;
};

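/* -EINPROGRESS and -EBUSY both indicate that a cmd was accepted by the CCP
 * (queued or backlogged), so they are treated as success here.
 */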
static inline bool ccp_crypto_success(int err)
{
        if (err && (err != -EINPROGRESS) && (err != -EBUSY))
                return false;

        return true;
}

static struct ccp_crypto_cmd *ccp_crypto_cmd_complete(
        struct ccp_crypto_cmd *crypto_cmd, struct ccp_crypto_cmd **backlog)
{
        struct ccp_crypto_cmd *held = NULL, *tmp;
        unsigned long flags;

        *backlog = NULL;

        spin_lock_irqsave(&req_queue_lock, flags);

        /* Held cmds will be after the current cmd in the queue so start
         * searching for a cmd with a matching tfm for submission.
         */
        tmp = crypto_cmd;
        list_for_each_entry_continue(tmp, &req_queue.cmds, entry) {
                if (crypto_cmd->tfm != tmp->tfm)
                        continue;
                held = tmp;
                break;
        }

        /* Process the backlog:
         *   Because cmds can complete from any point in the cmd list,
         *   the backlog pointer may reference the cmd being removed and
         *   must be advanced past it before and after it is consumed.
         */
        if (req_queue.backlog != &req_queue.cmds) {
                /* Skip over this cmd if it is the next backlog cmd */
                if (req_queue.backlog == &crypto_cmd->entry)
                        req_queue.backlog = crypto_cmd->entry.next;

                *backlog = container_of(req_queue.backlog,
                                        struct ccp_crypto_cmd, entry);
                req_queue.backlog = req_queue.backlog->next;

                /* Skip over this cmd if it is now the next backlog cmd */
                if (req_queue.backlog == &crypto_cmd->entry)
                        req_queue.backlog = crypto_cmd->entry.next;
        }

        /* Remove the cmd entry from the list of cmds */
        req_queue.cmd_count--;
        list_del(&crypto_cmd->entry);

        spin_unlock_irqrestore(&req_queue_lock, flags);

        return held;
}

static void ccp_crypto_complete(void *data, int err)
{
        struct ccp_crypto_cmd *crypto_cmd = data;
        struct ccp_crypto_cmd *held, *next, *backlog;
        struct crypto_async_request *req = crypto_cmd->req;
        struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm);
        int ret;

        if (err == -EINPROGRESS) {
                /* Only propagate the -EINPROGRESS if necessary */
                if (crypto_cmd->ret == -EBUSY) {
                        crypto_cmd->ret = -EINPROGRESS;
                        req->complete(req, -EINPROGRESS);
                }

                return;
        }

        /* Operation has completed - update the queue before invoking
         * the completion callbacks and retrieve the next cmd (cmd with
         * a matching tfm) that can be submitted to the CCP.
         */
        held = ccp_crypto_cmd_complete(crypto_cmd, &backlog);
        if (backlog) {
                backlog->ret = -EINPROGRESS;
                backlog->req->complete(backlog->req, -EINPROGRESS);
        }

        /* Transition the state from -EBUSY to -EINPROGRESS first */
        if (crypto_cmd->ret == -EBUSY)
                req->complete(req, -EINPROGRESS);

        /* Completion callbacks */
        ret = err;
        if (ctx->complete)
                ret = ctx->complete(req, ret);
        req->complete(req, ret);

        /* Submit the next cmd */
        while (held) {
                /* Since we have already queued the cmd, we must indicate that
                 * we can backlog so as not to "lose" this request.
                 */
                held->cmd->flags |= CCP_CMD_MAY_BACKLOG;
                ret = ccp_enqueue_cmd(held->cmd);
                if (ccp_crypto_success(ret))
                        break;

                /* Error occurred, report it and get the next entry */
                ctx = crypto_tfm_ctx(held->req->tfm);
                if (ctx->complete)
                        ret = ctx->complete(held->req, ret);
                held->req->complete(held->req, ret);

                next = ccp_crypto_cmd_complete(held, &backlog);
                if (backlog) {
                        backlog->ret = -EINPROGRESS;
                        backlog->req->complete(backlog->req, -EINPROGRESS);
                }

                kfree(held);
                held = next;
        }

        kfree(crypto_cmd);
}

static int ccp_crypto_enqueue_cmd(struct ccp_crypto_cmd *crypto_cmd)
{
        struct ccp_crypto_cmd *active = NULL, *tmp;
        unsigned long flags;
        bool free_cmd = true;
        int ret;

        spin_lock_irqsave(&req_queue_lock, flags);

        /* Check if the cmd can/should be queued */
        if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
                ret = -EBUSY;
                if (!(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
                        goto e_lock;
        }

        /* Look for an entry with the same tfm.  If there is a cmd
         * with the same tfm in the list then the current cmd cannot
         * be submitted to the CCP yet.
         */
        list_for_each_entry(tmp, &req_queue.cmds, entry) {
                if (crypto_cmd->tfm != tmp->tfm)
                        continue;
                active = tmp;
                break;
        }

        ret = -EINPROGRESS;
        if (!active) {
                ret = ccp_enqueue_cmd(crypto_cmd->cmd);
                if (!ccp_crypto_success(ret))
                        goto e_lock;    /* Error, don't queue it */
                if ((ret == -EBUSY) &&
                    !(crypto_cmd->cmd->flags & CCP_CMD_MAY_BACKLOG))
                        goto e_lock;    /* Not backlogging, don't queue it */
        }

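        /* At this point the cmd will be queued.  If the queue is at its
         * limit the cmd is being backlogged; report -EBUSY and, if this is
         * the first backlogged entry, record it as the start of the backlog.
         */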
        if (req_queue.cmd_count >= CCP_CRYPTO_MAX_QLEN) {
                ret = -EBUSY;
                if (req_queue.backlog == &req_queue.cmds)
                        req_queue.backlog = &crypto_cmd->entry;
        }
        crypto_cmd->ret = ret;

        req_queue.cmd_count++;
        list_add_tail(&crypto_cmd->entry, &req_queue.cmds);

        free_cmd = false;

e_lock:
        spin_unlock_irqrestore(&req_queue_lock, flags);

        if (free_cmd)
                kfree(crypto_cmd);

        return ret;
}

/**
 * ccp_crypto_enqueue_request - queue a crypto async request for processing
 *                              by the CCP
 *
 * @req: crypto_async_request struct to be processed
 * @cmd: ccp_cmd struct to be sent to the CCP
 */
int ccp_crypto_enqueue_request(struct crypto_async_request *req,
                               struct ccp_cmd *cmd)
{
        struct ccp_crypto_cmd *crypto_cmd;
        gfp_t gfp;

        gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC;

        crypto_cmd = kzalloc(sizeof(*crypto_cmd), gfp);
        if (!crypto_cmd)
                return -ENOMEM;

        /* The tfm pointer must be saved and not referenced through the
         * crypto_async_request (req) pointer because it is used after
         * the completion callback for the request has run, at which point
         * the req pointer might no longer be valid.
         */
        crypto_cmd->cmd = cmd;
        crypto_cmd->req = req;
        crypto_cmd->tfm = req->tfm;

        cmd->callback = ccp_crypto_complete;
        cmd->data = crypto_cmd;

        if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
                cmd->flags |= CCP_CMD_MAY_BACKLOG;
        else
                cmd->flags &= ~CCP_CMD_MAY_BACKLOG;

        return ccp_crypto_enqueue_cmd(crypto_cmd);
}
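
/* Illustrative sketch only (not code from this file): this is roughly how
 * the per-algorithm glue code in this driver (e.g. ccp-crypto-aes.c) builds
 * a ccp_cmd and hands it to ccp_crypto_enqueue_request().  The rctx, ctx,
 * iv_sg, iv_len and encrypt names below are placeholders for values the
 * caller already has.
 *
 *      memset(&rctx->cmd, 0, sizeof(rctx->cmd));
 *      INIT_LIST_HEAD(&rctx->cmd.entry);
 *      rctx->cmd.engine = CCP_ENGINE_AES;
 *      rctx->cmd.u.aes.type = ctx->u.aes.type;
 *      rctx->cmd.u.aes.mode = ctx->u.aes.mode;
 *      rctx->cmd.u.aes.action = encrypt ? CCP_AES_ACTION_ENCRYPT
 *                                       : CCP_AES_ACTION_DECRYPT;
 *      rctx->cmd.u.aes.key = &ctx->u.aes.key_sg;
 *      rctx->cmd.u.aes.key_len = ctx->u.aes.key_len;
 *      rctx->cmd.u.aes.iv = iv_sg;
 *      rctx->cmd.u.aes.iv_len = iv_len;
 *      rctx->cmd.u.aes.src = req->src;
 *      rctx->cmd.u.aes.src_len = req->nbytes;
 *      rctx->cmd.u.aes.dst = req->dst;
 *
 *      ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
 *
 * The return value is whatever ccp_crypto_enqueue_cmd() produced, typically
 * -EINPROGRESS or -EBUSY on success (see ccp_crypto_success()).
 */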

struct scatterlist *ccp_crypto_sg_table_add(struct sg_table *table,
                                            struct scatterlist *sg_add)
{
        struct scatterlist *sg, *sg_last = NULL;

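        /* Find the first unused entry (one with no page assigned) in the
         * pre-allocated table
         */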
        for (sg = table->sgl; sg; sg = sg_next(sg))
                if (!sg_page(sg))
                        break;
        if (WARN_ON(!sg))
                return NULL;

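        /* Copy the sg_add entries into the unused table entries; fail if
         * the table fills up before sg_add is exhausted
         */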
        for (; sg && sg_add; sg = sg_next(sg), sg_add = sg_next(sg_add)) {
                sg_set_page(sg, sg_page(sg_add), sg_add->length,
                            sg_add->offset);
                sg_last = sg;
        }
        if (WARN_ON(sg_add))
                return NULL;

        return sg_last;
}

static int ccp_register_algs(void)
{
        int ret;

        if (!aes_disable) {
                ret = ccp_register_aes_algs(&cipher_algs);
                if (ret)
                        return ret;

                ret = ccp_register_aes_cmac_algs(&hash_algs);
                if (ret)
                        return ret;

                ret = ccp_register_aes_xts_algs(&cipher_algs);
                if (ret)
                        return ret;

                ret = ccp_register_aes_aeads(&aead_algs);
                if (ret)
                        return ret;
        }

        if (!des3_disable) {
                ret = ccp_register_des3_algs(&cipher_algs);
                if (ret)
                        return ret;
        }

        if (!sha_disable) {
                ret = ccp_register_sha_algs(&hash_algs);
                if (ret)
                        return ret;
        }

        return 0;
}

static void ccp_unregister_algs(void)
{
        struct ccp_crypto_ahash_alg *ahash_alg, *ahash_tmp;
        struct ccp_crypto_ablkcipher_alg *ablk_alg, *ablk_tmp;
        struct ccp_crypto_aead *aead_alg, *aead_tmp;

        list_for_each_entry_safe(ahash_alg, ahash_tmp, &hash_algs, entry) {
                crypto_unregister_ahash(&ahash_alg->alg);
                list_del(&ahash_alg->entry);
                kfree(ahash_alg);
        }

        list_for_each_entry_safe(ablk_alg, ablk_tmp, &cipher_algs, entry) {
                crypto_unregister_alg(&ablk_alg->alg);
                list_del(&ablk_alg->entry);
                kfree(ablk_alg);
        }

        list_for_each_entry_safe(aead_alg, aead_tmp, &aead_algs, entry) {
                crypto_unregister_aead(&aead_alg->alg);
                list_del(&aead_alg->entry);
                kfree(aead_alg);
        }
}

static int ccp_crypto_init(void)
{
        int ret;

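        /* Only register the algorithms if a CCP device is present */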
        ret = ccp_present();
        if (ret)
                return ret;

        spin_lock_init(&req_queue_lock);
        INIT_LIST_HEAD(&req_queue.cmds);
        req_queue.backlog = &req_queue.cmds;
        req_queue.cmd_count = 0;

        ret = ccp_register_algs();
        if (ret)
                ccp_unregister_algs();

        return ret;
}

static void ccp_crypto_exit(void)
{
        ccp_unregister_algs();
}

module_init(ccp_crypto_init);
module_exit(ccp_crypto_exit);