Commit | Line | Data |
---|---|---|
124b53d0 HX |
1 | /* |
2 | * Software async crypto daemon. | |
3 | * | |
4 | * Copyright (c) 2006 Herbert Xu <[email protected]> | |
5 | * | |
298c926c AH |
6 | * Added AEAD support to cryptd. |
7 | * Authors: Tadeusz Struk ([email protected]) | |
8 | * Adrian Hoban <[email protected]> | |
9 | * Gabriele Paoloni <[email protected]> | |
10 | * Aidan O'Mahony ([email protected]) | |
11 | * Copyright (c) 2010, Intel Corporation. | |
12 | * | |
124b53d0 HX |
13 | * This program is free software; you can redistribute it and/or modify it |
14 | * under the terms of the GNU General Public License as published by the Free | |
15 | * Software Foundation; either version 2 of the License, or (at your option) | |
16 | * any later version. | |
17 | * | |
18 | */ | |
19 | ||
18e33e6d | 20 | #include <crypto/internal/hash.h> |
298c926c | 21 | #include <crypto/internal/aead.h> |
4e0958d1 | 22 | #include <crypto/internal/skcipher.h> |
1cac2cbc | 23 | #include <crypto/cryptd.h> |
254eff77 | 24 | #include <crypto/crypto_wq.h> |
81760ea6 | 25 | #include <linux/atomic.h> |
124b53d0 HX |
26 | #include <linux/err.h> |
27 | #include <linux/init.h> | |
28 | #include <linux/kernel.h> | |
124b53d0 HX |
29 | #include <linux/list.h> |
30 | #include <linux/module.h> | |
124b53d0 HX |
31 | #include <linux/scatterlist.h> |
32 | #include <linux/sched.h> | |
33 | #include <linux/slab.h> | |
124b53d0 | 34 | |
eaf356e4 | 35 | static unsigned int cryptd_max_cpu_qlen = 1000; |
c3a53605 JM |
36 | module_param(cryptd_max_cpu_qlen, uint, 0); |
37 | MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth"); | |
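/*
 * Note: the per-CPU queue length above is read once at module init (see
 * cryptd_init() below), so it is normally set at load time, e.g. when
 * cryptd is built as a module:
 *
 *	modprobe cryptd cryptd_max_cpu_qlen=1000
 *
 * The value bounds how many requests may sit in each per-CPU queue before
 * further submissions are rejected (or backlogged, for requests that
 * allow backlogging).
 */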
124b53d0 | 38 | |
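/*
 * Request queuing is per CPU: every possible CPU gets its own
 * crypto_queue plus a work_struct, and a request is queued on the CPU
 * that submitted it.  The work item then runs on that same CPU via
 * kcrypto_wq and processes one request per invocation, requeueing
 * itself while the queue is non-empty (see cryptd_queue_worker()).
 */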
254eff77 | 39 | struct cryptd_cpu_queue { |
124b53d0 | 40 | struct crypto_queue queue; |
254eff77 YH |
41 | struct work_struct work; |
42 | }; | |
43 | ||
44 | struct cryptd_queue { | |
a29d8b8e | 45 | struct cryptd_cpu_queue __percpu *cpu_queue; |
124b53d0 HX |
46 | }; |
47 | ||
48 | struct cryptd_instance_ctx { | |
49 | struct crypto_spawn spawn; | |
254eff77 | 50 | struct cryptd_queue *queue; |
124b53d0 HX |
51 | }; |
52 | ||
4e0958d1 HX |
53 | struct skcipherd_instance_ctx { |
54 | struct crypto_skcipher_spawn spawn; | |
55 | struct cryptd_queue *queue; | |
56 | }; | |
57 | ||
46309d89 HX |
58 | struct hashd_instance_ctx { |
59 | struct crypto_shash_spawn spawn; | |
60 | struct cryptd_queue *queue; | |
61 | }; | |
62 | ||
298c926c AH |
63 | struct aead_instance_ctx { |
64 | struct crypto_aead_spawn aead_spawn; | |
65 | struct cryptd_queue *queue; | |
66 | }; | |
67 | ||
124b53d0 | 68 | struct cryptd_blkcipher_ctx { |
81760ea6 | 69 | atomic_t refcnt; |
124b53d0 HX |
70 | struct crypto_blkcipher *child; |
71 | }; | |
72 | ||
73 | struct cryptd_blkcipher_request_ctx { | |
74 | crypto_completion_t complete; | |
75 | }; | |
76 | ||
4e0958d1 HX |
77 | struct cryptd_skcipher_ctx { |
78 | atomic_t refcnt; | |
36b3875a | 79 | struct crypto_sync_skcipher *child; |
4e0958d1 HX |
80 | }; |
81 | ||
82 | struct cryptd_skcipher_request_ctx { | |
83 | crypto_completion_t complete; | |
84 | }; | |
85 | ||
b8a28251 | 86 | struct cryptd_hash_ctx { |
81760ea6 | 87 | atomic_t refcnt; |
46309d89 | 88 | struct crypto_shash *child; |
b8a28251 LH |
89 | }; |
90 | ||
91 | struct cryptd_hash_request_ctx { | |
92 | crypto_completion_t complete; | |
46309d89 | 93 | struct shash_desc desc; |
b8a28251 | 94 | }; |
124b53d0 | 95 | |
298c926c | 96 | struct cryptd_aead_ctx { |
81760ea6 | 97 | atomic_t refcnt; |
298c926c AH |
98 | struct crypto_aead *child; |
99 | }; | |
100 | ||
101 | struct cryptd_aead_request_ctx { | |
102 | crypto_completion_t complete; | |
103 | }; | |
104 | ||
254eff77 YH |
105 | static void cryptd_queue_worker(struct work_struct *work); |
106 | ||
107 | static int cryptd_init_queue(struct cryptd_queue *queue, | |
108 | unsigned int max_cpu_qlen) | |
109 | { | |
110 | int cpu; | |
111 | struct cryptd_cpu_queue *cpu_queue; | |
112 | ||
113 | queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue); | |
114 | if (!queue->cpu_queue) | |
115 | return -ENOMEM; | |
116 | for_each_possible_cpu(cpu) { | |
117 | cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); | |
118 | crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); | |
119 | INIT_WORK(&cpu_queue->work, cryptd_queue_worker); | |
120 | } | |
c3a53605 | 121 | pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen); |
254eff77 YH |
122 | return 0; |
123 | } | |
124 | ||
125 | static void cryptd_fini_queue(struct cryptd_queue *queue) | |
126 | { | |
127 | int cpu; | |
128 | struct cryptd_cpu_queue *cpu_queue; | |
129 | ||
130 | for_each_possible_cpu(cpu) { | |
131 | cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); | |
132 | BUG_ON(cpu_queue->queue.qlen); | |
133 | } | |
134 | free_percpu(queue->cpu_queue); | |
135 | } | |
136 | ||
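/*
 * Queue a request on the submitting CPU's queue.  The context refcount
 * (the atomic_t at the start of every cryptd tfm context) is bumped for
 * each successfully queued request, but only once the handle has been
 * published by cryptd_alloc_*() (refcnt != 0).  This keeps the tfm alive
 * until the request completes, so cryptd_free_*() from the user cannot
 * release it while requests are still pending; the matching
 * atomic_dec_and_test() happens in the per-type completion helpers.
 */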
137 | static int cryptd_enqueue_request(struct cryptd_queue *queue, | |
138 | struct crypto_async_request *request) | |
139 | { | |
140 | int cpu, err; | |
141 | struct cryptd_cpu_queue *cpu_queue; | |
81760ea6 | 142 | atomic_t *refcnt; |
254eff77 YH |
143 | |
144 | cpu = get_cpu(); | |
0b44f486 | 145 | cpu_queue = this_cpu_ptr(queue->cpu_queue); |
254eff77 | 146 | err = crypto_enqueue_request(&cpu_queue->queue, request); |
81760ea6 HX |
147 | |
148 | refcnt = crypto_tfm_ctx(request->tfm); | |
81760ea6 | 149 | |
6b80ea38 | 150 | if (err == -ENOSPC) |
81760ea6 HX |
151 | goto out_put_cpu; |
152 | ||
254eff77 | 153 | queue_work_on(cpu, kcrypto_wq, &cpu_queue->work); |
81760ea6 HX |
154 | |
155 | if (!atomic_read(refcnt)) | |
156 | goto out_put_cpu; | |
157 | ||
81760ea6 HX |
158 | atomic_inc(refcnt); |
159 | ||
160 | out_put_cpu: | |
254eff77 YH |
161 | put_cpu(); |
162 | ||
163 | return err; | |
164 | } | |
165 | ||
166 | /* Called in workqueue context; do one real crypto operation (via |
167 | * req->complete) and reschedule itself if there is more work to |
168 | * do. */ | |
169 | static void cryptd_queue_worker(struct work_struct *work) | |
170 | { | |
171 | struct cryptd_cpu_queue *cpu_queue; | |
172 | struct crypto_async_request *req, *backlog; | |
173 | ||
174 | cpu_queue = container_of(work, struct cryptd_cpu_queue, work); | |
9efade1b JK |
175 | /* |
176 | * Only handle one request at a time to avoid hogging the crypto workqueue. |
177 | * preempt_disable/enable is used to prevent being preempted by |
178 | * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent |
179 | * cryptd_enqueue_request() from running in softirq context on this CPU. |
180 | */ | |
181 | local_bh_disable(); | |
254eff77 YH |
182 | preempt_disable(); |
183 | backlog = crypto_get_backlog(&cpu_queue->queue); | |
184 | req = crypto_dequeue_request(&cpu_queue->queue); | |
185 | preempt_enable(); | |
9efade1b | 186 | local_bh_enable(); |
254eff77 YH |
187 | |
188 | if (!req) | |
189 | return; | |
190 | ||
191 | if (backlog) | |
192 | backlog->complete(backlog, -EINPROGRESS); | |
193 | req->complete(req, 0); | |
194 | ||
195 | if (cpu_queue->queue.qlen) | |
196 | queue_work(kcrypto_wq, &cpu_queue->work); | |
197 | } | |
198 | ||
199 | static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm) | |
124b53d0 HX |
200 | { |
201 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | |
202 | struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst); | |
254eff77 | 203 | return ictx->queue; |
124b53d0 HX |
204 | } |
205 | ||
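/*
 * Propagate the CRYPTO_ALG_INTERNAL bit from the template parameters
 * into the type/mask used to look up the underlying algorithm, so that
 * "cryptd(__xxx)" instances can wrap algorithms that are marked
 * internal-only without exposing them directly to crypto API users.
 */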
466a7b9e SM |
206 | static inline void cryptd_check_internal(struct rtattr **tb, u32 *type, |
207 | u32 *mask) | |
208 | { | |
209 | struct crypto_attr_type *algt; | |
210 | ||
211 | algt = crypto_get_attr_type(tb); | |
212 | if (IS_ERR(algt)) | |
213 | return; | |
f6da3205 | 214 | |
5e4b8c1f HX |
215 | *type |= algt->type & CRYPTO_ALG_INTERNAL; |
216 | *mask |= algt->mask & CRYPTO_ALG_INTERNAL; | |
466a7b9e SM |
217 | } |
218 | ||
124b53d0 HX |
219 | static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent, |
220 | const u8 *key, unsigned int keylen) | |
221 | { | |
222 | struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent); | |
223 | struct crypto_blkcipher *child = ctx->child; | |
224 | int err; | |
225 | ||
226 | crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); | |
227 | crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) & | |
228 | CRYPTO_TFM_REQ_MASK); | |
229 | err = crypto_blkcipher_setkey(child, key, keylen); | |
230 | crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) & | |
231 | CRYPTO_TFM_RES_MASK); | |
232 | return err; | |
233 | } | |
234 | ||
235 | static void cryptd_blkcipher_crypt(struct ablkcipher_request *req, | |
236 | struct crypto_blkcipher *child, | |
237 | int err, | |
238 | int (*crypt)(struct blkcipher_desc *desc, | |
239 | struct scatterlist *dst, | |
240 | struct scatterlist *src, | |
241 | unsigned int len)) | |
242 | { | |
243 | struct cryptd_blkcipher_request_ctx *rctx; | |
81760ea6 HX |
244 | struct cryptd_blkcipher_ctx *ctx; |
245 | struct crypto_ablkcipher *tfm; | |
124b53d0 | 246 | struct blkcipher_desc desc; |
81760ea6 | 247 | int refcnt; |
124b53d0 HX |
248 | |
249 | rctx = ablkcipher_request_ctx(req); | |
250 | ||
93aa7f8a HX |
251 | if (unlikely(err == -EINPROGRESS)) |
252 | goto out; | |
124b53d0 HX |
253 | |
254 | desc.tfm = child; | |
255 | desc.info = req->info; | |
256 | desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; | |
257 | ||
258 | err = crypt(&desc, req->dst, req->src, req->nbytes); | |
259 | ||
260 | req->base.complete = rctx->complete; | |
261 | ||
93aa7f8a | 262 | out: |
81760ea6 HX |
263 | tfm = crypto_ablkcipher_reqtfm(req); |
264 | ctx = crypto_ablkcipher_ctx(tfm); | |
265 | refcnt = atomic_read(&ctx->refcnt); | |
266 | ||
124b53d0 | 267 | local_bh_disable(); |
93aa7f8a | 268 | rctx->complete(&req->base, err); |
124b53d0 | 269 | local_bh_enable(); |
81760ea6 HX |
270 | |
271 | if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt)) | |
272 | crypto_free_ablkcipher(tfm); | |
124b53d0 HX |
273 | } |
274 | ||
275 | static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err) | |
276 | { | |
277 | struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm); | |
278 | struct crypto_blkcipher *child = ctx->child; | |
279 | ||
280 | cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err, | |
281 | crypto_blkcipher_crt(child)->encrypt); | |
282 | } | |
283 | ||
284 | static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err) | |
285 | { | |
286 | struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm); | |
287 | struct crypto_blkcipher *child = ctx->child; | |
288 | ||
289 | cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err, | |
290 | crypto_blkcipher_crt(child)->decrypt); | |
291 | } | |
292 | ||
293 | static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req, | |
3e3dc25f | 294 | crypto_completion_t compl) |
124b53d0 HX |
295 | { |
296 | struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req); | |
297 | struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); | |
254eff77 | 298 | struct cryptd_queue *queue; |
124b53d0 | 299 | |
254eff77 | 300 | queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm)); |
124b53d0 | 301 | rctx->complete = req->base.complete; |
3e3dc25f | 302 | req->base.complete = compl; |
124b53d0 | 303 | |
254eff77 | 304 | return cryptd_enqueue_request(queue, &req->base); |
124b53d0 HX |
305 | } |
306 | ||
307 | static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req) | |
308 | { | |
309 | return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt); | |
310 | } | |
311 | ||
312 | static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req) | |
313 | { | |
314 | return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt); | |
315 | } | |
316 | ||
317 | static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm) | |
318 | { | |
319 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | |
320 | struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst); | |
321 | struct crypto_spawn *spawn = &ictx->spawn; | |
322 | struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm); | |
323 | struct crypto_blkcipher *cipher; | |
324 | ||
325 | cipher = crypto_spawn_blkcipher(spawn); | |
326 | if (IS_ERR(cipher)) | |
327 | return PTR_ERR(cipher); | |
328 | ||
329 | ctx->child = cipher; | |
330 | tfm->crt_ablkcipher.reqsize = | |
331 | sizeof(struct cryptd_blkcipher_request_ctx); | |
332 | return 0; | |
333 | } | |
334 | ||
335 | static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm) | |
336 | { | |
337 | struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm); | |
124b53d0 HX |
338 | |
339 | crypto_free_blkcipher(ctx->child); | |
340 | } | |
341 | ||
9b8c456e HX |
342 | static int cryptd_init_instance(struct crypto_instance *inst, |
343 | struct crypto_alg *alg) | |
344 | { | |
345 | if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, | |
346 | "cryptd(%s)", | |
347 | alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) | |
348 | return -ENAMETOOLONG; | |
349 | ||
350 | memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); | |
351 | ||
352 | inst->alg.cra_priority = alg->cra_priority + 50; | |
353 | inst->alg.cra_blocksize = alg->cra_blocksize; | |
354 | inst->alg.cra_alignmask = alg->cra_alignmask; | |
355 | ||
356 | return 0; | |
357 | } | |
358 | ||
0b535adf HX |
359 | static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head, |
360 | unsigned int tail) | |
124b53d0 | 361 | { |
0b535adf | 362 | char *p; |
124b53d0 | 363 | struct crypto_instance *inst; |
124b53d0 HX |
364 | int err; |
365 | ||
0b535adf HX |
366 | p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL); |
367 | if (!p) | |
368 | return ERR_PTR(-ENOMEM); | |
369 | ||
370 | inst = (void *)(p + head); | |
124b53d0 | 371 | |
9b8c456e HX |
372 | err = cryptd_init_instance(inst, alg); |
373 | if (err) | |
124b53d0 HX |
374 | goto out_free_inst; |
375 | ||
124b53d0 | 376 | out: |
0b535adf | 377 | return p; |
124b53d0 HX |
378 | |
379 | out_free_inst: | |
0b535adf HX |
380 | kfree(p); |
381 | p = ERR_PTR(err); | |
124b53d0 HX |
382 | goto out; |
383 | } | |
384 | ||
9cd899a3 HX |
385 | static int cryptd_create_blkcipher(struct crypto_template *tmpl, |
386 | struct rtattr **tb, | |
387 | struct cryptd_queue *queue) | |
124b53d0 | 388 | { |
46309d89 | 389 | struct cryptd_instance_ctx *ctx; |
124b53d0 HX |
390 | struct crypto_instance *inst; |
391 | struct crypto_alg *alg; | |
466a7b9e SM |
392 | u32 type = CRYPTO_ALG_TYPE_BLKCIPHER; |
393 | u32 mask = CRYPTO_ALG_TYPE_MASK; | |
46309d89 | 394 | int err; |
124b53d0 | 395 | |
466a7b9e SM |
396 | cryptd_check_internal(tb, &type, &mask); |
397 | ||
398 | alg = crypto_get_attr_alg(tb, type, mask); | |
124b53d0 | 399 | if (IS_ERR(alg)) |
9cd899a3 | 400 | return PTR_ERR(alg); |
124b53d0 | 401 | |
0b535adf | 402 | inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx)); |
05ed8758 | 403 | err = PTR_ERR(inst); |
124b53d0 HX |
404 | if (IS_ERR(inst)) |
405 | goto out_put_alg; | |
406 | ||
46309d89 HX |
407 | ctx = crypto_instance_ctx(inst); |
408 | ctx->queue = queue; | |
409 | ||
410 | err = crypto_init_spawn(&ctx->spawn, alg, inst, | |
411 | CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC); | |
412 | if (err) | |
413 | goto out_free_inst; | |
414 | ||
466a7b9e SM |
415 | type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; |
416 | if (alg->cra_flags & CRYPTO_ALG_INTERNAL) | |
417 | type |= CRYPTO_ALG_INTERNAL; | |
418 | inst->alg.cra_flags = type; | |
124b53d0 HX |
419 | inst->alg.cra_type = &crypto_ablkcipher_type; |
420 | ||
421 | inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize; | |
422 | inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize; | |
423 | inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize; | |
424 | ||
927eead5 HX |
425 | inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv; |
426 | ||
124b53d0 HX |
427 | inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx); |
428 | ||
429 | inst->alg.cra_init = cryptd_blkcipher_init_tfm; | |
430 | inst->alg.cra_exit = cryptd_blkcipher_exit_tfm; | |
431 | ||
432 | inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey; | |
433 | inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue; | |
434 | inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue; | |
435 | ||
9cd899a3 HX |
436 | err = crypto_register_instance(tmpl, inst); |
437 | if (err) { | |
438 | crypto_drop_spawn(&ctx->spawn); | |
439 | out_free_inst: | |
440 | kfree(inst); | |
441 | } | |
442 | ||
124b53d0 HX |
443 | out_put_alg: |
444 | crypto_mod_put(alg); | |
9cd899a3 | 445 | return err; |
124b53d0 HX |
446 | } |
447 | ||
4e0958d1 HX |
448 | static int cryptd_skcipher_setkey(struct crypto_skcipher *parent, |
449 | const u8 *key, unsigned int keylen) | |
450 | { | |
451 | struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent); | |
36b3875a | 452 | struct crypto_sync_skcipher *child = ctx->child; |
4e0958d1 HX |
453 | int err; |
454 | ||
36b3875a KC |
455 | crypto_sync_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
456 | crypto_sync_skcipher_set_flags(child, | |
457 | crypto_skcipher_get_flags(parent) & | |
4e0958d1 | 458 | CRYPTO_TFM_REQ_MASK); |
36b3875a KC |
459 | err = crypto_sync_skcipher_setkey(child, key, keylen); |
460 | crypto_skcipher_set_flags(parent, | |
461 | crypto_sync_skcipher_get_flags(child) & | |
4e0958d1 HX |
462 | CRYPTO_TFM_RES_MASK); |
463 | return err; | |
464 | } | |
465 | ||
466 | static void cryptd_skcipher_complete(struct skcipher_request *req, int err) | |
467 | { | |
468 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | |
469 | struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); | |
470 | struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); | |
471 | int refcnt = atomic_read(&ctx->refcnt); | |
472 | ||
473 | local_bh_disable(); | |
474 | rctx->complete(&req->base, err); | |
475 | local_bh_enable(); | |
476 | ||
477 | if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt)) | |
478 | crypto_free_skcipher(tfm); | |
479 | } | |
480 | ||
481 | static void cryptd_skcipher_encrypt(struct crypto_async_request *base, | |
482 | int err) | |
483 | { | |
484 | struct skcipher_request *req = skcipher_request_cast(base); | |
485 | struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); | |
486 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | |
487 | struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); | |
36b3875a KC |
488 | struct crypto_sync_skcipher *child = ctx->child; |
489 | SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child); | |
4e0958d1 HX |
490 | |
491 | if (unlikely(err == -EINPROGRESS)) | |
492 | goto out; | |
493 | ||
36b3875a | 494 | skcipher_request_set_sync_tfm(subreq, child); |
4e0958d1 HX |
495 | skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP, |
496 | NULL, NULL); | |
497 | skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, | |
498 | req->iv); | |
499 | ||
500 | err = crypto_skcipher_encrypt(subreq); | |
501 | skcipher_request_zero(subreq); | |
502 | ||
503 | req->base.complete = rctx->complete; | |
504 | ||
505 | out: | |
506 | cryptd_skcipher_complete(req, err); | |
507 | } | |
508 | ||
509 | static void cryptd_skcipher_decrypt(struct crypto_async_request *base, | |
510 | int err) | |
511 | { | |
512 | struct skcipher_request *req = skcipher_request_cast(base); | |
513 | struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); | |
514 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | |
515 | struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); | |
36b3875a KC |
516 | struct crypto_sync_skcipher *child = ctx->child; |
517 | SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, child); | |
4e0958d1 HX |
518 | |
519 | if (unlikely(err == -EINPROGRESS)) | |
520 | goto out; | |
521 | ||
36b3875a | 522 | skcipher_request_set_sync_tfm(subreq, child); |
4e0958d1 HX |
523 | skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP, |
524 | NULL, NULL); | |
525 | skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, | |
526 | req->iv); | |
527 | ||
528 | err = crypto_skcipher_decrypt(subreq); | |
529 | skcipher_request_zero(subreq); | |
530 | ||
531 | req->base.complete = rctx->complete; | |
532 | ||
533 | out: | |
534 | cryptd_skcipher_complete(req, err); | |
535 | } | |
536 | ||
537 | static int cryptd_skcipher_enqueue(struct skcipher_request *req, | |
538 | crypto_completion_t compl) | |
539 | { | |
540 | struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req); | |
541 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | |
542 | struct cryptd_queue *queue; | |
543 | ||
544 | queue = cryptd_get_queue(crypto_skcipher_tfm(tfm)); | |
545 | rctx->complete = req->base.complete; | |
546 | req->base.complete = compl; | |
547 | ||
548 | return cryptd_enqueue_request(queue, &req->base); | |
549 | } | |
550 | ||
551 | static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req) | |
552 | { | |
553 | return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt); | |
554 | } | |
555 | ||
556 | static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req) | |
557 | { | |
558 | return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt); | |
559 | } | |
560 | ||
561 | static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm) | |
562 | { | |
563 | struct skcipher_instance *inst = skcipher_alg_instance(tfm); | |
564 | struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst); | |
565 | struct crypto_skcipher_spawn *spawn = &ictx->spawn; | |
566 | struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); | |
567 | struct crypto_skcipher *cipher; | |
568 | ||
569 | cipher = crypto_spawn_skcipher(spawn); | |
570 | if (IS_ERR(cipher)) | |
571 | return PTR_ERR(cipher); | |
572 | ||
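	/* The spawn was grabbed with CRYPTO_ALG_ASYNC in the mask (see
	 * cryptd_create_skcipher()), so the child is guaranteed to be a
	 * synchronous skcipher and the cast below is safe.
	 */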
36b3875a | 573 | ctx->child = (struct crypto_sync_skcipher *)cipher; |
4e0958d1 HX |
574 | crypto_skcipher_set_reqsize( |
575 | tfm, sizeof(struct cryptd_skcipher_request_ctx)); | |
576 | return 0; | |
577 | } | |
578 | ||
579 | static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm) | |
580 | { | |
581 | struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm); | |
582 | ||
36b3875a | 583 | crypto_free_sync_skcipher(ctx->child); |
4e0958d1 HX |
584 | } |
585 | ||
586 | static void cryptd_skcipher_free(struct skcipher_instance *inst) | |
587 | { | |
588 | struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst); | |
589 | ||
590 | crypto_drop_skcipher(&ctx->spawn); | |
591 | } | |
592 | ||
593 | static int cryptd_create_skcipher(struct crypto_template *tmpl, | |
594 | struct rtattr **tb, | |
595 | struct cryptd_queue *queue) | |
596 | { | |
597 | struct skcipherd_instance_ctx *ctx; | |
598 | struct skcipher_instance *inst; | |
599 | struct skcipher_alg *alg; | |
600 | const char *name; | |
601 | u32 type; | |
602 | u32 mask; | |
603 | int err; | |
604 | ||
605 | type = 0; | |
606 | mask = CRYPTO_ALG_ASYNC; | |
607 | ||
608 | cryptd_check_internal(tb, &type, &mask); | |
609 | ||
610 | name = crypto_attr_alg_name(tb[1]); | |
611 | if (IS_ERR(name)) | |
612 | return PTR_ERR(name); | |
613 | ||
614 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); | |
615 | if (!inst) | |
616 | return -ENOMEM; | |
617 | ||
618 | ctx = skcipher_instance_ctx(inst); | |
619 | ctx->queue = queue; | |
620 | ||
621 | crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst)); | |
622 | err = crypto_grab_skcipher(&ctx->spawn, name, type, mask); | |
623 | if (err) | |
624 | goto out_free_inst; | |
625 | ||
626 | alg = crypto_spawn_skcipher_alg(&ctx->spawn); | |
627 | err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base); | |
628 | if (err) | |
629 | goto out_drop_skcipher; | |
630 | ||
631 | inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC | | |
632 | (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); | |
633 | ||
634 | inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg); | |
635 | inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg); | |
636 | inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg); | |
637 | inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg); | |
638 | ||
639 | inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx); | |
640 | ||
641 | inst->alg.init = cryptd_skcipher_init_tfm; | |
642 | inst->alg.exit = cryptd_skcipher_exit_tfm; | |
643 | ||
644 | inst->alg.setkey = cryptd_skcipher_setkey; | |
645 | inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue; | |
646 | inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue; | |
647 | ||
648 | inst->free = cryptd_skcipher_free; | |
649 | ||
650 | err = skcipher_register_instance(tmpl, inst); | |
651 | if (err) { | |
652 | out_drop_skcipher: | |
653 | crypto_drop_skcipher(&ctx->spawn); | |
654 | out_free_inst: | |
655 | kfree(inst); | |
656 | } | |
657 | return err; | |
658 | } | |
659 | ||
b8a28251 LH |
660 | static int cryptd_hash_init_tfm(struct crypto_tfm *tfm) |
661 | { | |
662 | struct crypto_instance *inst = crypto_tfm_alg_instance(tfm); | |
46309d89 HX |
663 | struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst); |
664 | struct crypto_shash_spawn *spawn = &ictx->spawn; | |
b8a28251 | 665 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); |
46309d89 | 666 | struct crypto_shash *hash; |
b8a28251 | 667 | |
46309d89 HX |
668 | hash = crypto_spawn_shash(spawn); |
669 | if (IS_ERR(hash)) | |
670 | return PTR_ERR(hash); | |
b8a28251 | 671 | |
46309d89 | 672 | ctx->child = hash; |
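	/* Reserve room in the ahash request for our request context plus
	 * the child shash's descriptor state, so every request carries its
	 * own shash_desc (see cryptd_hash_request_ctx above).
	 */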
0d6669e2 HX |
673 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), |
674 | sizeof(struct cryptd_hash_request_ctx) + | |
675 | crypto_shash_descsize(hash)); | |
b8a28251 LH |
676 | return 0; |
677 | } | |
678 | ||
679 | static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm) | |
680 | { | |
681 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm); | |
b8a28251 | 682 | |
46309d89 | 683 | crypto_free_shash(ctx->child); |
b8a28251 LH |
684 | } |
685 | ||
686 | static int cryptd_hash_setkey(struct crypto_ahash *parent, | |
687 | const u8 *key, unsigned int keylen) | |
688 | { | |
689 | struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent); | |
46309d89 | 690 | struct crypto_shash *child = ctx->child; |
b8a28251 LH |
691 | int err; |
692 | ||
46309d89 HX |
693 | crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK); |
694 | crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) & | |
695 | CRYPTO_TFM_REQ_MASK); | |
696 | err = crypto_shash_setkey(child, key, keylen); | |
697 | crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) & | |
698 | CRYPTO_TFM_RES_MASK); | |
b8a28251 LH |
699 | return err; |
700 | } | |
701 | ||
702 | static int cryptd_hash_enqueue(struct ahash_request *req, | |
3e3dc25f | 703 | crypto_completion_t compl) |
b8a28251 LH |
704 | { |
705 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | |
706 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
254eff77 YH |
707 | struct cryptd_queue *queue = |
708 | cryptd_get_queue(crypto_ahash_tfm(tfm)); | |
b8a28251 LH |
709 | |
710 | rctx->complete = req->base.complete; | |
3e3dc25f | 711 | req->base.complete = compl; |
b8a28251 | 712 | |
254eff77 | 713 | return cryptd_enqueue_request(queue, &req->base); |
b8a28251 LH |
714 | } |
715 | ||
81760ea6 HX |
716 | static void cryptd_hash_complete(struct ahash_request *req, int err) |
717 | { | |
718 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
719 | struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm); | |
720 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | |
721 | int refcnt = atomic_read(&ctx->refcnt); | |
722 | ||
723 | local_bh_disable(); | |
724 | rctx->complete(&req->base, err); | |
725 | local_bh_enable(); | |
726 | ||
727 | if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt)) | |
728 | crypto_free_ahash(tfm); | |
729 | } | |
730 | ||
b8a28251 LH |
731 | static void cryptd_hash_init(struct crypto_async_request *req_async, int err) |
732 | { | |
46309d89 HX |
733 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); |
734 | struct crypto_shash *child = ctx->child; | |
735 | struct ahash_request *req = ahash_request_cast(req_async); | |
736 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | |
737 | struct shash_desc *desc = &rctx->desc; | |
b8a28251 LH |
738 | |
739 | if (unlikely(err == -EINPROGRESS)) | |
740 | goto out; | |
741 | ||
46309d89 HX |
742 | desc->tfm = child; |
743 | desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; | |
b8a28251 | 744 | |
46309d89 | 745 | err = crypto_shash_init(desc); |
b8a28251 LH |
746 | |
747 | req->base.complete = rctx->complete; | |
748 | ||
749 | out: | |
81760ea6 | 750 | cryptd_hash_complete(req, err); |
b8a28251 LH |
751 | } |
752 | ||
753 | static int cryptd_hash_init_enqueue(struct ahash_request *req) | |
754 | { | |
755 | return cryptd_hash_enqueue(req, cryptd_hash_init); | |
756 | } | |
757 | ||
758 | static void cryptd_hash_update(struct crypto_async_request *req_async, int err) | |
759 | { | |
46309d89 | 760 | struct ahash_request *req = ahash_request_cast(req_async); |
b8a28251 | 761 | struct cryptd_hash_request_ctx *rctx; |
b8a28251 LH |
762 | |
763 | rctx = ahash_request_ctx(req); | |
764 | ||
765 | if (unlikely(err == -EINPROGRESS)) | |
766 | goto out; | |
767 | ||
46309d89 | 768 | err = shash_ahash_update(req, &rctx->desc); |
b8a28251 LH |
769 | |
770 | req->base.complete = rctx->complete; | |
771 | ||
772 | out: | |
81760ea6 | 773 | cryptd_hash_complete(req, err); |
b8a28251 LH |
774 | } |
775 | ||
776 | static int cryptd_hash_update_enqueue(struct ahash_request *req) | |
777 | { | |
778 | return cryptd_hash_enqueue(req, cryptd_hash_update); | |
779 | } | |
780 | ||
781 | static void cryptd_hash_final(struct crypto_async_request *req_async, int err) | |
782 | { | |
46309d89 HX |
783 | struct ahash_request *req = ahash_request_cast(req_async); |
784 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | |
b8a28251 LH |
785 | |
786 | if (unlikely(err == -EINPROGRESS)) | |
787 | goto out; | |
788 | ||
46309d89 | 789 | err = crypto_shash_final(&rctx->desc, req->result); |
b8a28251 LH |
790 | |
791 | req->base.complete = rctx->complete; | |
792 | ||
793 | out: | |
81760ea6 | 794 | cryptd_hash_complete(req, err); |
b8a28251 LH |
795 | } |
796 | ||
797 | static int cryptd_hash_final_enqueue(struct ahash_request *req) | |
798 | { | |
799 | return cryptd_hash_enqueue(req, cryptd_hash_final); | |
800 | } | |
801 | ||
6fba00d1 HX |
802 | static void cryptd_hash_finup(struct crypto_async_request *req_async, int err) |
803 | { | |
804 | struct ahash_request *req = ahash_request_cast(req_async); | |
805 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | |
806 | ||
807 | if (unlikely(err == -EINPROGRESS)) | |
808 | goto out; | |
809 | ||
810 | err = shash_ahash_finup(req, &rctx->desc); | |
811 | ||
812 | req->base.complete = rctx->complete; | |
813 | ||
814 | out: | |
81760ea6 | 815 | cryptd_hash_complete(req, err); |
6fba00d1 HX |
816 | } |
817 | ||
818 | static int cryptd_hash_finup_enqueue(struct ahash_request *req) | |
819 | { | |
820 | return cryptd_hash_enqueue(req, cryptd_hash_finup); | |
821 | } | |
822 | ||
b8a28251 LH |
823 | static void cryptd_hash_digest(struct crypto_async_request *req_async, int err) |
824 | { | |
46309d89 HX |
825 | struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm); |
826 | struct crypto_shash *child = ctx->child; | |
827 | struct ahash_request *req = ahash_request_cast(req_async); | |
828 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | |
829 | struct shash_desc *desc = &rctx->desc; | |
b8a28251 LH |
830 | |
831 | if (unlikely(err == -EINPROGRESS)) | |
832 | goto out; | |
833 | ||
46309d89 HX |
834 | desc->tfm = child; |
835 | desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; | |
b8a28251 | 836 | |
46309d89 | 837 | err = shash_ahash_digest(req, desc); |
b8a28251 LH |
838 | |
839 | req->base.complete = rctx->complete; | |
840 | ||
841 | out: | |
81760ea6 | 842 | cryptd_hash_complete(req, err); |
b8a28251 LH |
843 | } |
844 | ||
845 | static int cryptd_hash_digest_enqueue(struct ahash_request *req) | |
846 | { | |
847 | return cryptd_hash_enqueue(req, cryptd_hash_digest); | |
848 | } | |
849 | ||
6fba00d1 HX |
850 | static int cryptd_hash_export(struct ahash_request *req, void *out) |
851 | { | |
852 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | |
853 | ||
854 | return crypto_shash_export(&rctx->desc, out); | |
855 | } | |
856 | ||
857 | static int cryptd_hash_import(struct ahash_request *req, const void *in) | |
858 | { | |
0bd22235 AB |
859 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); |
860 | struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm); | |
861 | struct shash_desc *desc = cryptd_shash_desc(req); | |
862 | ||
863 | desc->tfm = ctx->child; | |
864 | desc->flags = req->base.flags; | |
6fba00d1 | 865 | |
0bd22235 | 866 | return crypto_shash_import(desc, in); |
6fba00d1 HX |
867 | } |
868 | ||
9cd899a3 HX |
869 | static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, |
870 | struct cryptd_queue *queue) | |
b8a28251 | 871 | { |
46309d89 | 872 | struct hashd_instance_ctx *ctx; |
0b535adf | 873 | struct ahash_instance *inst; |
46309d89 | 874 | struct shash_alg *salg; |
b8a28251 | 875 | struct crypto_alg *alg; |
466a7b9e SM |
876 | u32 type = 0; |
877 | u32 mask = 0; | |
46309d89 | 878 | int err; |
b8a28251 | 879 | |
466a7b9e SM |
880 | cryptd_check_internal(tb, &type, &mask); |
881 | ||
882 | salg = shash_attr_alg(tb[1], type, mask); | |
46309d89 | 883 | if (IS_ERR(salg)) |
9cd899a3 | 884 | return PTR_ERR(salg); |
b8a28251 | 885 | |
46309d89 | 886 | alg = &salg->base; |
0b535adf HX |
887 | inst = cryptd_alloc_instance(alg, ahash_instance_headroom(), |
888 | sizeof(*ctx)); | |
05ed8758 | 889 | err = PTR_ERR(inst); |
b8a28251 LH |
890 | if (IS_ERR(inst)) |
891 | goto out_put_alg; | |
892 | ||
0b535adf | 893 | ctx = ahash_instance_ctx(inst); |
46309d89 HX |
894 | ctx->queue = queue; |
895 | ||
0b535adf HX |
896 | err = crypto_init_shash_spawn(&ctx->spawn, salg, |
897 | ahash_crypto_instance(inst)); | |
46309d89 HX |
898 | if (err) |
899 | goto out_free_inst; | |
900 | ||
a208fa8f EB |
901 | inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC | |
902 | (alg->cra_flags & (CRYPTO_ALG_INTERNAL | | |
903 | CRYPTO_ALG_OPTIONAL_KEY)); | |
b8a28251 | 904 | |
0b535adf | 905 | inst->alg.halg.digestsize = salg->digestsize; |
1a078340 | 906 | inst->alg.halg.statesize = salg->statesize; |
0b535adf | 907 | inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx); |
b8a28251 | 908 | |
0b535adf HX |
909 | inst->alg.halg.base.cra_init = cryptd_hash_init_tfm; |
910 | inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm; | |
b8a28251 | 911 | |
0b535adf HX |
912 | inst->alg.init = cryptd_hash_init_enqueue; |
913 | inst->alg.update = cryptd_hash_update_enqueue; | |
914 | inst->alg.final = cryptd_hash_final_enqueue; | |
6fba00d1 HX |
915 | inst->alg.finup = cryptd_hash_finup_enqueue; |
916 | inst->alg.export = cryptd_hash_export; | |
917 | inst->alg.import = cryptd_hash_import; | |
841a3ff3 EB |
918 | if (crypto_shash_alg_has_setkey(salg)) |
919 | inst->alg.setkey = cryptd_hash_setkey; | |
0b535adf | 920 | inst->alg.digest = cryptd_hash_digest_enqueue; |
b8a28251 | 921 | |
0b535adf | 922 | err = ahash_register_instance(tmpl, inst); |
9cd899a3 HX |
923 | if (err) { |
924 | crypto_drop_shash(&ctx->spawn); | |
925 | out_free_inst: | |
926 | kfree(inst); | |
927 | } | |
928 | ||
b8a28251 LH |
929 | out_put_alg: |
930 | crypto_mod_put(alg); | |
9cd899a3 | 931 | return err; |
b8a28251 LH |
932 | } |
933 | ||
92b9876b HX |
934 | static int cryptd_aead_setkey(struct crypto_aead *parent, |
935 | const u8 *key, unsigned int keylen) | |
936 | { | |
937 | struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent); | |
938 | struct crypto_aead *child = ctx->child; | |
939 | ||
940 | return crypto_aead_setkey(child, key, keylen); | |
941 | } | |
942 | ||
943 | static int cryptd_aead_setauthsize(struct crypto_aead *parent, | |
944 | unsigned int authsize) | |
945 | { | |
946 | struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent); | |
947 | struct crypto_aead *child = ctx->child; | |
948 | ||
949 | return crypto_aead_setauthsize(child, authsize); | |
950 | } | |
951 | ||
298c926c AH |
952 | static void cryptd_aead_crypt(struct aead_request *req, |
953 | struct crypto_aead *child, | |
954 | int err, | |
955 | int (*crypt)(struct aead_request *req)) | |
956 | { | |
957 | struct cryptd_aead_request_ctx *rctx; | |
81760ea6 | 958 | struct cryptd_aead_ctx *ctx; |
ec9f2006 | 959 | crypto_completion_t compl; |
81760ea6 HX |
960 | struct crypto_aead *tfm; |
961 | int refcnt; | |
ec9f2006 | 962 | |
298c926c | 963 | rctx = aead_request_ctx(req); |
ec9f2006 | 964 | compl = rctx->complete; |
298c926c | 965 | |
31bd44e7 HX |
966 | tfm = crypto_aead_reqtfm(req); |
967 | ||
298c926c AH |
968 | if (unlikely(err == -EINPROGRESS)) |
969 | goto out; | |
970 | aead_request_set_tfm(req, child); | |
971 | err = crypt(req); |
81760ea6 | 972 | |
298c926c | 973 | out: |
81760ea6 HX |
974 | ctx = crypto_aead_ctx(tfm); |
975 | refcnt = atomic_read(&ctx->refcnt); | |
976 | ||
298c926c | 977 | local_bh_disable(); |
ec9f2006 | 978 | compl(&req->base, err); |
298c926c | 979 | local_bh_enable(); |
81760ea6 HX |
980 | |
981 | if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt)) | |
982 | crypto_free_aead(tfm); | |
298c926c AH |
983 | } |
984 | ||
985 | static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err) | |
986 | { | |
987 | struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm); | |
988 | struct crypto_aead *child = ctx->child; | |
989 | struct aead_request *req; | |
990 | ||
991 | req = container_of(areq, struct aead_request, base); | |
ba3749a7 | 992 | cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt); |
298c926c AH |
993 | } |
994 | ||
995 | static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err) | |
996 | { | |
997 | struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm); | |
998 | struct crypto_aead *child = ctx->child; | |
999 | struct aead_request *req; | |
1000 | ||
1001 | req = container_of(areq, struct aead_request, base); | |
ba3749a7 | 1002 | cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt); |
298c926c AH |
1003 | } |
1004 | ||
1005 | static int cryptd_aead_enqueue(struct aead_request *req, | |
3e3dc25f | 1006 | crypto_completion_t compl) |
298c926c AH |
1007 | { |
1008 | struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req); | |
1009 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | |
1010 | struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm)); | |
1011 | ||
1012 | rctx->complete = req->base.complete; | |
3e3dc25f | 1013 | req->base.complete = compl; |
298c926c AH |
1014 | return cryptd_enqueue_request(queue, &req->base); |
1015 | } | |
1016 | ||
1017 | static int cryptd_aead_encrypt_enqueue(struct aead_request *req) | |
1018 | { | |
1019 | return cryptd_aead_enqueue(req, cryptd_aead_encrypt); |
1020 | } | |
1021 | ||
1022 | static int cryptd_aead_decrypt_enqueue(struct aead_request *req) | |
1023 | { | |
1024 | return cryptd_aead_enqueue(req, cryptd_aead_decrypt); |
1025 | } | |
1026 | ||
f614e546 | 1027 | static int cryptd_aead_init_tfm(struct crypto_aead *tfm) |
298c926c | 1028 | { |
f614e546 HX |
1029 | struct aead_instance *inst = aead_alg_instance(tfm); |
1030 | struct aead_instance_ctx *ictx = aead_instance_ctx(inst); | |
298c926c | 1031 | struct crypto_aead_spawn *spawn = &ictx->aead_spawn; |
f614e546 | 1032 | struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm); |
298c926c AH |
1033 | struct crypto_aead *cipher; |
1034 | ||
1035 | cipher = crypto_spawn_aead(spawn); | |
1036 | if (IS_ERR(cipher)) | |
1037 | return PTR_ERR(cipher); | |
1038 | ||
298c926c | 1039 | ctx->child = cipher; |
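	/* The same aead_request is later redirected to the child (see
	 * cryptd_aead_crypt()), so the request context must be large enough
	 * for whichever of the two users needs more space.
	 */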
ec9f2006 HX |
1040 | crypto_aead_set_reqsize( |
1041 | tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx), | |
1042 | crypto_aead_reqsize(cipher))); | |
298c926c AH |
1043 | return 0; |
1044 | } | |
1045 | ||
f614e546 | 1046 | static void cryptd_aead_exit_tfm(struct crypto_aead *tfm) |
298c926c | 1047 | { |
f614e546 | 1048 | struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm); |
298c926c AH |
1049 | crypto_free_aead(ctx->child); |
1050 | } | |
1051 | ||
1052 | static int cryptd_create_aead(struct crypto_template *tmpl, | |
1053 | struct rtattr **tb, | |
1054 | struct cryptd_queue *queue) | |
1055 | { | |
1056 | struct aead_instance_ctx *ctx; | |
f614e546 HX |
1057 | struct aead_instance *inst; |
1058 | struct aead_alg *alg; | |
9b8c456e HX |
1059 | const char *name; |
1060 | u32 type = 0; | |
ec9f2006 | 1061 | u32 mask = CRYPTO_ALG_ASYNC; |
298c926c AH |
1062 | int err; |
1063 | ||
466a7b9e SM |
1064 | cryptd_check_internal(tb, &type, &mask); |
1065 | ||
9b8c456e HX |
1066 | name = crypto_attr_alg_name(tb[1]); |
1067 | if (IS_ERR(name)) | |
1068 | return PTR_ERR(name); | |
298c926c | 1069 | |
9b8c456e HX |
1070 | inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); |
1071 | if (!inst) | |
1072 | return -ENOMEM; | |
298c926c | 1073 | |
f614e546 | 1074 | ctx = aead_instance_ctx(inst); |
298c926c AH |
1075 | ctx->queue = queue; |
1076 | ||
f614e546 | 1077 | crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst)); |
9b8c456e | 1078 | err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask); |
298c926c AH |
1079 | if (err) |
1080 | goto out_free_inst; | |
1081 | ||
f614e546 HX |
1082 | alg = crypto_spawn_aead_alg(&ctx->aead_spawn); |
1083 | err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base); | |
9b8c456e HX |
1084 | if (err) |
1085 | goto out_drop_aead; | |
1086 | ||
f614e546 | 1087 | inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC | |
5e4b8c1f | 1088 | (alg->base.cra_flags & CRYPTO_ALG_INTERNAL); |
f614e546 | 1089 | inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx); |
298c926c | 1090 | |
f614e546 HX |
1091 | inst->alg.ivsize = crypto_aead_alg_ivsize(alg); |
1092 | inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg); | |
1093 | ||
1094 | inst->alg.init = cryptd_aead_init_tfm; | |
1095 | inst->alg.exit = cryptd_aead_exit_tfm; | |
1096 | inst->alg.setkey = cryptd_aead_setkey; | |
1097 | inst->alg.setauthsize = cryptd_aead_setauthsize; | |
1098 | inst->alg.encrypt = cryptd_aead_encrypt_enqueue; | |
1099 | inst->alg.decrypt = cryptd_aead_decrypt_enqueue; | |
1100 | ||
1101 | err = aead_register_instance(tmpl, inst); | |
298c926c | 1102 | if (err) { |
9b8c456e HX |
1103 | out_drop_aead: |
1104 | crypto_drop_aead(&ctx->aead_spawn); | |
298c926c AH |
1105 | out_free_inst: |
1106 | kfree(inst); | |
1107 | } | |
298c926c AH |
1108 | return err; |
1109 | } | |
1110 | ||
254eff77 | 1111 | static struct cryptd_queue queue; |
124b53d0 | 1112 | |
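/*
 * Template entry point: "cryptd(xxx)" instantiation dispatches on the
 * type of the wrapped algorithm.  Legacy blkcipher algorithms keep the
 * old ablkcipher path, other blkcipher-type algorithms go through the
 * skcipher code, and hashes and AEADs get their own instance types.
 */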
9cd899a3 | 1113 | static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb) |
124b53d0 HX |
1114 | { |
1115 | struct crypto_attr_type *algt; | |
1116 | ||
1117 | algt = crypto_get_attr_type(tb); | |
1118 | if (IS_ERR(algt)) | |
9cd899a3 | 1119 | return PTR_ERR(algt); |
124b53d0 HX |
1120 | |
1121 | switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { | |
1122 | case CRYPTO_ALG_TYPE_BLKCIPHER: | |
4e0958d1 HX |
1123 | if ((algt->type & CRYPTO_ALG_TYPE_MASK) == |
1124 | CRYPTO_ALG_TYPE_BLKCIPHER) | |
1125 | return cryptd_create_blkcipher(tmpl, tb, &queue); | |
1126 | ||
1127 | return cryptd_create_skcipher(tmpl, tb, &queue); | |
b8a28251 | 1128 | case CRYPTO_ALG_TYPE_DIGEST: |
9cd899a3 | 1129 | return cryptd_create_hash(tmpl, tb, &queue); |
298c926c AH |
1130 | case CRYPTO_ALG_TYPE_AEAD: |
1131 | return cryptd_create_aead(tmpl, tb, &queue); | |
124b53d0 HX |
1132 | } |
1133 | ||
9cd899a3 | 1134 | return -EINVAL; |
124b53d0 HX |
1135 | } |
1136 | ||
1137 | static void cryptd_free(struct crypto_instance *inst) | |
1138 | { | |
1139 | struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst); | |
0b535adf | 1140 | struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst); |
298c926c | 1141 | struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst); |
0b535adf HX |
1142 | |
1143 | switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) { | |
1144 | case CRYPTO_ALG_TYPE_AHASH: | |
1145 | crypto_drop_shash(&hctx->spawn); | |
1146 | kfree(ahash_instance(inst)); | |
1147 | return; | |
298c926c | 1148 | case CRYPTO_ALG_TYPE_AEAD: |
f614e546 HX |
1149 | crypto_drop_aead(&aead_ctx->aead_spawn); |
1150 | kfree(aead_instance(inst)); | |
298c926c AH |
1151 | return; |
1152 | default: | |
1153 | crypto_drop_spawn(&ctx->spawn); | |
1154 | kfree(inst); | |
0b535adf | 1155 | } |
124b53d0 HX |
1156 | } |
1157 | ||
1158 | static struct crypto_template cryptd_tmpl = { | |
1159 | .name = "cryptd", | |
9cd899a3 | 1160 | .create = cryptd_create, |
124b53d0 HX |
1161 | .free = cryptd_free, |
1162 | .module = THIS_MODULE, | |
1163 | }; | |
1164 | ||
1cac2cbc YH |
1165 | struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name, |
1166 | u32 type, u32 mask) | |
1167 | { | |
1168 | char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; | |
81760ea6 | 1169 | struct cryptd_blkcipher_ctx *ctx; |
505fd21d | 1170 | struct crypto_tfm *tfm; |
1cac2cbc YH |
1171 | |
1172 | if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, | |
1173 | "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME) | |
1174 | return ERR_PTR(-EINVAL); | |
c012a79d | 1175 | type = crypto_skcipher_type(type); |
505fd21d YH |
1176 | mask &= ~CRYPTO_ALG_TYPE_MASK; |
1177 | mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK); | |
1178 | tfm = crypto_alloc_base(cryptd_alg_name, type, mask); | |
1cac2cbc YH |
1179 | if (IS_ERR(tfm)) |
1180 | return ERR_CAST(tfm); | |
505fd21d YH |
1181 | if (tfm->__crt_alg->cra_module != THIS_MODULE) { |
1182 | crypto_free_tfm(tfm); | |
1cac2cbc YH |
1183 | return ERR_PTR(-EINVAL); |
1184 | } | |
1185 | ||
81760ea6 HX |
1186 | ctx = crypto_tfm_ctx(tfm); |
1187 | atomic_set(&ctx->refcnt, 1); | |
1188 | ||
505fd21d | 1189 | return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm)); |
1cac2cbc YH |
1190 | } |
1191 | EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher); | |
1192 | ||
1193 | struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm) | |
1194 | { | |
1195 | struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base); | |
1196 | return ctx->child; | |
1197 | } | |
1198 | EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child); | |
1199 | ||
81760ea6 HX |
1200 | bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm) |
1201 | { | |
1202 | struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base); | |
1203 | ||
1204 | return atomic_read(&ctx->refcnt) - 1; | |
1205 | } | |
1206 | EXPORT_SYMBOL_GPL(cryptd_ablkcipher_queued); | |
1207 | ||
1cac2cbc YH |
1208 | void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm) |
1209 | { | |
81760ea6 HX |
1210 | struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base); |
1211 | ||
1212 | if (atomic_dec_and_test(&ctx->refcnt)) | |
1213 | crypto_free_ablkcipher(&tfm->base); | |
1cac2cbc YH |
1214 | } |
1215 | EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher); | |
1216 | ||
4e0958d1 HX |
1217 | struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name, |
1218 | u32 type, u32 mask) | |
1219 | { | |
1220 | char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; | |
1221 | struct cryptd_skcipher_ctx *ctx; | |
1222 | struct crypto_skcipher *tfm; | |
1223 | ||
1224 | if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, | |
1225 | "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME) | |
1226 | return ERR_PTR(-EINVAL); | |
1227 | ||
1228 | tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask); | |
1229 | if (IS_ERR(tfm)) | |
1230 | return ERR_CAST(tfm); | |
1231 | ||
1232 | if (tfm->base.__crt_alg->cra_module != THIS_MODULE) { | |
1233 | crypto_free_skcipher(tfm); | |
1234 | return ERR_PTR(-EINVAL); | |
1235 | } | |
1236 | ||
1237 | ctx = crypto_skcipher_ctx(tfm); | |
1238 | atomic_set(&ctx->refcnt, 1); | |
1239 | ||
1240 | return container_of(tfm, struct cryptd_skcipher, base); | |
1241 | } | |
1242 | EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher); | |
1243 | ||
1244 | struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm) | |
1245 | { | |
1246 | struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base); | |
1247 | ||
36b3875a | 1248 | return &ctx->child->base; |
4e0958d1 HX |
1249 | } |
1250 | EXPORT_SYMBOL_GPL(cryptd_skcipher_child); | |
1251 | ||
1252 | bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm) | |
1253 | { | |
1254 | struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base); | |
1255 | ||
1256 | return atomic_read(&ctx->refcnt) - 1; | |
1257 | } | |
1258 | EXPORT_SYMBOL_GPL(cryptd_skcipher_queued); | |
1259 | ||
1260 | void cryptd_free_skcipher(struct cryptd_skcipher *tfm) | |
1261 | { | |
1262 | struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base); | |
1263 | ||
1264 | if (atomic_dec_and_test(&ctx->refcnt)) | |
1265 | crypto_free_skcipher(&tfm->base); | |
1266 | } | |
1267 | EXPORT_SYMBOL_GPL(cryptd_free_skcipher); | |
1268 | ||
ace13663 YH |
1269 | struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name, |
1270 | u32 type, u32 mask) | |
1271 | { | |
1272 | char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; | |
81760ea6 | 1273 | struct cryptd_hash_ctx *ctx; |
ace13663 YH |
1274 | struct crypto_ahash *tfm; |
1275 | ||
1276 | if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, | |
1277 | "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME) | |
1278 | return ERR_PTR(-EINVAL); | |
1279 | tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask); | |
1280 | if (IS_ERR(tfm)) | |
1281 | return ERR_CAST(tfm); | |
1282 | if (tfm->base.__crt_alg->cra_module != THIS_MODULE) { | |
1283 | crypto_free_ahash(tfm); | |
1284 | return ERR_PTR(-EINVAL); | |
1285 | } | |
1286 | ||
81760ea6 HX |
1287 | ctx = crypto_ahash_ctx(tfm); |
1288 | atomic_set(&ctx->refcnt, 1); | |
1289 | ||
ace13663 YH |
1290 | return __cryptd_ahash_cast(tfm); |
1291 | } | |
1292 | EXPORT_SYMBOL_GPL(cryptd_alloc_ahash); | |
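/*
 * Usage sketch (hypothetical caller, not part of this file): a driver
 * that owns a synchronous, internal-only hash implementation can wrap
 * it in cryptd and dispatch to the daemon when it cannot run the child
 * directly.  "__driver-sha1-demo" and the surrounding variables are
 * made up for illustration; only the cryptd_*() helpers and the generic
 * crypto calls are real APIs.
 *
 *	struct cryptd_ahash *cryptd_tfm;
 *	int err;
 *
 *	cryptd_tfm = cryptd_alloc_ahash("__driver-sha1-demo",
 *					CRYPTO_ALG_INTERNAL,
 *					CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(cryptd_tfm))
 *		return PTR_ERR(cryptd_tfm);
 *
 *	(async path: hand the request to the cryptd instance)
 *	ahash_request_set_tfm(req, &cryptd_tfm->base);
 *	err = crypto_ahash_digest(req);
 *
 *	(sync fast path: cryptd_ahash_child() and cryptd_shash_desc()
 *	 expose the child shash so it can be driven directly)
 *
 *	cryptd_free_ahash(cryptd_tfm);
 */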
1293 | ||
1294 | struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm) | |
1295 | { | |
1296 | struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base); | |
1297 | ||
1298 | return ctx->child; | |
1299 | } | |
1300 | EXPORT_SYMBOL_GPL(cryptd_ahash_child); | |
1301 | ||
0e1227d3 YH |
1302 | struct shash_desc *cryptd_shash_desc(struct ahash_request *req) |
1303 | { | |
1304 | struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); | |
1305 | return &rctx->desc; | |
1306 | } | |
1307 | EXPORT_SYMBOL_GPL(cryptd_shash_desc); | |
1308 | ||
81760ea6 HX |
1309 | bool cryptd_ahash_queued(struct cryptd_ahash *tfm) |
1310 | { | |
1311 | struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base); | |
1312 | ||
1313 | return atomic_read(&ctx->refcnt) - 1; | |
1314 | } | |
1315 | EXPORT_SYMBOL_GPL(cryptd_ahash_queued); | |
1316 | ||
ace13663 YH |
1317 | void cryptd_free_ahash(struct cryptd_ahash *tfm) |
1318 | { | |
81760ea6 HX |
1319 | struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base); |
1320 | ||
1321 | if (atomic_dec_and_test(&ctx->refcnt)) | |
1322 | crypto_free_ahash(&tfm->base); | |
ace13663 YH |
1323 | } |
1324 | EXPORT_SYMBOL_GPL(cryptd_free_ahash); | |
1325 | ||
298c926c AH |
1326 | struct cryptd_aead *cryptd_alloc_aead(const char *alg_name, |
1327 | u32 type, u32 mask) | |
1328 | { | |
1329 | char cryptd_alg_name[CRYPTO_MAX_ALG_NAME]; | |
81760ea6 | 1330 | struct cryptd_aead_ctx *ctx; |
298c926c AH |
1331 | struct crypto_aead *tfm; |
1332 | ||
1333 | if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME, | |
1334 | "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME) | |
1335 | return ERR_PTR(-EINVAL); | |
1336 | tfm = crypto_alloc_aead(cryptd_alg_name, type, mask); | |
1337 | if (IS_ERR(tfm)) | |
1338 | return ERR_CAST(tfm); | |
1339 | if (tfm->base.__crt_alg->cra_module != THIS_MODULE) { | |
1340 | crypto_free_aead(tfm); | |
1341 | return ERR_PTR(-EINVAL); | |
1342 | } | |
81760ea6 HX |
1343 | |
1344 | ctx = crypto_aead_ctx(tfm); | |
1345 | atomic_set(&ctx->refcnt, 1); | |
1346 | ||
298c926c AH |
1347 | return __cryptd_aead_cast(tfm); |
1348 | } | |
1349 | EXPORT_SYMBOL_GPL(cryptd_alloc_aead); | |
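/*
 * Usage sketch (hypothetical caller): AEAD wrapping follows the same
 * pattern as the hash helpers above; "__driver-gcm-aes-demo" is a
 * made-up name for illustration.
 *
 *	struct cryptd_aead *ctfm;
 *
 *	ctfm = cryptd_alloc_aead("__driver-gcm-aes-demo",
 *				 CRYPTO_ALG_INTERNAL, CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(ctfm))
 *		return PTR_ERR(ctfm);
 *
 *	(async path)
 *	aead_request_set_tfm(req, &ctfm->base);
 *	err = crypto_aead_encrypt(req);
 *
 * cryptd_aead_child(ctfm) gives access to the wrapped aead when it is
 * safe to run it synchronously, and cryptd_free_aead() drops the handle
 * when the wrapper is torn down.
 */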
1350 | ||
1351 | struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm) | |
1352 | { | |
1353 | struct cryptd_aead_ctx *ctx; | |
1354 | ctx = crypto_aead_ctx(&tfm->base); | |
1355 | return ctx->child; | |
1356 | } | |
1357 | EXPORT_SYMBOL_GPL(cryptd_aead_child); | |
1358 | ||
81760ea6 HX |
1359 | bool cryptd_aead_queued(struct cryptd_aead *tfm) |
1360 | { | |
1361 | struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base); | |
1362 | ||
1363 | return atomic_read(&ctx->refcnt) - 1; | |
1364 | } | |
1365 | EXPORT_SYMBOL_GPL(cryptd_aead_queued); | |
1366 | ||
298c926c AH |
1367 | void cryptd_free_aead(struct cryptd_aead *tfm) |
1368 | { | |
81760ea6 HX |
1369 | struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base); |
1370 | ||
1371 | if (atomic_dec_and_test(&ctx->refcnt)) | |
1372 | crypto_free_aead(&tfm->base); | |
298c926c AH |
1373 | } |
1374 | EXPORT_SYMBOL_GPL(cryptd_free_aead); | |
1375 | ||
124b53d0 HX |
1376 | static int __init cryptd_init(void) |
1377 | { | |
1378 | int err; | |
1379 | ||
c3a53605 | 1380 | err = cryptd_init_queue(&queue, cryptd_max_cpu_qlen); |
124b53d0 HX |
1381 | if (err) |
1382 | return err; | |
1383 | ||
1384 | err = crypto_register_template(&cryptd_tmpl); | |
1385 | if (err) | |
254eff77 | 1386 | cryptd_fini_queue(&queue); |
124b53d0 HX |
1387 | |
1388 | return err; | |
1389 | } | |
1390 | ||
1391 | static void __exit cryptd_exit(void) | |
1392 | { | |
254eff77 | 1393 | cryptd_fini_queue(&queue); |
124b53d0 HX |
1394 | crypto_unregister_template(&cryptd_tmpl); |
1395 | } | |
1396 | ||
b2bac6ac | 1397 | subsys_initcall(cryptd_init); |
124b53d0 HX |
1398 | module_exit(cryptd_exit); |
1399 | ||
1400 | MODULE_LICENSE("GPL"); | |
1401 | MODULE_DESCRIPTION("Software async crypto daemon"); | |
4943ba16 | 1402 | MODULE_ALIAS_CRYPTO("cryptd"); |