/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define CRYPTD_MAX_CPU_QLEN 100

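/*
 * Each CPU owns one request queue and one work item; requests queued on a
 * CPU are processed by that CPU's worker on the kcrypto_wq workqueue.
 */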
struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
	struct crypto_shash_spawn spawn;
	struct cryptd_queue *queue;
};

struct cryptd_blkcipher_ctx {
	struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
	struct shash_desc desc;
};

static void cryptd_queue_worker(struct work_struct *work);

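/* Allocate the per-CPU queues and initialise each one's work item. */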
static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	return 0;
}

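/* Free the per-CPU queues; every queue must already be empty. */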
static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

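/*
 * Add a request to the current CPU's queue and kick that CPU's worker.
 * get_cpu() keeps us on one CPU so the request lands on the queue whose
 * worker we schedule.
 */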
static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	err = crypto_enqueue_request(&cpu_queue->queue, request);
	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
	put_cpu();

	return err;
}

/*
 * Called in workqueue context: perform one real unit of crypto work (via
 * req->complete) and reschedule itself if more work is queued.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/*
	 * Only handle one request at a time to avoid hogging the crypto
	 * workqueue.  preempt_disable/enable is used to prevent being
	 * preempted by cryptd_enqueue_request().
	 */
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

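/* Look up the cryptd queue stashed in a cryptd instance's context. */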
static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	return ictx->queue;
}

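/*
 * Pass a setkey request through to the child blkcipher, copying the
 * request flags down and the result flags back up.
 */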
static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
	struct crypto_blkcipher *child = ctx->child;
	int err;

	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
					  CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(child, key, keylen);
	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
					    CRYPTO_TFM_RES_MASK);
	return err;
}

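/*
 * Do the actual encryption or decryption in process context, then invoke
 * the caller's original completion callback with softirqs disabled,
 * matching the context completions normally run in.
 */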
static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct blkcipher_desc desc;

	rctx = ablkcipher_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->decrypt);
}

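/*
 * Defer a blkcipher request: save the caller's completion handler,
 * substitute the cryptd one and push the request onto the queue.
 */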
static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
				    crypto_completion_t complete)
{
	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = complete;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ablkcipher.reqsize =
		sizeof(struct cryptd_blkcipher_request_ctx);
	return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(ctx->child);
}

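/*
 * Allocate an instance with 'head' bytes of headroom before it and
 * 'tail' bytes of context after it, naming it cryptd(<base algorithm>).
 */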
static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				   unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

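/* Build the async cryptd(...) wrapper around a synchronous blkcipher. */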
static int cryptd_create_blkcipher(struct crypto_template *tmpl,
				   struct rtattr **tb,
				   struct cryptd_queue *queue)
{
	struct cryptd_instance_ctx *ctx;
	struct crypto_instance *inst;
	struct crypto_alg *alg;
	int err;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = crypto_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = &crypto_ablkcipher_type;

	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

	err = crypto_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_spawn(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

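/*
 * Grab the child shash and size the ahash request context so that it
 * can hold the shash descriptor as well.
 */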
static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_shash_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_shash *hash;

	hash = crypto_spawn_shash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct cryptd_hash_request_ctx) +
				 crypto_shash_descsize(hash));
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_shash *child = ctx->child;
	int err;

	crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
				      CRYPTO_TFM_REQ_MASK);
	err = crypto_shash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

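/* Defer an ahash request to the cryptd queue, as the blkcipher path does. */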
static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t complete)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	return cryptd_enqueue_request(queue, &req->base);
}

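/*
 * The handlers below run from the workqueue and drive the child shash
 * through the matching synchronous operation.
 */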
static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_shash_init(desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_update(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = crypto_shash_final(&rctx->desc, req->result);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	err = shash_ahash_finup(req, &rctx->desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_shash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct shash_desc *desc = &rctx->desc;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc->tfm = child;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = shash_ahash_digest(req, desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_shash_import(&rctx->desc, in);
}

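/* Build the async cryptd(...) wrapper around a synchronous shash. */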
static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			      struct cryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct shash_alg *salg;
	struct crypto_alg *alg;
	int err;

	salg = shash_attr_alg(tb[1], 0, 0);
	if (IS_ERR(salg))
		return PTR_ERR(salg);

	alg = &salg->base;
	inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
				     sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_shash_spawn(&ctx->spawn, salg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	inst->alg.halg.base.cra_flags = CRYPTO_ALG_ASYNC;

	inst->alg.halg.digestsize = salg->digestsize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.init = cryptd_hash_init_enqueue;
	inst->alg.update = cryptd_hash_update_enqueue;
	inst->alg.final = cryptd_hash_final_enqueue;
	inst->alg.finup = cryptd_hash_finup_enqueue;
	inst->alg.export = cryptd_hash_export;
	inst->alg.import = cryptd_hash_import;
	inst->alg.setkey = cryptd_hash_setkey;
	inst->alg.digest = cryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_shash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static struct cryptd_queue queue;

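/* Template entry point: dispatch to the blkcipher or hash constructor. */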
static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		return cryptd_create_blkcipher(tmpl, tb, &queue);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_create_hash(tmpl, tb, &queue);
	}

	return -EINVAL;
}

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_shash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	}

	crypto_drop_spawn(&ctx->spawn);
	kfree(inst);
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.create = cryptd_create,
	.free = cryptd_free,
	.module = THIS_MODULE,
};

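/*
 * Helpers for users that need direct access to a cryptd tfm.  The module
 * check ensures the algorithm we got really is one of ours.
 */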
struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_tfm *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	mask &= ~CRYPTO_ALG_TYPE_MASK;
	mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
	tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_tfm(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);

struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
	crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
					u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_ahash *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
	crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

static int __init cryptd_init(void)
{
	int err;

	err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		cryptd_fini_queue(&queue);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

module_init(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");