/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define CRYPTD_MAX_CPU_QLEN 100

struct cryptd_cpu_queue {
	struct crypto_queue queue;
	struct work_struct work;
};

struct cryptd_queue {
	struct cryptd_cpu_queue *cpu_queue;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_queue *queue;
};

struct cryptd_blkcipher_ctx {
	struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
	crypto_completion_t complete;
};

struct cryptd_hash_ctx {
	struct crypto_hash *child;
};

struct cryptd_hash_request_ctx {
	crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

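/*
 * Set up a cryptd_cpu_queue (crypto request queue plus work item) for
 * every possible CPU; requests are later queued on the submitting CPU's
 * queue by cryptd_enqueue_request().
 */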
static int cryptd_init_queue(struct cryptd_queue *queue,
			     unsigned int max_cpu_qlen)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
	}
	return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
	int cpu;
	struct cryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

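/*
 * Queue a request on the current CPU's queue and kick that CPU's work
 * item on kcrypto_wq; get_cpu()/put_cpu() keep us on one CPU while the
 * per-cpu queue is touched.
 */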
static int cryptd_enqueue_request(struct cryptd_queue *queue,
				  struct crypto_async_request *request)
{
	int cpu, err;
	struct cryptd_cpu_queue *cpu_queue;

	cpu = get_cpu();
	cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
	err = crypto_enqueue_request(&cpu_queue->queue, request);
	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
	put_cpu();

	return err;
}

/* Called in workqueue context: do one real unit of crypto work (via
 * req->complete) and reschedule itself if there is more work to do. */
static void cryptd_queue_worker(struct work_struct *work)
{
	struct cryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;

	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
	/* Only handle one request at a time to avoid hogging the crypto
	 * workqueue.  preempt_disable/enable prevents us from being
	 * preempted by cryptd_enqueue_request(). */
	preempt_disable();
	backlog = crypto_get_backlog(&cpu_queue->queue);
	req = crypto_dequeue_request(&cpu_queue->queue);
	preempt_enable();

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);
	req->complete(req, 0);

	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	return ictx->queue;
}

static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
	struct crypto_blkcipher *child = ctx->child;
	int err;

	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
					  CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(child, key, keylen);
	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
					    CRYPTO_TFM_RES_MASK);
	return err;
}

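/*
 * Worker-side helper for the encrypt/decrypt callbacks below: run the
 * request synchronously on the child blkcipher, restore the caller's
 * completion handler and invoke it with softirqs disabled.
 */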
static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct blkcipher_desc desc;

	rctx = ablkcipher_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->decrypt);
}

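/*
 * Save the caller's completion handler in the request context, point the
 * request at the cryptd callback instead, and queue it for asynchronous
 * processing by cryptd_queue_worker().
 */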
static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
				    crypto_completion_t complete)
{
	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cryptd_queue *queue;

	queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
	rctx->complete = req->base.complete;
	req->base.complete = complete;

	return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ablkcipher.reqsize =
		sizeof(struct cryptd_blkcipher_request_ctx);
	return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(ctx->child);
}

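/*
 * Build a template instance that wraps @alg: the driver name becomes
 * "cryptd(<driver name>)", cra_name is copied from the wrapped algorithm,
 * and the priority is set to the wrapped algorithm's priority plus 50.
 */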
static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
						     struct cryptd_queue *queue)
{
	struct crypto_instance *inst;
	struct cryptd_instance_ctx *ctx;
	int err;

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst) {
		inst = ERR_PTR(-ENOMEM);
		goto out;
	}

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	ctx = crypto_instance_ctx(inst);
	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	ctx->queue = queue;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return inst;

out_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out;
}

static struct crypto_instance *cryptd_alloc_blkcipher(
	struct rtattr **tb, struct cryptd_queue *queue)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return ERR_CAST(alg);

	inst = cryptd_alloc_instance(alg, queue);
	if (IS_ERR(inst))
		goto out_put_alg;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = &crypto_ablkcipher_type;

	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

	inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

out_put_alg:
	crypto_mod_put(alg);
	return inst;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_hash *cipher;

	cipher = crypto_spawn_hash(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ahash.reqsize =
		sizeof(struct cryptd_hash_request_ctx);
	return 0;
}

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_hash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
			      const u8 *key, unsigned int keylen)
{
	struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_hash *child = ctx->child;
	int err;

	crypto_hash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_hash_set_flags(child, crypto_ahash_get_flags(parent) &
				     CRYPTO_TFM_REQ_MASK);
	err = crypto_hash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_hash_get_flags(child) &
				       CRYPTO_TFM_RES_MASK);
	return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
			       crypto_completion_t complete)
{
	struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cryptd_queue *queue =
		cryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	return cryptd_enqueue_request(queue, &req->base);
}

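/*
 * The callbacks below run from cryptd_queue_worker(): each performs the
 * corresponding operation on the synchronous child hash via a hash_desc
 * and then calls the original completion handler with softirqs disabled.
 */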
static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_hash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;
	struct hash_desc desc;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_crt(child)->init(&desc);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_hash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;
	struct hash_desc desc;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_crt(child)->update(&desc,
					     req->src,
					     req->nbytes);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_hash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;
	struct hash_desc desc;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_crt(child)->final(&desc, req->result);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_hash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct cryptd_hash_request_ctx *rctx;
	struct hash_desc desc;

	rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	desc.tfm = child;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_crt(child)->digest(&desc,
					     req->src,
					     req->nbytes,
					     req->result);

	req->base.complete = rctx->complete;

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static struct crypto_instance *cryptd_alloc_hash(
	struct rtattr **tb, struct cryptd_queue *queue)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_HASH,
				  CRYPTO_ALG_TYPE_HASH_MASK);
	if (IS_ERR(alg))
		return ERR_PTR(PTR_ERR(alg));

	inst = cryptd_alloc_instance(alg, queue);
	if (IS_ERR(inst))
		goto out_put_alg;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = &crypto_ahash_type;

	inst->alg.cra_ahash.digestsize = alg->cra_hash.digestsize;
	inst->alg.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

	inst->alg.cra_init = cryptd_hash_init_tfm;
	inst->alg.cra_exit = cryptd_hash_exit_tfm;

	inst->alg.cra_ahash.init = cryptd_hash_init_enqueue;
	inst->alg.cra_ahash.update = cryptd_hash_update_enqueue;
	inst->alg.cra_ahash.final = cryptd_hash_final_enqueue;
	inst->alg.cra_ahash.setkey = cryptd_hash_setkey;
	inst->alg.cra_ahash.digest = cryptd_hash_digest_enqueue;

out_put_alg:
	crypto_mod_put(alg);
	return inst;
}

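/* One module-wide queue (with per-CPU sub-queues) shared by all instances. */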
static struct cryptd_queue queue;

static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_CAST(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		return cryptd_alloc_blkcipher(tb, &queue);
	case CRYPTO_ALG_TYPE_DIGEST:
		return cryptd_alloc_hash(tb, &queue);
	}

	return ERR_PTR(-EINVAL);
}

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);

	crypto_drop_spawn(&ctx->spawn);
	kfree(inst);
}

static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.alloc = cryptd_alloc,
	.free = cryptd_free,
	.module = THIS_MODULE,
};

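/*
 * Allocate the cryptd wrapper for a named algorithm on behalf of other
 * kernel code; for example (illustrative only), cryptd_alloc_ablkcipher(
 * "cbc(aes)", 0, 0) asks the crypto API for "cryptd(cbc(aes))".  The module
 * check below rejects lookups that did not resolve to a cryptd instance.
 */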
struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
						  u32 type, u32 mask)
{
	char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_ablkcipher *tfm;

	if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ablkcipher(cryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ablkcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __cryptd_ablkcipher_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);

struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
	return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
	crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);

static int __init cryptd_init(void)
{
	int err;

	err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		cryptd_fini_queue(&queue);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_fini_queue(&queue);
	crypto_unregister_template(&cryptd_tmpl);
}

module_init(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");