/*
 * Software multibuffer async crypto daemon.
 *
 * Copyright (c) 2014 Tim Chen <[email protected]>
 *
 * Adapted from crypto daemon.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hardirq.h>

#define MCRYPTD_MAX_CPU_QLEN 100
#define MCRYPTD_BATCH 9

static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				    unsigned int tail);

struct mcryptd_flush_list {
	struct list_head list;
	struct mutex lock;
};

static struct mcryptd_flush_list __percpu *mcryptd_flist;

struct hashd_instance_ctx {
	struct crypto_ahash_spawn spawn;
	struct mcryptd_queue *queue;
};

static void mcryptd_queue_worker(struct work_struct *work);

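/*
 * Put this algorithm's per-CPU state on the local CPU's flush list (if it
 * is not already there) and schedule its delayed flush work @delay jiffies
 * from now.
 */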
void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay)
{
	struct mcryptd_flush_list *flist;

	if (!cstate->flusher_engaged) {
		/* put the flusher on the flush list */
		flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
		mutex_lock(&flist->lock);
		list_add_tail(&cstate->flush_list, &flist->list);
		cstate->flusher_engaged = true;
		cstate->next_flush = jiffies + delay;
		queue_delayed_work_on(smp_processor_id(), kcrypto_wq,
				      &cstate->flush, delay);
		mutex_unlock(&flist->lock);
	}
}
EXPORT_SYMBOL(mcryptd_arm_flusher);

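/*
 * Set up one request queue and worker per possible CPU; each queue holds
 * at most @max_cpu_qlen outstanding requests.
 */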
static int mcryptd_init_queue(struct mcryptd_queue *queue,
			      unsigned int max_cpu_qlen)
{
	int cpu;
	struct mcryptd_cpu_queue *cpu_queue;

	queue->cpu_queue = alloc_percpu(struct mcryptd_cpu_queue);
	pr_debug("mqueue:%p mcryptd_cpu_queue %p\n", queue, queue->cpu_queue);
	if (!queue->cpu_queue)
		return -ENOMEM;
	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
		crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
		INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
	}
	return 0;
}

static void mcryptd_fini_queue(struct mcryptd_queue *queue)
{
	int cpu;
	struct mcryptd_cpu_queue *cpu_queue;

	for_each_possible_cpu(cpu) {
		cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
		BUG_ON(cpu_queue->queue.qlen);
	}
	free_percpu(queue->cpu_queue);
}

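/*
 * Queue @request on the submitting CPU's queue, record that CPU in the
 * request context, and kick the corresponding worker.
 */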
static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
				   struct crypto_async_request *request,
				   struct mcryptd_hash_request_ctx *rctx)
{
	int cpu, err;
	struct mcryptd_cpu_queue *cpu_queue;

	cpu = get_cpu();
	cpu_queue = this_cpu_ptr(queue->cpu_queue);
	rctx->tag.cpu = cpu;

	err = crypto_enqueue_request(&cpu_queue->queue, request);
	pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
		 cpu, cpu_queue, request);
	queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
	put_cpu();

	return err;
}

/*
 * Try to opportunistically flush the partially completed jobs if
 * the crypto daemon is the only task running.
 */
static void mcryptd_opportunistic_flush(void)
{
	struct mcryptd_flush_list *flist;
	struct mcryptd_alg_cstate *cstate;

	flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
	while (single_task_running()) {
		mutex_lock(&flist->lock);
		cstate = list_first_entry_or_null(&flist->list,
				struct mcryptd_alg_cstate, flush_list);
		if (!cstate || !cstate->flusher_engaged) {
			mutex_unlock(&flist->lock);
			return;
		}
		list_del(&cstate->flush_list);
		cstate->flusher_engaged = false;
		mutex_unlock(&flist->lock);
		cstate->alg_state->flusher(cstate);
	}
}

/*
 * Called in workqueue context; do one unit of real crypto work (via
 * req->complete) and reschedule itself if there is more work to do.
 */
static void mcryptd_queue_worker(struct work_struct *work)
{
	struct mcryptd_cpu_queue *cpu_queue;
	struct crypto_async_request *req, *backlog;
	int i;

	/*
	 * Need to loop through more than once for multi-buffer to
	 * be effective.
	 */

	cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
	i = 0;
	while (i < MCRYPTD_BATCH || single_task_running()) {
		/*
		 * preempt_disable/enable is used to prevent
		 * being preempted by mcryptd_enqueue_request().
		 */
		local_bh_disable();
		preempt_disable();
		backlog = crypto_get_backlog(&cpu_queue->queue);
		req = crypto_dequeue_request(&cpu_queue->queue);
		preempt_enable();
		local_bh_enable();

		if (!req) {
			mcryptd_opportunistic_flush();
			return;
		}

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);
		req->complete(req, 0);
		if (!cpu_queue->queue.qlen)
			return;
		++i;
	}
	if (cpu_queue->queue.qlen)
		queue_work(kcrypto_wq, &cpu_queue->work);
}

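/*
 * Delayed-work handler: take the per-CPU algorithm state off the flush
 * list and invoke the algorithm's flusher callback for it.
 */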
void mcryptd_flusher(struct work_struct *__work)
{
	struct mcryptd_alg_cstate *alg_cpu_state;
	struct mcryptd_alg_state *alg_state;
	struct mcryptd_flush_list *flist;
	int cpu;

	cpu = smp_processor_id();
	alg_cpu_state = container_of(to_delayed_work(__work),
				     struct mcryptd_alg_cstate, flush);
	alg_state = alg_cpu_state->alg_state;
	if (alg_cpu_state->cpu != cpu)
		pr_debug("mcryptd error: work on cpu %d, should be cpu %d\n",
			 cpu, alg_cpu_state->cpu);

	if (alg_cpu_state->flusher_engaged) {
		flist = per_cpu_ptr(mcryptd_flist, cpu);
		mutex_lock(&flist->lock);
		list_del(&alg_cpu_state->flush_list);
		alg_cpu_state->flusher_engaged = false;
		mutex_unlock(&flist->lock);
		alg_state->flusher(alg_cpu_state);
	}
}
EXPORT_SYMBOL_GPL(mcryptd_flusher);

static inline struct mcryptd_queue *mcryptd_get_queue(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct mcryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

	return ictx->queue;
}

static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
				    unsigned int tail)
{
	char *p;
	struct crypto_instance *inst;
	int err;

	p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	inst = (void *)(p + head);

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "mcryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return p;

out_free_inst:
	kfree(p);
	p = ERR_PTR(err);
	goto out;
}

static inline bool mcryptd_check_internal(struct rtattr **tb, u32 *type,
					  u32 *mask)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return false;

	*type |= algt->type & CRYPTO_ALG_INTERNAL;
	*mask |= algt->mask & CRYPTO_ALG_INTERNAL;

	if (*type & *mask & CRYPTO_ALG_INTERNAL)
		return true;
	else
		return false;
}

static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_ahash_spawn *spawn = &ictx->spawn;
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_ahash *hash;

	hash = crypto_spawn_ahash(spawn);
	if (IS_ERR(hash))
		return PTR_ERR(hash);

	ctx->child = hash;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mcryptd_hash_request_ctx) +
				 crypto_ahash_reqsize(hash));
	return 0;
}

static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_ahash(ctx->child);
}

static int mcryptd_hash_setkey(struct crypto_ahash *parent,
			       const u8 *key, unsigned int keylen)
{
	struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
	struct crypto_ahash *child = ctx->child;
	int err;

	crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_ahash_set_flags(child, crypto_ahash_get_flags(parent) &
			       CRYPTO_TFM_REQ_MASK);
	err = crypto_ahash_setkey(child, key, keylen);
	crypto_ahash_set_flags(parent, crypto_ahash_get_flags(child) &
			       CRYPTO_TFM_RES_MASK);
	return err;
}

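/*
 * Save the caller's completion in the request context, substitute the
 * mcryptd completion handler, and queue the request for the workqueue
 * worker on this CPU.
 */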
static int mcryptd_hash_enqueue(struct ahash_request *req,
				crypto_completion_t complete)
{
	int ret;

	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct mcryptd_queue *queue =
		mcryptd_get_queue(crypto_ahash_tfm(tfm));

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	ret = mcryptd_enqueue_request(queue, &req->base, rctx);

	return ret;
}

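/*
 * The mcryptd_hash_* handlers below are run by the workqueue worker via
 * req->base.complete.  Each drives the corresponding operation on the
 * inner (multi-buffer) ahash and hands completion back to the caller's
 * original callback.
 */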
static void mcryptd_hash_init(struct crypto_async_request *req_async, int err)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_ahash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct ahash_request *desc = &rctx->areq;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	ahash_request_set_tfm(desc, child);
	ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
				   rctx->complete, req_async);

	rctx->out = req->result;
	err = crypto_ahash_init(desc);

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_init_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_init);
}

static void mcryptd_hash_update(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	rctx->out = req->result;
	err = ahash_mcryptd_update(&rctx->areq);
	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_update_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_update);
}

static void mcryptd_hash_final(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;

	rctx->out = req->result;
	err = ahash_mcryptd_final(&rctx->areq);
	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_final_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_final);
}

static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	if (unlikely(err == -EINPROGRESS))
		goto out;
	rctx->out = req->result;
	err = ahash_mcryptd_finup(&rctx->areq);

	if (err) {
		req->base.complete = rctx->complete;
		goto out;
	}

	return;
out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_finup_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_finup);
}

static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
	struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
	struct crypto_ahash *child = ctx->child;
	struct ahash_request *req = ahash_request_cast(req_async);
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct ahash_request *desc = &rctx->areq;

	if (unlikely(err == -EINPROGRESS))
		goto out;

	ahash_request_set_tfm(desc, child);
	ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
				   rctx->complete, req_async);

	rctx->out = req->result;
	err = ahash_mcryptd_digest(desc);

out:
	local_bh_disable();
	rctx->complete(&req->base, err);
	local_bh_enable();
}

static int mcryptd_hash_digest_enqueue(struct ahash_request *req)
{
	return mcryptd_hash_enqueue(req, mcryptd_hash_digest);
}

static int mcryptd_hash_export(struct ahash_request *req, void *out)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_ahash_export(&rctx->areq, out);
}

static int mcryptd_hash_import(struct ahash_request *req, const void *in)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

	return crypto_ahash_import(&rctx->areq, in);
}

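/*
 * Build an "mcryptd(...)" ahash instance around the algorithm named by the
 * template parameters: allocate the instance, grab a spawn on the inner
 * ahash, and wire up the asynchronous entry points above.
 */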
static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
			       struct mcryptd_queue *queue)
{
	struct hashd_instance_ctx *ctx;
	struct ahash_instance *inst;
	struct hash_alg_common *halg;
	struct crypto_alg *alg;
	u32 type = 0;
	u32 mask = 0;
	int err;

	if (!mcryptd_check_internal(tb, &type, &mask))
		return -EINVAL;

	halg = ahash_attr_alg(tb[1], type, mask);
	if (IS_ERR(halg))
		return PTR_ERR(halg);

	alg = &halg->base;
	pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name);
	inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(),
				      sizeof(*ctx));
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	ctx = ahash_instance_ctx(inst);
	ctx->queue = queue;

	err = crypto_init_ahash_spawn(&ctx->spawn, halg,
				      ahash_crypto_instance(inst));
	if (err)
		goto out_free_inst;

	type = CRYPTO_ALG_ASYNC;
	if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
		type |= CRYPTO_ALG_INTERNAL;
	inst->alg.halg.base.cra_flags = type;

	inst->alg.halg.digestsize = halg->digestsize;
	inst->alg.halg.statesize = halg->statesize;
	inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);

	inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
	inst->alg.halg.base.cra_exit = mcryptd_hash_exit_tfm;

	inst->alg.init = mcryptd_hash_init_enqueue;
	inst->alg.update = mcryptd_hash_update_enqueue;
	inst->alg.final = mcryptd_hash_final_enqueue;
	inst->alg.finup = mcryptd_hash_finup_enqueue;
	inst->alg.export = mcryptd_hash_export;
	inst->alg.import = mcryptd_hash_import;
	inst->alg.setkey = mcryptd_hash_setkey;
	inst->alg.digest = mcryptd_hash_digest_enqueue;

	err = ahash_register_instance(tmpl, inst);
	if (err) {
		crypto_drop_ahash(&ctx->spawn);
out_free_inst:
		kfree(inst);
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}

static struct mcryptd_queue mqueue;

static int mcryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return PTR_ERR(algt);

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_DIGEST:
		return mcryptd_create_hash(tmpl, tb, &mqueue);
	break;
	}

	return -EINVAL;
}

static void mcryptd_free(struct crypto_instance *inst)
{
	struct mcryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
	struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);

	switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_drop_ahash(&hctx->spawn);
		kfree(ahash_instance(inst));
		return;
	default:
		crypto_drop_spawn(&ctx->spawn);
		kfree(inst);
	}
}

static struct crypto_template mcryptd_tmpl = {
	.name = "mcryptd",
	.create = mcryptd_create,
	.free = mcryptd_free,
	.module = THIS_MODULE,
};

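/*
 * Allocate an "mcryptd(alg_name)" ahash handle, typically from a
 * multi-buffer algorithm's outer ahash.  Illustrative sketch of a caller
 * (the algorithm name here is a hypothetical example):
 *
 *	struct mcryptd_ahash *mtfm;
 *
 *	mtfm = mcryptd_alloc_ahash("__example-hash-mb", CRYPTO_ALG_INTERNAL,
 *				   CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(mtfm))
 *		return PTR_ERR(mtfm);
 *	...
 *	mcryptd_free_ahash(mtfm);
 */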
struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
					  u32 type, u32 mask)
{
	char mcryptd_alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_ahash *tfm;

	if (snprintf(mcryptd_alg_name, CRYPTO_MAX_ALG_NAME,
		     "mcryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
		return ERR_PTR(-EINVAL);
	tfm = crypto_alloc_ahash(mcryptd_alg_name, type, mask);
	if (IS_ERR(tfm))
		return ERR_CAST(tfm);
	if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
		crypto_free_ahash(tfm);
		return ERR_PTR(-EINVAL);
	}

	return __mcryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);

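/*
 * Thin wrappers used by the completion handlers above to drive the inner
 * ahash; any alignment handling is left to the multi-buffer algorithm.
 */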
int ahash_mcryptd_digest(struct ahash_request *desc)
{
	return crypto_ahash_init(desc) ?: ahash_mcryptd_finup(desc);
}

int ahash_mcryptd_update(struct ahash_request *desc)
{
	/* alignment is to be done by multi-buffer crypto algorithm if needed */

	return crypto_ahash_update(desc);
}

int ahash_mcryptd_finup(struct ahash_request *desc)
{
	/* alignment is to be done by multi-buffer crypto algorithm if needed */

	return crypto_ahash_finup(desc);
}

int ahash_mcryptd_final(struct ahash_request *desc)
{
	/* alignment is to be done by multi-buffer crypto algorithm if needed */

	return crypto_ahash_final(desc);
}

struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
{
	struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

	return ctx->child;
}
EXPORT_SYMBOL_GPL(mcryptd_ahash_child);

struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req)
{
	struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
	return &rctx->areq;
}
EXPORT_SYMBOL_GPL(mcryptd_ahash_desc);

void mcryptd_free_ahash(struct mcryptd_ahash *tfm)
{
	crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(mcryptd_free_ahash);

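/*
 * Module init: set up the per-CPU flush lists and request queues, then
 * register the "mcryptd" template; undone in reverse order on failure and
 * in mcryptd_exit().
 */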
static int __init mcryptd_init(void)
{
	int err, cpu;
	struct mcryptd_flush_list *flist;

	mcryptd_flist = alloc_percpu(struct mcryptd_flush_list);
	for_each_possible_cpu(cpu) {
		flist = per_cpu_ptr(mcryptd_flist, cpu);
		INIT_LIST_HEAD(&flist->list);
		mutex_init(&flist->lock);
	}

	err = mcryptd_init_queue(&mqueue, MCRYPTD_MAX_CPU_QLEN);
	if (err) {
		free_percpu(mcryptd_flist);
		return err;
	}

	err = crypto_register_template(&mcryptd_tmpl);
	if (err) {
		mcryptd_fini_queue(&mqueue);
		free_percpu(mcryptd_flist);
	}

	return err;
}

static void __exit mcryptd_exit(void)
{
	mcryptd_fini_queue(&mqueue);
	crypto_unregister_template(&mcryptd_tmpl);
	free_percpu(mcryptd_flist);
}

subsys_initcall(mcryptd_init);
module_exit(mcryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async multibuffer crypto daemon");
MODULE_ALIAS_CRYPTO("mcryptd");