/*
 * Software multibuffer async crypto daemon.
 *
 * Copyright (c) 2014 Tim Chen <[email protected]>
 *
 * Adapted from crypto daemon.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/mcryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/sched/stat.h>
#include <linux/slab.h>
#include <linux/hardirq.h>

#define MCRYPTD_MAX_CPU_QLEN 100
#define MCRYPTD_BATCH 9

static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
                                    unsigned int tail);

struct mcryptd_flush_list {
        struct list_head list;
        struct mutex lock;
};

static struct mcryptd_flush_list __percpu *mcryptd_flist;

struct hashd_instance_ctx {
        struct crypto_ahash_spawn spawn;
        struct mcryptd_queue *queue;
};

static void mcryptd_queue_worker(struct work_struct *work);

void mcryptd_arm_flusher(struct mcryptd_alg_cstate *cstate, unsigned long delay)
{
        struct mcryptd_flush_list *flist;

        if (!cstate->flusher_engaged) {
                /* put the flusher on the flush list */
                flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
                mutex_lock(&flist->lock);
                list_add_tail(&cstate->flush_list, &flist->list);
                cstate->flusher_engaged = true;
                cstate->next_flush = jiffies + delay;
                queue_delayed_work_on(smp_processor_id(), kcrypto_wq,
                                      &cstate->flush, delay);
                mutex_unlock(&flist->lock);
        }
}
EXPORT_SYMBOL(mcryptd_arm_flusher);
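
/*
 * Illustrative sketch (assumptions for illustration, not part of the original
 * source): a multi-buffer hash implementation is expected to keep a per-cpu
 * struct mcryptd_alg_cstate, point alg_state->flusher at a routine that
 * forces partially filled lanes to complete, and arm the flusher after
 * queueing a job that may sit in a partial lane:
 *
 *      static struct mcryptd_alg_state example_mb_alg_state = {
 *              .flusher = example_mb_flusher,  // hypothetical callback
 *      };
 *
 *      // in the submit path, with cstate = this cpu's mcryptd_alg_cstate:
 *      mcryptd_arm_flusher(cstate, EXAMPLE_FLUSH_INTERVAL);
 *
 * example_mb_flusher and EXAMPLE_FLUSH_INTERVAL are placeholder names; the
 * delay is in jiffies (cstate->next_flush above is jiffies + delay), and the
 * delayed work on cstate->flush is meant to run mcryptd_flusher() defined
 * later in this file.
 */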

static int mcryptd_init_queue(struct mcryptd_queue *queue,
                              unsigned int max_cpu_qlen)
{
        int cpu;
        struct mcryptd_cpu_queue *cpu_queue;

        queue->cpu_queue = alloc_percpu(struct mcryptd_cpu_queue);
        pr_debug("mqueue:%p mcryptd_cpu_queue %p\n", queue, queue->cpu_queue);
        if (!queue->cpu_queue)
                return -ENOMEM;
        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue);
                crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
                INIT_WORK(&cpu_queue->work, mcryptd_queue_worker);
        }
        return 0;
}

static void mcryptd_fini_queue(struct mcryptd_queue *queue)
{
        int cpu;
        struct mcryptd_cpu_queue *cpu_queue;

        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                BUG_ON(cpu_queue->queue.qlen);
        }
        free_percpu(queue->cpu_queue);
}

static int mcryptd_enqueue_request(struct mcryptd_queue *queue,
                                   struct crypto_async_request *request,
                                   struct mcryptd_hash_request_ctx *rctx)
{
        int cpu, err;
        struct mcryptd_cpu_queue *cpu_queue;

        cpu = get_cpu();
        cpu_queue = this_cpu_ptr(queue->cpu_queue);
        rctx->tag.cpu = cpu;

        err = crypto_enqueue_request(&cpu_queue->queue, request);
        pr_debug("enqueue request: cpu %d cpu_queue %p request %p\n",
                 cpu, cpu_queue, request);
        queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
        put_cpu();

        return err;
}

/*
 * Try to opportunistically flush the partially completed jobs if
 * the crypto daemon is the only task running.
 */
static void mcryptd_opportunistic_flush(void)
{
        struct mcryptd_flush_list *flist;
        struct mcryptd_alg_cstate *cstate;

        flist = per_cpu_ptr(mcryptd_flist, smp_processor_id());
        while (single_task_running()) {
                mutex_lock(&flist->lock);
                cstate = list_first_entry_or_null(&flist->list,
                                struct mcryptd_alg_cstate, flush_list);
                if (!cstate || !cstate->flusher_engaged) {
                        mutex_unlock(&flist->lock);
                        return;
                }
                list_del(&cstate->flush_list);
                cstate->flusher_engaged = false;
                mutex_unlock(&flist->lock);
                cstate->alg_state->flusher(cstate);
        }
}

/*
 * Called in workqueue context: perform one unit of real crypto work
 * (via req->complete) and reschedule itself if there is more work
 * to do.
 */
static void mcryptd_queue_worker(struct work_struct *work)
{
        struct mcryptd_cpu_queue *cpu_queue;
        struct crypto_async_request *req, *backlog;
        int i;

        /*
         * Need to loop through more than once for multi-buffer to
         * be effective.
         */

        cpu_queue = container_of(work, struct mcryptd_cpu_queue, work);
        i = 0;
        while (i < MCRYPTD_BATCH || single_task_running()) {
                /*
                 * preempt_disable/enable is used to prevent
                 * being preempted by mcryptd_enqueue_request()
                 */
                local_bh_disable();
                preempt_disable();
                backlog = crypto_get_backlog(&cpu_queue->queue);
                req = crypto_dequeue_request(&cpu_queue->queue);
                preempt_enable();
                local_bh_enable();

                if (!req) {
                        mcryptd_opportunistic_flush();
                        return;
                }

                if (backlog)
                        backlog->complete(backlog, -EINPROGRESS);
                req->complete(req, 0);
                if (!cpu_queue->queue.qlen)
                        return;
                ++i;
        }
        if (cpu_queue->queue.qlen)
                queue_work(kcrypto_wq, &cpu_queue->work);
}

void mcryptd_flusher(struct work_struct *__work)
{
        struct mcryptd_alg_cstate *alg_cpu_state;
        struct mcryptd_alg_state *alg_state;
        struct mcryptd_flush_list *flist;
        int cpu;

        cpu = smp_processor_id();
        alg_cpu_state = container_of(to_delayed_work(__work),
                                     struct mcryptd_alg_cstate, flush);
        alg_state = alg_cpu_state->alg_state;
        if (alg_cpu_state->cpu != cpu)
                pr_debug("mcryptd error: work on cpu %d, should be cpu %d\n",
                         cpu, alg_cpu_state->cpu);

        if (alg_cpu_state->flusher_engaged) {
                flist = per_cpu_ptr(mcryptd_flist, cpu);
                mutex_lock(&flist->lock);
                list_del(&alg_cpu_state->flush_list);
                alg_cpu_state->flusher_engaged = false;
                mutex_unlock(&flist->lock);
                alg_state->flusher(alg_cpu_state);
        }
}
EXPORT_SYMBOL_GPL(mcryptd_flusher);

static inline struct mcryptd_queue *mcryptd_get_queue(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct mcryptd_instance_ctx *ictx = crypto_instance_ctx(inst);

        return ictx->queue;
}

static void *mcryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
                                    unsigned int tail)
{
        char *p;
        struct crypto_instance *inst;
        int err;

        p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        inst = (void *)(p + head);

        err = -ENAMETOOLONG;
        if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                     "mcryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
                goto out_free_inst;

        memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

        inst->alg.cra_priority = alg->cra_priority + 50;
        inst->alg.cra_blocksize = alg->cra_blocksize;
        inst->alg.cra_alignmask = alg->cra_alignmask;

out:
        return p;

out_free_inst:
        kfree(p);
        p = ERR_PTR(err);
        goto out;
}

static inline bool mcryptd_check_internal(struct rtattr **tb, u32 *type,
                                          u32 *mask)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return false;

        *type |= algt->type & CRYPTO_ALG_INTERNAL;
        *mask |= algt->mask & CRYPTO_ALG_INTERNAL;

        if (*type & *mask & CRYPTO_ALG_INTERNAL)
                return true;
        else
                return false;
}

static int mcryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct crypto_ahash_spawn *spawn = &ictx->spawn;
        struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_ahash *hash;

        hash = crypto_spawn_ahash(spawn);
        if (IS_ERR(hash))
                return PTR_ERR(hash);

        ctx->child = hash;
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct mcryptd_hash_request_ctx) +
                                 crypto_ahash_reqsize(hash));
        return 0;
}

static void mcryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
        struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_ahash(ctx->child);
}

static int mcryptd_hash_setkey(struct crypto_ahash *parent,
                               const u8 *key, unsigned int keylen)
{
        struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
        struct crypto_ahash *child = ctx->child;
        int err;

        crypto_ahash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_ahash_set_flags(child, crypto_ahash_get_flags(parent) &
                                      CRYPTO_TFM_REQ_MASK);
        err = crypto_ahash_setkey(child, key, keylen);
        crypto_ahash_set_flags(parent, crypto_ahash_get_flags(child) &
                                       CRYPTO_TFM_RES_MASK);
        return err;
}
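
/*
 * Queue a hash request on the current cpu's mcryptd queue: the caller's
 * completion callback is stashed in the request context and replaced by
 * the mcryptd handler for this operation, which mcryptd_queue_worker()
 * later invokes through req->complete.
 */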
static int mcryptd_hash_enqueue(struct ahash_request *req,
                                crypto_completion_t complete)
{
        int ret;

        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct mcryptd_queue *queue =
                mcryptd_get_queue(crypto_ahash_tfm(tfm));

        rctx->complete = req->base.complete;
        req->base.complete = complete;

        ret = mcryptd_enqueue_request(queue, &req->base, rctx);

        return ret;
}

static void mcryptd_hash_init(struct crypto_async_request *req_async, int err)
{
        struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_ahash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct ahash_request *desc = &rctx->areq;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        ahash_request_set_tfm(desc, child);
        ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
                                   rctx->complete, req_async);

        rctx->out = req->result;
        err = crypto_ahash_init(desc);

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int mcryptd_hash_init_enqueue(struct ahash_request *req)
{
        return mcryptd_hash_enqueue(req, mcryptd_hash_init);
}

static void mcryptd_hash_update(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        rctx->out = req->result;
        err = ahash_mcryptd_update(&rctx->areq);
        if (err) {
                req->base.complete = rctx->complete;
                goto out;
        }

        return;
out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int mcryptd_hash_update_enqueue(struct ahash_request *req)
{
        return mcryptd_hash_enqueue(req, mcryptd_hash_update);
}

static void mcryptd_hash_final(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        rctx->out = req->result;
        err = ahash_mcryptd_final(&rctx->areq);
        if (err) {
                req->base.complete = rctx->complete;
                goto out;
        }

        return;
out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int mcryptd_hash_final_enqueue(struct ahash_request *req)
{
        return mcryptd_hash_enqueue(req, mcryptd_hash_final);
}

static void mcryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;
        rctx->out = req->result;
        err = ahash_mcryptd_finup(&rctx->areq);

        if (err) {
                req->base.complete = rctx->complete;
                goto out;
        }

        return;
out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int mcryptd_hash_finup_enqueue(struct ahash_request *req)
{
        return mcryptd_hash_enqueue(req, mcryptd_hash_finup);
}

static void mcryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
        struct mcryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_ahash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct ahash_request *desc = &rctx->areq;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        ahash_request_set_tfm(desc, child);
        ahash_request_set_callback(desc, CRYPTO_TFM_REQ_MAY_SLEEP,
                                   rctx->complete, req_async);

        rctx->out = req->result;
        err = ahash_mcryptd_digest(desc);

out:
        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();
}

static int mcryptd_hash_digest_enqueue(struct ahash_request *req)
{
        return mcryptd_hash_enqueue(req, mcryptd_hash_digest);
}

static int mcryptd_hash_export(struct ahash_request *req, void *out)
{
        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        return crypto_ahash_export(&rctx->areq, out);
}

static int mcryptd_hash_import(struct ahash_request *req, const void *in)
{
        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        return crypto_ahash_import(&rctx->areq, in);
}

static int mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
                               struct mcryptd_queue *queue)
{
        struct hashd_instance_ctx *ctx;
        struct ahash_instance *inst;
        struct hash_alg_common *halg;
        struct crypto_alg *alg;
        u32 type = 0;
        u32 mask = 0;
        int err;

        if (!mcryptd_check_internal(tb, &type, &mask))
                return -EINVAL;

        halg = ahash_attr_alg(tb[1], type, mask);
        if (IS_ERR(halg))
                return PTR_ERR(halg);

        alg = &halg->base;
        pr_debug("crypto: mcryptd hash alg: %s\n", alg->cra_name);
        inst = mcryptd_alloc_instance(alg, ahash_instance_headroom(),
                                      sizeof(*ctx));
        err = PTR_ERR(inst);
        if (IS_ERR(inst))
                goto out_put_alg;

        ctx = ahash_instance_ctx(inst);
        ctx->queue = queue;

        err = crypto_init_ahash_spawn(&ctx->spawn, halg,
                                      ahash_crypto_instance(inst));
        if (err)
                goto out_free_inst;

        type = CRYPTO_ALG_ASYNC;
        if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
                type |= CRYPTO_ALG_INTERNAL;
        inst->alg.halg.base.cra_flags = type;

        inst->alg.halg.digestsize = halg->digestsize;
        inst->alg.halg.statesize = halg->statesize;
        inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);

        inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
        inst->alg.halg.base.cra_exit = mcryptd_hash_exit_tfm;

        inst->alg.init   = mcryptd_hash_init_enqueue;
        inst->alg.update = mcryptd_hash_update_enqueue;
        inst->alg.final  = mcryptd_hash_final_enqueue;
        inst->alg.finup  = mcryptd_hash_finup_enqueue;
        inst->alg.export = mcryptd_hash_export;
        inst->alg.import = mcryptd_hash_import;
        inst->alg.setkey = mcryptd_hash_setkey;
        inst->alg.digest = mcryptd_hash_digest_enqueue;

        err = ahash_register_instance(tmpl, inst);
        if (err) {
                crypto_drop_ahash(&ctx->spawn);
out_free_inst:
                kfree(inst);
        }

out_put_alg:
        crypto_mod_put(alg);
        return err;
}

static struct mcryptd_queue mqueue;

static int mcryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return PTR_ERR(algt);

        switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_DIGEST:
                return mcryptd_create_hash(tmpl, tb, &mqueue);
                break;
        }

        return -EINVAL;
}

static void mcryptd_free(struct crypto_instance *inst)
{
        struct mcryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
        struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);

        switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_AHASH:
                crypto_drop_ahash(&hctx->spawn);
                kfree(ahash_instance(inst));
                return;
        default:
                crypto_drop_spawn(&ctx->spawn);
                kfree(inst);
        }
}

static struct crypto_template mcryptd_tmpl = {
        .name = "mcryptd",
        .create = mcryptd_create,
        .free = mcryptd_free,
        .module = THIS_MODULE,
};

struct mcryptd_ahash *mcryptd_alloc_ahash(const char *alg_name,
                                          u32 type, u32 mask)
{
        char mcryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct crypto_ahash *tfm;

        if (snprintf(mcryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "mcryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        tfm = crypto_alloc_ahash(mcryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_ahash(tfm);
                return ERR_PTR(-EINVAL);
        }

        return __mcryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(mcryptd_alloc_ahash);
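
/*
 * Illustrative sketch (assumptions for illustration only, not part of the
 * original source): glue code for a multi-buffer algorithm would typically
 * wrap its internal ("__"-prefixed, CRYPTO_ALG_INTERNAL) ahash in mcryptd
 * at init time and release it on exit:
 *
 *      struct mcryptd_ahash *mtfm;
 *
 *      mtfm = mcryptd_alloc_ahash("__example-mb", CRYPTO_ALG_INTERNAL,
 *                                 CRYPTO_ALG_INTERNAL);
 *      if (IS_ERR(mtfm))
 *              return PTR_ERR(mtfm);
 *      // issue requests against &mtfm->base with the normal ahash API;
 *      // mcryptd queues them per cpu and runs them from kcrypto_wq
 *      ...
 *      mcryptd_free_ahash(mtfm);
 *
 * "__example-mb" is a placeholder driver name; the type/mask shown assume
 * the wrapped algorithm is marked CRYPTO_ALG_INTERNAL.
 */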

int ahash_mcryptd_digest(struct ahash_request *desc)
{
        return crypto_ahash_init(desc) ?: ahash_mcryptd_finup(desc);
}

int ahash_mcryptd_update(struct ahash_request *desc)
{
        /* alignment is to be done by multi-buffer crypto algorithm if needed */

        return crypto_ahash_update(desc);
}

int ahash_mcryptd_finup(struct ahash_request *desc)
{
        /* alignment is to be done by multi-buffer crypto algorithm if needed */

        return crypto_ahash_finup(desc);
}

int ahash_mcryptd_final(struct ahash_request *desc)
{
        /* alignment is to be done by multi-buffer crypto algorithm if needed */

        return crypto_ahash_final(desc);
}

struct crypto_ahash *mcryptd_ahash_child(struct mcryptd_ahash *tfm)
{
        struct mcryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        return ctx->child;
}
EXPORT_SYMBOL_GPL(mcryptd_ahash_child);

struct ahash_request *mcryptd_ahash_desc(struct ahash_request *req)
{
        struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        return &rctx->areq;
}
EXPORT_SYMBOL_GPL(mcryptd_ahash_desc);

void mcryptd_free_ahash(struct mcryptd_ahash *tfm)
{
        crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(mcryptd_free_ahash);

static int __init mcryptd_init(void)
{
        int err, cpu;
        struct mcryptd_flush_list *flist;

        mcryptd_flist = alloc_percpu(struct mcryptd_flush_list);
        for_each_possible_cpu(cpu) {
                flist = per_cpu_ptr(mcryptd_flist, cpu);
                INIT_LIST_HEAD(&flist->list);
                mutex_init(&flist->lock);
        }

        err = mcryptd_init_queue(&mqueue, MCRYPTD_MAX_CPU_QLEN);
        if (err) {
                free_percpu(mcryptd_flist);
                return err;
        }

        err = crypto_register_template(&mcryptd_tmpl);
        if (err) {
                mcryptd_fini_queue(&mqueue);
                free_percpu(mcryptd_flist);
        }

        return err;
}

static void __exit mcryptd_exit(void)
{
        mcryptd_fini_queue(&mqueue);
        crypto_unregister_template(&mcryptd_tmpl);
        free_percpu(mcryptd_flist);
}

subsys_initcall(mcryptd_init);
module_exit(mcryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async multibuffer crypto daemon");
MODULE_ALIAS_CRYPTO("mcryptd");