/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/algapi.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define CRYPTD_MAX_QLEN 100

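/*
 * Per-daemon state: the spinlock guards the shared request queue (it is
 * taken with bottom halves disabled, so requests may be queued from
 * softirq context), while the mutex serializes the worker thread's
 * request processing against tfm teardown.
 */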
struct cryptd_state {
	spinlock_t lock;
	struct mutex mutex;
	struct crypto_queue queue;
	struct task_struct *task;
};

struct cryptd_instance_ctx {
	struct crypto_spawn spawn;
	struct cryptd_state *state;
};

struct cryptd_blkcipher_ctx {
	struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
	crypto_completion_t complete;
};

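/* Look up the daemon state stashed in a cryptd tfm's instance context. */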
static inline struct cryptd_state *cryptd_get_state(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	return ictx->state;
}

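/*
 * Forward setkey to the underlying synchronous blkcipher, propagating the
 * request flags down and the result flags back up to the async parent.
 */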
static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
				   const u8 *key, unsigned int keylen)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
	struct crypto_blkcipher *child = ctx->child;
	int err;

	crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
					  CRYPTO_TFM_REQ_MASK);
	err = crypto_blkcipher_setkey(child, key, keylen);
	crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
					    CRYPTO_TFM_RES_MASK);
	return err;
}

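/*
 * Perform the actual cipher operation synchronously in thread context.
 * An err of -EINPROGRESS is the backlog notification and is forwarded
 * straight to the original completion.  Otherwise the child cipher is
 * run and the saved completion is invoked with bottom halves disabled,
 * matching the context a hardware driver's callback would run in.
 */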
static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
				   struct crypto_blkcipher *child,
				   int err,
				   int (*crypt)(struct blkcipher_desc *desc,
						struct scatterlist *dst,
						struct scatterlist *src,
						unsigned int len))
{
	struct cryptd_blkcipher_request_ctx *rctx;
	struct blkcipher_desc desc;

	rctx = ablkcipher_request_ctx(req);

	if (unlikely(err == -EINPROGRESS)) {
		rctx->complete(&req->base, err);
		return;
	}

	desc.tfm = child;
	desc.info = req->info;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypt(&desc, req->dst, req->src, req->nbytes);

	req->base.complete = rctx->complete;

	local_bh_disable();
	req->base.complete(&req->base, err);
	local_bh_enable();
}

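/*
 * Worker-side completion handlers, run by the daemon thread for each
 * dequeued request; they dispatch to the child tfm's synchronous
 * encrypt or decrypt operation.
 */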
static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
	struct crypto_blkcipher *child = ctx->child;

	cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
			       crypto_blkcipher_crt(child)->decrypt);
}

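/*
 * Submit a request to the daemon: save the caller's completion in the
 * request context, substitute the worker callback, enqueue under the
 * queue lock and wake the kthread.
 */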
static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
				    crypto_completion_t complete)
{
	struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct cryptd_state *state =
		cryptd_get_state(crypto_ablkcipher_tfm(tfm));
	int err;

	rctx->complete = req->base.complete;
	req->base.complete = complete;

	spin_lock_bh(&state->lock);
	err = ablkcipher_enqueue_request(crypto_ablkcipher_alg(tfm), req);
	spin_unlock_bh(&state->lock);

	wake_up_process(state->task);
	return err;
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
	return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

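/*
 * Instantiate the underlying synchronous blkcipher when a cryptd tfm is
 * created, and reserve request space for the saved completion.
 */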
static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
	struct crypto_spawn *spawn = &ictx->spawn;
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *cipher;

	cipher = crypto_spawn_blkcipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->child = cipher;
	tfm->crt_ablkcipher.reqsize =
		sizeof(struct cryptd_blkcipher_request_ctx);
	return 0;
}

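/*
 * A tfm must not go away while it still has requests queued; the check
 * is done under the mutex so it cannot race with the worker thread.
 */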
static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct cryptd_state *state = cryptd_get_state(tfm);
	int active;

	mutex_lock(&state->mutex);
	active = ablkcipher_tfm_in_queue(__crypto_ablkcipher_cast(tfm));
	mutex_unlock(&state->mutex);

	BUG_ON(active);

	crypto_free_blkcipher(ctx->child);
}

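/*
 * Allocate the common part of a cryptd instance: a "cryptd(...)" driver
 * name, a spawn holding a reference on the wrapped algorithm, and
 * parameters inherited from it.  The priority is bumped by 50 relative
 * to the wrapped algorithm.
 */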
static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
						     struct cryptd_state *state)
{
	struct crypto_instance *inst;
	struct cryptd_instance_ctx *ctx;
	int err;

	inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
	if (!inst) {
		/* kzalloc returns NULL on failure, never an ERR_PTR. */
		inst = ERR_PTR(-ENOMEM);
		goto out;
	}

	err = -ENAMETOOLONG;
	if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
		     "cryptd(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
		goto out_free_inst;

	ctx = crypto_instance_ctx(inst);
	err = crypto_init_spawn(&ctx->spawn, alg, inst,
				CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (err)
		goto out_free_inst;

	ctx->state = state;

	memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

	inst->alg.cra_priority = alg->cra_priority + 50;
	inst->alg.cra_blocksize = alg->cra_blocksize;
	inst->alg.cra_alignmask = alg->cra_alignmask;

out:
	return inst;

out_free_inst:
	kfree(inst);
	inst = ERR_PTR(err);
	goto out;
}

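/*
 * Wrap a synchronous blkcipher in an ablkcipher instance whose
 * operations enqueue work for the daemon.
 */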
static struct crypto_instance *cryptd_alloc_blkcipher(
	struct rtattr **tb, struct cryptd_state *state)
{
	struct crypto_instance *inst;
	struct crypto_alg *alg;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_BLKCIPHER,
				  CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
	if (IS_ERR(alg))
		return ERR_PTR(PTR_ERR(alg));

	inst = cryptd_alloc_instance(alg, state);
	if (IS_ERR(inst))
		goto out_put_alg;

	inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER | CRYPTO_ALG_ASYNC;
	inst->alg.cra_type = &crypto_ablkcipher_type;

	inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
	inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
	inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

	inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

	inst->alg.cra_init = cryptd_blkcipher_init_tfm;
	inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

	inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
	inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
	inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

	inst->alg.cra_ablkcipher.queue = &state->queue;

out_put_alg:
	crypto_mod_put(alg);
	return inst;
}

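/* A single global queue and worker thread serve all cryptd instances. */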
static struct cryptd_state state;

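/* Template entry point: dispatch on the requested algorithm type. */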
static struct crypto_instance *cryptd_alloc(struct rtattr **tb)
{
	struct crypto_attr_type *algt;

	algt = crypto_get_attr_type(tb);
	if (IS_ERR(algt))
		return ERR_PTR(PTR_ERR(algt));

	switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
	case CRYPTO_ALG_TYPE_BLKCIPHER:
		return cryptd_alloc_blkcipher(tb, &state);
	}

	return ERR_PTR(-EINVAL);
}

static void cryptd_free(struct crypto_instance *inst)
{
	struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);

	crypto_drop_spawn(&ctx->spawn);
	kfree(inst);
}

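/*
 * Users instantiate the daemon by allocating a "cryptd(...)" algorithm.
 * A minimal sketch, assuming a registered cbc(aes) implementation:
 *
 *	struct crypto_ablkcipher *tfm;
 *
 *	tfm = crypto_alloc_ablkcipher("cryptd(cbc(aes))", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 */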
static struct crypto_template cryptd_tmpl = {
	.name = "cryptd",
	.alloc = cryptd_alloc,
	.free = cryptd_free,
	.module = THIS_MODULE,
};

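/*
 * Set up the queue and locks and create the worker thread.  The thread
 * is created stopped; kthread_create leaves it asleep until the first
 * wake_up_process from a request submission.
 */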
static inline int cryptd_create_thread(struct cryptd_state *state,
				       int (*fn)(void *data), const char *name)
{
	spin_lock_init(&state->lock);
	mutex_init(&state->mutex);
	crypto_init_queue(&state->queue, CRYPTD_MAX_QLEN);

	state->task = kthread_create(fn, state, name);
	if (IS_ERR(state->task))
		return PTR_ERR(state->task);

	return 0;
}

static inline void cryptd_stop_thread(struct cryptd_state *state)
{
	BUG_ON(state->queue.qlen);
	kthread_stop(state->task);
}

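/*
 * Daemon main loop.  The task marks itself TASK_INTERRUPTIBLE before
 * dequeueing, so a wake-up from a submitter between the dequeue and
 * schedule() is not lost - it simply puts the task back to
 * TASK_RUNNING.  A backlogged request is notified with -EINPROGRESS
 * before the dequeued request is processed.
 */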
static int cryptd_thread(void *data)
{
	struct cryptd_state *state = data;
	int stop;

	do {
		struct crypto_async_request *req, *backlog;

		mutex_lock(&state->mutex);
		__set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_bh(&state->lock);
		backlog = crypto_get_backlog(&state->queue);
		req = crypto_dequeue_request(&state->queue);
		spin_unlock_bh(&state->lock);

		stop = kthread_should_stop();

		if (stop || req) {
			__set_current_state(TASK_RUNNING);
			if (req) {
				if (backlog)
					backlog->complete(backlog,
							  -EINPROGRESS);
				req->complete(req, 0);
			}
		}

		mutex_unlock(&state->mutex);

		schedule();
	} while (!stop);

	return 0;
}

static int __init cryptd_init(void)
{
	int err;

	err = cryptd_create_thread(&state, cryptd_thread, "cryptd");
	if (err)
		return err;

	err = crypto_register_template(&cryptd_tmpl);
	if (err)
		kthread_stop(state.task);

	return err;
}

static void __exit cryptd_exit(void)
{
	cryptd_stop_thread(&state);
	crypto_unregister_template(&cryptd_tmpl);
}

module_init(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");