/*
 * pcrypt - Parallel crypto wrapper.
 *
 * Copyright (C) 2009 secunet Security Networks AG
 * Copyright (C) 2009 Steffen Klassert <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <crypto/pcrypt.h>

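/*
 * Encryption and decryption each get their own padata instance and
 * workqueue, so the two directions are parallelized and serialized
 * independently of each other.
 */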
static struct padata_instance *pcrypt_enc_padata;
static struct padata_instance *pcrypt_dec_padata;
static struct workqueue_struct *encwq;
static struct workqueue_struct *decwq;

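/*
 * Per-instance context: the spawn holding a reference to the wrapped
 * algorithm, plus a counter used to spread the transforms of this
 * instance round-robin over the active CPUs (see pcrypt_aead_init_tfm()).
 */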
struct pcrypt_instance_ctx {
        struct crypto_spawn spawn;
        unsigned int tfm_count;
};

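/*
 * Per-transform context: the underlying AEAD transform that does the
 * real work, and the CPU on which completion callbacks for this
 * transform are serialized.
 */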
struct pcrypt_aead_ctx {
        struct crypto_aead *child;
        unsigned int cb_cpu;
};

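/*
 * Hand a request over to padata for parallel processing.  If the cached
 * callback CPU is no longer active (e.g. after CPU hotplug), it is first
 * remapped deterministically onto the current cpu_active_mask before the
 * request is queued.
 */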
static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu,
                              struct padata_instance *pinst)
{
        unsigned int cpu_index, cpu, i;

        cpu = *cb_cpu;

        if (cpumask_test_cpu(cpu, cpu_active_mask))
                goto out;

        cpu_index = cpu % cpumask_weight(cpu_active_mask);

        cpu = cpumask_first(cpu_active_mask);
        for (i = 0; i < cpu_index; i++)
                cpu = cpumask_next(cpu, cpu_active_mask);

        *cb_cpu = cpu;

out:
        return padata_do_parallel(pinst, padata, cpu);
}

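/* setkey and setauthsize are simply delegated to the underlying transform. */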
static int pcrypt_aead_setkey(struct crypto_aead *parent,
                              const u8 *key, unsigned int keylen)
{
        struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

        return crypto_aead_setkey(ctx->child, key, keylen);
}

static int pcrypt_aead_setauthsize(struct crypto_aead *parent,
                                   unsigned int authsize)
{
        struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);

        return crypto_aead_setauthsize(ctx->child, authsize);
}

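/*
 * padata serial callbacks.  padata guarantees these run in the order the
 * requests were submitted, so the completion of the outer request is
 * reported in the original request order regardless of which CPU finished
 * the parallel work first.
 */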
static void pcrypt_aead_serial(struct padata_priv *padata)
{
        struct pcrypt_request *preq = pcrypt_padata_request(padata);
        struct aead_request *req = pcrypt_request_ctx(preq);

        aead_request_complete(req->base.data, padata->info);
}

static void pcrypt_aead_giv_serial(struct padata_priv *padata)
{
        struct pcrypt_request *preq = pcrypt_padata_request(padata);
        struct aead_givcrypt_request *req = pcrypt_request_ctx(preq);

        aead_request_complete(req->areq.base.data, padata->info);
}

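/*
 * Completion callback of the child transform for requests that finished
 * asynchronously.  The result is stashed in padata->info and the request
 * is handed back to padata for serialization.  MAY_SLEEP is cleared,
 * presumably because the remaining completion path may run in atomic
 * context.
 */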
static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
{
        struct aead_request *req = areq->data;
        struct pcrypt_request *preq = aead_request_ctx(req);
        struct padata_priv *padata = pcrypt_request_padata(preq);

        padata->info = err;
        req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;

        padata_do_serial(padata);
}

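/*
 * Parallel worker: runs the child encryption on the CPU padata selected.
 * A result of -EINPROGRESS means the child completes asynchronously, in
 * which case pcrypt_aead_done() takes care of serialization later.
 */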
static void pcrypt_aead_enc(struct padata_priv *padata)
{
        struct pcrypt_request *preq = pcrypt_padata_request(padata);
        struct aead_request *req = pcrypt_request_ctx(preq);

        padata->info = crypto_aead_encrypt(req);

        if (padata->info == -EINPROGRESS)
                return;

        padata_do_serial(padata);
}

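/*
 * Entry point for encryption.  The child request embedded in the pcrypt
 * request context is set up as a copy of the caller's request and queued
 * through padata.  With the padata interface this code was written
 * against, padata_do_parallel() returns -EINPROGRESS once the request has
 * been queued; a return of 0 means the padata instance is not running, in
 * which case we fall back to processing the request synchronously on the
 * child transform.
 */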
static int pcrypt_aead_encrypt(struct aead_request *req)
{
        int err;
        struct pcrypt_request *preq = aead_request_ctx(req);
        struct aead_request *creq = pcrypt_request_ctx(preq);
        struct padata_priv *padata = pcrypt_request_padata(preq);
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
        u32 flags = aead_request_flags(req);

        memset(padata, 0, sizeof(struct padata_priv));

        padata->parallel = pcrypt_aead_enc;
        padata->serial = pcrypt_aead_serial;

        aead_request_set_tfm(creq, ctx->child);
        aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
                                  pcrypt_aead_done, req);
        aead_request_set_crypt(creq, req->src, req->dst,
                               req->cryptlen, req->iv);
        aead_request_set_assoc(creq, req->assoc, req->assoclen);

        err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_enc_padata);
        if (err)
                return err;
        else
                err = crypto_aead_encrypt(creq);

        return err;
}

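/*
 * The decryption path mirrors encryption: pcrypt_aead_dec() is the
 * parallel worker and pcrypt_aead_decrypt() dispatches to the decryption
 * padata instance.
 */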
static void pcrypt_aead_dec(struct padata_priv *padata)
{
        struct pcrypt_request *preq = pcrypt_padata_request(padata);
        struct aead_request *req = pcrypt_request_ctx(preq);

        padata->info = crypto_aead_decrypt(req);

        if (padata->info == -EINPROGRESS)
                return;

        padata_do_serial(padata);
}

static int pcrypt_aead_decrypt(struct aead_request *req)
{
        int err;
        struct pcrypt_request *preq = aead_request_ctx(req);
        struct aead_request *creq = pcrypt_request_ctx(preq);
        struct padata_priv *padata = pcrypt_request_padata(preq);
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
        u32 flags = aead_request_flags(req);

        memset(padata, 0, sizeof(struct padata_priv));

        padata->parallel = pcrypt_aead_dec;
        padata->serial = pcrypt_aead_serial;

        aead_request_set_tfm(creq, ctx->child);
        aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
                                  pcrypt_aead_done, req);
        aead_request_set_crypt(creq, req->src, req->dst,
                               req->cryptlen, req->iv);
        aead_request_set_assoc(creq, req->assoc, req->assoclen);

        err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_dec_padata);
        if (err)
                return err;
        else
                err = crypto_aead_decrypt(creq);

        return err;
}

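/*
 * The givencrypt path parallelizes IV generation plus encryption in the
 * same way, using the aead_givcrypt request type of the child transform
 * and sharing the encryption padata instance.
 */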
static void pcrypt_aead_givenc(struct padata_priv *padata)
{
        struct pcrypt_request *preq = pcrypt_padata_request(padata);
        struct aead_givcrypt_request *req = pcrypt_request_ctx(preq);

        padata->info = crypto_aead_givencrypt(req);

        if (padata->info == -EINPROGRESS)
                return;

        padata_do_serial(padata);
}

static int pcrypt_aead_givencrypt(struct aead_givcrypt_request *req)
{
        int err;
        struct aead_request *areq = &req->areq;
        struct pcrypt_request *preq = aead_request_ctx(areq);
        struct aead_givcrypt_request *creq = pcrypt_request_ctx(preq);
        struct padata_priv *padata = pcrypt_request_padata(preq);
        struct crypto_aead *aead = aead_givcrypt_reqtfm(req);
        struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
        u32 flags = aead_request_flags(areq);

        memset(padata, 0, sizeof(struct padata_priv));

        padata->parallel = pcrypt_aead_givenc;
        padata->serial = pcrypt_aead_giv_serial;

        aead_givcrypt_set_tfm(creq, ctx->child);
        aead_givcrypt_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
                                   pcrypt_aead_done, areq);
        aead_givcrypt_set_crypt(creq, areq->src, areq->dst,
                                areq->cryptlen, areq->iv);
        aead_givcrypt_set_assoc(creq, areq->assoc, areq->assoclen);
        aead_givcrypt_set_giv(creq, req->giv, req->seq);

        err = pcrypt_do_parallel(padata, &ctx->cb_cpu, pcrypt_enc_padata);
        if (err)
                return err;
        else
                err = crypto_aead_givencrypt(creq);

        return err;
}

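/*
 * Transform initialization.  Each new transform is assigned a callback
 * CPU round-robin over the active CPUs, which distributes the
 * serialization work of different transforms across the system.  The
 * request size is chosen so that the pcrypt request context can hold
 * either of the child request types (aead_givcrypt_request is the larger,
 * as it embeds an aead_request).
 */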
static int pcrypt_aead_init_tfm(struct crypto_tfm *tfm)
{
        int cpu, cpu_index;
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct pcrypt_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_aead *cipher;

        ictx->tfm_count++;

        cpu_index = ictx->tfm_count % cpumask_weight(cpu_active_mask);

        ctx->cb_cpu = cpumask_first(cpu_active_mask);
        for (cpu = 0; cpu < cpu_index; cpu++)
                ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_active_mask);

        cipher = crypto_spawn_aead(crypto_instance_ctx(inst));

        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;
        tfm->crt_aead.reqsize = sizeof(struct pcrypt_request)
                + sizeof(struct aead_givcrypt_request)
                + crypto_aead_reqsize(cipher);

        return 0;
}

static void pcrypt_aead_exit_tfm(struct crypto_tfm *tfm)
{
        struct pcrypt_aead_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_aead(ctx->child);
}

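/*
 * Allocate a pcrypt instance wrapping @alg.  The instance keeps the
 * original cra_name but gets a "pcrypt(...)" driver name and a priority
 * boost of 100, so the wrapped version is preferred over the plain
 * algorithm once instantiated.
 */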
static struct crypto_instance *pcrypt_alloc_instance(struct crypto_alg *alg)
{
        struct crypto_instance *inst;
        struct pcrypt_instance_ctx *ctx;
        int err;

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst) {
                inst = ERR_PTR(-ENOMEM);
                goto out;
        }

        err = -ENAMETOOLONG;
        if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                     "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
                goto out_free_inst;

        memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

        ctx = crypto_instance_ctx(inst);
        err = crypto_init_spawn(&ctx->spawn, alg, inst,
                                CRYPTO_ALG_TYPE_MASK);
        if (err)
                goto out_free_inst;

        inst->alg.cra_priority = alg->cra_priority + 100;
        inst->alg.cra_blocksize = alg->cra_blocksize;
        inst->alg.cra_alignmask = alg->cra_alignmask;

out:
        return inst;

out_free_inst:
        kfree(inst);
        inst = ERR_PTR(err);
        goto out;
}

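/*
 * Build a pcrypt instance for an AEAD algorithm.  Note that algt is not
 * IS_ERR-checked here: pcrypt_alloc() has already validated the attribute
 * type before dispatching to this function.
 */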
static struct crypto_instance *pcrypt_alloc_aead(struct rtattr **tb)
{
        struct crypto_instance *inst;
        struct crypto_alg *alg;
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);

        alg = crypto_get_attr_alg(tb, algt->type,
                                  (algt->mask & CRYPTO_ALG_TYPE_MASK));
        if (IS_ERR(alg))
                return ERR_CAST(alg);

        inst = pcrypt_alloc_instance(alg);
        if (IS_ERR(inst))
                goto out_put_alg;

        inst->alg.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC;
        inst->alg.cra_type = &crypto_aead_type;

        inst->alg.cra_aead.ivsize = alg->cra_aead.ivsize;
        inst->alg.cra_aead.geniv = alg->cra_aead.geniv;
        inst->alg.cra_aead.maxauthsize = alg->cra_aead.maxauthsize;

        inst->alg.cra_ctxsize = sizeof(struct pcrypt_aead_ctx);

        inst->alg.cra_init = pcrypt_aead_init_tfm;
        inst->alg.cra_exit = pcrypt_aead_exit_tfm;

        inst->alg.cra_aead.setkey = pcrypt_aead_setkey;
        inst->alg.cra_aead.setauthsize = pcrypt_aead_setauthsize;
        inst->alg.cra_aead.encrypt = pcrypt_aead_encrypt;
        inst->alg.cra_aead.decrypt = pcrypt_aead_decrypt;
        inst->alg.cra_aead.givencrypt = pcrypt_aead_givencrypt;

out_put_alg:
        crypto_mod_put(alg);
        return inst;
}

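/*
 * Template entry point: only AEAD algorithms are supported, anything else
 * is rejected with -EINVAL.
 */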
static struct crypto_instance *pcrypt_alloc(struct rtattr **tb)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return ERR_CAST(algt);

        switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_AEAD:
                return pcrypt_alloc_aead(tb);
        }

        return ERR_PTR(-EINVAL);
}

static void pcrypt_free(struct crypto_instance *inst)
{
        struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst);

        crypto_drop_spawn(&ctx->spawn);
        kfree(inst);
}

static struct crypto_template pcrypt_tmpl = {
        .name = "pcrypt",
        .alloc = pcrypt_alloc,
        .free = pcrypt_free,
        .module = THIS_MODULE,
};

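/*
 * Module init: create one workqueue and one padata instance per direction
 * over all possible CPUs, start them, and register the "pcrypt" template.
 * On failure, everything already set up is torn down in reverse order.
 */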
static int __init pcrypt_init(void)
{
        encwq = create_workqueue("pencrypt");
        if (!encwq)
                goto err;

        decwq = create_workqueue("pdecrypt");
        if (!decwq)
                goto err_destroy_encwq;

        pcrypt_enc_padata = padata_alloc(cpu_possible_mask, encwq);
        if (!pcrypt_enc_padata)
                goto err_destroy_decwq;

        pcrypt_dec_padata = padata_alloc(cpu_possible_mask, decwq);
        if (!pcrypt_dec_padata)
                goto err_free_padata;

        padata_start(pcrypt_enc_padata);
        padata_start(pcrypt_dec_padata);

        return crypto_register_template(&pcrypt_tmpl);

err_free_padata:
        padata_free(pcrypt_enc_padata);

err_destroy_decwq:
        destroy_workqueue(decwq);

err_destroy_encwq:
        destroy_workqueue(encwq);

err:
        return -ENOMEM;
}

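/*
 * Module exit: stop the padata instances before destroying the workqueues
 * that feed them, then free the instances and unregister the template.
 */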
static void __exit pcrypt_exit(void)
{
        padata_stop(pcrypt_enc_padata);
        padata_stop(pcrypt_dec_padata);

        destroy_workqueue(encwq);
        destroy_workqueue(decwq);

        padata_free(pcrypt_enc_padata);
        padata_free(pcrypt_dec_padata);

        crypto_unregister_template(&pcrypt_tmpl);
}

module_init(pcrypt_init);
module_exit(pcrypt_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <[email protected]>");
MODULE_DESCRIPTION("Parallel crypto wrapper");