/*
 * echainiv: Encrypted Chain IV Generator
 *
 * This generator generates an IV based on a sequence number by xoring it
 * with a salt and then encrypting it with the same key as used to encrypt
 * the plain text.  This algorithm requires that the block size be equal
 * to the IV size.  It is mainly useful for CBC.
 *
 * This generator can only be used by algorithms where authentication
 * is performed after encryption (i.e., authenc).
 *
 * Copyright (c) 2015 Herbert Xu <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

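/*
 * Rough shape of the encrypt path below: the caller's sequence-number
 * based IV is xored with a per-transform random salt and written into
 * the destination buffer in the clear, while the IV actually fed to the
 * child algorithm is the chained value kept in per-cpu storage.  After
 * encryption the child's final IV (for CBC, conventionally the last
 * ciphertext block) is written back to the per-cpu store, so the next
 * request on this CPU starts from an encrypted, unpredictable value.
 */
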
#include <crypto/internal/aead.h>
#include <crypto/null.h>
#include <crypto/rng.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#define MAX_IV_SIZE 16

struct echainiv_request_ctx {
	struct scatterlist src[2];
	struct scatterlist dst[2];
	struct scatterlist ivbuf[2];
	struct scatterlist *ivsg;
	struct aead_givcrypt_request subreq;
};

struct echainiv_ctx {
	struct crypto_aead *child;
	spinlock_t lock;
	struct crypto_blkcipher *null;
	u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
};

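/*
 * Per-cpu chain state: the most recent IV produced on this CPU.  It is
 * accessed a u32 at a time; as the comment below notes, racing with
 * another CPU is tolerated here.
 */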
static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv);

static int echainiv_setkey(struct crypto_aead *tfm,
			   const u8 *key, unsigned int keylen)
{
	struct echainiv_ctx *ctx = crypto_aead_ctx(tfm);

	return crypto_aead_setkey(ctx->child, key, keylen);
}

static int echainiv_setauthsize(struct crypto_aead *tfm,
				unsigned int authsize)
{
	struct echainiv_ctx *ctx = crypto_aead_ctx(tfm);

	return crypto_aead_setauthsize(ctx->child, authsize);
}

/* We don't care if we get preempted and read/write IVs from the next CPU. */
static void echainiv_read_iv(u8 *dst, unsigned size)
{
	u32 *a = (u32 *)dst;
	u32 __percpu *b = echainiv_iv;

	for (; size >= 4; size -= 4) {
		*a++ = this_cpu_read(*b);
		b++;
	}
}

static void echainiv_write_iv(const u8 *src, unsigned size)
{
	const u32 *a = (const u32 *)src;
	u32 __percpu *b = echainiv_iv;

	for (; size >= 4; size -= 4) {
		this_cpu_write(*b, *a);
		a++;
		b++;
	}
}

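/*
 * Completion for the compat (givcrypt) path: on success, copy the
 * generated IV from the bounce buffer into its slot in the destination
 * scatterlist, then free the buffer allocated by
 * echainiv_encrypt_compat().
 */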
static void echainiv_encrypt_compat_complete2(struct aead_request *req,
					      int err)
{
	struct echainiv_request_ctx *rctx = aead_request_ctx(req);
	struct aead_givcrypt_request *subreq = &rctx->subreq;
	struct crypto_aead *geniv;

	if (err == -EINPROGRESS)
		return;

	if (err)
		goto out;

	geniv = crypto_aead_reqtfm(req);
	scatterwalk_map_and_copy(subreq->giv, rctx->ivsg, 0,
				 crypto_aead_ivsize(geniv), 1);

out:
	kzfree(subreq->giv);
}

static void echainiv_encrypt_compat_complete(
	struct crypto_async_request *base, int err)
{
	struct aead_request *req = base->data;

	echainiv_encrypt_compat_complete2(req, err);
	aead_request_complete(req, err);
}

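/*
 * Completion for the native path: on success, feed the child's final IV
 * back into the per-cpu chain and, if an aligned bounce copy was used,
 * propagate it to the caller's IV buffer before freeing the copy.
 */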
static void echainiv_encrypt_complete2(struct aead_request *req, int err)
{
	struct aead_request *subreq = aead_request_ctx(req);
	struct crypto_aead *geniv;
	unsigned int ivsize;

	if (err == -EINPROGRESS)
		return;

	if (err)
		goto out;

	geniv = crypto_aead_reqtfm(req);
	ivsize = crypto_aead_ivsize(geniv);

	echainiv_write_iv(subreq->iv, ivsize);

	if (req->iv != subreq->iv)
		memcpy(req->iv, subreq->iv, ivsize);

out:
	if (req->iv != subreq->iv)
		kzfree(subreq->iv);
}

static void echainiv_encrypt_complete(struct crypto_async_request *base,
				      int err)
{
	struct aead_request *req = base->data;

	echainiv_encrypt_complete2(req, err);
	aead_request_complete(req, err);
}

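/*
 * Compat encrypt: drive an old-style givcrypt child.  The IV slot in
 * the destination is located with scatterwalk_ffwd(); if it lives in
 * highmem, a bounce buffer is allocated and the generated IV is copied
 * into place on completion.
 */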
static int echainiv_encrypt_compat(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	struct echainiv_request_ctx *rctx = aead_request_ctx(req);
	struct aead_givcrypt_request *subreq = &rctx->subreq;
	unsigned int ivsize = crypto_aead_ivsize(geniv);
	crypto_completion_t compl;
	void *data;
	u8 *info;
	__be64 seq;
	int err;

	compl = req->base.complete;
	data = req->base.data;

	rctx->ivsg = scatterwalk_ffwd(rctx->ivbuf, req->dst, req->assoclen);
	info = PageHighMem(sg_page(rctx->ivsg)) ? NULL : sg_virt(rctx->ivsg);

	if (!info) {
		info = kmalloc(ivsize, req->base.flags &
				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
								  GFP_ATOMIC);
		if (!info)
			return -ENOMEM;

		compl = echainiv_encrypt_compat_complete;
		data = req;
	}

	memcpy(&seq, req->iv + ivsize - sizeof(seq), sizeof(seq));

	aead_givcrypt_set_tfm(subreq, ctx->child);
	/* Use compl/data so the bounce-buffer completion runs when needed. */
	aead_givcrypt_set_callback(subreq, req->base.flags, compl, data);
	aead_givcrypt_set_crypt(subreq,
				scatterwalk_ffwd(rctx->src, req->src,
						 req->assoclen + ivsize),
				scatterwalk_ffwd(rctx->dst, rctx->ivsg,
						 ivsize),
				req->cryptlen - ivsize, req->iv);
	aead_givcrypt_set_assoc(subreq, req->src, req->assoclen);
	aead_givcrypt_set_giv(subreq, info, be64_to_cpu(seq));

	err = crypto_aead_givencrypt(subreq);
	if (unlikely(PageHighMem(sg_page(rctx->ivsg))))
		echainiv_encrypt_compat_complete2(req, err);
	return err;
}

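/*
 * Native encrypt: copy the plaintext into dst if needed (via the null
 * cipher), store salt ^ IV in the IV slot of dst, then encrypt in place
 * with the per-cpu chained IV.  The IV slot is covered as associated
 * data by the child request; the chain advances in the completion
 * handler.
 */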
static int echainiv_encrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	u8 *info;
	unsigned int ivsize;
	int err;

	aead_request_set_tfm(subreq, ctx->child);

	compl = echainiv_encrypt_complete;
	data = req;
	info = req->iv;

	ivsize = crypto_aead_ivsize(geniv);

	if (req->src != req->dst) {
		struct scatterlist src[2];
		struct scatterlist dst[2];
		struct blkcipher_desc desc = {
			.tfm = ctx->null,
		};

		err = crypto_blkcipher_encrypt(
			&desc,
			scatterwalk_ffwd(dst, req->dst,
					 req->assoclen + ivsize),
			scatterwalk_ffwd(src, req->src,
					 req->assoclen + ivsize),
			req->cryptlen - ivsize);
		if (err)
			return err;
	}

	if (unlikely(!IS_ALIGNED((unsigned long)info,
				 crypto_aead_alignmask(geniv) + 1))) {
		info = kmalloc(ivsize, req->base.flags &
				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
								  GFP_ATOMIC);
		if (!info)
			return -ENOMEM;

		memcpy(info, req->iv, ivsize);
	}

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->dst, req->dst,
			       req->cryptlen - ivsize, info);
	aead_request_set_ad(subreq, req->assoclen + ivsize, 0);

	crypto_xor(info, ctx->salt, ivsize);
	scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
	echainiv_read_iv(info, ivsize);

	err = crypto_aead_encrypt(subreq);
	echainiv_encrypt_complete2(req, err);
	return err;
}

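/*
 * Compat decrypt: read the transmitted IV out of src into req->iv for
 * the old-style child, leaving the IV slot out of both the associated
 * data and the ciphertext handed to it.
 */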
static int echainiv_decrypt_compat(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	unsigned int ivsize;

	aead_request_set_tfm(subreq, ctx->child);

	compl = req->base.complete;
	data = req->base.data;

	ivsize = crypto_aead_ivsize(geniv);

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->src, req->dst,
			       req->cryptlen - ivsize, req->iv);
	aead_request_set_ad(subreq, req->assoclen, ivsize);

	scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);

	return crypto_aead_decrypt(subreq);
}

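/*
 * Native decrypt: the IV slot stays in place and is counted as part of
 * the associated data; it is additionally mirrored into dst when
 * src != dst.
 */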
static int echainiv_decrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	unsigned int ivsize;

	aead_request_set_tfm(subreq, ctx->child);

	compl = req->base.complete;
	data = req->base.data;

	ivsize = crypto_aead_ivsize(geniv);

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->src, req->dst,
			       req->cryptlen - ivsize, req->iv);
	aead_request_set_ad(subreq, req->assoclen + ivsize, 0);

	scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);
	if (req->src != req->dst)
		scatterwalk_map_and_copy(req->iv, req->dst,
					 req->assoclen, ivsize, 1);

	return crypto_aead_decrypt(subreq);
}

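/*
 * First-use hooks: the salt is generated lazily on the first encryption.
 * The spinlock and the re-check of geniv->encrypt ensure exactly one
 * caller generates it before the entry point is switched to the real
 * encrypt function.
 */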
static int echainiv_encrypt_compat_first(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	int err = 0;

	spin_lock_bh(&ctx->lock);
	if (geniv->encrypt != echainiv_encrypt_compat_first)
		goto unlock;

	geniv->encrypt = echainiv_encrypt_compat;
	err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
				   crypto_aead_ivsize(geniv));

unlock:
	spin_unlock_bh(&ctx->lock);

	if (err)
		return err;

	return echainiv_encrypt_compat(req);
}

static int echainiv_encrypt_first(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	int err = 0;

	spin_lock_bh(&ctx->lock);
	if (geniv->encrypt != echainiv_encrypt_first)
		goto unlock;

	geniv->encrypt = echainiv_encrypt;
	err = crypto_rng_get_bytes(crypto_default_rng, ctx->salt,
				   crypto_aead_ivsize(geniv));

unlock:
	spin_unlock_bh(&ctx->lock);

	if (err)
		return err;

	return echainiv_encrypt(req);
}

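/*
 * Instance init: after aead_geniv_init() has set up the child
 * transform, the child is stashed in the private context and
 * geniv->child is pointed back at the geniv itself.  The native variant
 * also takes a reference on the default null skcipher used for the
 * src-to-dst copy in echainiv_encrypt().
 */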
static int echainiv_compat_init(struct crypto_tfm *tfm)
{
	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	int err;

	spin_lock_init(&ctx->lock);

	crypto_aead_set_reqsize(geniv, sizeof(struct echainiv_request_ctx));

	err = aead_geniv_init(tfm);

	ctx->child = geniv->child;
	geniv->child = geniv;

	return err;
}

static int echainiv_init(struct crypto_tfm *tfm)
{
	struct crypto_aead *geniv = __crypto_aead_cast(tfm);
	struct echainiv_ctx *ctx = crypto_aead_ctx(geniv);
	int err;

	spin_lock_init(&ctx->lock);

	crypto_aead_set_reqsize(geniv, sizeof(struct aead_request));

	ctx->null = crypto_get_default_null_skcipher();
	err = PTR_ERR(ctx->null);
	if (IS_ERR(ctx->null))
		goto out;

	err = aead_geniv_init(tfm);
	if (err)
		goto drop_null;

	ctx->child = geniv->child;
	geniv->child = geniv;

out:
	return err;

drop_null:
	crypto_put_default_null_skcipher();
	goto out;
}

static void echainiv_compat_exit(struct crypto_tfm *tfm)
{
	struct echainiv_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_aead(ctx->child);
}

static void echainiv_exit(struct crypto_tfm *tfm)
{
	struct echainiv_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_free_aead(ctx->child);
	crypto_put_default_null_skcipher();
}

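/*
 * Template construction: build a geniv instance around the child AEAD
 * and wire up the new-style entry points, falling back to the compat
 * (givcrypt) entry points when the child is still an old-style AEAD
 * (alg->base.cra_aead.encrypt is set).  The context size includes room
 * for the salt after struct echainiv_ctx.
 */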
static int echainiv_aead_create(struct crypto_template *tmpl,
				struct rtattr **tb)
{
	struct aead_instance *inst;
	struct crypto_aead_spawn *spawn;
	struct aead_alg *alg;
	int err;

	inst = aead_geniv_alloc(tmpl, tb, 0, 0);

	if (IS_ERR(inst))
		return PTR_ERR(inst);

	err = -EINVAL;
	if (inst->alg.ivsize < sizeof(u64) ||
	    inst->alg.ivsize & (sizeof(u32) - 1) ||
	    inst->alg.ivsize > MAX_IV_SIZE)
		goto free_inst;

	spawn = aead_instance_ctx(inst);
	alg = crypto_spawn_aead_alg(spawn);

	inst->alg.setkey = echainiv_setkey;
	inst->alg.setauthsize = echainiv_setauthsize;
	inst->alg.encrypt = echainiv_encrypt_first;
	inst->alg.decrypt = echainiv_decrypt;

	inst->alg.base.cra_init = echainiv_init;
	inst->alg.base.cra_exit = echainiv_exit;

	inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
	inst->alg.base.cra_ctxsize = sizeof(struct echainiv_ctx);
	inst->alg.base.cra_ctxsize += inst->alg.base.cra_aead.ivsize;

	if (alg->base.cra_aead.encrypt) {
		inst->alg.encrypt = echainiv_encrypt_compat_first;
		inst->alg.decrypt = echainiv_decrypt_compat;

		inst->alg.base.cra_init = echainiv_compat_init;
		inst->alg.base.cra_exit = echainiv_compat_exit;
	}

	err = aead_register_instance(tmpl, inst);
	if (err)
		goto free_inst;

out:
	return err;

free_inst:
	aead_geniv_free(inst);
	goto out;
}

static int echainiv_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	int err;

	err = crypto_get_default_rng();
	if (err)
		goto out;

	err = echainiv_aead_create(tmpl, tb);
	if (err)
		goto put_rng;

out:
	return err;

put_rng:
	crypto_put_default_rng();
	goto out;
}

static void echainiv_free(struct crypto_instance *inst)
{
	aead_geniv_free(aead_instance(inst));
	crypto_put_default_rng();
}

static struct crypto_template echainiv_tmpl = {
	.name = "echainiv",
	.create = echainiv_create,
	.free = echainiv_free,
	.module = THIS_MODULE,
};

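/*
 * Illustrative use only: the template wraps an authenc-style AEAD so
 * that callers such as IPsec get IV generation for free, e.g.
 *
 *	tfm = crypto_alloc_aead("echainiv(authenc(hmac(sha1),cbc(aes)))",
 *				0, 0);
 *
 * The algorithm string is an example; any authenc AEAD whose IV size
 * equals its block size is a candidate child.
 */
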
static int __init echainiv_module_init(void)
{
	return crypto_register_template(&echainiv_tmpl);
}

static void __exit echainiv_module_exit(void)
{
	crypto_unregister_template(&echainiv_tmpl);
}

module_init(echainiv_module_init);
module_exit(echainiv_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Encrypted Chain IV Generator");
MODULE_ALIAS_CRYPTO("echainiv");