/*
 * Asynchronous block chaining cipher operations.
 *
 * This is the asynchronous version of blkcipher.c indicating completion
 * via a callback.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <linux/compiler.h>
#include <net/netlink.h>

#include <crypto/scatterwalk.h>

#include "internal.h"

struct ablkcipher_buffer {
	struct list_head	entry;
	struct scatter_walk	dst;
	unsigned int		len;
	void			*data;
};

enum {
	ABLKCIPHER_WALK_SLOW = 1 << 0,
};

static inline void ablkcipher_buffer_write(struct ablkcipher_buffer *p)
{
	scatterwalk_copychunks(p->data, &p->dst, p->len, 1);
}

void __ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
	struct ablkcipher_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		ablkcipher_buffer_write(p);
		list_del(&p->entry);
		kfree(p);
	}
}
EXPORT_SYMBOL_GPL(__ablkcipher_walk_complete);

static inline void ablkcipher_queue_write(struct ablkcipher_walk *walk,
					  struct ablkcipher_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *ablkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}

static inline unsigned int ablkcipher_done_slow(struct ablkcipher_walk *walk,
						unsigned int bsize)
{
	unsigned int n = bsize;

	for (;;) {
		unsigned int len_this_page = scatterwalk_pagelen(&walk->out);

		if (len_this_page > n)
			len_this_page = n;
		scatterwalk_advance(&walk->out, n);
		if (n == len_this_page)
			break;
		n -= len_this_page;
		scatterwalk_start(&walk->out, sg_next(walk->out.sg));
	}

	return bsize;
}

static inline unsigned int ablkcipher_done_fast(struct ablkcipher_walk *walk,
						unsigned int n)
{
	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);

	return n;
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk);

int ablkcipher_walk_done(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk, int err)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int n = walk->nbytes - err;

		if (likely(!(walk->flags & ABLKCIPHER_WALK_SLOW)))
			n = ablkcipher_done_fast(walk, n);
		else if (WARN_ON(err)) {
			err = -EINVAL;
			goto err;
		} else
			n = ablkcipher_done_slow(walk, n);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

err:
	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(req->base.flags);
		return ablkcipher_walk_next(req, walk);
	}

	if (walk->iv != req->info)
		memcpy(req->info, walk->iv, tfm->crt_ablkcipher.ivsize);
	kfree(walk->iv_buffer);

	return err;
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_done);

static inline int ablkcipher_next_slow(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk,
				       unsigned int bsize,
				       unsigned int alignmask,
				       void **src_p, void **dst_p)
{
	unsigned aligned_bsize = ALIGN(bsize, alignmask + 1);
	struct ablkcipher_buffer *p;
	void *src, *dst, *base;
	unsigned int n;

	n = ALIGN(sizeof(struct ablkcipher_buffer), alignmask + 1);
	n += (aligned_bsize * 3 - (alignmask + 1) +
	      (alignmask & ~(crypto_tfm_ctx_alignment() - 1)));

	p = kmalloc(n, GFP_ATOMIC);
	if (!p)
		return ablkcipher_walk_done(req, walk, -ENOMEM);

	base = p + 1;

	dst = (u8 *)ALIGN((unsigned long)base, alignmask + 1);
	src = dst = ablkcipher_get_spot(dst, bsize);

	p->len = bsize;
	p->data = dst;

	scatterwalk_copychunks(src, &walk->in, bsize, 0);

	ablkcipher_queue_write(walk, p);

	walk->nbytes = bsize;
	walk->flags |= ABLKCIPHER_WALK_SLOW;

	*src_p = src;
	*dst_p = dst;

	return 0;
}

static inline int ablkcipher_copy_iv(struct ablkcipher_walk *walk,
				     struct crypto_tfm *tfm,
				     unsigned int alignmask)
{
	unsigned bs = walk->blocksize;
	unsigned int ivsize = tfm->crt_ablkcipher.ivsize;
	unsigned aligned_bs = ALIGN(bs, alignmask + 1);
	unsigned int size = aligned_bs * 2 + ivsize + max(aligned_bs, ivsize) -
			    (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->iv_buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->iv_buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->iv_buffer, alignmask + 1);
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, bs) + aligned_bs;
	iv = ablkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}

static inline int ablkcipher_next_fast(struct ablkcipher_request *req,
				       struct ablkcipher_walk *walk)
{
	walk->src.page = scatterwalk_page(&walk->in);
	walk->src.offset = offset_in_page(walk->in.offset);
	walk->dst.page = scatterwalk_page(&walk->out);
	walk->dst.offset = offset_in_page(walk->out.offset);

	return 0;
}

static int ablkcipher_walk_next(struct ablkcipher_request *req,
				struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask, bsize, n;
	void *src, *dst;
	int err;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	n = walk->total;
	if (unlikely(n < crypto_tfm_alg_blocksize(tfm))) {
		req->base.flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN;
		return ablkcipher_walk_done(req, walk, -EINVAL);
	}

	walk->flags &= ~ABLKCIPHER_WALK_SLOW;
	src = dst = NULL;

	bsize = min(walk->blocksize, n);
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (n < bsize ||
	    !scatterwalk_aligned(&walk->in, alignmask) ||
	    !scatterwalk_aligned(&walk->out, alignmask)) {
		err = ablkcipher_next_slow(req, walk, bsize, alignmask,
					   &src, &dst);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return ablkcipher_next_fast(req, walk);

set_phys_lowmem:
	if (err >= 0) {
		walk->src.page = virt_to_page(src);
		walk->dst.page = virt_to_page(dst);
		walk->src.offset = ((unsigned long)src & (PAGE_SIZE - 1));
		walk->dst.offset = ((unsigned long)dst & (PAGE_SIZE - 1));
	}

	return err;
}

static int ablkcipher_walk_first(struct ablkcipher_request *req,
				 struct ablkcipher_walk *walk)
{
	struct crypto_tfm *tfm = req->base.tfm;
	unsigned int alignmask;

	alignmask = crypto_tfm_alg_alignmask(tfm);
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

	walk->iv = req->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->iv_buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = ablkcipher_copy_iv(walk, tfm, alignmask);

		if (err)
			return err;
	}

	scatterwalk_start(&walk->in, walk->in.sg);
	scatterwalk_start(&walk->out, walk->out.sg);

	return ablkcipher_walk_next(req, walk);
}

int ablkcipher_walk_phys(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk)
{
	walk->blocksize = crypto_tfm_alg_blocksize(req->base.tfm);
	return ablkcipher_walk_first(req, walk);
}
EXPORT_SYMBOL_GPL(ablkcipher_walk_phys);

static int setkey_unaligned(struct crypto_ablkcipher *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	memset(alignbuffer, 0, keylen);
	kfree(buffer);
	return ret;
}

static int setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		  unsigned int keylen)
{
	struct ablkcipher_alg *cipher = crypto_ablkcipher_alg(tfm);
	unsigned long alignmask = crypto_ablkcipher_alignmask(tfm);

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if ((unsigned long)key & alignmask)
		return setkey_unaligned(tfm, key, keylen);

	return cipher->setkey(tfm, key, keylen);
}

static unsigned int crypto_ablkcipher_ctxsize(struct crypto_alg *alg, u32 type,
					      u32 mask)
{
	return alg->cra_ctxsize;
}

static int crypto_init_ablkcipher_ops(struct crypto_tfm *tfm, u32 type,
				      u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

#ifdef CONFIG_NET
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
		sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_ablkcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : ablkcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<default>");
}

const struct crypto_type crypto_ablkcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_ablkcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_ablkcipher_show,
#endif
	.report = crypto_ablkcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_ablkcipher_type);

static int crypto_init_givcipher_ops(struct crypto_tfm *tfm, u32 type,
				     u32 mask)
{
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;
	struct ablkcipher_tfm *crt = &tfm->crt_ablkcipher;

	if (alg->ivsize > PAGE_SIZE / 8)
		return -EINVAL;

	crt->setkey = tfm->__crt_alg->cra_flags & CRYPTO_ALG_GENIV ?
		      alg->setkey : setkey;
	crt->encrypt = alg->encrypt;
	crt->decrypt = alg->decrypt;
	crt->base = __crypto_ablkcipher_cast(tfm);
	crt->ivsize = alg->ivsize;

	return 0;
}

#ifdef CONFIG_NET
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_blkcipher rblkcipher;

	strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
	strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
		sizeof(rblkcipher.geniv));

	rblkcipher.blocksize = alg->cra_blocksize;
	rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
	rblkcipher.max_keysize = alg->cra_ablkcipher.max_keysize;
	rblkcipher.ivsize = alg->cra_ablkcipher.ivsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER,
		    sizeof(struct crypto_report_blkcipher), &rblkcipher))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_givcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct ablkcipher_alg *ablkcipher = &alg->cra_ablkcipher;

	seq_printf(m, "type         : givcipher\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "min keysize  : %u\n", ablkcipher->min_keysize);
	seq_printf(m, "max keysize  : %u\n", ablkcipher->max_keysize);
	seq_printf(m, "ivsize       : %u\n", ablkcipher->ivsize);
	seq_printf(m, "geniv        : %s\n", ablkcipher->geniv ?: "<built-in>");
}

const struct crypto_type crypto_givcipher_type = {
	.ctxsize = crypto_ablkcipher_ctxsize,
	.init = crypto_init_givcipher_ops,
#ifdef CONFIG_PROC_FS
	.show = crypto_givcipher_show,
#endif
	.report = crypto_givcipher_report,
};
EXPORT_SYMBOL_GPL(crypto_givcipher_type);