/*
 * Copyright (C) 2003 Christophe Saout <[email protected]>
 * Copyright (C) 2004 Clemens Fruhwirth <[email protected]>
 *
 * This file is released under the GPL.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>

#include "dm.h"

#define PFX	"crypt: "

/*
 * per bio private data
 */
struct crypt_io {
	struct dm_target *target;
	struct bio *bio;
	struct bio *first_clone;
	struct work_struct work;
	atomic_t pending;
	int error;
};

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t sector;
	int write;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	const char *(*status)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data and
	 * for encryption buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *page_pool;

	/*
	 * crypto related data
	 */
	struct crypt_iv_operations *iv_gen_ops;
	char *iv_mode;
	void *iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	struct crypto_tfm *tfm;
	unsigned int key_size;
	u8 key[0];
};
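
/*
 * Note: key[0] is a zero-length (flexible) array member; crypt_ctr()
 * allocates sizeof(struct crypt_config) + key_size bytes in one chunk
 * so the key material sits directly behind the struct.
 */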

#define MIN_IOS        256
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES  8

static kmem_cache_t *_crypt_io_pool;

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the
 *        sector number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */

static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);

	return 0;
}
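
/*
 * Worked example (illustrative values): with a 16-byte IV and sector
 * number 0x11223344, crypt_iv_plain_gen() yields 44 33 22 11 00 ... 00.
 * Only the low 32 bits of the sector number survive, stored
 * little-endian; the rest is zero padding.
 */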

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_tfm *essiv_tfm;
	struct crypto_tfm *hash_tfm;
	struct scatterlist sg;
	unsigned int saltsize;
	u8 *salt;

	if (opts == NULL) {
		ti->error = PFX "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Hash the cipher key with the given hash algorithm */
	hash_tfm = crypto_alloc_tfm(opts, CRYPTO_TFM_REQ_MAY_SLEEP);
	if (hash_tfm == NULL) {
		ti->error = PFX "Error initializing ESSIV hash";
		return -EINVAL;
	}

	if (crypto_tfm_alg_type(hash_tfm) != CRYPTO_ALG_TYPE_DIGEST) {
		ti->error = PFX "Expected digest algorithm for ESSIV hash";
		crypto_free_tfm(hash_tfm);
		return -EINVAL;
	}

	saltsize = crypto_tfm_alg_digestsize(hash_tfm);
	salt = kmalloc(saltsize, GFP_KERNEL);
	if (salt == NULL) {
		ti->error = PFX "Error kmallocing salt storage in ESSIV";
		crypto_free_tfm(hash_tfm);
		return -ENOMEM;
	}

	sg_set_buf(&sg, cc->key, cc->key_size);
	crypto_digest_digest(hash_tfm, &sg, 1, salt);
	crypto_free_tfm(hash_tfm);

	/* Setup the essiv_tfm with the given salt */
	essiv_tfm = crypto_alloc_tfm(crypto_tfm_alg_name(cc->tfm),
				     CRYPTO_TFM_MODE_ECB |
				     CRYPTO_TFM_REQ_MAY_SLEEP);
	if (essiv_tfm == NULL) {
		ti->error = PFX "Error allocating crypto tfm for ESSIV";
		kfree(salt);
		return -EINVAL;
	}
	if (crypto_tfm_alg_blocksize(essiv_tfm)
	    != crypto_tfm_alg_ivsize(cc->tfm)) {
		ti->error = PFX "Block size of ESSIV cipher does "
			        "not match IV size of block cipher";
		crypto_free_tfm(essiv_tfm);
		kfree(salt);
		return -EINVAL;
	}
	if (crypto_cipher_setkey(essiv_tfm, salt, saltsize) < 0) {
		ti->error = PFX "Failed to set key for ESSIV cipher";
		crypto_free_tfm(essiv_tfm);
		kfree(salt);
		return -EINVAL;
	}
	kfree(salt);

	cc->iv_gen_private = (void *)essiv_tfm;
	return 0;
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	crypto_free_tfm((struct crypto_tfm *)cc->iv_gen_private);
	cc->iv_gen_private = NULL;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	struct scatterlist sg;

	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);

	sg_set_buf(&sg, iv, cc->iv_size);
	crypto_cipher_encrypt((struct crypto_tfm *)cc->iv_gen_private,
			      &sg, &sg, cc->iv_size);

	return 0;
}
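
/*
 * In formula form: IV(sector) = E_salt(le64(sector)), where
 * salt = H(key) as computed in crypt_iv_essiv_ctr() and E is the bulk
 * cipher run in ECB mode. Every sector thus gets a stable IV that
 * cannot be predicted without knowing the key.
 */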

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr = crypt_iv_essiv_ctr,
	.dtr = crypt_iv_essiv_dtr,
	.generator = crypt_iv_essiv_gen
};


static int
crypt_convert_scatterlist(struct crypt_config *cc, struct scatterlist *out,
			  struct scatterlist *in, unsigned int length,
			  int write, sector_t sector)
{
	u8 iv[cc->iv_size];
	int r;

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, sector);
		if (r < 0)
			return r;

		if (write)
			r = crypto_cipher_encrypt_iv(cc->tfm, out, in, length, iv);
		else
			r = crypto_cipher_decrypt_iv(cc->tfm, out, in, length, iv);
	} else {
		if (write)
			r = crypto_cipher_encrypt(cc->tfm, out, in, length);
		else
			r = crypto_cipher_decrypt(cc->tfm, out, in, length);
	}

	return r;
}

static void
crypt_convert_init(struct crypt_config *cc, struct convert_context *ctx,
		   struct bio *bio_out, struct bio *bio_in,
		   sector_t sector, int write)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
	ctx->write = write;
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	int r = 0;

	while (ctx->idx_in < ctx->bio_in->bi_vcnt &&
	       ctx->idx_out < ctx->bio_out->bi_vcnt) {
		struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
		struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
		struct scatterlist sg_in = {
			.page = bv_in->bv_page,
			.offset = bv_in->bv_offset + ctx->offset_in,
			.length = 1 << SECTOR_SHIFT
		};
		struct scatterlist sg_out = {
			.page = bv_out->bv_page,
			.offset = bv_out->bv_offset + ctx->offset_out,
			.length = 1 << SECTOR_SHIFT
		};

		ctx->offset_in += sg_in.length;
		if (ctx->offset_in >= bv_in->bv_len) {
			ctx->offset_in = 0;
			ctx->idx_in++;
		}

		ctx->offset_out += sg_out.length;
		if (ctx->offset_out >= bv_out->bv_len) {
			ctx->offset_out = 0;
			ctx->idx_out++;
		}

		r = crypt_convert_scatterlist(cc, &sg_out, &sg_in, sg_in.length,
					      ctx->write, ctx->sector);
		if (r < 0)
			break;

		ctx->sector++;
	}

	return r;
}
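
/*
 * Note that the loop above advances both bios in 512-byte
 * (1 << SECTOR_SHIFT) steps and increments ctx->sector once per step,
 * so each sector is an independent cipher unit with its own IV.
 */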

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages
 */
static struct bio *
crypt_alloc_buffer(struct crypt_config *cc, unsigned int size,
		   struct bio *base_bio, unsigned int *bio_vec_idx)
{
	struct bio *bio;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned int i;

	/*
	 * Use __GFP_NOMEMALLOC to tell the VM to act less aggressively and
	 * to fail earlier. This is not necessary but increases throughput.
	 * FIXME: Is this really intelligent?
	 */
	if (base_bio)
		bio = bio_clone(base_bio, GFP_NOIO|__GFP_NOMEMALLOC);
	else
		bio = bio_alloc(GFP_NOIO|__GFP_NOMEMALLOC, nr_iovecs);
	if (!bio)
		return NULL;

	/* if the last bio was not complete, continue where that one ended */
	bio->bi_idx = *bio_vec_idx;
	bio->bi_vcnt = *bio_vec_idx;
	bio->bi_size = 0;
	bio->bi_flags &= ~(1 << BIO_SEG_VALID);

	/* bio->bi_idx pages have already been allocated */
	size -= bio->bi_idx * PAGE_SIZE;

	for (i = bio->bi_idx; i < nr_iovecs; i++) {
		struct bio_vec *bv = bio_iovec_idx(bio, i);

		bv->bv_page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!bv->bv_page)
			break;

		/*
		 * if additional pages cannot be allocated without waiting,
		 * return a partially allocated bio, the caller will then try
		 * to allocate additional bios while submitting this partial bio
		 */
		if ((i - bio->bi_idx) == (MIN_BIO_PAGES - 1))
			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		bv->bv_offset = 0;
		if (size > PAGE_SIZE)
			bv->bv_len = PAGE_SIZE;
		else
			bv->bv_len = size;

		bio->bi_size += bv->bv_len;
		bio->bi_vcnt++;
		size -= bv->bv_len;
	}

	if (!bio->bi_size) {
		bio_put(bio);
		return NULL;
	}

	/*
	 * Remember the last bio_vec allocated to be able
	 * to correctly continue after the splitting.
	 */
	*bio_vec_idx = bio->bi_vcnt;

	return bio;
}
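
/*
 * Allocation strategy in short: the first MIN_BIO_PAGES pages may sleep
 * in mempool_alloc(); after that __GFP_WAIT is dropped, so the function
 * returns whatever it managed to get instead of blocking, and
 * crypt_map() keeps submitting partial clones until the whole bio is
 * covered.
 */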

static void crypt_free_buffer_pages(struct crypt_config *cc,
				    struct bio *bio, unsigned int bytes)
{
	unsigned int i, start, end;
	struct bio_vec *bv;

	/*
	 * This is ugly, but Jens Axboe thinks that using bi_idx in the
	 * endio function is too dangerous at the moment, so I calculate the
	 * correct position using bi_vcnt and bi_size.
	 * The bv_offset and bv_len fields might already be modified but we
	 * know that we always allocated whole pages.
	 * A fix to the bi_idx issue in the kernel is in the works, so
	 * we will hopefully be able to revert to the cleaner solution soon.
	 */
	i = bio->bi_vcnt - 1;
	bv = bio_iovec_idx(bio, i);
	end = (i << PAGE_SHIFT) + (bv->bv_offset + bv->bv_len) - bio->bi_size;
	start = end - bytes;

	start >>= PAGE_SHIFT;
	if (!bio->bi_size)
		end = bio->bi_vcnt;
	else
		end >>= PAGE_SHIFT;

	for (i = start; i < end; i++) {
		bv = bio_iovec_idx(bio, i);
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}
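
/*
 * Worked example (assuming 4 KB pages): a 4-page bio has completed
 * 8192 of 16384 bytes, so bi_size = 8192 and bytes = 8192. Then
 * end = (3 << PAGE_SHIFT) + 4096 - 8192 = 8192 and start = 0; after
 * the shifts the loop frees pages 0 and 1, exactly the pages covered
 * by the completed bytes.
 */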

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 */
static void dec_pending(struct crypt_io *io, int error)
{
	struct crypt_config *cc = (struct crypt_config *) io->target->private;

	if (error < 0)
		io->error = error;

	if (!atomic_dec_and_test(&io->pending))
		return;

	if (io->first_clone)
		bio_put(io->first_clone);

	bio_endio(io->bio, io->bio->bi_size, io->error);

	mempool_free(io, cc->io_pool);
}

/*
 * kcryptd:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context, so bios returning from read requests get
 * queued here.
 */
static struct workqueue_struct *_kcryptd_workqueue;

static void kcryptd_do_work(void *data)
{
	struct crypt_io *io = (struct crypt_io *) data;
	struct crypt_config *cc = (struct crypt_config *) io->target->private;
	struct convert_context ctx;
	int r;

	crypt_convert_init(cc, &ctx, io->bio, io->bio,
			   io->bio->bi_sector - io->target->begin, 0);
	r = crypt_convert(cc, &ctx);

	dec_pending(io, r);
}

static void kcryptd_queue_io(struct crypt_io *io)
{
	INIT_WORK(&io->work, kcryptd_do_work, io);
	queue_work(_kcryptd_workqueue, &io->work);
}
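
/*
 * Read path in short: crypt_endio() hands a successfully read bio to
 * kcryptd_queue_io(); kcryptd_do_work() then runs crypt_convert() in
 * process context with bio_in == bio_out, decrypting the data in place
 * before dec_pending() completes the original request.
 */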

/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	char *endp;
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		key[i] = (u8)simple_strtoul(buffer, &endp, 16);

		if (endp != &buffer[2])
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}
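
/*
 * Example: crypt_decode_key(key, "00112233", 4) fills key[] with
 * { 0x00, 0x11, 0x22, 0x33 }. Anything other than exactly 2 * size
 * hex digits fails with -EINVAL.
 */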

/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		sprintf(hex, "%02x", *key);
		hex += 2;
		key++;
	}
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	struct crypto_tfm *tfm;
	char *tmp;
	char *cipher;
	char *chainmode;
	char *ivmode;
	char *ivopts;
	unsigned int crypto_flags;
	unsigned int key_size;
	unsigned long long tmpll;

	if (argc != 5) {
		ti->error = PFX "Not enough arguments";
		return -EINVAL;
	}

	tmp = argv[0];
	cipher = strsep(&tmp, "-");
	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN(PFX "Unexpected additional cipher options");

	key_size = strlen(argv[1]) >> 1;

	cc = kmalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (cc == NULL) {
		ti->error =
			PFX "Cannot allocate transparent encryption context";
		return -ENOMEM;
	}

	cc->key_size = key_size;
	if ((!key_size && strcmp(argv[1], "-") != 0) ||
	    (key_size && crypt_decode_key(cc->key, argv[1], key_size) < 0)) {
		ti->error = PFX "Error decoding key";
		goto bad1;
	}

	/* Compatibility mode for old dm-crypt cipher strings */
	if (!chainmode || (strcmp(chainmode, "plain") == 0 && !ivmode)) {
		chainmode = "cbc";
		ivmode = "plain";
	}

	/* Choose crypto_flags according to chainmode */
	if (strcmp(chainmode, "cbc") == 0)
		crypto_flags = CRYPTO_TFM_MODE_CBC;
	else if (strcmp(chainmode, "ecb") == 0)
		crypto_flags = CRYPTO_TFM_MODE_ECB;
	else {
		ti->error = PFX "Unknown chaining mode";
		goto bad1;
	}

	if (crypto_flags != CRYPTO_TFM_MODE_ECB && !ivmode) {
		ti->error = PFX "This chaining mode requires an IV mechanism";
		goto bad1;
	}

	tfm = crypto_alloc_tfm(cipher, crypto_flags | CRYPTO_TFM_REQ_MAY_SLEEP);
	if (!tfm) {
		ti->error = PFX "Error allocating crypto tfm";
		goto bad1;
	}
	if (crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER) {
		ti->error = PFX "Expected cipher algorithm";
		goto bad2;
	}

	cc->tfm = tfm;

	/*
	 * Choose ivmode. Valid modes: "plain", "essiv:<esshash>".
	 * See comments at iv code
	 */

	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else {
		ti->error = PFX "Invalid IV mode";
		goto bad2;
	}

	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr &&
	    cc->iv_gen_ops->ctr(cc, ti, ivopts) < 0)
		goto bad2;

	if (tfm->crt_cipher.cit_decrypt_iv && tfm->crt_cipher.cit_encrypt_iv)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(crypto_tfm_alg_ivsize(tfm),
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else {
		cc->iv_size = 0;
		if (cc->iv_gen_ops) {
			DMWARN(PFX "Selected cipher does not support IVs");
			if (cc->iv_gen_ops->dtr)
				cc->iv_gen_ops->dtr(cc);
			cc->iv_gen_ops = NULL;
		}
	}

	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = PFX "Cannot allocate crypt io mempool";
		goto bad3;
	}

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = PFX "Cannot allocate page mempool";
		goto bad4;
	}

	if (tfm->crt_cipher.cit_setkey(tfm, cc->key, key_size) < 0) {
		ti->error = PFX "Error setting key";
		goto bad5;
	}

	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
		ti->error = PFX "Invalid iv_offset sector";
		goto bad5;
	}
	cc->iv_offset = tmpll;

	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
		ti->error = PFX "Invalid device sector";
		goto bad5;
	}
	cc->start = tmpll;

	if (dm_get_device(ti, argv[3], cc->start, ti->len,
			  dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = PFX "Device lookup failed";
		goto bad5;
	}

	if (ivmode && cc->iv_gen_ops) {
		if (ivopts)
			*(ivopts - 1) = ':';
		cc->iv_mode = kmalloc(strlen(ivmode) + 1, GFP_KERNEL);
		if (!cc->iv_mode) {
			ti->error = PFX "Error kmallocing iv_mode string";
			goto bad5;
		}
		strcpy(cc->iv_mode, ivmode);
	} else
		cc->iv_mode = NULL;

	ti->private = cc;
	return 0;

bad5:
	mempool_destroy(cc->page_pool);
bad4:
	mempool_destroy(cc->io_pool);
bad3:
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
bad2:
	crypto_free_tfm(tfm);
bad1:
	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
	kfree(cc);
	return -EINVAL;
}
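
/*
 * A hypothetical table line (device path and key shown purely for
 * illustration) would be:
 *
 *   0 409600 crypt aes-cbc-essiv:sha256 <hex key> 0 /dev/sda2 0
 *
 * The strsep() calls above split argv[0] into cipher "aes", chainmode
 * "cbc", ivmode "essiv" and ivopts "sha256".
 */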

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;

	mempool_destroy(cc->page_pool);
	mempool_destroy(cc->io_pool);

	kfree(cc->iv_mode);
	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);
	crypto_free_tfm(cc->tfm);
	dm_put_device(ti, cc->dev);

	/* Must zero key material before freeing */
	memset(cc, 0, sizeof(*cc) + cc->key_size * sizeof(u8));
	kfree(cc);
}

static int crypt_endio(struct bio *bio, unsigned int done, int error)
{
	struct crypt_io *io = (struct crypt_io *) bio->bi_private;
	struct crypt_config *cc = (struct crypt_config *) io->target->private;

	if (bio_data_dir(bio) == WRITE) {
		/*
		 * free the processed pages, even if
		 * it's only a partially completed write
		 */
		crypt_free_buffer_pages(cc, bio, done);
	}

	if (bio->bi_size)
		return 1;

	bio_put(bio);

	/*
	 * successful reads are decrypted by the worker thread
	 */
	if ((bio_data_dir(bio) == READ)
	    && bio_flagged(bio, BIO_UPTODATE)) {
		kcryptd_queue_io(io);
		return 0;
	}

	dec_pending(io, error);
	return error;
}
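
/*
 * crypt_endio() is written for the old three-argument bi_end_io
 * signature, under which a bio may complete in several pieces: partial
 * write completions only free the pages processed so far, and the
 * function returns 1 while bi_size is non-zero, leaving the final
 * cleanup (and, for reads, the decryption) to the last call.
 */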

static inline struct bio *
crypt_clone(struct crypt_config *cc, struct crypt_io *io, struct bio *bio,
	    sector_t sector, unsigned int *bvec_idx,
	    struct convert_context *ctx)
{
	struct bio *clone;

	if (bio_data_dir(bio) == WRITE) {
		clone = crypt_alloc_buffer(cc, bio->bi_size,
					   io->first_clone, bvec_idx);
		if (clone) {
			ctx->bio_out = clone;
			if (crypt_convert(cc, ctx) < 0) {
				crypt_free_buffer_pages(cc, clone,
							clone->bi_size);
				bio_put(clone);
				return NULL;
			}
		}
	} else {
		/*
		 * The block layer might modify the bvec array, so always
		 * copy the required bvecs because we need the original
		 * one in order to decrypt the whole bio data *afterwards*.
		 */
		clone = bio_alloc(GFP_NOIO, bio_segments(bio));
		if (clone) {
			clone->bi_idx = 0;
			clone->bi_vcnt = bio_segments(bio);
			clone->bi_size = bio->bi_size;
			memcpy(clone->bi_io_vec, bio_iovec(bio),
			       sizeof(struct bio_vec) * clone->bi_vcnt);
		}
	}

	if (!clone)
		return NULL;

	clone->bi_private = io;
	clone->bi_end_io = crypt_endio;
	clone->bi_bdev = cc->dev->bdev;
	clone->bi_sector = cc->start + sector;
	clone->bi_rw = bio->bi_rw;

	return clone;
}

static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;
	struct crypt_io *io = mempool_alloc(cc->io_pool, GFP_NOIO);
	struct convert_context ctx;
	struct bio *clone;
	unsigned int remaining = bio->bi_size;
	sector_t sector = bio->bi_sector - ti->begin;
	unsigned int bvec_idx = 0;

	io->target = ti;
	io->bio = bio;
	io->first_clone = NULL;
	io->error = 0;
	atomic_set(&io->pending, 1); /* hold a reference */

	if (bio_data_dir(bio) == WRITE)
		crypt_convert_init(cc, &ctx, NULL, bio, sector, 1);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_clone(cc, io, bio, sector, &bvec_idx, &ctx);
		if (!clone)
			goto cleanup;

		if (!io->first_clone) {
			/*
			 * hold a reference to the first clone, because it
			 * holds the bio_vec array and that can't be freed
			 * before all other clones are released
			 */
			bio_get(clone);
			io->first_clone = clone;
		}
		atomic_inc(&io->pending);

		remaining -= clone->bi_size;
		sector += bio_sectors(clone);

		generic_make_request(clone);

		/* out of memory -> run queues */
		if (remaining)
			blk_congestion_wait(bio_data_dir(clone), HZ/100);
	}

	/* drop reference, clones could have returned before we reach this */
	dec_pending(io, 0);
	return 0;

cleanup:
	if (io->first_clone) {
		dec_pending(io, -ENOMEM);
		return 0;
	}

	/* if no bio has been dispatched yet, we can directly return the error */
	mempool_free(io, cc->io_pool);
	return -ENOMEM;
}
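
/*
 * Reference counting in crypt_map(): io->pending starts at 1 so the
 * submission loop itself holds a reference; every clone adds one and
 * every completion drops one, so the closing dec_pending(io, 0) can
 * only finish the original bio once all clones have returned.
 */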

static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
{
	struct crypt_config *cc = (struct crypt_config *) ti->private;
	const char *cipher;
	const char *chainmode = NULL;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		cipher = crypto_tfm_alg_name(cc->tfm);

		switch (cc->tfm->crt_cipher.cit_mode) {
		case CRYPTO_TFM_MODE_CBC:
			chainmode = "cbc";
			break;
		case CRYPTO_TFM_MODE_ECB:
			chainmode = "ecb";
			break;
		default:
			BUG();
		}

		if (cc->iv_mode)
			DMEMIT("%s-%s-%s ", cipher, chainmode, cc->iv_mode);
		else
			DMEMIT("%s-%s ", cipher, chainmode);

		if (cc->key_size > 0) {
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			if (sz >= maxlen)
				return -ENOMEM;
			result[sz++] = '-';
		}

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
		       cc->dev->name, (unsigned long long)cc->start);
		break;
	}
	return 0;
}

static struct target_type crypt_target = {
	.name    = "crypt",
	.version = {1, 1, 0},
	.module  = THIS_MODULE,
	.ctr     = crypt_ctr,
	.dtr     = crypt_dtr,
	.map     = crypt_map,
	.status  = crypt_status,
};

static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = kmem_cache_create("dm-crypt_io",
					   sizeof(struct crypt_io),
					   0, 0, NULL, NULL);
	if (!_crypt_io_pool)
		return -ENOMEM;

	_kcryptd_workqueue = create_workqueue("kcryptd");
	if (!_kcryptd_workqueue) {
		r = -ENOMEM;
		DMERR(PFX "couldn't create kcryptd");
		goto bad1;
	}

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR(PFX "register failed %d", r);
		goto bad2;
	}

	return 0;

bad2:
	destroy_workqueue(_kcryptd_workqueue);
bad1:
	kmem_cache_destroy(_crypt_io_pool);
	return r;
}

static void __exit dm_crypt_exit(void)
{
	int r = dm_unregister_target(&crypt_target);

	if (r < 0)
		DMERR(PFX "unregister failed %d", r);

	destroy_workqueue(_kcryptd_workqueue);
	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <[email protected]>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");