/*
 * Copyright (C) 2003 Christophe Saout <[email protected]>
 * Copyright (C) 2004 Clemens Fruhwirth <[email protected]>
 * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/workqueue.h>
#include <linux/backing-dev.h>
#include <asm/atomic.h>
#include <linux/scatterlist.h>
#include <asm/page.h>
#include <asm/unaligned.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "crypt"
#define MESG_STR(x) x, sizeof(x)

/*
 * context holding the current state of a multi-part conversion
 */
struct convert_context {
	struct completion restart;
	struct bio *bio_in;
	struct bio *bio_out;
	unsigned int offset_in;
	unsigned int offset_out;
	unsigned int idx_in;
	unsigned int idx_out;
	sector_t sector;
	atomic_t pending;
};

/*
 * per bio private data
 */
struct dm_crypt_io {
	struct dm_target *target;
	struct bio *base_bio;
	struct work_struct work;

	struct convert_context ctx;

	atomic_t pending;
	int error;
	sector_t sector;
	struct dm_crypt_io *base_io;
};

struct dm_crypt_request {
	struct convert_context *ctx;
	struct scatterlist sg_in;
	struct scatterlist sg_out;
};

struct crypt_config;

struct crypt_iv_operations {
	int (*ctr)(struct crypt_config *cc, struct dm_target *ti,
		   const char *opts);
	void (*dtr)(struct crypt_config *cc);
	int (*init)(struct crypt_config *cc);
	int (*wipe)(struct crypt_config *cc);
	int (*generator)(struct crypt_config *cc, u8 *iv, sector_t sector);
};

struct iv_essiv_private {
	struct crypto_cipher *tfm;
	struct crypto_hash *hash_tfm;
	u8 *salt;
};

struct iv_benbi_private {
	int shift;
};

/*
 * Crypt: maps a linear range of a block device
 * and encrypts / decrypts at the same time.
 */
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
struct crypt_config {
	struct dm_dev *dev;
	sector_t start;

	/*
	 * pool for per bio private data, crypto requests and
	 * encryption requests/buffer pages
	 */
	mempool_t *io_pool;
	mempool_t *req_pool;
	mempool_t *page_pool;
	struct bio_set *bs;

	struct workqueue_struct *io_queue;
	struct workqueue_struct *crypt_queue;

	char *cipher;
	char *cipher_mode;

	struct crypt_iv_operations *iv_gen_ops;
	union {
		struct iv_essiv_private essiv;
		struct iv_benbi_private benbi;
	} iv_gen_private;
	sector_t iv_offset;
	unsigned int iv_size;

	/*
	 * Layout of each crypto request:
	 *
	 *   struct ablkcipher_request
	 *      context
	 *      padding
	 *   struct dm_crypt_request
	 *      padding
	 *   IV
	 *
	 * The padding is added so that dm_crypt_request and the IV are
	 * correctly aligned.
	 */
	unsigned int dmreq_start;
	struct ablkcipher_request *req;

	struct crypto_ablkcipher *tfm;
	unsigned long flags;
	unsigned int key_size;
	u8 key[0];
};

#define MIN_IOS        16
#define MIN_POOL_PAGES 32
#define MIN_BIO_PAGES  8

static struct kmem_cache *_crypt_io_pool;

static void clone_init(struct dm_crypt_io *, struct bio *);
static void kcryptd_queue_crypt(struct dm_crypt_io *io);

/*
 * Different IV generation algorithms:
 *
 * plain: the initial vector is the 32-bit little-endian version of the sector
 *        number, padded with zeros if necessary.
 *
 * plain64: the initial vector is the 64-bit little-endian version of the
 *        sector number, padded with zeros if necessary.
 *
 * essiv: "encrypted sector|salt initial vector", the sector number is
 *        encrypted with the bulk cipher using a salt as key. The salt
 *        should be derived from the bulk cipher's key via hashing.
 *
 * benbi: the 64-bit "big-endian 'narrow block'-count", starting at 1
 *        (needed for LRW-32-AES and possibly other narrow block modes)
 *
 * null: the initial vector is always zero.  Provides compatibility with
 *       obsolete loop_fish2 devices.  Do not use for new devices.
 *
 * plumb: unimplemented, see:
 * http://article.gmane.org/gmane.linux.kernel.device-mapper.dm-crypt/454
 */

static int crypt_iv_plain_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u32 *)iv = cpu_to_le32(sector & 0xffffffff);

	return 0;
}

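/*
 * Worked example (illustrative): with a 16-byte IV and sector 0x11223344,
 * plain keeps only the low 32 bits, stored little-endian:
 *
 *   iv = 44 33 22 11 00 00 00 00 00 00 00 00 00 00 00 00
 *
 * Sector numbers beyond 2^32 therefore wrap, which is why plain64 below
 * exists for devices larger than 2 TiB (with 512-byte sectors).
 */
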
static int crypt_iv_plain64_gen(struct crypt_config *cc, u8 *iv,
				sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);

	return 0;
}

/* Initialise ESSIV - compute salt but no local memory allocations */
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	struct hash_desc desc;
	struct scatterlist sg;
	int err;

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = essiv->hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
	if (err)
		return err;

	return crypto_cipher_setkey(essiv->tfm, essiv->salt,
				    crypto_hash_digestsize(essiv->hash_tfm));
}

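/*
 * In other words: salt = H(volume key), and the ESSIV cipher is keyed with
 * that salt.  For example (illustrative), with aes-cbc-essiv:sha256 the
 * 32-byte SHA-256 digest of the volume key becomes an AES-256 key for the
 * sector-number encryption performed in crypt_iv_essiv_gen() below.
 */
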
/* Wipe salt and reset key derived from volume key */
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);

	memset(essiv->salt, 0, salt_size);

	return crypto_cipher_setkey(essiv->tfm, essiv->salt, salt_size);
}

static void crypt_iv_essiv_dtr(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;

	crypto_free_cipher(essiv->tfm);
	essiv->tfm = NULL;

	crypto_free_hash(essiv->hash_tfm);
	essiv->hash_tfm = NULL;

	kzfree(essiv->salt);
	essiv->salt = NULL;
}

static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_hash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Allocate hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		err = PTR_ERR(hash_tfm);
		goto bad;
	}

	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		err = -ENOMEM;
		goto bad;
	}

	/* Allocate essiv_tfm */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		err = PTR_ERR(essiv_tfm);
		goto bad;
	}
	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(cc->tfm)) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		err = -EINVAL;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.tfm = essiv_tfm;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	return 0;

bad:
	if (essiv_tfm && !IS_ERR(essiv_tfm))
		crypto_free_cipher(essiv_tfm);
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_hash(hash_tfm);
	kfree(salt);
	return err;
}

static int crypt_iv_essiv_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);
	*(u64 *)iv = cpu_to_le64(sector);
	crypto_cipher_encrypt_one(cc->iv_gen_private.essiv.tfm, iv, iv);
	return 0;
}

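/*
 * So for ESSIV the per-sector IV is:
 *
 *   IV(sector) = E_{H(key)}(le64(sector), zero-padded to one block)
 *
 * e.g. (illustrative) sector 5 is first encoded as
 * 05 00 00 00 00 00 00 00 ... and then encrypted in place with the
 * salt-keyed cipher allocated in crypt_iv_essiv_ctr().
 */
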
static int crypt_iv_benbi_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	unsigned bs = crypto_ablkcipher_blocksize(cc->tfm);
	int log = ilog2(bs);

	/* we need to calculate how far we must shift the sector count
	 * to get the cipher block count, we use this shift in _gen */

	if (1 << log != bs) {
		ti->error = "cypher blocksize is not a power of 2";
		return -EINVAL;
	}

	if (log > 9) {
		ti->error = "cypher blocksize is > 512";
		return -EINVAL;
	}

	cc->iv_gen_private.benbi.shift = 9 - log;

	return 0;
}

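/*
 * Worked example (illustrative): for a 16-byte block cipher such as AES,
 * log = ilog2(16) = 4, so shift = 9 - 4 = 5.  A 512-byte sector then
 * contains 1 << 5 = 32 cipher blocks, and the block count used below is
 * (sector << 5) + 1.
 */
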
static void crypt_iv_benbi_dtr(struct crypt_config *cc)
{
}

static int crypt_iv_benbi_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	__be64 val;

	memset(iv, 0, cc->iv_size - sizeof(u64)); /* rest is cleared below */

	val = cpu_to_be64(((u64)sector << cc->iv_gen_private.benbi.shift) + 1);
	put_unaligned(val, (__be64 *)(iv + cc->iv_size - sizeof(u64)));

	return 0;
}

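/*
 * Continuing the example above: for sector 2 with shift = 5,
 * val = be64(2 * 32 + 1) = be64(65), so the last eight IV bytes are
 * 00 00 00 00 00 00 00 41 and all leading bytes are zero.
 */
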
static int crypt_iv_null_gen(struct crypt_config *cc, u8 *iv, sector_t sector)
{
	memset(iv, 0, cc->iv_size);

	return 0;
}

static struct crypt_iv_operations crypt_iv_plain_ops = {
	.generator = crypt_iv_plain_gen
};

static struct crypt_iv_operations crypt_iv_plain64_ops = {
	.generator = crypt_iv_plain64_gen
};

static struct crypt_iv_operations crypt_iv_essiv_ops = {
	.ctr       = crypt_iv_essiv_ctr,
	.dtr       = crypt_iv_essiv_dtr,
	.init      = crypt_iv_essiv_init,
	.wipe      = crypt_iv_essiv_wipe,
	.generator = crypt_iv_essiv_gen
};

static struct crypt_iv_operations crypt_iv_benbi_ops = {
	.ctr	   = crypt_iv_benbi_ctr,
	.dtr	   = crypt_iv_benbi_dtr,
	.generator = crypt_iv_benbi_gen
};

static struct crypt_iv_operations crypt_iv_null_ops = {
	.generator = crypt_iv_null_gen
};

static void crypt_convert_init(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct bio *bio_out, struct bio *bio_in,
			       sector_t sector)
{
	ctx->bio_in = bio_in;
	ctx->bio_out = bio_out;
	ctx->offset_in = 0;
	ctx->offset_out = 0;
	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
	ctx->sector = sector + cc->iv_offset;
	init_completion(&ctx->restart);
}

static struct dm_crypt_request *dmreq_of_req(struct crypt_config *cc,
					     struct ablkcipher_request *req)
{
	return (struct dm_crypt_request *)((char *)req + cc->dmreq_start);
}

static struct ablkcipher_request *req_of_dmreq(struct crypt_config *cc,
					       struct dm_crypt_request *dmreq)
{
	return (struct ablkcipher_request *)((char *)dmreq - cc->dmreq_start);
}

static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
	struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r = 0;

	dmreq = dmreq_of_req(cc, req);
	iv = (u8 *)ALIGN((unsigned long)(dmreq + 1),
			 crypto_ablkcipher_alignmask(cc->tfm) + 1);

	dmreq->ctx = ctx;
	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
		    bv_in->bv_offset + ctx->offset_in);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
		    bv_out->bv_offset + ctx->offset_out);

	ctx->offset_in += 1 << SECTOR_SHIFT;
	if (ctx->offset_in >= bv_in->bv_len) {
		ctx->offset_in = 0;
		ctx->idx_in++;
	}

	ctx->offset_out += 1 << SECTOR_SHIFT;
	if (ctx->offset_out >= bv_out->bv_len) {
		ctx->offset_out = 0;
		ctx->idx_out++;
	}

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, ctx->sector);
		if (r < 0)
			return r;
	}

	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

	return r;
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error);
static void crypt_alloc_req(struct crypt_config *cc,
			    struct convert_context *ctx)
{
	if (!cc->req)
		cc->req = mempool_alloc(cc->req_pool, GFP_NOIO);
	ablkcipher_request_set_tfm(cc->req, cc->tfm);
	ablkcipher_request_set_callback(cc->req, CRYPTO_TFM_REQ_MAY_BACKLOG |
					CRYPTO_TFM_REQ_MAY_SLEEP,
					kcryptd_async_done,
					dmreq_of_req(cc, cc->req));
}

/*
 * Encrypt / decrypt data from one bio to another one (can be the same one)
 */
static int crypt_convert(struct crypt_config *cc,
			 struct convert_context *ctx)
{
	int r;

	atomic_set(&ctx->pending, 1);

	while (ctx->idx_in < ctx->bio_in->bi_vcnt &&
	       ctx->idx_out < ctx->bio_out->bi_vcnt) {

		crypt_alloc_req(cc, ctx);

		atomic_inc(&ctx->pending);

		r = crypt_convert_block(cc, ctx, cc->req);

		switch (r) {
		/* async */
		case -EBUSY:
			wait_for_completion(&ctx->restart);
			INIT_COMPLETION(ctx->restart);
			/* fall through */
		case -EINPROGRESS:
			cc->req = NULL;
			ctx->sector++;
			continue;

		/* sync */
		case 0:
			atomic_dec(&ctx->pending);
			ctx->sector++;
			cond_resched();
			continue;

		/* error */
		default:
			atomic_dec(&ctx->pending);
			return r;
		}
	}

	return 0;
}

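/*
 * Note on ctx->pending: it starts at 1 (the caller's reference) and is
 * incremented per block before conversion.  A synchronous completion
 * (r == 0) drops its reference immediately; an async one is dropped in
 * kcryptd_async_done().  E.g. (illustrative) a 4 KiB bio converted fully
 * synchronously leaves crypt_convert() with pending back at 1, and the
 * caller's atomic_dec_and_test() then observes completion.
 */
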
static void dm_crypt_bio_destructor(struct bio *bio)
{
	struct dm_crypt_io *io = bio->bi_private;
	struct crypt_config *cc = io->target->private;

	bio_free(bio, cc->bs);
}

/*
 * Generate a new unfragmented bio with the given size
 * This should never violate the device limitations
 * May return a smaller bio when running out of pages, indicated by
 * *out_of_pages set to 1.
 */
static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
				      unsigned *out_of_pages)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
	unsigned i, len;
	struct page *page;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
	if (!clone)
		return NULL;

	clone_init(io, clone);
	*out_of_pages = 0;

	for (i = 0; i < nr_iovecs; i++) {
		page = mempool_alloc(cc->page_pool, gfp_mask);
		if (!page) {
			*out_of_pages = 1;
			break;
		}

		/*
		 * if additional pages cannot be allocated without waiting,
		 * return a partially allocated bio, the caller will then try
		 * to allocate additional bios while submitting this partial bio
		 */
		if (i == (MIN_BIO_PAGES - 1))
			gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;

		len = (size > PAGE_SIZE) ? PAGE_SIZE : size;

		if (!bio_add_page(clone, page, len, 0)) {
			mempool_free(page, cc->page_pool);
			break;
		}

		size -= len;
	}

	if (!clone->bi_size) {
		bio_put(clone);
		return NULL;
	}

	return clone;
}

static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
{
	unsigned int i;
	struct bio_vec *bv;

	for (i = 0; i < clone->bi_vcnt; i++) {
		bv = bio_iovec_idx(clone, i);
		BUG_ON(!bv->bv_page);
		mempool_free(bv->bv_page, cc->page_pool);
		bv->bv_page = NULL;
	}
}

static struct dm_crypt_io *crypt_io_alloc(struct dm_target *ti,
					  struct bio *bio, sector_t sector)
{
	struct crypt_config *cc = ti->private;
	struct dm_crypt_io *io;

	io = mempool_alloc(cc->io_pool, GFP_NOIO);
	io->target = ti;
	io->base_bio = bio;
	io->sector = sector;
	io->error = 0;
	io->base_io = NULL;
	atomic_set(&io->pending, 0);

	return io;
}

static void crypt_inc_pending(struct dm_crypt_io *io)
{
	atomic_inc(&io->pending);
}

/*
 * One of the bios was finished. Check for completion of
 * the whole request and correctly clean up the buffer.
 * If base_io is set, wait for the last fragment to complete.
 */
static void crypt_dec_pending(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct dm_crypt_io *base_io = io->base_io;
	int error = io->error;

	if (!atomic_dec_and_test(&io->pending))
		return;

	mempool_free(io, cc->io_pool);

	if (likely(!base_io))
		bio_endio(base_bio, error);
	else {
		if (error && !base_io->error)
			base_io->error = error;
		crypt_dec_pending(base_io);
	}
}

/*
 * kcryptd/kcryptd_io:
 *
 * Needed because it would be very unwise to do decryption in an
 * interrupt context.
 *
 * kcryptd performs the actual encryption or decryption.
 *
 * kcryptd_io performs the IO submission.
 *
 * They must be separated as otherwise the final stages could be
 * starved by new requests which can block in the first stages due
 * to memory allocation.
 */
static void crypt_endio(struct bio *clone, int error)
{
	struct dm_crypt_io *io = clone->bi_private;
	struct crypt_config *cc = io->target->private;
	unsigned rw = bio_data_dir(clone);

	if (unlikely(!bio_flagged(clone, BIO_UPTODATE) && !error))
		error = -EIO;

	/*
	 * free the processed pages
	 */
	if (rw == WRITE)
		crypt_free_buffer_pages(cc, clone);

	bio_put(clone);

	if (rw == READ && !error) {
		kcryptd_queue_crypt(io);
		return;
	}

	if (unlikely(error))
		io->error = error;

	crypt_dec_pending(io);
}

static void clone_init(struct dm_crypt_io *io, struct bio *clone)
{
	struct crypt_config *cc = io->target->private;

	clone->bi_private = io;
	clone->bi_end_io = crypt_endio;
	clone->bi_bdev = cc->dev->bdev;
	clone->bi_rw = io->base_bio->bi_rw;
	clone->bi_destructor = dm_crypt_bio_destructor;
}

static void kcryptd_io_read(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *base_bio = io->base_bio;
	struct bio *clone;

	crypt_inc_pending(io);

	/*
	 * The block layer might modify the bvec array, so always
	 * copy the required bvecs because we need the original
	 * one in order to decrypt the whole bio data *afterwards*.
	 */
	clone = bio_alloc_bioset(GFP_NOIO, bio_segments(base_bio), cc->bs);
	if (unlikely(!clone)) {
		io->error = -ENOMEM;
		crypt_dec_pending(io);
		return;
	}

	clone_init(io, clone);
	clone->bi_idx = 0;
	clone->bi_vcnt = bio_segments(base_bio);
	clone->bi_size = base_bio->bi_size;
	clone->bi_sector = cc->start + io->sector;
	memcpy(clone->bi_io_vec, bio_iovec(base_bio),
	       sizeof(struct bio_vec) * clone->bi_vcnt);

	generic_make_request(clone);
}

static void kcryptd_io_write(struct dm_crypt_io *io)
{
	struct bio *clone = io->ctx.bio_out;
	generic_make_request(clone);
}

static void kcryptd_io(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_io_read(io);
	else
		kcryptd_io_write(io);
}

static void kcryptd_queue_io(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_io);
	queue_work(cc->io_queue, &io->work);
}

static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io,
					  int error, int async)
{
	struct bio *clone = io->ctx.bio_out;
	struct crypt_config *cc = io->target->private;

	if (unlikely(error < 0)) {
		crypt_free_buffer_pages(cc, clone);
		bio_put(clone);
		io->error = -EIO;
		crypt_dec_pending(io);
		return;
	}

	/* crypt_convert should have filled the clone bio */
	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);

	clone->bi_sector = cc->start + io->sector;

	if (async)
		kcryptd_queue_io(io);
	else
		generic_make_request(clone);
}

static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	struct bio *clone;
	struct dm_crypt_io *new_io;
	int crypt_finished;
	unsigned out_of_pages = 0;
	unsigned remaining = io->base_bio->bi_size;
	sector_t sector = io->sector;
	int r;

	/*
	 * Prevent io from disappearing until this function completes.
	 */
	crypt_inc_pending(io);
	crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);

	/*
	 * The allocated buffers can be smaller than the whole bio,
	 * so repeat the whole process until all the data can be handled.
	 */
	while (remaining) {
		clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
		if (unlikely(!clone)) {
			io->error = -ENOMEM;
			break;
		}

		io->ctx.bio_out = clone;
		io->ctx.idx_out = 0;

		remaining -= clone->bi_size;
		sector += bio_sectors(clone);

		crypt_inc_pending(io);
		r = crypt_convert(cc, &io->ctx);
		crypt_finished = atomic_dec_and_test(&io->ctx.pending);

		/* Encryption was already finished, submit io now */
		if (crypt_finished) {
			kcryptd_crypt_write_io_submit(io, r, 0);

			/*
			 * If there was an error, do not try next fragments.
			 * For async, error is processed in async handler.
			 */
			if (unlikely(r < 0))
				break;

			io->sector = sector;
		}

		/*
		 * Out of memory -> run queues
		 * But don't wait if split was due to the io size restriction
		 */
		if (unlikely(out_of_pages))
			congestion_wait(BLK_RW_ASYNC, HZ/100);

		/*
		 * With async crypto it is unsafe to share the crypto context
		 * between fragments, so switch to a new dm_crypt_io structure.
		 */
		if (unlikely(!crypt_finished && remaining)) {
			new_io = crypt_io_alloc(io->target, io->base_bio,
						sector);
			crypt_inc_pending(new_io);
			crypt_convert_init(cc, &new_io->ctx, NULL,
					   io->base_bio, sector);
			new_io->ctx.idx_in = io->ctx.idx_in;
			new_io->ctx.offset_in = io->ctx.offset_in;

			/*
			 * Fragments after the first use the base_io
			 * pending count.
			 */
			if (!io->base_io)
				new_io->base_io = io;
			else {
				new_io->base_io = io->base_io;
				crypt_inc_pending(io->base_io);
				crypt_dec_pending(io);
			}

			io = new_io;
		}
	}

	crypt_dec_pending(io);
}

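/*
 * Fragmentation summary (illustrative): if crypt_alloc_buffer() can only
 * cover part of the bio, each further fragment gets its own dm_crypt_io
 * whose base_io points at the first one; the first fragment's pending
 * count then tracks all of them, and crypt_dec_pending() completes the
 * original bio only when the last fragment finishes.
 */
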
static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error)
{
	if (unlikely(error < 0))
		io->error = -EIO;

	crypt_dec_pending(io);
}

static void kcryptd_crypt_read_convert(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;
	int r = 0;

	crypt_inc_pending(io);

	crypt_convert_init(cc, &io->ctx, io->base_bio, io->base_bio,
			   io->sector);

	r = crypt_convert(cc, &io->ctx);

	if (atomic_dec_and_test(&io->ctx.pending))
		kcryptd_crypt_read_done(io, r);

	crypt_dec_pending(io);
}

static void kcryptd_async_done(struct crypto_async_request *async_req,
			       int error)
{
	struct dm_crypt_request *dmreq = async_req->data;
	struct convert_context *ctx = dmreq->ctx;
	struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
	struct crypt_config *cc = io->target->private;

	if (error == -EINPROGRESS) {
		complete(&ctx->restart);
		return;
	}

	mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool);

	if (!atomic_dec_and_test(&ctx->pending))
		return;

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_done(io, error);
	else
		kcryptd_crypt_write_io_submit(io, error, 1);
}

static void kcryptd_crypt(struct work_struct *work)
{
	struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_crypt_read_convert(io);
	else
		kcryptd_crypt_write_convert(io);
}

static void kcryptd_queue_crypt(struct dm_crypt_io *io)
{
	struct crypt_config *cc = io->target->private;

	INIT_WORK(&io->work, kcryptd_crypt);
	queue_work(cc->crypt_queue, &io->work);
}

/*
 * Decode key from its hex representation
 */
static int crypt_decode_key(u8 *key, char *hex, unsigned int size)
{
	char buffer[3];
	char *endp;
	unsigned int i;

	buffer[2] = '\0';

	for (i = 0; i < size; i++) {
		buffer[0] = *hex++;
		buffer[1] = *hex++;

		key[i] = (u8)simple_strtoul(buffer, &endp, 16);

		if (endp != &buffer[2])
			return -EINVAL;
	}

	if (*hex != '\0')
		return -EINVAL;

	return 0;
}

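/*
 * E.g. (illustrative) crypt_decode_key(key, "2a3f", 2) yields
 * key[0] = 0x2a, key[1] = 0x3f; an odd-length or non-hex string
 * fails with -EINVAL.
 */
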
/*
 * Encode key into its hex representation
 */
static void crypt_encode_key(char *hex, u8 *key, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		sprintf(hex, "%02x", *key);
		hex += 2;
		key++;
	}
}

static int crypt_set_key(struct crypt_config *cc, char *key)
{
	unsigned key_size = strlen(key) >> 1;

	if (cc->key_size && cc->key_size != key_size)
		return -EINVAL;

	cc->key_size = key_size; /* initial settings */

	if ((!key_size && strcmp(key, "-")) ||
	    (key_size && crypt_decode_key(cc->key, key, key_size) < 0))
		return -EINVAL;

	set_bit(DM_CRYPT_KEY_VALID, &cc->flags);

	return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size);
}

static int crypt_wipe_key(struct crypt_config *cc)
{
	clear_bit(DM_CRYPT_KEY_VALID, &cc->flags);
	memset(&cc->key, 0, cc->key_size * sizeof(u8));
	return crypto_ablkcipher_setkey(cc->tfm, cc->key, cc->key_size);
}

static void crypt_dtr(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	ti->private = NULL;

	if (!cc)
		return;

	if (cc->io_queue)
		destroy_workqueue(cc->io_queue);
	if (cc->crypt_queue)
		destroy_workqueue(cc->crypt_queue);

	if (cc->bs)
		bioset_free(cc->bs);

	if (cc->page_pool)
		mempool_destroy(cc->page_pool);
	if (cc->req_pool)
		mempool_destroy(cc->req_pool);
	if (cc->io_pool)
		mempool_destroy(cc->io_pool);

	if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
		cc->iv_gen_ops->dtr(cc);

	if (cc->tfm && !IS_ERR(cc->tfm))
		crypto_free_ablkcipher(cc->tfm);

	if (cc->dev)
		dm_put_device(ti, cc->dev);

	kzfree(cc->cipher);
	kzfree(cc->cipher_mode);

	/* Must zero key material before freeing */
	kzfree(cc);
}

static int crypt_ctr_cipher(struct dm_target *ti,
			    char *cipher_in, char *key)
{
	struct crypt_config *cc = ti->private;
	char *tmp, *cipher, *chainmode, *ivmode, *ivopts;
	char *cipher_api = NULL;
	int ret = -EINVAL;

	/* Convert to crypto api definition? */
	if (strchr(cipher_in, '(')) {
		ti->error = "Bad cipher specification";
		return -EINVAL;
	}

	/*
	 * Legacy dm-crypt cipher specification
	 * cipher-mode-iv:ivopts
	 */
	tmp = cipher_in;
	cipher = strsep(&tmp, "-");

	cc->cipher = kstrdup(cipher, GFP_KERNEL);
	if (!cc->cipher)
		goto bad_mem;

	if (tmp) {
		cc->cipher_mode = kstrdup(tmp, GFP_KERNEL);
		if (!cc->cipher_mode)
			goto bad_mem;
	}

	chainmode = strsep(&tmp, "-");
	ivopts = strsep(&tmp, "-");
	ivmode = strsep(&ivopts, ":");

	if (tmp)
		DMWARN("Ignoring unexpected additional cipher options");

	/* Compatibility mode for old dm-crypt mappings */
	if (!chainmode || (!strcmp(chainmode, "plain") && !ivmode)) {
		kfree(cc->cipher_mode);
		cc->cipher_mode = kstrdup("cbc-plain", GFP_KERNEL);
		chainmode = "cbc";
		ivmode = "plain";
	}

	if (strcmp(chainmode, "ecb") && !ivmode) {
		ti->error = "IV mechanism required";
		return -EINVAL;
	}

	cipher_api = kmalloc(CRYPTO_MAX_ALG_NAME, GFP_KERNEL);
	if (!cipher_api)
		goto bad_mem;

	ret = snprintf(cipher_api, CRYPTO_MAX_ALG_NAME,
		       "%s(%s)", chainmode, cipher);
	if (ret < 0) {
		kfree(cipher_api);
		goto bad_mem;
	}

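	/*
	 * Parsing example (illustrative): the table string
	 * "aes-cbc-essiv:sha256" splits into cipher = "aes",
	 * chainmode = "cbc", ivmode = "essiv", ivopts = "sha256",
	 * and the crypto API name built above is "cbc(aes)".
	 */
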
	/* Allocate cipher */
	cc->tfm = crypto_alloc_ablkcipher(cipher_api, 0, 0);
	if (IS_ERR(cc->tfm)) {
		ret = PTR_ERR(cc->tfm);
		ti->error = "Error allocating crypto tfm";
		goto bad;
	}

	/* Initialize and set key */
	ret = crypt_set_key(cc, key);
	if (ret < 0) {
		ti->error = "Error decoding and setting key";
		goto bad;
	}

	/* Initialize IV */
	cc->iv_size = crypto_ablkcipher_ivsize(cc->tfm);
	if (cc->iv_size)
		/* at least a 64 bit sector number should fit in our buffer */
		cc->iv_size = max(cc->iv_size,
				  (unsigned int)(sizeof(u64) / sizeof(u8)));
	else if (ivmode) {
		DMWARN("Selected cipher does not support IVs");
		ivmode = NULL;
	}

	/* Choose ivmode, see comments at iv code. */
	if (ivmode == NULL)
		cc->iv_gen_ops = NULL;
	else if (strcmp(ivmode, "plain") == 0)
		cc->iv_gen_ops = &crypt_iv_plain_ops;
	else if (strcmp(ivmode, "plain64") == 0)
		cc->iv_gen_ops = &crypt_iv_plain64_ops;
	else if (strcmp(ivmode, "essiv") == 0)
		cc->iv_gen_ops = &crypt_iv_essiv_ops;
	else if (strcmp(ivmode, "benbi") == 0)
		cc->iv_gen_ops = &crypt_iv_benbi_ops;
	else if (strcmp(ivmode, "null") == 0)
		cc->iv_gen_ops = &crypt_iv_null_ops;
	else {
		ret = -EINVAL;
		ti->error = "Invalid IV mode";
		goto bad;
	}

	/* Allocate IV */
	if (cc->iv_gen_ops && cc->iv_gen_ops->ctr) {
		ret = cc->iv_gen_ops->ctr(cc, ti, ivopts);
		if (ret < 0) {
			ti->error = "Error creating IV";
			goto bad;
		}
	}

	/* Initialize IV (set keys for ESSIV etc) */
	if (cc->iv_gen_ops && cc->iv_gen_ops->init) {
		ret = cc->iv_gen_ops->init(cc);
		if (ret < 0) {
			ti->error = "Error initialising IV";
			goto bad;
		}
	}

	ret = 0;
bad:
	kfree(cipher_api);
	return ret;

bad_mem:
	ti->error = "Cannot allocate cipher strings";
	return -ENOMEM;
}

/*
 * Construct an encryption mapping:
 * <cipher> <key> <iv_offset> <dev_path> <start>
 */
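/*
 * For example (illustrative values), a 200 MiB mapping starting at the
 * beginning of /dev/sdb1 with a 256-bit key:
 *
 *   echo "0 409600 crypt aes-cbc-essiv:sha256 <64 hex digits> 0 /dev/sdb1 0" \
 *       | dmsetup create cryptvol
 */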
static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct crypt_config *cc;
	unsigned int key_size;
	unsigned long long tmpll;
	int ret;

	if (argc != 5) {
		ti->error = "Not enough arguments";
		return -EINVAL;
	}

	key_size = strlen(argv[1]) >> 1;

	cc = kzalloc(sizeof(*cc) + key_size * sizeof(u8), GFP_KERNEL);
	if (!cc) {
		ti->error = "Cannot allocate encryption context";
		return -ENOMEM;
	}

	ti->private = cc;
	ret = crypt_ctr_cipher(ti, argv[0], argv[1]);
	if (ret < 0)
		goto bad;

	ret = -ENOMEM;
	cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
	if (!cc->io_pool) {
		ti->error = "Cannot allocate crypt io mempool";
		goto bad;
	}

	cc->dmreq_start = sizeof(struct ablkcipher_request);
	cc->dmreq_start += crypto_ablkcipher_reqsize(cc->tfm);
	cc->dmreq_start = ALIGN(cc->dmreq_start, crypto_tfm_ctx_alignment());
	cc->dmreq_start += crypto_ablkcipher_alignmask(cc->tfm) &
			   ~(crypto_tfm_ctx_alignment() - 1);

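	/*
	 * Worked example (illustrative numbers only): with an 80-byte
	 * struct ablkcipher_request, a 40-byte tfm reqsize, 8-byte context
	 * alignment and an alignmask of 15, this gives
	 * dmreq_start = ALIGN(120, 8) + (15 & ~7) = 128, i.e. each mempool
	 * element holds the request, then dm_crypt_request at offset 128,
	 * then the IV, matching the layout comment in struct crypt_config.
	 */
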
	cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
			sizeof(struct dm_crypt_request) + cc->iv_size);
	if (!cc->req_pool) {
		ti->error = "Cannot allocate crypt request mempool";
		goto bad;
	}
	cc->req = NULL;

	cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
	if (!cc->page_pool) {
		ti->error = "Cannot allocate page mempool";
		goto bad;
	}

	cc->bs = bioset_create(MIN_IOS, 0);
	if (!cc->bs) {
		ti->error = "Cannot allocate crypt bioset";
		goto bad;
	}

	ret = -EINVAL;
	if (sscanf(argv[2], "%llu", &tmpll) != 1) {
		ti->error = "Invalid iv_offset sector";
		goto bad;
	}
	cc->iv_offset = tmpll;

	if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[4], "%llu", &tmpll) != 1) {
		ti->error = "Invalid device sector";
		goto bad;
	}
	cc->start = tmpll;

	ret = -ENOMEM;
	cc->io_queue = create_singlethread_workqueue("kcryptd_io");
	if (!cc->io_queue) {
		ti->error = "Couldn't create kcryptd io queue";
		goto bad;
	}

	cc->crypt_queue = create_singlethread_workqueue("kcryptd");
	if (!cc->crypt_queue) {
		ti->error = "Couldn't create kcryptd queue";
		goto bad;
	}

	ti->num_flush_requests = 1;
	return 0;

bad:
	crypt_dtr(ti);
	return ret;
}

static int crypt_map(struct dm_target *ti, struct bio *bio,
		     union map_info *map_context)
{
	struct dm_crypt_io *io;
	struct crypt_config *cc;

	if (bio->bi_rw & REQ_FLUSH) {
		cc = ti->private;
		bio->bi_bdev = cc->dev->bdev;
		return DM_MAPIO_REMAPPED;
	}

	io = crypt_io_alloc(ti, bio, dm_target_offset(ti, bio->bi_sector));

	if (bio_data_dir(io->base_bio) == READ)
		kcryptd_queue_io(io);
	else
		kcryptd_queue_crypt(io);

	return DM_MAPIO_SUBMITTED;
}

static int crypt_status(struct dm_target *ti, status_type_t type,
			char *result, unsigned int maxlen)
{
	struct crypt_config *cc = ti->private;
	unsigned int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		if (cc->cipher_mode)
			DMEMIT("%s-%s ", cc->cipher, cc->cipher_mode);
		else
			DMEMIT("%s ", cc->cipher);

		if (cc->key_size > 0) {
			if ((maxlen - sz) < ((cc->key_size << 1) + 1))
				return -ENOMEM;

			crypt_encode_key(result + sz, cc->key, cc->key_size);
			sz += cc->key_size << 1;
		} else {
			if (sz >= maxlen)
				return -ENOMEM;
			result[sz++] = '-';
		}

		DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
		       cc->dev->name, (unsigned long long)cc->start);
		break;
	}
	return 0;
}

static void crypt_postsuspend(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	set_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

static int crypt_preresume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	if (!test_bit(DM_CRYPT_KEY_VALID, &cc->flags)) {
		DMERR("aborting resume - crypt key is not set.");
		return -EAGAIN;
	}

	return 0;
}

static void crypt_resume(struct dm_target *ti)
{
	struct crypt_config *cc = ti->private;

	clear_bit(DM_CRYPT_SUSPENDED, &cc->flags);
}

/* Message interface
 *	key set <key>
 *	key wipe
 */
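/*
 * E.g. (illustrative): "dmsetup message cryptvol 0 key wipe" while the
 * device is suspended clears the key; a following "key set <hex key>"
 * restores it before resume.
 */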
static int crypt_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct crypt_config *cc = ti->private;
	int ret = -EINVAL;

	if (argc < 2)
		goto error;

	if (!strnicmp(argv[0], MESG_STR("key"))) {
		if (!test_bit(DM_CRYPT_SUSPENDED, &cc->flags)) {
			DMWARN("not suspended during key manipulation.");
			return -EINVAL;
		}
		if (argc == 3 && !strnicmp(argv[1], MESG_STR("set"))) {
			ret = crypt_set_key(cc, argv[2]);
			if (ret)
				return ret;
			if (cc->iv_gen_ops && cc->iv_gen_ops->init)
				ret = cc->iv_gen_ops->init(cc);
			return ret;
		}
		if (argc == 2 && !strnicmp(argv[1], MESG_STR("wipe"))) {
			if (cc->iv_gen_ops && cc->iv_gen_ops->wipe) {
				ret = cc->iv_gen_ops->wipe(cc);
				if (ret)
					return ret;
			}
			return crypt_wipe_key(cc);
		}
	}

error:
	DMWARN("unrecognised message received.");
	return -EINVAL;
}

static int crypt_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
		       struct bio_vec *biovec, int max_size)
{
	struct crypt_config *cc = ti->private;
	struct request_queue *q = bdev_get_queue(cc->dev->bdev);

	if (!q->merge_bvec_fn)
		return max_size;

	bvm->bi_bdev = cc->dev->bdev;
	bvm->bi_sector = cc->start + dm_target_offset(ti, bvm->bi_sector);

	return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static int crypt_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct crypt_config *cc = ti->private;

	return fn(ti, cc->dev, cc->start, ti->len, data);
}

static struct target_type crypt_target = {
	.name   = "crypt",
	.version = {1, 7, 0},
	.module = THIS_MODULE,
	.ctr    = crypt_ctr,
	.dtr    = crypt_dtr,
	.map    = crypt_map,
	.status = crypt_status,
	.postsuspend = crypt_postsuspend,
	.preresume = crypt_preresume,
	.resume = crypt_resume,
	.message = crypt_message,
	.merge  = crypt_merge,
	.iterate_devices = crypt_iterate_devices,
};

static int __init dm_crypt_init(void)
{
	int r;

	_crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
	if (!_crypt_io_pool)
		return -ENOMEM;

	r = dm_register_target(&crypt_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_crypt_io_pool);
	}

	return r;
}

static void __exit dm_crypt_exit(void)
{
	dm_unregister_target(&crypt_target);
	kmem_cache_destroy(_crypt_io_pool);
}

module_init(dm_crypt_init);
module_exit(dm_crypt_exit);

MODULE_AUTHOR("Christophe Saout <[email protected]>");
MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
MODULE_LICENSE("GPL");