/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};
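
/*
 * The client's mempool supplies 'struct io' objects and its bio_set
 * supplies bios, so a client can keep making forward progress with its
 * io even under memory pressure.
 */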

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct task_struct *sleeper;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

static struct kmem_cache *_dm_io_cache;

/*
 * io contexts are only dynamically allocated for asynchronous
 * io.  Since async io is likely to be the majority of io we'll
 * have the same number of io contexts as bios! (FIXME: must reduce this).
 */

static unsigned int pages_to_ios(unsigned int pages)
{
	return 4 * pages;	/* too many ? */
}
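
/*
 * Worked example: a client created with num_pages = 16 gets a mempool of
 * 4 * 16 = 64 preallocated 'struct io' objects to draw from.
 */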

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(unsigned num_pages)
{
	unsigned ios = pages_to_ios(num_pages);
	struct dm_io_client *client;

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(ios, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(16, 0);
	if (!client->bios)
		goto bad;

	return client;

   bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
{
	return mempool_resize(client->pool, pages_to_ios(num_pages),
			      GFP_KERNEL);
}
EXPORT_SYMBOL(dm_io_client_resize);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
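
/*
 * Illustrative client lifecycle (a sketch, not part of the original file;
 * error handling trimmed):
 *
 *	struct dm_io_client *client = dm_io_client_create(16);
 *
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 *	... issue io through dm_io() ...
 *	dm_io_client_destroy(client);
 */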

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
					    unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}
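
/*
 * Worked example of the packing above: on a 64-bit machine
 * DM_IO_MAX_REGIONS is 64, so 'struct io' is 64-byte aligned and the low
 * six bits of its address are always zero.  A region number in the range
 * 0..63 therefore fits in those bits:
 *
 *	bi_private = io | region
 *	io         = bi_private & ~63UL
 *	region     = bi_private & 63
 */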

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count)) {
		if (io->sleeper)
			wake_up_process(io->sleeper);

		else {
			unsigned long r = io->error_bits;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, io->client->pool);
			fn(r, context);
		}
	}
}
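
/*
 * Reference counting scheme: io->count starts at 1 (the reference held by
 * dispatch_io()), do_region() takes one reference per bio it submits, and
 * each completing bio drops one via dec_count().  Whoever drops the final
 * reference either wakes the sleeping sync_io() caller or fires the async
 * callback.
 */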

static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;
};
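
/*
 * Each memory source below (page list, bvec, vmalloc'd buffer, kernel
 * memory) supplies its own get_page/next_page pair, so the io path can
 * iterate over destination pages without caring where they came from.
 */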

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len;
	*offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
	dp->get_page = bvec_get_page;
	dp->next_page = bvec_next_page;
	dp->context_ptr = bvec;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}
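
/*
 * The vm_* helpers walk a vmalloc'd buffer one page at a time;
 * vmalloc_to_page() is needed because such a buffer is only virtually
 * contiguous.
 */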

static void dm_bio_destructor(struct bio *bio)
{
	unsigned region;
	struct io *io;

	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_free(bio, io->client->bios);
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}
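
/*
 * The km_* helpers perform the same walk for directly-mapped kernel
 * memory, where virt_to_page() suffices because the pages are physically
 * contiguous.
 */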

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;

	/*
	 * where->count may be zero if rw holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably-sized bio.
		 */
		num_bvecs = dm_sector_div_up(remaining,
					     (PAGE_SIZE >> SECTOR_SHIFT));
		num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), num_bvecs);
		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		bio->bi_destructor = dm_bio_destructor;
		store_io_and_region_in_bio(bio, io, region);

		/*
		 * Try and add as many pages as possible.
		 */
		while (remaining) {
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	} while (remaining);
}
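
/*
 * A region that doesn't fit in a single bio (or whose pages can't all be
 * added by bio_add_page()) is simply split: the loop above keeps
 * allocating and submitting bios, each taking its own reference on the io
 * object, until 'remaining' reaches zero.
 */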

static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		rw |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (rw & REQ_FLUSH))
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	/*
	 * gcc <= 4.3 can't do the alignment for stack variables, so we must
	 * align it on our own.
	 * volatile prevents the optimizer from removing or reusing
	 * "io_" field from the stack frame (allowed in ANSI C).
	 */
	volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
	struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	io->error_bits = 0;
	atomic_set(&io->count, 1);	/* see dispatch_io() */
	io->sleeper = current;
	io->client = client;

	dispatch_io(rw, num_regions, where, dp, io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io->count))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (error_bits)
		*error_bits = io->error_bits;

	return io->error_bits ? -EIO : 0;
}
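
/*
 * sync_io() returns -EIO if any region failed; the per-region detail is
 * reported through *error_bits, where bit i is set if region i saw an
 * error (see dec_count() above).
 */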

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1);	/* see dispatch_io() */
	io->sleeper = NULL;
	io->client = client;
	io->callback = fn;
	io->context = context;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
{
	/* Set up dpages based on memory type */
	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BVEC:
		bvec_dp_init(dp, io_req->mem.ptr.bvec);
		break;

	case DM_IO_VMA:
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set REQ_SYNC in
 * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
 * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
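
/*
 * Minimal usage sketch (an illustration, not part of the original file):
 * a synchronous read of 'count' sectors into a kernel buffer.  It is
 * synchronous because notify.fn is left NULL; the field names are those
 * of struct dm_io_request and struct dm_io_region from <linux/dm-io.h>,
 * while bdev, start, count, buffer and client are the caller's.
 *
 *	struct dm_io_region where = {
 *		.bdev	= bdev,
 *		.sector	= start,
 *		.count	= count,
 *	};
 *	struct dm_io_request req = {
 *		.bi_rw		= READ,
 *		.mem.type	= DM_IO_KMEM,
 *		.mem.ptr.addr	= buffer,
 *		.client		= client,
 *	};
 *	unsigned long error_bits;
 *	int r = dm_io(&req, 1, &where, &error_bits);
 */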

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}