/*
 * Copyright (C) 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 *
 * Kcopyd provides a simple interface for copying an area of one
 * block-device to one or more other block-devices, with an asynchronous
 * completion notification.
 */
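/*
 * A minimal usage sketch (caller-side code, assuming a typical
 * device-mapper target): create a client, fill in dm_io_region
 * descriptors, then submit an asynchronous copy.  The notify function is
 * called later from kcopyd's worker context.  The names copy_done,
 * struct my_ctx, ctx, src_bdev, dst_bdev, len and nr_pages below are
 * hypothetical, not part of this interface.
 *
 *      static void copy_done(int read_err, unsigned long write_err,
 *                            void *context)
 *      {
 *              struct my_ctx *ctx = context;
 *
 *              if (read_err || write_err)
 *                      ctx->failed = 1;        // hypothetical field
 *              complete(&ctx->done);           // hypothetical completion
 *      }
 *
 *      struct dm_kcopyd_client *kc;
 *      struct dm_io_region from, to;
 *      int r;
 *
 *      r = dm_kcopyd_client_create(nr_pages, &kc);
 *      if (r)
 *              return r;
 *
 *      from.bdev = src_bdev;  from.sector = 0;  from.count = len;
 *      to.bdev = dst_bdev;    to.sector = 0;    to.count = len;
 *
 *      r = dm_kcopyd_copy(kc, &from, 1, &to, 0, copy_done, ctx);
 *      ...
 *      dm_kcopyd_client_destroy(kc);
 */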

#include <linux/types.h>
#include <asm/atomic.h>
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/device-mapper.h>
#include <linux/dm-kcopyd.h>

#include "dm.h"

/*-----------------------------------------------------------------
 * Each kcopyd client has its own little pool of preallocated
 * pages for kcopyd io.
 *---------------------------------------------------------------*/
struct dm_kcopyd_client {
        spinlock_t lock;
        struct page_list *pages;
        unsigned int nr_pages;
        unsigned int nr_free_pages;

        /*
         * Block devices to unplug.
         * Non-NULL pointer means that a block device has some pending requests
         * and needs to be unplugged.
         */
        struct block_device *unplug[2];

        struct dm_io_client *io_client;

        wait_queue_head_t destroyq;
        atomic_t nr_jobs;

        mempool_t *job_pool;

        struct workqueue_struct *kcopyd_wq;
        struct work_struct kcopyd_work;

        /*
         * We maintain three lists of jobs:
         *
         * i)   jobs waiting for pages
         * ii)  jobs that have pages, and are waiting for the io to be issued.
         * iii) jobs that have completed.
         *
         * All three of these are protected by job_lock.
         */
        spinlock_t job_lock;
        struct list_head complete_jobs;
        struct list_head io_jobs;
        struct list_head pages_jobs;
};
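
/*
 * A job normally moves through these lists in this order (see
 * dispatch_job(), run_pages_job(), complete_io() and run_complete_job()
 * below):
 *
 *      dispatch_job() -> pages_jobs -> io_jobs (READ, then WRITE)
 *                                   -> complete_jobs -> caller's notify fn
 *
 * A zero-length job is pushed straight onto complete_jobs, and a job that
 * cannot get enough pages yet is pushed back onto pages_jobs until
 * completed jobs return pages to the pool.
 */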

static void wake(struct dm_kcopyd_client *kc)
{
        queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
}

static struct page_list *alloc_pl(void)
{
        struct page_list *pl;

        pl = kmalloc(sizeof(*pl), GFP_KERNEL);
        if (!pl)
                return NULL;

        pl->page = alloc_page(GFP_KERNEL);
        if (!pl->page) {
                kfree(pl);
                return NULL;
        }

        return pl;
}

static void free_pl(struct page_list *pl)
{
        __free_page(pl->page);
        kfree(pl);
}

static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
                            unsigned int nr, struct page_list **pages)
{
        struct page_list *pl;

        spin_lock(&kc->lock);
        if (kc->nr_free_pages < nr) {
                spin_unlock(&kc->lock);
                return -ENOMEM;
        }

        kc->nr_free_pages -= nr;
        for (*pages = pl = kc->pages; --nr; pl = pl->next)
                ;

        kc->pages = pl->next;
        pl->next = NULL;

        spin_unlock(&kc->lock);

        return 0;
}

static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
{
        struct page_list *cursor;

        spin_lock(&kc->lock);
        for (cursor = pl; cursor->next; cursor = cursor->next)
                kc->nr_free_pages++;

        kc->nr_free_pages++;
        cursor->next = kc->pages;
        kc->pages = pl;
        spin_unlock(&kc->lock);
}

/*
 * These three functions resize the page pool.
 */
static void drop_pages(struct page_list *pl)
{
        struct page_list *next;

        while (pl) {
                next = pl->next;
                free_pl(pl);
                pl = next;
        }
}

static int client_alloc_pages(struct dm_kcopyd_client *kc, unsigned int nr)
{
        unsigned int i;
        struct page_list *pl = NULL, *next;

        for (i = 0; i < nr; i++) {
                next = alloc_pl();
                if (!next) {
                        if (pl)
                                drop_pages(pl);
                        return -ENOMEM;
                }
                next->next = pl;
                pl = next;
        }

        kcopyd_put_pages(kc, pl);
        kc->nr_pages += nr;
        return 0;
}

static void client_free_pages(struct dm_kcopyd_client *kc)
{
        BUG_ON(kc->nr_free_pages != kc->nr_pages);
        drop_pages(kc->pages);
        kc->pages = NULL;
        kc->nr_free_pages = kc->nr_pages = 0;
}

/*-----------------------------------------------------------------
 * kcopyd_jobs need to be allocated by the *clients* of kcopyd,
 * for this reason we use a mempool to prevent the client from
 * ever having to do io (which could cause a deadlock).
 *---------------------------------------------------------------*/
struct kcopyd_job {
        struct dm_kcopyd_client *kc;
        struct list_head list;
        unsigned long flags;

        /*
         * Error state of the job.
         */
        int read_err;
        unsigned long write_err;

        /*
         * Either READ or WRITE
         */
        int rw;
        struct dm_io_region source;

        /*
         * The destinations for the transfer.
         */
        unsigned int num_dests;
        struct dm_io_region dests[DM_KCOPYD_MAX_REGIONS];

        sector_t offset;
        unsigned int nr_pages;
        struct page_list *pages;

        /*
         * Set this to ensure you are notified when the job has
         * completed.  'context' is for callback to use.
         */
        dm_kcopyd_notify_fn fn;
        void *context;

        /*
         * These fields are only used if the job has been split
         * into more manageable parts.
         */
        struct mutex lock;
        atomic_t sub_jobs;
        sector_t progress;
};

/* FIXME: this should scale with the number of pages */
#define MIN_JOBS 512
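
/*
 * Jobs are drawn from this pool with GFP_NOIO, for example:
 *
 *      job = mempool_alloc(kc->job_pool, GFP_NOIO);
 *
 * GFP_NOIO stops the allocation from recursing back into the I/O path
 * (e.g. writeback to a device that is itself serviced by kcopyd), and the
 * mempool's reserve of MIN_JOBS entries is what lets job allocation make
 * progress even under memory pressure.
 */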

static struct kmem_cache *_job_cache;

int __init dm_kcopyd_init(void)
{
        _job_cache = KMEM_CACHE(kcopyd_job, 0);
        if (!_job_cache)
                return -ENOMEM;

        return 0;
}

void dm_kcopyd_exit(void)
{
        kmem_cache_destroy(_job_cache);
        _job_cache = NULL;
}

/*
 * Functions to push and pop a job onto the head of a given job
 * list.
 */
static struct kcopyd_job *pop(struct list_head *jobs,
                              struct dm_kcopyd_client *kc)
{
        struct kcopyd_job *job = NULL;
        unsigned long flags;

        spin_lock_irqsave(&kc->job_lock, flags);

        if (!list_empty(jobs)) {
                job = list_entry(jobs->next, struct kcopyd_job, list);
                list_del(&job->list);
        }
        spin_unlock_irqrestore(&kc->job_lock, flags);

        return job;
}

static void push(struct list_head *jobs, struct kcopyd_job *job)
{
        unsigned long flags;
        struct dm_kcopyd_client *kc = job->kc;

        spin_lock_irqsave(&kc->job_lock, flags);
        list_add_tail(&job->list, jobs);
        spin_unlock_irqrestore(&kc->job_lock, flags);
}


static void push_head(struct list_head *jobs, struct kcopyd_job *job)
{
        unsigned long flags;
        struct dm_kcopyd_client *kc = job->kc;

        spin_lock_irqsave(&kc->job_lock, flags);
        list_add(&job->list, jobs);
        spin_unlock_irqrestore(&kc->job_lock, flags);
}

/*
 * These three functions process 1 item from the corresponding
 * job list.
 *
 * They return:
 * < 0: error
 *   0: success
 * > 0: can't process yet.
 */
static int run_complete_job(struct kcopyd_job *job)
{
        void *context = job->context;
        int read_err = job->read_err;
        unsigned long write_err = job->write_err;
        dm_kcopyd_notify_fn fn = job->fn;
        struct dm_kcopyd_client *kc = job->kc;

        if (job->pages)
                kcopyd_put_pages(kc, job->pages);
        mempool_free(job, kc->job_pool);
        fn(read_err, write_err, context);

        if (atomic_dec_and_test(&kc->nr_jobs))
                wake_up(&kc->destroyq);

        return 0;
}

/*
 * Unplug the block device at the specified index.
 */
static void unplug(struct dm_kcopyd_client *kc, int rw)
{
        if (kc->unplug[rw] != NULL) {
                blk_unplug(bdev_get_queue(kc->unplug[rw]));
                kc->unplug[rw] = NULL;
        }
}

/*
 * Prepare block device unplug. If there's another device
 * to be unplugged at the same array index, we unplug that
 * device first.
 */
static void prepare_unplug(struct dm_kcopyd_client *kc, int rw,
                           struct block_device *bdev)
{
        if (likely(kc->unplug[rw] == bdev))
                return;
        unplug(kc, rw);
        kc->unplug[rw] = bdev;
}

static void complete_io(unsigned long error, void *context)
{
        struct kcopyd_job *job = (struct kcopyd_job *) context;
        struct dm_kcopyd_client *kc = job->kc;

        if (error) {
                if (job->rw == WRITE)
                        job->write_err |= error;
                else
                        job->read_err = 1;

                if (!test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
                        push(&kc->complete_jobs, job);
                        wake(kc);
                        return;
                }
        }

        if (job->rw == WRITE)
                push(&kc->complete_jobs, job);

        else {
                job->rw = WRITE;
                push(&kc->io_jobs, job);
        }

        wake(kc);
}

/*
 * Request io on as many buffer heads as we can currently get for
 * a particular job.
 */
static int run_io_job(struct kcopyd_job *job)
{
        int r;
        struct dm_io_request io_req = {
                .bi_rw = job->rw,
                .mem.type = DM_IO_PAGE_LIST,
                .mem.ptr.pl = job->pages,
                .mem.offset = job->offset,
                .notify.fn = complete_io,
                .notify.context = job,
                .client = job->kc->io_client,
        };

        if (job->rw == READ) {
                r = dm_io(&io_req, 1, &job->source, NULL);
                prepare_unplug(job->kc, READ, job->source.bdev);
        } else {
                if (job->num_dests > 1)
                        io_req.bi_rw |= REQ_UNPLUG;
                r = dm_io(&io_req, job->num_dests, job->dests, NULL);
                if (!(io_req.bi_rw & REQ_UNPLUG))
                        prepare_unplug(job->kc, WRITE, job->dests[0].bdev);
        }

        return r;
}

static int run_pages_job(struct kcopyd_job *job)
{
        int r;

        job->nr_pages = dm_div_up(job->dests[0].count + job->offset,
                                  PAGE_SIZE >> 9);
        r = kcopyd_get_pages(job->kc, job->nr_pages, &job->pages);
        if (!r) {
                /* this job is ready for io */
                push(&job->kc->io_jobs, job);
                return 0;
        }

        if (r == -ENOMEM)
                /* can't complete now */
                return 1;

        return r;
}

/*
 * Run through a list for as long as possible.  Returns the count
 * of successful jobs.
 */
static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
                        int (*fn) (struct kcopyd_job *))
{
        struct kcopyd_job *job;
        int r, count = 0;

        while ((job = pop(jobs, kc))) {

                r = fn(job);

                if (r < 0) {
                        /* error this rogue job */
                        if (job->rw == WRITE)
                                job->write_err = (unsigned long) -1L;
                        else
                                job->read_err = 1;
                        push(&kc->complete_jobs, job);
                        break;
                }

                if (r > 0) {
                        /*
                         * We couldn't service this job ATM, so
                         * push this job back onto the list.
                         */
                        push_head(jobs, job);
                        break;
                }

                count++;
        }

        return count;
}

/*
 * kcopyd does this every time it's woken up.
 */
static void do_work(struct work_struct *work)
{
        struct dm_kcopyd_client *kc = container_of(work,
                                        struct dm_kcopyd_client, kcopyd_work);

        /*
         * The order that these are called is *very* important.
         * complete jobs can free some pages for pages jobs.
         * Pages jobs when successful will jump onto the io jobs
         * list.  io jobs call wake when they complete and it all
         * starts again.
         *
         * Note that io_jobs add block devices to the unplug array,
         * this array is cleared with "unplug" calls. It is thus
         * forbidden to run complete_jobs after io_jobs and before
         * unplug because the block device could be destroyed in
         * job completion callback.
         */
        process_jobs(&kc->complete_jobs, kc, run_complete_job);
        process_jobs(&kc->pages_jobs, kc, run_pages_job);
        process_jobs(&kc->io_jobs, kc, run_io_job);
        unplug(kc, READ);
        unplug(kc, WRITE);
}

/*
 * If we are copying a small region we just dispatch a single job
 * to do the copy, otherwise the io has to be split up into many
 * jobs.
 */
static void dispatch_job(struct kcopyd_job *job)
{
        struct dm_kcopyd_client *kc = job->kc;
        atomic_inc(&kc->nr_jobs);
        if (unlikely(!job->source.count))
                push(&kc->complete_jobs, job);
        else
                push(&kc->pages_jobs, job);
        wake(kc);
}

#define SUB_JOB_SIZE 128
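/*
 * SUB_JOB_SIZE is in sectors: 128 * 512-byte sectors = 64 KiB per sub-job.
 * Copies of at least this many sectors are split up (see dm_kcopyd_copy()
 * below); smaller copies are dispatched as a single job.
 */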
static void segment_complete(int read_err, unsigned long write_err,
                             void *context)
{
        /* FIXME: tidy this function */
        sector_t progress = 0;
        sector_t count = 0;
        struct kcopyd_job *job = (struct kcopyd_job *) context;
        struct dm_kcopyd_client *kc = job->kc;

        mutex_lock(&job->lock);

        /* update the error */
        if (read_err)
                job->read_err = 1;

        if (write_err)
                job->write_err |= write_err;

        /*
         * Only dispatch more work if there hasn't been an error.
         */
        if ((!job->read_err && !job->write_err) ||
            test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
                /* get the next chunk of work */
                progress = job->progress;
                count = job->source.count - progress;
                if (count) {
                        if (count > SUB_JOB_SIZE)
                                count = SUB_JOB_SIZE;

                        job->progress += count;
                }
        }
        mutex_unlock(&job->lock);

        if (count) {
                int i;
                struct kcopyd_job *sub_job = mempool_alloc(kc->job_pool,
                                                           GFP_NOIO);

                *sub_job = *job;
                sub_job->source.sector += progress;
                sub_job->source.count = count;

                for (i = 0; i < job->num_dests; i++) {
                        sub_job->dests[i].sector += progress;
                        sub_job->dests[i].count = count;
                }

                sub_job->fn = segment_complete;
                sub_job->context = job;
                dispatch_job(sub_job);

        } else if (atomic_dec_and_test(&job->sub_jobs)) {

                /*
                 * Queue the completion callback to the kcopyd thread.
                 *
                 * Some callers assume that all the completions are called
                 * from a single thread and don't race with each other.
                 *
                 * We must not call the callback directly here because this
                 * code may not be executing in the thread.
                 */
                push(&kc->complete_jobs, job);
                wake(kc);
        }
}

/*
 * Create some little jobs that will do the move between
 * them.
 */
#define SPLIT_COUNT 8
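/*
 * split_job() primes SPLIT_COUNT sub-jobs by invoking segment_complete()
 * with no error; each invocation claims the next SUB_JOB_SIZE-sector chunk
 * of the parent job, so up to SPLIT_COUNT chunks are in flight at once
 * until job->source.count is exhausted.
 */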
static void split_job(struct kcopyd_job *job)
{
        int i;

        atomic_inc(&job->kc->nr_jobs);

        atomic_set(&job->sub_jobs, SPLIT_COUNT);
        for (i = 0; i < SPLIT_COUNT; i++)
                segment_complete(0, 0u, job);
}

int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
                   unsigned int num_dests, struct dm_io_region *dests,
                   unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
{
        struct kcopyd_job *job;

        /*
         * Allocate a new job.
         */
        job = mempool_alloc(kc->job_pool, GFP_NOIO);

        /*
         * set up for the read.
         */
        job->kc = kc;
        job->flags = flags;
        job->read_err = 0;
        job->write_err = 0;
        job->rw = READ;

        job->source = *from;

        job->num_dests = num_dests;
        memcpy(&job->dests, dests, sizeof(*dests) * num_dests);

        job->offset = 0;
        job->nr_pages = 0;
        job->pages = NULL;

        job->fn = fn;
        job->context = context;

        if (job->source.count < SUB_JOB_SIZE)
                dispatch_job(job);

        else {
                mutex_init(&job->lock);
                job->progress = 0;
                split_job(job);
        }

        return 0;
}
EXPORT_SYMBOL(dm_kcopyd_copy);
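
/*
 * Error reporting to the notify function: read_err is non-zero if the
 * source read failed, while write_err accumulates the error bits that
 * dm_io reports for the destination writes (complete_io() ORs them in),
 * so a non-zero write_err means at least one destination write failed.
 */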

/*
 * Cancels a kcopyd job, eg. someone might be deactivating a
 * mirror.
 */
#if 0
int kcopyd_cancel(struct kcopyd_job *job, int block)
{
        /* FIXME: finish */
        return -1;
}
#endif  /*  0  */

/*-----------------------------------------------------------------
 * Client setup
 *---------------------------------------------------------------*/
int dm_kcopyd_client_create(unsigned int nr_pages,
                            struct dm_kcopyd_client **result)
{
        int r = -ENOMEM;
        struct dm_kcopyd_client *kc;

        kc = kmalloc(sizeof(*kc), GFP_KERNEL);
        if (!kc)
                return -ENOMEM;

        spin_lock_init(&kc->lock);
        spin_lock_init(&kc->job_lock);
        INIT_LIST_HEAD(&kc->complete_jobs);
        INIT_LIST_HEAD(&kc->io_jobs);
        INIT_LIST_HEAD(&kc->pages_jobs);

        memset(kc->unplug, 0, sizeof(kc->unplug));

        kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
        if (!kc->job_pool)
                goto bad_slab;

        INIT_WORK(&kc->kcopyd_work, do_work);
        kc->kcopyd_wq = alloc_workqueue("kcopyd",
                                        WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
        if (!kc->kcopyd_wq)
                goto bad_workqueue;

        kc->pages = NULL;
        kc->nr_pages = kc->nr_free_pages = 0;
        r = client_alloc_pages(kc, nr_pages);
        if (r)
                goto bad_client_pages;

        kc->io_client = dm_io_client_create(nr_pages);
        if (IS_ERR(kc->io_client)) {
                r = PTR_ERR(kc->io_client);
                goto bad_io_client;
        }

        init_waitqueue_head(&kc->destroyq);
        atomic_set(&kc->nr_jobs, 0);

        *result = kc;
        return 0;

bad_io_client:
        client_free_pages(kc);
bad_client_pages:
        destroy_workqueue(kc->kcopyd_wq);
bad_workqueue:
        mempool_destroy(kc->job_pool);
bad_slab:
        kfree(kc);

        return r;
}
EXPORT_SYMBOL(dm_kcopyd_client_create);

void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
{
        /* Wait for completion of all jobs submitted by this client. */
        wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));

        BUG_ON(!list_empty(&kc->complete_jobs));
        BUG_ON(!list_empty(&kc->io_jobs));
        BUG_ON(!list_empty(&kc->pages_jobs));
        destroy_workqueue(kc->kcopyd_wq);
        dm_io_client_destroy(kc->io_client);
        client_free_pages(kc);
        mempool_destroy(kc->job_pool);
        kfree(kc);
}
EXPORT_SYMBOL(dm_kcopyd_client_destroy);