/*
 * Copyright (C) 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 *
 * Kcopyd provides a simple interface for copying an area of one
 * block-device to one or more other block-devices, with an asynchronous
 * completion notification.
 */

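/*
 * A minimal usage sketch (illustrative only, not part of the original
 * file).  It assumes the declarations from <linux/dm-kcopyd.h> and the
 * bdev/sector/count layout of struct dm_io_region; "src_bdev" and
 * "dst_bdev" stand for block devices the caller already holds.
 *
 *	static void copy_done(int read_err, unsigned long write_err,
 *			      void *context)
 *	{
 *		// both error arguments are zero on success
 *	}
 *
 *	struct dm_kcopyd_client *kc;
 *	struct dm_io_region from, to;
 *	int r;
 *
 *	r = dm_kcopyd_client_create(256, &kc);	// reserve 256 pages
 *	if (r)
 *		return r;
 *
 *	from.bdev = src_bdev;  from.sector = 0;  from.count = 1024;
 *	to.bdev   = dst_bdev;  to.sector   = 0;  to.count   = 1024;
 *	dm_kcopyd_copy(kc, &from, 1, &to, 0, copy_done, NULL);
 *
 *	dm_kcopyd_client_destroy(kc);		// waits for outstanding jobs
 */
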
#include <linux/types.h>
#include <asm/atomic.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/device-mapper.h>
#include <linux/dm-kcopyd.h>

/*-----------------------------------------------------------------
 * Each kcopyd client has its own little pool of preallocated
 * pages for kcopyd io.
 *---------------------------------------------------------------*/
struct dm_kcopyd_client {
	spinlock_t lock;
	struct page_list *pages;
	unsigned int nr_pages;
	unsigned int nr_free_pages;

	struct dm_io_client *io_client;

	wait_queue_head_t destroyq;
	atomic_t nr_jobs;

	mempool_t *job_pool;

	struct workqueue_struct *kcopyd_wq;
	struct work_struct kcopyd_work;

	/*
	 * We maintain three lists of jobs:
	 *
	 * i)   jobs waiting for pages
	 * ii)  jobs that have pages, and are waiting for the io to be issued.
	 * iii) jobs that have completed.
	 *
	 * All three of these are protected by job_lock.
	 */
	spinlock_t job_lock;
	struct list_head complete_jobs;
	struct list_head io_jobs;
	struct list_head pages_jobs;
};

static void wake(struct dm_kcopyd_client *kc)
{
	queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
}

static struct page_list *alloc_pl(void)

	pl = kmalloc(sizeof(*pl), GFP_KERNEL);

	pl->page = alloc_page(GFP_KERNEL);

static void free_pl(struct page_list *pl)
{
	__free_page(pl->page);
	kfree(pl);
}

static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
			    unsigned int nr, struct page_list **pages)

	if (kc->nr_free_pages < nr) {
		spin_unlock(&kc->lock);

	kc->nr_free_pages -= nr;
	for (*pages = pl = kc->pages; --nr; pl = pl->next)

	kc->pages = pl->next;

	spin_unlock(&kc->lock);

static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)

	struct page_list *cursor;

	spin_lock(&kc->lock);
	for (cursor = pl; cursor->next; cursor = cursor->next)

	cursor->next = kc->pages;

	spin_unlock(&kc->lock);

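/*
 * Illustrative note (not part of the original file): with a free list of
 * A -> B -> C -> D and nr == 2, kcopyd_get_pages() hands back the chain
 * A -> B and leaves kc->pages pointing at C -> D; kcopyd_put_pages() later
 * walks to the end of a returned chain and splices it back onto the front
 * of kc->pages.  This assumes the lines elided above also terminate the
 * taken chain and adjust nr_free_pages to match.
 */
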
/*
 * These three functions resize the page pool.
 */
static void drop_pages(struct page_list *pl)

	struct page_list *next;

static int client_alloc_pages(struct dm_kcopyd_client *kc, unsigned int nr)

	struct page_list *pl = NULL, *next;

	for (i = 0; i < nr; i++) {

	kcopyd_put_pages(kc, pl);

static void client_free_pages(struct dm_kcopyd_client *kc)
{
	BUG_ON(kc->nr_free_pages != kc->nr_pages);
	drop_pages(kc->pages);
	kc->pages = NULL;
	kc->nr_free_pages = kc->nr_pages = 0;
}

/*-----------------------------------------------------------------
 * kcopyd_jobs need to be allocated by the *clients* of kcopyd,
 * for this reason we use a mempool to prevent the client from
 * ever having to do io (which could cause a deadlock).
 *---------------------------------------------------------------*/

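/*
 * Illustrative sketch of the mempool pattern referred to above (not part
 * of the original file): each client builds its job pool from _job_cache
 * in dm_kcopyd_client_create(), so obtaining a job never requires the
 * allocator to issue io on kcopyd's behalf.
 *
 *	pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
 *	job  = mempool_alloc(pool, GFP_NOIO);	// may sleep, won't recurse into io
 *	// ... fill in and dispatch the job ...
 *	mempool_free(job, pool);
 *	mempool_destroy(pool);
 */
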
struct kcopyd_job {
	struct dm_kcopyd_client *kc;
	struct list_head list;
	unsigned long flags;

	/*
	 * Error state of the job.
	 */
	int read_err;
	unsigned long write_err;

	/*
	 * Either READ or WRITE
	 */
	int rw;
	struct dm_io_region source;

	/*
	 * The destinations for the transfer.
	 */
	unsigned int num_dests;
	struct dm_io_region dests[DM_KCOPYD_MAX_REGIONS];

	sector_t offset;
	unsigned int nr_pages;
	struct page_list *pages;

	/*
	 * Set this to ensure you are notified when the job has
	 * completed. 'context' is for callback to use.
	 */
	dm_kcopyd_notify_fn fn;
	void *context;

	/*
	 * These fields are only used if the job has been split
	 * into more manageable parts.
	 */
	struct mutex lock;
	atomic_t sub_jobs;
	sector_t progress;
};

/* FIXME: this should scale with the number of pages */
static struct kmem_cache *_job_cache;

int __init dm_kcopyd_init(void)

	_job_cache = KMEM_CACHE(kcopyd_job, 0);

void dm_kcopyd_exit(void)

	kmem_cache_destroy(_job_cache);

/*
 * Functions to push and pop a job onto the head of a given job
 * list.
 */
static struct kcopyd_job *pop(struct list_head *jobs,
			      struct dm_kcopyd_client *kc)
{
	struct kcopyd_job *job = NULL;
	unsigned long flags;

	spin_lock_irqsave(&kc->job_lock, flags);

	if (!list_empty(jobs)) {
		job = list_entry(jobs->next, struct kcopyd_job, list);
		list_del(&job->list);
	}
	spin_unlock_irqrestore(&kc->job_lock, flags);

	return job;
}

static void push(struct list_head *jobs, struct kcopyd_job *job)
{
	unsigned long flags;
	struct dm_kcopyd_client *kc = job->kc;

	spin_lock_irqsave(&kc->job_lock, flags);
	list_add_tail(&job->list, jobs);
	spin_unlock_irqrestore(&kc->job_lock, flags);
}

static void push_head(struct list_head *jobs, struct kcopyd_job *job)
{
	unsigned long flags;
	struct dm_kcopyd_client *kc = job->kc;

	spin_lock_irqsave(&kc->job_lock, flags);
	list_add(&job->list, jobs);
	spin_unlock_irqrestore(&kc->job_lock, flags);
}

/*
 * These three functions process 1 item from the corresponding
 * job list.
 *
 * They return:
 * < 0: error
 *   0: success
 * > 0: can't process yet.
 */
static int run_complete_job(struct kcopyd_job *job)
{
	void *context = job->context;
	int read_err = job->read_err;
	unsigned long write_err = job->write_err;
	dm_kcopyd_notify_fn fn = job->fn;
	struct dm_kcopyd_client *kc = job->kc;

	kcopyd_put_pages(kc, job->pages);
	mempool_free(job, kc->job_pool);
	fn(read_err, write_err, context);

	if (atomic_dec_and_test(&kc->nr_jobs))
		wake_up(&kc->destroyq);

	return 0;
}

static void complete_io(unsigned long error, void *context)

	struct kcopyd_job *job = (struct kcopyd_job *) context;
	struct dm_kcopyd_client *kc = job->kc;

		if (job->rw == WRITE)
			job->write_err |= error;

		if (!test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
			push(&kc->complete_jobs, job);

	if (job->rw == WRITE)
		push(&kc->complete_jobs, job);

		push(&kc->io_jobs, job);

/*
 * Request io on as many buffer heads as we can currently get for
 * a particular job.
 */
static int run_io_job(struct kcopyd_job *job)
{
	int r;
	struct dm_io_request io_req = {
		.bi_rw = job->rw,
		.mem.type = DM_IO_PAGE_LIST,
		.mem.ptr.pl = job->pages,
		.mem.offset = job->offset,
		.notify.fn = complete_io,
		.notify.context = job,
		.client = job->kc->io_client,
	};

	if (job->rw == READ)
		r = dm_io(&io_req, 1, &job->source, NULL);
	else
		r = dm_io(&io_req, job->num_dests, job->dests, NULL);

	return r;
}

static int run_pages_job(struct kcopyd_job *job)

	job->nr_pages = dm_div_up(job->dests[0].count + job->offset,
				  PAGE_SIZE >> 9);
	r = kcopyd_get_pages(job->kc, job->nr_pages, &job->pages);

		/* this job is ready for io */
		push(&job->kc->io_jobs, job);

	/* can't complete now */

/*
 * Run through a list for as long as possible. Returns the count
 * of successful jobs.
 */
static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
			int (*fn) (struct kcopyd_job *))

	struct kcopyd_job *job;

	while ((job = pop(jobs, kc))) {

			/* error this rogue job */
			if (job->rw == WRITE)
				job->write_err = (unsigned long) -1L;

			push(&kc->complete_jobs, job);

			/*
			 * We couldn't service this job ATM, so
			 * push this job back onto the list.
			 */
			push_head(jobs, job);

/*
 * kcopyd does this every time it's woken up.
 */
static void do_work(struct work_struct *work)
{
	struct dm_kcopyd_client *kc = container_of(work,
					struct dm_kcopyd_client, kcopyd_work);
	struct blk_plug plug;

	/*
	 * The order that these are called is *very* important.
	 * complete jobs can free some pages for pages jobs.
	 * Pages jobs when successful will jump onto the io jobs
	 * list. io jobs call wake when they complete and it all
	 * starts again.
	 */
	blk_start_plug(&plug);
	process_jobs(&kc->complete_jobs, kc, run_complete_job);
	process_jobs(&kc->pages_jobs, kc, run_pages_job);
	process_jobs(&kc->io_jobs, kc, run_io_job);
	blk_finish_plug(&plug);
}

/*
 * If we are copying a small region we just dispatch a single job
 * to do the copy, otherwise the io has to be split up into many
 * jobs.
 */
static void dispatch_job(struct kcopyd_job *job)
{
	struct dm_kcopyd_client *kc = job->kc;
	atomic_inc(&kc->nr_jobs);
	if (unlikely(!job->source.count))
		push(&kc->complete_jobs, job);
	else
		push(&kc->pages_jobs, job);
	wake(kc);
}

#define SUB_JOB_SIZE 128
static void segment_complete(int read_err, unsigned long write_err,
			     void *context)

	/* FIXME: tidy this function */
	sector_t progress = 0;
	sector_t count = 0;
	struct kcopyd_job *job = (struct kcopyd_job *) context;
	struct dm_kcopyd_client *kc = job->kc;

	mutex_lock(&job->lock);

	/* update the error */

		job->write_err |= write_err;

	/*
	 * Only dispatch more work if there hasn't been an error.
	 */
	if ((!job->read_err && !job->write_err) ||
	    test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
		/* get the next chunk of work */
		progress = job->progress;
		count = job->source.count - progress;

			if (count > SUB_JOB_SIZE)
				count = SUB_JOB_SIZE;

			job->progress += count;

	mutex_unlock(&job->lock);

		struct kcopyd_job *sub_job = mempool_alloc(kc->job_pool,
							   GFP_NOIO);

		sub_job->source.sector += progress;
		sub_job->source.count = count;

		for (i = 0; i < job->num_dests; i++) {
			sub_job->dests[i].sector += progress;
			sub_job->dests[i].count = count;
		}

		sub_job->fn = segment_complete;
		sub_job->context = job;
		dispatch_job(sub_job);

	} else if (atomic_dec_and_test(&job->sub_jobs)) {

		/*
		 * Queue the completion callback to the kcopyd thread.
		 *
		 * Some callers assume that all the completions are called
		 * from a single thread and don't race with each other.
		 *
		 * We must not call the callback directly here because this
		 * code may not be executing in the thread.
		 */
		push(&kc->complete_jobs, job);

/*
 * Create some little jobs that will do the move between
 * them.
 */
#define SPLIT_COUNT 8
static void split_job(struct kcopyd_job *job)
{
	int i;

	atomic_inc(&job->kc->nr_jobs);

	atomic_set(&job->sub_jobs, SPLIT_COUNT);
	for (i = 0; i < SPLIT_COUNT; i++)
		segment_complete(0, 0u, job);
}

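/*
 * Worked example (not from the original file): a 2048 sector copy is
 * larger than SUB_JOB_SIZE (128 sectors), so dm_kcopyd_copy() below hands
 * it to split_job().  split_job() seeds SPLIT_COUNT (8) sub-jobs by
 * calling segment_complete() directly; each sub-job covers 128 sectors,
 * and every completion claims the next 128 sector chunk until
 * job->progress reaches job->source.count, i.e. 2048 / 128 = 16 chunks in
 * total with at most 8 in flight at once.
 */
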
int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
		   unsigned int num_dests, struct dm_io_region *dests,
		   unsigned int flags, dm_kcopyd_notify_fn fn, void *context)

	struct kcopyd_job *job;

	/*
	 * Allocate a new job.
	 */
	job = mempool_alloc(kc->job_pool, GFP_NOIO);

	/*
	 * set up for the read.
	 */

	job->num_dests = num_dests;
	memcpy(&job->dests, dests, sizeof(*dests) * num_dests);

	job->context = context;

	if (job->source.count < SUB_JOB_SIZE)

	mutex_init(&job->lock);

EXPORT_SYMBOL(dm_kcopyd_copy);

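/*
 * Illustrative note (not part of the original file): because completions
 * are queued back to the client's single kcopyd workqueue (see the
 * comment in segment_complete() above), callbacks for one client never
 * run concurrently, so a caller can keep simple per-copy state without
 * extra locking.  "struct copy_ctx" and all_regions_copied() below are
 * hypothetical:
 *
 *	struct copy_ctx {
 *		unsigned int remaining;		// regions still being copied
 *	};
 *
 *	static void region_done(int read_err, unsigned long write_err,
 *				void *context)
 *	{
 *		struct copy_ctx *ctx = context;
 *
 *		if (!--ctx->remaining)		// safe: callbacks are serialized
 *			all_regions_copied(ctx);
 *	}
 */
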
/*
 * Cancels a kcopyd job, e.g. someone might be deactivating a
 * device.
 */
int kcopyd_cancel(struct kcopyd_job *job, int block)

/*-----------------------------------------------------------------
 * Client setup
 *---------------------------------------------------------------*/
int dm_kcopyd_client_create(unsigned int nr_pages,
			    struct dm_kcopyd_client **result)

	struct dm_kcopyd_client *kc;

	kc = kmalloc(sizeof(*kc), GFP_KERNEL);

	spin_lock_init(&kc->lock);
	spin_lock_init(&kc->job_lock);
	INIT_LIST_HEAD(&kc->complete_jobs);
	INIT_LIST_HEAD(&kc->io_jobs);
	INIT_LIST_HEAD(&kc->pages_jobs);

	kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);

	INIT_WORK(&kc->kcopyd_work, do_work);
	kc->kcopyd_wq = alloc_workqueue("kcopyd",
					WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);

	kc->nr_pages = kc->nr_free_pages = 0;
	r = client_alloc_pages(kc, nr_pages);
		goto bad_client_pages;

	kc->io_client = dm_io_client_create(nr_pages);
	if (IS_ERR(kc->io_client)) {
		r = PTR_ERR(kc->io_client);

	init_waitqueue_head(&kc->destroyq);
	atomic_set(&kc->nr_jobs, 0);

	client_free_pages(kc);

	destroy_workqueue(kc->kcopyd_wq);

	mempool_destroy(kc->job_pool);

EXPORT_SYMBOL(dm_kcopyd_client_create);

void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
{
	/* Wait for completion of all jobs submitted by this client. */
	wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));

	BUG_ON(!list_empty(&kc->complete_jobs));
	BUG_ON(!list_empty(&kc->io_jobs));
	BUG_ON(!list_empty(&kc->pages_jobs));
	destroy_workqueue(kc->kcopyd_wq);
	dm_io_client_destroy(kc->io_client);
	client_free_pages(kc);
	mempool_destroy(kc->job_pool);
	kfree(kc);
}
EXPORT_SYMBOL(dm_kcopyd_client_destroy);