/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>
#include <linux/workqueue.h>

#include "dm-exception-store.h"

#define DM_MSG_PREFIX "snapshots"

/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Reserve 1MB for each snapshot initially (with minimum of 1 page).
 */
#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)
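/*
 * Example (illustrative): with 4KiB pages, PAGE_SHIFT is 12, so
 * (1UL << 20) >> 12 reserves 256 pages per snapshot; the "?: 1"
 * guards against a zero result if PAGE_SHIFT were >= 20.
 */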

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE	16
#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))
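/*
 * Example (illustrative): DM_TRACKED_CHUNK_HASH(37) == 37 & 15 == 5,
 * i.e. the low four bits of the chunk number pick one of 16 buckets.
 */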

struct exception_table {
	uint32_t hash_mask;
	unsigned hash_shift;
	struct list_head *table;
};

struct dm_snapshot {
	struct rw_semaphore lock;

	struct dm_dev *origin;

	/* List of snapshots per Origin */
	struct list_head list;

	/* You can't use a snapshot if this is 0 (e.g. if full) */
	int valid;

	/* Origin writes don't trigger exceptions until this is set */
	int active;

	mempool_t *pending_pool;

	atomic_t pending_exceptions_count;

	struct exception_table pending;
	struct exception_table complete;

	/*
	 * pe_lock protects all pending_exception operations and access
	 * as well as the snapshot_bios list.
	 */
	spinlock_t pe_lock;

	/* The on disk metadata handler */
	struct dm_exception_store *store;

	struct dm_kcopyd_client *kcopyd_client;

	/* Queue of snapshot writes for ksnapd to flush */
	struct bio_list queued_bios;
	struct work_struct queued_bios_work;

	/* Chunks with outstanding reads */
	mempool_t *tracked_chunk_pool;
	spinlock_t tracked_chunk_lock;
	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
};

static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);

static sector_t chunk_to_sector(struct dm_exception_store *store,
				chunk_t chunk)
{
	return chunk << store->chunk_shift;
}
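
/*
 * Example (illustrative): chunk_shift is log2 of the chunk size in
 * sectors, so with an 8KiB chunk (16 sectors, chunk_shift == 4)
 * chunk 3 starts at sector 3 << 4 == 48.
 */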

static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
	/*
	 * There is only ever one instance of a particular block
	 * device so we can compare pointers safely.
	 */
	return lhs == rhs;
}

struct dm_snap_pending_exception {
	struct dm_snap_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/*
	 * Short-term queue of pending exceptions prior to submission.
	 */
	struct list_head list;

	/*
	 * The primary pending_exception is the one that holds
	 * the ref_count and the list of origin_bios for a
	 * group of pending_exceptions.  It is always last to get freed.
	 * These fields get set up when writing to the origin.
	 */
	struct dm_snap_pending_exception *primary_pe;

	/*
	 * Number of pending_exceptions processing this chunk.
	 * When this drops to zero we must complete the origin bios.
	 * If incrementing or decrementing this, hold pe->snap->lock for
	 * the sibling concerned and not pe->primary_pe->snap->lock unless
	 * they are the same.
	 */
	atomic_t ref_count;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};

static struct kmem_cache *tracked_chunk_cache;

static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
						 chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
							GFP_NOIO);
	unsigned long flags;

	c->chunk = chunk;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	return c;
}

static void stop_tracking_chunk(struct dm_snapshot *s,
				struct dm_snap_tracked_chunk *c)
{
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	mempool_free(c, s->tracked_chunk_pool);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	struct hlist_node *hn;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c, hn,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}
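
/*
 * Reads that snapshot_map() sends straight to the origin are tracked
 * here until they complete; pending_complete() polls
 * __chunk_is_tracked() so a chunk's exception is not made visible
 * while such a read is still in flight.
 */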

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);
	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}
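
/*
 * Example (illustrative): the in-kernel dev_t packs the minor number
 * into the low bits of bd_dev, so a device such as 254:3 hashes to
 * bucket 3; origins spread across the 256 buckets roughly by minor.
 */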

static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];
	list_add_tail(&o->hash_list, sl);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct dm_snapshot *l;
	struct origin *o, *new_o;
	struct block_device *bdev = snap->origin->bdev;

	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);
	o = __lookup_origin(bdev);

	if (o)
		kfree(new_o);
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	/* Sort the list according to chunk size, largest-first smallest-last */
	list_for_each_entry(l, &o->snapshots, list)
		if (l->store->chunk_size < snap->store->chunk_size)
			break;
	list_add_tail(&snap->list, &l->list);

	up_write(&_origins_lock);
	return 0;
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int init_exception_table(struct exception_table *et, uint32_t size,
				unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}
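
/*
 * Example (illustrative): the completed table is created with
 * hash_shift == DM_CHUNK_CONSECUTIVE_BITS, so runs of consecutive
 * chunks land in the same bucket and can be merged into a single
 * entry; the pending table uses hash_shift == 0 (no grouping).
 */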

static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_snap_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}

static void insert_exception(struct exception_table *eh,
			     struct dm_snap_exception *e)
{
	struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
	list_add(&e->hash_list, l);
}

static void remove_exception(struct dm_snap_exception *e)
{
	list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_snap_exception *lookup_exception(struct exception_table *et,
						  chunk_t chunk)
{
	struct list_head *slot;
	struct dm_snap_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}
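
/*
 * Example (illustrative): an entry with old_chunk == 100 and a
 * consecutive count of 3 covers old chunks 100..103, so a lookup of
 * chunk 102 matches it.
 */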

static struct dm_snap_exception *alloc_exception(void)
{
	struct dm_snap_exception *e;

	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
	if (!e)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static void free_exception(struct dm_snap_exception *e)
{
	kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, s->pending_pool);
	smp_mb__before_atomic_dec();
	atomic_dec(&s->pending_exceptions_count);
}

static void insert_completed_exception(struct dm_snapshot *s,
				       struct dm_snap_exception *new_e)
{
	struct exception_table *eh = &s->complete;
	struct list_head *l;
	struct dm_snap_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	list_for_each_entry_reverse(e, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_exception(new_e);
			return;
		}

		if (new_e->old_chunk > e->old_chunk)
			break;
	}

out:
	list_add(&new_e->hash_list, e ? &e->hash_list : l);
}
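
/*
 * Example (illustrative): the consecutive count lives in the top bits
 * of new_chunk (dm_chunk_number() strips it), so an entry mapping old
 * chunk 10 to new chunk 7 absorbs a later 11 -> 8 mapping by just
 * bumping its count instead of allocating a second entry.
 */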

/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
	struct dm_snapshot *s = context;
	struct dm_snap_exception *e;

	e = alloc_exception();
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	insert_completed_exception(s, e);

	return 0;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct list_head);

	return mem;
}
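
/*
 * Example (illustrative): on a 64-bit build sizeof(struct list_head)
 * is 16 bytes, so the 2MB budget caps the table at 131072 buckets.
 */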

/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->store->cow->bdev);
	origin_dev_size = get_dev_size(s->origin->bdev);
	max_buckets = calc_max_buckets();

	hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	hash_size = rounddown_pow_of_two(hash_size);
	if (init_exception_table(&s->complete, hash_size,
				 DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (init_exception_table(&s->pending, hash_size, 0)) {
		exit_exception_table(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}
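
/*
 * Example (illustrative): a 1GiB origin (2097152 sectors) with 8KiB
 * chunks (chunk_shift == 4) yields 131072 completed-table buckets,
 * already a power of two and within calc_max_buckets(); the pending
 * table then gets 131072 >> 3 == 16384 buckets.
 */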

/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int i;
	int r = -EINVAL;
	char *origin_path;
	struct dm_exception_store *store;
	unsigned args_used;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad_args;
	}

	origin_path = argv[0];
	argv++;
	argc--;

	r = dm_exception_store_create(ti, argc, argv, &args_used, &store);
	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad_args;
	}

	argv += args_used;
	argc -= args_used;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s) {
		ti->error = "Cannot allocate snapshot context private "
		    "structure";
		r = -ENOMEM;
		goto bad_snap;
	}

	r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad_origin;
	}

	s->store = store;
	s->valid = 1;
	s->active = 0;
	atomic_set(&s->pending_exceptions_count, 0);
	init_rwsem(&s->lock);
	spin_lock_init(&s->pe_lock);

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad_hash_tables;
	}

	r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
	if (r) {
		ti->error = "Could not create kcopyd client";
		goto bad_kcopyd;
	}

	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
	if (!s->pending_pool) {
		ti->error = "Could not allocate mempool for pending exceptions";
		goto bad_pending_pool;
	}

	s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
							 tracked_chunk_cache);
	if (!s->tracked_chunk_pool) {
		ti->error = "Could not allocate tracked_chunk mempool for "
			    "tracking reads";
		goto bad_tracked_chunk_pool;
	}

	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

	spin_lock_init(&s->tracked_chunk_lock);

	/* Metadata must only be loaded into one table at once */
	r = s->store->type->read_metadata(s->store, dm_add_exception,
					  (void *)s);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad_load_and_register;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	bio_list_init(&s->queued_bios);
	INIT_WORK(&s->queued_bios_work, flush_queued_bios);

	if (!s->store->chunk_size) {
		ti->error = "Chunk size not set";
		r = -EINVAL;
		goto bad_load_and_register;
	}

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	if (register_snapshot(s)) {
		r = -EINVAL;
		ti->error = "Cannot register snapshot origin";
		goto bad_load_and_register;
	}

	ti->private = s;
	ti->split_io = s->store->chunk_size;
	ti->num_flush_requests = 1;

	return 0;

bad_load_and_register:
	mempool_destroy(s->tracked_chunk_pool);

bad_tracked_chunk_pool:
	mempool_destroy(s->pending_pool);

bad_pending_pool:
	dm_kcopyd_client_destroy(s->kcopyd_client);

bad_kcopyd:
	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

bad_hash_tables:
	dm_put_device(ti, s->origin);

bad_origin:
	kfree(s);

bad_snap:
	dm_exception_store_destroy(store);

bad_args:
	return r;
}
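
/*
 * Example table (illustrative device names): a persistent snapshot of
 * /dev/vg/base backed by /dev/vg/cow with 16-sector (8KiB) chunks:
 *
 *   dmsetup create snap --table \
 *     "0 <origin size in sectors> snapshot /dev/vg/base /dev/vg/cow P 16"
 */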

static void __free_exceptions(struct dm_snapshot *s)
{
	dm_kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);
}

static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
	int i;
#endif
	struct dm_snapshot *s = ti->private;

	flush_workqueue(ksnapd);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	while (atomic_read(&s->pending_exceptions_count))
		msleep(1);
	/*
	 * Ensure instructions in mempool_destroy aren't reordered
	 * before atomic_read.
	 */
	smp_mb();

#ifdef CONFIG_DM_DEBUG
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

	mempool_destroy(s->tracked_chunk_pool);

	__free_exceptions(s);

	mempool_destroy(s->pending_pool);

	dm_put_device(ti, s->origin);

	dm_exception_store_destroy(s->store);

	kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

static void flush_queued_bios(struct work_struct *work)
{
	struct dm_snapshot *s =
		container_of(work, struct dm_snapshot, queued_bios_work);
	struct bio *queued_bios;
	unsigned long flags;

	spin_lock_irqsave(&s->pe_lock, flags);
	queued_bios = bio_list_get(&s->queued_bios);
	spin_unlock_irqrestore(&s->pe_lock, flags);

	flush_bios(queued_bios);
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store->type->drop_snapshot)
		s->store->type->drop_snapshot(s->store);

	s->valid = 0;

	dm_table_event(s->store->ti->table);
}

static void get_pending_exception(struct dm_snap_pending_exception *pe)
{
	atomic_inc(&pe->ref_count);
}

static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snap_pending_exception *primary_pe;
	struct bio *origin_bios = NULL;

	primary_pe = pe->primary_pe;

	/*
	 * If this pe is involved in a write to the origin and
	 * it is the last sibling to complete then release
	 * the bios for the original write to the origin.
	 */
	if (primary_pe &&
	    atomic_dec_and_test(&primary_pe->ref_count)) {
		origin_bios = bio_list_get(&primary_pe->origin_bios);
		free_pending_exception(primary_pe);
	}

	/*
	 * Free the pe if it's not linked to an origin write or if
	 * it's not itself a primary pe.
	 */
	if (!primary_pe || primary_pe != pe)
		free_pending_exception(pe);

	return origin_bios;
}
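
/*
 * Lifecycle sketch (illustrative): for an origin write covering two
 * snapshots, the primary pe ends up with ref_count 3 (one from
 * creation, one when it names itself primary, one from its sibling);
 * __origin_write() drops one, and each pending_complete() drops one
 * more via put_pending_exception(), so the origin bios are released
 * exactly when the last copy finishes.
 */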

static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_exception();
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_exception(e);
		error = 1;
		goto out;
	}

	/*
	 * Check for conflicting reads. This is extremely improbable,
	 * so msleep(1) is sufficient and there is no need for a wait queue.
	 */
	while (__chunk_is_tracked(s, pe->e.old_chunk))
		msleep(1);

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	insert_completed_exception(s, e);

out:
	remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = put_pending_exception(pe);

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error)
		error_bios(snapshot_bios);
	else
		flush_bios(snapshot_bios);

	flush_bios(origin_bios);
}

static void commit_callback(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;

	pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	if (read_err || write_err)
		pending_complete(pe, 0);

	else
		/* Update the metadata if we are persistent */
		s->store->type->commit_exception(s->store, &pe->e,
						 commit_callback, pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
	src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);

	dest.bdev = s->store->cow->bdev;
	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	dm_kcopyd_copy(s->kcopyd_client,
		       &src, 1, &dest, 0, copy_callback, pe);
}

static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_exception *e = lookup_exception(&s->pending, chunk);

	if (!e)
		return NULL;

	return container_of(e, struct dm_snap_pending_exception, e);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
			 struct dm_snap_pending_exception *pe, chunk_t chunk)
{
	struct dm_snap_pending_exception *pe2;

	pe2 = __lookup_pending_exception(s, chunk);
	if (pe2) {
		free_pending_exception(pe);
		return pe2;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->primary_pe = NULL;
	atomic_set(&pe->ref_count, 0);
	pe->started = 0;

	if (s->store->type->prepare_exception(s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	get_pending_exception(pe);
	insert_exception(&s->pending, &pe->e);

	return pe;
}

static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	bio->bi_bdev = s->store->cow->bdev;
	bio->bi_sector = chunk_to_sector(s->store,
					 dm_chunk_number(e->new_chunk) +
					 (chunk - e->old_chunk)) +
			 (bio->bi_sector &
			  s->store->chunk_mask);
}

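/*
 * Note on locking (illustrative summary): alloc_pending_exception()
 * can block in mempool_alloc(), so the write path below drops
 * s->lock around it and then revalidates: the snapshot may have been
 * invalidated, or another thread may have completed or started an
 * exception for this chunk while the lock was released.
 */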
static int snapshot_map(struct dm_target *ti, struct bio *bio,
			union map_info *map_context)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	if (unlikely(bio_empty_barrier(bio))) {
		bio->bi_bdev = s->store->cow->bdev;
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __lookup_pending_exception(s, chunk);
		if (!pe) {
			up_write(&s->lock);
			pe = alloc_pending_exception(s);
			down_write(&s->lock);

			if (!s->valid) {
				free_pending_exception(pe);
				r = -EIO;
				goto out_unlock;
			}

			e = lookup_exception(&s->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				remap_exception(s, e, bio, chunk);
				goto out_unlock;
			}

			pe = __find_pending_exception(s, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(s, -ENOMEM);
				r = -EIO;
				goto out_unlock;
			}
		}

		remap_exception(s, &pe->e, bio, chunk);
		bio_list_add(&pe->snapshot_bios, bio);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else {
		bio->bi_bdev = s->origin->bdev;
		map_context->ptr = track_chunk(s, chunk);
	}

out_unlock:
	up_write(&s->lock);
out:
	return r;
}

static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
			   int error, union map_info *map_context)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snap_tracked_chunk *c = map_context->ptr;

	if (c)
		stop_tracking_chunk(s, c);

	return 0;
}

static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}

static int snapshot_status(struct dm_target *ti, status_type_t type,
			   char *result, unsigned int maxlen)
{
	unsigned sz = 0;
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:

		down_write(&snap->lock);

		if (!snap->valid)
			DMEMIT("Invalid");
		else {
			if (snap->store->type->fraction_full) {
				sector_t numerator, denominator;
				snap->store->type->fraction_full(snap->store,
								 &numerator,
								 &denominator);
				DMEMIT("%llu/%llu",
				       (unsigned long long)numerator,
				       (unsigned long long)denominator);
			}
			else
				DMEMIT("Unknown");
		}

		up_write(&snap->lock);

		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		DMEMIT("%s", snap->origin->name);
		snap->store->type->status(snap->store, type, result + sz,
					  maxlen - sz);
		break;
	}

	return 0;
}

static int snapshot_iterate_devices(struct dm_target *ti,
				    iterate_devices_callout_fn fn, void *data)
{
	struct dm_snapshot *snap = ti->private;

	return fn(ti, snap->origin, 0, ti->len, data);
}


/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/
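
/*
 * Overview (illustrative): a write to the origin must trigger a
 * copy-out in every snapshot that has not yet remapped the chunk it
 * touches.  One pending exception is chosen as "primary" to hold the
 * original bio until all the per-snapshot copies have completed.
 */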
static int __origin_write(struct list_head *snapshots, struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED, first = 0;
	struct dm_snapshot *snap;
	struct dm_snap_exception *e;
	struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
	chunk_t chunk;
	LIST_HEAD(pe_queue);

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (bio->bi_sector >= dm_table_get_size(snap->store->ti->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap->store, bio->bi_sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 *
		 * ref_count is initialised to 1 so pending_complete()
		 * won't destroy the primary_pe while we're inside this loop.
		 */
		e = lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __lookup_pending_exception(snap, chunk);
		if (!pe) {
			up_write(&snap->lock);
			pe = alloc_pending_exception(snap);
			down_write(&snap->lock);

			if (!snap->valid) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			e = lookup_exception(&snap->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			pe = __find_pending_exception(snap, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(snap, -ENOMEM);
				goto next_snapshot;
			}
		}

		if (!primary_pe) {
			/*
			 * Either every pe here has same
			 * primary_pe or none has one yet.
			 */
			if (pe->primary_pe)
				primary_pe = pe->primary_pe;
			else {
				primary_pe = pe;
				first = 1;
			}

			bio_list_add(&primary_pe->origin_bios, bio);

			r = DM_MAPIO_SUBMITTED;
		}

		if (!pe->primary_pe) {
			pe->primary_pe = primary_pe;
			get_pending_exception(primary_pe);
		}

		if (!pe->started) {
			pe->started = 1;
			list_add_tail(&pe->list, &pe_queue);
		}

next_snapshot:
		up_write(&snap->lock);
	}

	if (!primary_pe)
		return r;

	/*
	 * If this is the first time we're processing this chunk and
	 * ref_count is now 1 it means all the pending exceptions
	 * got completed while we were in the loop above, so it falls to
	 * us here to remove the primary_pe and submit any origin_bios.
	 */

	if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
		flush_bios(bio_list_get(&primary_pe->origin_bios));
		free_pending_exception(primary_pe);
		/* If we got here, pe_queue is necessarily empty. */
		return r;
	}

	/*
	 * Now that we have a complete pe list we can start the copying.
	 */
	list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
		start_copy(pe);

	return r;
}

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio);
	up_read(&_origins_lock);

	return r;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_dev *dev;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], 0, ti->len,
			  dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Cannot get target device";
		return r;
	}

	ti->private = dev;
	ti->num_flush_requests = 1;

	return 0;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	dm_put_device(ti, dev);
}

static int origin_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct dm_dev *dev = ti->private;
	bio->bi_bdev = dev->bdev;

	if (unlikely(bio_empty_barrier(bio)))
		return DM_MAPIO_REMAPPED;

	/* Only tell snapshots if this is a write */
	return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}

#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
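/* e.g. (illustrative) min_not_zero(0, 8) == 8 and min_not_zero(4, 8) == 4 */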

/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	struct dm_snapshot *snap;
	struct origin *o;
	unsigned chunk_size = 0;

	down_read(&_origins_lock);
	o = __lookup_origin(dev->bdev);
	if (o)
		list_for_each_entry (snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size,
						  snap->store->chunk_size);
	up_read(&_origins_lock);

	ti->split_io = chunk_size;
}

static int origin_status(struct dm_target *ti, status_type_t type, char *result,
			 unsigned int maxlen)
{
	struct dm_dev *dev = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", dev->name);
		break;
	}

	return 0;
}

static int origin_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct dm_dev *dev = ti->private;

	return fn(ti, dev, 0, ti->len, data);
}

static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 7, 0},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.status  = origin_status,
	.iterate_devices = origin_iterate_devices,
};

static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 7, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.end_io  = snapshot_end_io,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};

static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_exception_store_init();
	if (r) {
		DMERR("Failed to initialize exception stores");
		return r;
	}

	r = dm_register_target(&snapshot_target);
	if (r) {
		DMERR("snapshot target register failed %d", r);
		goto bad_register_snapshot_target;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad1;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad2;
	}

	exception_cache = KMEM_CACHE(dm_snap_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad3;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad4;
	}

	tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
	if (!tracked_chunk_cache) {
		DMERR("Couldn't create cache to track chunks in use.");
		r = -ENOMEM;
		goto bad5;
	}

	ksnapd = create_singlethread_workqueue("ksnapd");
	if (!ksnapd) {
		DMERR("Failed to create ksnapd workqueue.");
		r = -ENOMEM;
		goto bad_pending_pool;
	}

	return 0;

bad_pending_pool:
	kmem_cache_destroy(tracked_chunk_cache);
bad5:
	kmem_cache_destroy(pending_cache);
bad4:
	kmem_cache_destroy(exception_cache);
bad3:
	exit_origin_hash();
bad2:
	dm_unregister_target(&origin_target);
bad1:
	dm_unregister_target(&snapshot_target);

bad_register_snapshot_target:
	dm_exception_store_exit();
	return r;
}

static void __exit dm_snapshot_exit(void)
{
	destroy_workqueue(ksnapd);

	dm_unregister_target(&snapshot_target);
	dm_unregister_target(&origin_target);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);
	kmem_cache_destroy(tracked_chunk_cache);

	dm_exception_store_exit();
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");