/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/dm-dirty-log.h>
#include <linux/dm-region-hash.h>

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include "dm.h"

#define DM_MSG_PREFIX "region hash"

/*-----------------------------------------------------------------
 * Region hash
 *
 * The mirror splits itself up into discrete regions. Each
 * region can be in one of three states: clean, dirty,
 * nosync. There is no need to put clean regions in the hash.
 *
 * In addition to being present in the hash table a region _may_
 * be present on one of four lists.
 *
 * clean_regions: Regions on this list have no io pending to
 * them, they are in sync, we are no longer interested in them,
 * they are dull. dm_rh_update_states() will remove them from the
 * hash table.
 *
 * quiesced_regions: These regions have been spun down, ready
 * for recovery. dm_rh_recovery_start() will remove regions from
 * this list and hand them to kmirrord, which will schedule the
 * recovery io with kcopyd.
 *
 * recovered_regions: Regions that kcopyd has successfully
 * recovered. dm_rh_update_states() will now schedule any delayed
 * io, up the recovery_count, and remove the region from the
 * hash.
 *
 * failed_recovered_regions: Regions that kcopyd failed to
 * recover. dm_rh_update_states() still dispatches any delayed io
 * and removes the region from the hash, but leaves the region out
 * of sync in the dirty log when errors are being handled.
 *
 * There are 2 locks:
 * A rw spin lock 'hash_lock' protects just the hash table,
 * this is never held in write mode from interrupt context,
 * which I believe means that we only have to disable irqs when
 * doing a write lock.
 *
 * An ordinary spin lock 'region_lock' that protects the four
 * lists in the region_hash, plus the 'state', 'list' and
 * 'delayed_bios' fields of the regions. This is used from irq
 * context, so all other uses will have to suspend local irqs.
 *---------------------------------------------------------------*/
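
/*
 * For illustration only: a mirror-style caller would typically drive the
 * region hash roughly like this (sketch derived from the functions below,
 * not lifted from any particular target):
 *
 *	Write path:
 *		dm_rh_inc_pending(rh, &writes);		(mark regions dirty)
 *		...submit the writes...
 *		dm_rh_dec(rh, region);			(on write completion)
 *		dm_rh_update_states(rh, errors_handled);	(from the worker)
 *
 *	Recovery path:
 *		dm_rh_recovery_prepare(rh);	(quiesce regions needing resync)
 *		reg = dm_rh_recovery_start(rh);	(pull a quiesced region)
 *		...copy the region, e.g. with kcopyd...
 *		dm_rh_recovery_end(reg, success);	(queue for update_states)
 */
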
struct dm_region_hash {
	uint32_t region_size;
	unsigned region_shift;

	/* holds persistent region state */
	struct dm_dirty_log *log;

	/* hash table */
	rwlock_t hash_lock;
	mempool_t *region_pool;
	unsigned mask;
	unsigned nr_buckets;
	unsigned prime;
	unsigned shift;
	struct list_head *buckets;

	unsigned max_recovery; /* Max # of regions to recover in parallel */

	spinlock_t region_lock;
	atomic_t recovery_in_flight;
	struct semaphore recovery_count;
	struct list_head clean_regions;
	struct list_head quiesced_regions;
	struct list_head recovered_regions;
	struct list_head failed_recovered_regions;

	/*
	 * If there was a flush failure no regions can be marked clean.
	 */
	int flush_failure;

	void *context;
	sector_t target_begin;

	/* Callback function to schedule bio writes */
	void (*dispatch_bios)(void *context, struct bio_list *bios);

	/* Callback function to wake up the caller's worker thread. */
	void (*wakeup_workers)(void *context);

	/* Callback function to wake up the caller's recovery waiters. */
	void (*wakeup_all_recovery_waiters)(void *context);
};

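/*
 * Per-region state. 'pending' counts writes in flight to the region,
 * 'delayed_bios' holds writes queued while the region is recovering,
 * 'hash_list' links the region into its hash bucket and 'list' links it
 * onto one of the state lists above.
 */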
struct dm_region {
	struct dm_region_hash *rh;	/* FIXME: can we get rid of this ? */
	region_t key;
	int state;

	struct list_head hash_list;
	struct list_head list;

	atomic_t pending;
	struct bio_list delayed_bios;
};

/*
 * Conversion fns
 */
static region_t dm_rh_sector_to_region(struct dm_region_hash *rh, sector_t sector)
{
	return sector >> rh->region_shift;
}

sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region)
{
	return region << rh->region_shift;
}
EXPORT_SYMBOL_GPL(dm_rh_region_to_sector);

region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
{
	return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector -
				      rh->target_begin);
}
EXPORT_SYMBOL_GPL(dm_rh_bio_to_region);

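/*
 * Note: the shift-based conversions above assume region_size is a power
 * of two; dm_region_hash_create() derives region_shift with
 * __ffs(region_size), which equals log2(region_size) only in that case.
 * dm_rh_bio_to_region() also subtracts target_begin, so regions are
 * numbered relative to the start of the target.
 */
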
void *dm_rh_region_context(struct dm_region *reg)
{
	return reg->rh->context;
}
EXPORT_SYMBOL_GPL(dm_rh_region_context);

region_t dm_rh_get_region_key(struct dm_region *reg)
{
	return reg->key;
}
EXPORT_SYMBOL_GPL(dm_rh_get_region_key);

sector_t dm_rh_get_region_size(struct dm_region_hash *rh)
{
	return rh->region_size;
}
EXPORT_SYMBOL_GPL(dm_rh_get_region_size);

/*
 * FIXME: shall we pass in a structure instead of all these args to
 * dm_region_hash_create()????
 */
#define RH_HASH_MULT 2654435387U
#define RH_HASH_SHIFT 12

#define MIN_REGIONS 64
struct dm_region_hash *dm_region_hash_create(
		void *context, void (*dispatch_bios)(void *context,
						     struct bio_list *bios),
		void (*wakeup_workers)(void *context),
		void (*wakeup_all_recovery_waiters)(void *context),
		sector_t target_begin, unsigned max_recovery,
		struct dm_dirty_log *log, uint32_t region_size,
		region_t nr_regions)
{
	struct dm_region_hash *rh;
	unsigned nr_buckets, max_buckets;
	size_t i;

	/*
	 * Calculate a suitable number of buckets for our hash
	 * table: a power of two close to nr_regions / 64, with a
	 * floor of 64 buckets.
	 */
	max_buckets = nr_regions >> 6;
	for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
		;
	nr_buckets >>= 1;

	rh = kmalloc(sizeof(*rh), GFP_KERNEL);
	if (!rh) {
		DMERR("unable to allocate region hash memory");
		return ERR_PTR(-ENOMEM);
	}

	rh->context = context;
	rh->dispatch_bios = dispatch_bios;
	rh->wakeup_workers = wakeup_workers;
	rh->wakeup_all_recovery_waiters = wakeup_all_recovery_waiters;
	rh->target_begin = target_begin;
	rh->max_recovery = max_recovery;
	rh->log = log;
	rh->region_size = region_size;
	rh->region_shift = __ffs(region_size);
	rwlock_init(&rh->hash_lock);
	rh->mask = nr_buckets - 1;
	rh->nr_buckets = nr_buckets;

	rh->shift = RH_HASH_SHIFT;
	rh->prime = RH_HASH_MULT;

	rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
	if (!rh->buckets) {
		DMERR("unable to allocate region hash bucket memory");
		kfree(rh);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < nr_buckets; i++)
		INIT_LIST_HEAD(rh->buckets + i);

	spin_lock_init(&rh->region_lock);
	sema_init(&rh->recovery_count, 0);
	atomic_set(&rh->recovery_in_flight, 0);
	INIT_LIST_HEAD(&rh->clean_regions);
	INIT_LIST_HEAD(&rh->quiesced_regions);
	INIT_LIST_HEAD(&rh->recovered_regions);
	INIT_LIST_HEAD(&rh->failed_recovered_regions);
	rh->flush_failure = 0;

	rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
						      sizeof(struct dm_region));
	if (!rh->region_pool) {
		vfree(rh->buckets);
		kfree(rh);
		rh = ERR_PTR(-ENOMEM);
	}

	return rh;
}
EXPORT_SYMBOL_GPL(dm_region_hash_create);

void dm_region_hash_destroy(struct dm_region_hash *rh)
{
	unsigned h;
	struct dm_region *reg, *nreg;

	BUG_ON(!list_empty(&rh->quiesced_regions));
	for (h = 0; h < rh->nr_buckets; h++) {
		list_for_each_entry_safe(reg, nreg, rh->buckets + h,
					 hash_list) {
			BUG_ON(atomic_read(&reg->pending));
			mempool_free(reg, rh->region_pool);
		}
	}

	if (rh->log)
		dm_dirty_log_destroy(rh->log);

	mempool_destroy(rh->region_pool);
	vfree(rh->buckets);
	kfree(rh);
}
EXPORT_SYMBOL_GPL(dm_region_hash_destroy);

struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh)
{
	return rh->log;
}
EXPORT_SYMBOL_GPL(dm_rh_dirty_log);

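/*
 * Multiplicative hashing: spread the region number over the buckets by
 * multiplying with a large constant (RH_HASH_MULT), discarding the low
 * RH_HASH_SHIFT bits and masking down to the power-of-two bucket count.
 */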
static unsigned rh_hash(struct dm_region_hash *rh, region_t region)
{
	return (unsigned) ((region * rh->prime) >> rh->shift) & rh->mask;
}

static struct dm_region *__rh_lookup(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;
	struct list_head *bucket = rh->buckets + rh_hash(rh, region);

	list_for_each_entry(reg, bucket, hash_list)
		if (reg->key == region)
			return reg;

	return NULL;
}

static void __rh_insert(struct dm_region_hash *rh, struct dm_region *reg)
{
	list_add(&reg->hash_list, rh->buckets + rh_hash(rh, reg->key));
}

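/*
 * Allocate a region and insert it into the hash. Called without hash_lock
 * held; takes hash_lock for write and re-checks the hash so that a racing
 * insertion of the same region wins and the duplicate is freed.
 */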
static struct dm_region *__rh_alloc(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg, *nreg;

	nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
	if (unlikely(!nreg))
		nreg = kmalloc(sizeof(*nreg), GFP_NOIO | __GFP_NOFAIL);

	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
		      DM_RH_CLEAN : DM_RH_NOSYNC;
	nreg->rh = rh;
	nreg->key = region;
	INIT_LIST_HEAD(&nreg->list);
	atomic_set(&nreg->pending, 0);
	bio_list_init(&nreg->delayed_bios);

	write_lock_irq(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	if (reg)
		/* We lost the race. */
		mempool_free(nreg, rh->region_pool);
	else {
		__rh_insert(rh, nreg);
		if (nreg->state == DM_RH_CLEAN) {
			spin_lock(&rh->region_lock);
			list_add(&nreg->list, &rh->clean_regions);
			spin_unlock(&rh->region_lock);
		}

		reg = nreg;
	}
	write_unlock_irq(&rh->hash_lock);

	return reg;
}

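/*
 * Look up a region, allocating it if it is not yet hashed. Called with
 * hash_lock held for read; the read lock is dropped and re-taken around
 * the allocation, so the returned region may have been inserted by a
 * racing thread.
 */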
static struct dm_region *__rh_find(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;

	reg = __rh_lookup(rh, region);
	if (!reg) {
		read_unlock(&rh->hash_lock);
		reg = __rh_alloc(rh, region);
		read_lock(&rh->hash_lock);
	}

	return reg;
}

int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block)
{
	int r;
	struct dm_region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	if (reg)
		return reg->state;

	/*
	 * The region wasn't in the hash, so we fall back to the
	 * dirty log.
	 */
	r = rh->log->type->in_sync(rh->log, region, may_block);

	/*
	 * Any error from the dirty log (eg. -EWOULDBLOCK) gets
	 * taken as a DM_RH_NOSYNC
	 */
	return r == 1 ? DM_RH_CLEAN : DM_RH_NOSYNC;
}
EXPORT_SYMBOL_GPL(dm_rh_get_state);

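/*
 * Record the outcome of a region's recovery in the dirty log, release any
 * delayed bios and drop the recovery accounting (recovery_in_flight and
 * the recovery_count semaphore).
 */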
static void complete_resync_work(struct dm_region *reg, int success)
{
	struct dm_region_hash *rh = reg->rh;

	rh->log->type->set_region_sync(rh->log, reg->key, success);

	/*
	 * Dispatch the bios before we call 'wake_up_all'.
	 * This is important because if we are suspending,
	 * we want to know that recovery is complete and
	 * the work queue is flushed. If we wake_up_all
	 * before we dispatch_bios (queue bios and call wake()),
	 * then we risk suspending before the work queue
	 * has been properly flushed.
	 */
	rh->dispatch_bios(rh->context, &reg->delayed_bios);
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		rh->wakeup_all_recovery_waiters(rh->context);
	up(&rh->recovery_count);
}

/* dm_rh_mark_nosync
 * @rh
 * @bio
 *
 * The bio was written on some mirror(s) but failed on other mirror(s).
 * We can successfully endio the bio but should avoid the region being
 * marked clean by setting the state DM_RH_NOSYNC.
 *
 * This function is _not_ safe in interrupt context!
 */
void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
{
	unsigned long flags;
	struct dm_dirty_log *log = rh->log;
	struct dm_region *reg;
	region_t region = dm_rh_bio_to_region(rh, bio);
	int recovering = 0;

	if (bio->bi_opf & REQ_PREFLUSH) {
		rh->flush_failure = 1;
		return;
	}

	if (bio_op(bio) == REQ_OP_DISCARD)
		return;

	/* We must inform the log that the sync count has changed. */
	log->type->set_region_sync(log, region, 0);

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	/* region hash entry should exist because write was in-flight */
	BUG_ON(!reg);
	BUG_ON(!list_empty(&reg->list));

	spin_lock_irqsave(&rh->region_lock, flags);
	/*
	 * Possible cases:
	 *   1) DM_RH_DIRTY
	 *   2) DM_RH_NOSYNC: was dirty, other preceding writes failed
	 *   3) DM_RH_RECOVERING: flushing pending writes
	 * In any of these cases the region should not have been linked
	 * onto a list.
	 */
	recovering = (reg->state == DM_RH_RECOVERING);
	reg->state = DM_RH_NOSYNC;
	BUG_ON(!list_empty(&reg->list));
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (recovering)
		complete_resync_work(reg, 0);
}
EXPORT_SYMBOL_GPL(dm_rh_mark_nosync);

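/*
 * Reap the clean, recovered and failed-recovered lists: the regions are
 * unhashed and unlinked under the locks, then, with no locks held, the
 * dirty log is updated, any delayed bios are dispatched and the regions
 * are freed. The dirty log is flushed at the end.
 */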
void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled)
{
	struct dm_region *reg, *next;

	LIST_HEAD(clean);
	LIST_HEAD(recovered);
	LIST_HEAD(failed_recovered);

	/*
	 * Quickly grab the lists.
	 */
	write_lock_irq(&rh->hash_lock);
	spin_lock(&rh->region_lock);
	if (!list_empty(&rh->clean_regions)) {
		list_splice_init(&rh->clean_regions, &clean);

		list_for_each_entry(reg, &clean, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->recovered_regions)) {
		list_splice_init(&rh->recovered_regions, &recovered);

		list_for_each_entry(reg, &recovered, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->failed_recovered_regions)) {
		list_splice_init(&rh->failed_recovered_regions,
				 &failed_recovered);

		list_for_each_entry(reg, &failed_recovered, list)
			list_del(&reg->hash_list);
	}

	spin_unlock(&rh->region_lock);
	write_unlock_irq(&rh->hash_lock);

	/*
	 * All the regions on the recovered and clean lists have
	 * now been pulled out of the system, so no need to do
	 * any more locking.
	 */
	list_for_each_entry_safe(reg, next, &recovered, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		complete_resync_work(reg, 1);
		mempool_free(reg, rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &failed_recovered, list) {
		complete_resync_work(reg, errors_handled ? 0 : 1);
		mempool_free(reg, rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &clean, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		mempool_free(reg, rh->region_pool);
	}

	rh->log->type->flush(rh->log);
}
EXPORT_SYMBOL_GPL(dm_rh_update_states);

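/*
 * Count a pending write against a region; on the clean -> dirty
 * transition the region is taken off the clean list and marked in the
 * dirty log.
 */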
static void rh_inc(struct dm_region_hash *rh, region_t region)
{
	struct dm_region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);

	spin_lock_irq(&rh->region_lock);
	atomic_inc(&reg->pending);

	if (reg->state == DM_RH_CLEAN) {
		reg->state = DM_RH_DIRTY;
		list_del_init(&reg->list);	/* take off the clean list */
		spin_unlock_irq(&rh->region_lock);

		rh->log->type->mark_region(rh->log, reg->key);
	} else
		spin_unlock_irq(&rh->region_lock);

	read_unlock(&rh->hash_lock);
}

void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
{
	struct bio *bio;

	for (bio = bios->head; bio; bio = bio->bi_next) {
		if (bio->bi_opf & REQ_PREFLUSH || bio_op(bio) == REQ_OP_DISCARD)
			continue;
		rh_inc(rh, dm_rh_bio_to_region(rh, bio));
	}
}
EXPORT_SYMBOL_GPL(dm_rh_inc_pending);

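/*
 * Drop the pending count taken by dm_rh_inc_pending(). When the last
 * pending write completes, the region is moved to the quiesced or clean
 * list as appropriate (NOSYNC regions stay off the lists) and the
 * caller's worker is woken.
 */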
void dm_rh_dec(struct dm_region_hash *rh, region_t region)
{
	unsigned long flags;
	struct dm_region *reg;
	int should_wake = 0;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irqsave(&rh->region_lock, flags);
	if (atomic_dec_and_test(&reg->pending)) {
		/*
		 * There is no pending I/O for this region.
		 * We can move the region to the corresponding list for
		 * the next action.
		 * At this point, the region is not yet connected to any list.
		 *
		 * If the state is DM_RH_NOSYNC, the region should be kept off
		 * the clean list.
		 * The hash entry for DM_RH_NOSYNC will remain in memory
		 * until the region is recovered or the map is reloaded.
		 */

		/* do nothing for DM_RH_NOSYNC */
		if (unlikely(rh->flush_failure)) {
			/*
			 * If a write flush failed some time ago, we
			 * don't know whether or not this write made it
			 * to the disk, so we must resync the device.
			 */
			reg->state = DM_RH_NOSYNC;
		} else if (reg->state == DM_RH_RECOVERING) {
			list_add_tail(&reg->list, &rh->quiesced_regions);
		} else if (reg->state == DM_RH_DIRTY) {
			reg->state = DM_RH_CLEAN;
			list_add(&reg->list, &rh->clean_regions);
		}
		should_wake = 1;
	}
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (should_wake)
		rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_dec);

/*
 * Starts quiescing a region in preparation for recovery.
 */
static int __rh_recovery_prepare(struct dm_region_hash *rh)
{
	int r;
	region_t region;
	struct dm_region *reg;

	/*
	 * Ask the dirty log what's next.
	 */
	r = rh->log->type->get_resync_work(rh->log, &region);
	if (r <= 0)
		return r;

	/*
	 * Get this region, and start it quiescing by setting the
	 * recovering flag.
	 */
	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irq(&rh->region_lock);
	reg->state = DM_RH_RECOVERING;

	/* Already quiesced ? */
	if (atomic_read(&reg->pending))
		list_del_init(&reg->list);
	else
		list_move(&reg->list, &rh->quiesced_regions);

	spin_unlock_irq(&rh->region_lock);

	return 1;
}

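/*
 * Quiesce regions that the dirty log wants resynced, up to max_recovery
 * at a time (bounded by the recovery_count semaphore). Each successfully
 * prepared region holds one recovery_in_flight reference until its
 * recovery completes.
 */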
void dm_rh_recovery_prepare(struct dm_region_hash *rh)
{
	/* Extra reference to avoid race with dm_rh_stop_recovery */
	atomic_inc(&rh->recovery_in_flight);

	while (!down_trylock(&rh->recovery_count)) {
		atomic_inc(&rh->recovery_in_flight);
		if (__rh_recovery_prepare(rh) <= 0) {
			atomic_dec(&rh->recovery_in_flight);
			up(&rh->recovery_count);
			break;
		}
	}

	/* Drop the extra reference */
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		rh->wakeup_all_recovery_waiters(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_prepare);

/*
 * Return a quiesced region ready for recovery, or NULL if none are
 * waiting.
 */
struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh)
{
	struct dm_region *reg = NULL;

	spin_lock_irq(&rh->region_lock);
	if (!list_empty(&rh->quiesced_regions)) {
		reg = list_entry(rh->quiesced_regions.next,
				 struct dm_region, list);
		list_del_init(&reg->list);	/* remove from the quiesced list */
	}
	spin_unlock_irq(&rh->region_lock);

	return reg;
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_start);

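/*
 * Report the outcome of recovering a region: it is queued on the
 * recovered or failed_recovered list for dm_rh_update_states() to reap,
 * and the caller's worker is woken.
 */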
void dm_rh_recovery_end(struct dm_region *reg, int success)
{
	struct dm_region_hash *rh = reg->rh;

	spin_lock_irq(&rh->region_lock);
	if (success)
		list_add(&reg->list, &reg->rh->recovered_regions);
	else
		list_add(&reg->list, &reg->rh->failed_recovered_regions);

	spin_unlock_irq(&rh->region_lock);

	rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_end);

/* Return recovery in flight count. */
int dm_rh_recovery_in_flight(struct dm_region_hash *rh)
{
	return atomic_read(&rh->recovery_in_flight);
}
EXPORT_SYMBOL_GPL(dm_rh_recovery_in_flight);

int dm_rh_flush(struct dm_region_hash *rh)
{
	return rh->log->type->flush(rh->log);
}
EXPORT_SYMBOL_GPL(dm_rh_flush);

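/*
 * Hold a bio back on its region's delayed list (typically because the
 * region is recovering); the delayed bios are handed back through the
 * dispatch_bios callback once recovery of the region completes.
 */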
void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio)
{
	struct dm_region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, dm_rh_bio_to_region(rh, bio));
	bio_list_add(&reg->delayed_bios, bio);
	read_unlock(&rh->hash_lock);
}
EXPORT_SYMBOL_GPL(dm_rh_delay);

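/*
 * dm_rh_stop_recovery() claims all max_recovery slots of the
 * recovery_count semaphore, waiting for in-flight recoveries to finish
 * and preventing new ones from starting; dm_rh_start_recovery() releases
 * the slots again and wakes the worker.
 */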
void dm_rh_stop_recovery(struct dm_region_hash *rh)
{
	int i;

	/* wait for any recovering regions */
	for (i = 0; i < rh->max_recovery; i++)
		down(&rh->recovery_count);
}
EXPORT_SYMBOL_GPL(dm_rh_stop_recovery);

void dm_rh_start_recovery(struct dm_region_hash *rh)
{
	int i;

	for (i = 0; i < rh->max_recovery; i++)
		up(&rh->recovery_count);

	rh->wakeup_workers(rh->context);
}
EXPORT_SYMBOL_GPL(dm_rh_start_recovery);

MODULE_DESCRIPTION(DM_NAME " region hash");
MODULE_AUTHOR("Joe Thornber/Heinz Mauelshagen <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");