/*
 * Copyright (C) 2011-2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#ifndef DM_BIO_PRISON_H
#define DM_BIO_PRISON_H

#include "persistent-data/dm-block-manager.h" /* FIXME: for dm_block_t */
#include "dm-thin-metadata.h" /* FIXME: for dm_thin_id */

#include <linux/bio.h>
#include <linux/rbtree.h>

/*----------------------------------------------------------------*/

/*
 * Sometimes we can't deal with a bio straight away.  We put it in prison
 * where it can't cause any mischief.  Bios are put in a cell identified
 * by a key; multiple bios can be in the same cell.  When the cell is
 * subsequently unlocked the bios become available.
 */
struct dm_bio_prison;

/*
 * Keys define a range of blocks within either a virtual or physical
 * device.
 */
struct dm_cell_key {
	int virtual;
	dm_thin_id dev;
	dm_block_t block_begin, block_end;
};

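/*
 * Illustrative sketch (not part of this header): how a client might fill
 * in a dm_cell_key covering a single block of the data device.  The helper
 * name is hypothetical, virtual == 0 is assumed to select the physical
 * device, and block_end is assumed to be the exclusive end of the range.
 */
static inline void example_build_data_key(dm_thin_id dev, dm_block_t b,
					  struct dm_cell_key *key)
{
	key->virtual = 0;		/* assumed: physical (data) device */
	key->dev = dev;
	key->block_begin = b;
	key->block_end = b + 1;		/* assumed exclusive: single block */
}
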
/*
 * Treat this as opaque; it is only in the header so callers can manage
 * allocation themselves.
 */
struct dm_bio_prison_cell {
	struct list_head user_list;	/* for client use */
	struct rb_node node;

	struct dm_cell_key key;
	struct bio *holder;
	struct bio_list bios;
};

struct dm_bio_prison *dm_bio_prison_create(void);
void dm_bio_prison_destroy(struct dm_bio_prison *prison);

/*
 * These two functions just wrap a mempool.  This is a transitory step:
 * eventually all bio prison clients should manage their own cell memory.
 *
 * Like mempool_alloc(), dm_bio_prison_alloc_cell() can only fail if called
 * in interrupt context or passed GFP_NOWAIT.
 */
struct dm_bio_prison_cell *dm_bio_prison_alloc_cell(struct dm_bio_prison *prison,
						    gfp_t gfp);
void dm_bio_prison_free_cell(struct dm_bio_prison *prison,
			     struct dm_bio_prison_cell *cell);

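/*
 * Illustrative sketch (not part of this header): typical lifecycle of a
 * prison and a preallocated cell.  The function name is hypothetical and
 * error handling is minimal.
 */
static inline int example_prison_lifecycle(void)
{
	struct dm_bio_prison *prison;
	struct dm_bio_prison_cell *cell;

	prison = dm_bio_prison_create();
	if (!prison)
		return -ENOMEM;

	/* GFP_NOIO may block but, per the comment above, will not fail. */
	cell = dm_bio_prison_alloc_cell(prison, GFP_NOIO);

	/* ... use the cell with dm_get_cell() or dm_bio_detain() ... */

	dm_bio_prison_free_cell(prison, cell);
	dm_bio_prison_destroy(prison);
	return 0;
}
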
/*
 * Creates or retrieves a cell that overlaps the given key.
 *
 * Returns 1 if a pre-existing cell was returned, 0 if a new cell was
 * created using @cell_prealloc.
 */
int dm_get_cell(struct dm_bio_prison *prison,
		struct dm_cell_key *key,
		struct dm_bio_prison_cell *cell_prealloc,
		struct dm_bio_prison_cell **cell_result);

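/*
 * Illustrative sketch (not part of this header): taking the lock on a
 * block range with dm_get_cell().  @key is assumed to have been filled in
 * by the caller; the function name is hypothetical.
 */
static inline struct dm_bio_prison_cell *
example_lock_range(struct dm_bio_prison *prison, struct dm_cell_key *key)
{
	struct dm_bio_prison_cell *prealloc, *cell;

	/* GFP_NOIO, so this cannot fail (see the allocation comment above). */
	prealloc = dm_bio_prison_alloc_cell(prison, GFP_NOIO);

	if (dm_get_cell(prison, key, prealloc, &cell)) {
		/* A pre-existing cell overlapped @key; @prealloc is unused. */
		dm_bio_prison_free_cell(prison, prealloc);
	}
	/* Otherwise @prealloc was inserted and is returned as @cell. */

	return cell;
}
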
/*
 * An atomic op that combines retrieving or creating a cell, and adding a
 * bio to it.
 *
 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
 */
int dm_bio_detain(struct dm_bio_prison *prison,
		  struct dm_cell_key *key,
		  struct bio *inmate,
		  struct dm_bio_prison_cell *cell_prealloc,
		  struct dm_bio_prison_cell **cell_result);

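/*
 * Illustrative sketch (not part of this header): detaining a bio and,
 * once the holder's work is done, releasing everything that accumulated
 * in the cell.  Names are hypothetical and requeueing of the released
 * bios is left to the caller.
 */
static inline void example_detain_and_release(struct dm_bio_prison *prison,
					      struct dm_cell_key *key,
					      struct bio *bio)
{
	struct dm_bio_prison_cell *prealloc, *cell;
	struct bio_list bios;

	prealloc = dm_bio_prison_alloc_cell(prison, GFP_NOIO);

	if (dm_bio_detain(prison, key, bio, prealloc, &cell)) {
		/* Cell already held: @bio is now queued behind the holder. */
		dm_bio_prison_free_cell(prison, prealloc);
		return;
	}

	/* @bio is the holder; do the work that required the lock ... */

	bio_list_init(&bios);
	dm_cell_release(prison, cell, &bios);	/* holder plus any inmates */
	dm_bio_prison_free_cell(prison, cell);

	/* ... remap or reissue the bios collected on &bios ... */
}
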
void dm_cell_release(struct dm_bio_prison *prison,
		     struct dm_bio_prison_cell *cell,
		     struct bio_list *bios);
void dm_cell_release_no_holder(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell,
			       struct bio_list *inmates);
void dm_cell_error(struct dm_bio_prison *prison,
		   struct dm_bio_prison_cell *cell, int error);

/*
 * Visits the cell and then releases it.  Guarantees no new inmates are
 * inserted between the visit and release.
 */
void dm_cell_visit_release(struct dm_bio_prison *prison,
			   void (*visit_fn)(void *, struct dm_bio_prison_cell *),
			   void *context, struct dm_bio_prison_cell *cell);

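/*
 * Illustrative sketch (not part of this header): a visit_fn that collects
 * the cell's prisoners onto a caller-supplied bio_list before the cell is
 * released.  It is assumed here that the holder is stored separately from
 * the queued inmates; the function name is hypothetical.
 *
 * Possible usage:
 *	bio_list_init(&bios);
 *	dm_cell_visit_release(prison, example_collect_bios, &bios, cell);
 */
static inline void example_collect_bios(void *context,
					struct dm_bio_prison_cell *cell)
{
	struct bio_list *bios = context;

	/* Runs with the prison lock held, so keep the work minimal. */
	if (cell->holder)
		bio_list_add(bios, cell->holder);
	bio_list_merge(bios, &cell->bios);
}
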
/*
 * Rather than always releasing the prisoners in a cell, the client may
 * want to promote one of them to be the new holder.  There is a race,
 * though, between releasing an empty cell and other threads adding new
 * inmates.  So this function makes the decision with its lock held.
 *
 * This function can have two outcomes:
 * i) An inmate is promoted to be the holder of the cell (return value of 0).
 * ii) The cell has no inmate for promotion and is released (return value of 1).
 */
int dm_cell_promote_or_release(struct dm_bio_prison *prison,
			       struct dm_bio_prison_cell *cell);

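/*
 * Illustrative sketch (not part of this header): once the holder's work is
 * done, either hand the cell to a waiting inmate or, if it is empty, tear
 * it down.  The function name, and the assumption that the promoted bio
 * becomes visible as cell->holder, are not taken from this header.
 */
static inline void example_finish_with_cell(struct dm_bio_prison *prison,
					    struct dm_bio_prison_cell *cell)
{
	if (dm_cell_promote_or_release(prison, cell)) {
		/* No inmates were waiting; the cell has been released. */
		dm_bio_prison_free_cell(prison, cell);
		return;
	}

	/*
	 * An inmate was promoted (assumed to be cell->holder now); hand it
	 * back to the client's worker to be reissued.
	 */
}
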
/*----------------------------------------------------------------*/

/*
 * We use the deferred set to keep track of pending reads to shared blocks.
 * We do this to ensure the new mapping caused by a write isn't performed
 * until these prior reads have completed.  Otherwise the insertion of the
 * new mapping could free the old block that the read bios are mapped to.
 */

struct dm_deferred_set;
struct dm_deferred_entry;

struct dm_deferred_set *dm_deferred_set_create(void);
void dm_deferred_set_destroy(struct dm_deferred_set *ds);

struct dm_deferred_entry *dm_deferred_entry_inc(struct dm_deferred_set *ds);
void dm_deferred_entry_dec(struct dm_deferred_entry *entry, struct list_head *head);
int dm_deferred_set_add_work(struct dm_deferred_set *ds, struct list_head *work);

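/*
 * Illustrative sketch (not part of this header): bracketing a read to a
 * shared block with inc/dec so that deferred work only runs once all such
 * reads have drained.  The function names are hypothetical, and the exact
 * semantics of the @head list are assumed from the comment above: entries
 * whose pending I/O has completed have their queued work moved onto it.
 */
static inline struct dm_deferred_entry *
example_begin_shared_read(struct dm_deferred_set *ds)
{
	/* Take a reference against the set for the duration of the read. */
	return dm_deferred_entry_inc(ds);
}

static inline void example_end_shared_read(struct dm_deferred_entry *entry,
					   struct list_head *completed_work)
{
	/*
	 * Drop the reference; work that is no longer blocked by pending
	 * reads is collected on @completed_work for the caller to process.
	 */
	dm_deferred_entry_dec(entry, completed_work);
}
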
/*----------------------------------------------------------------*/

#endif