/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

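/*
 * Allocate a new bio and, if @bio is non-NULL, chain @bio to the new one
 * and submit @bio, so callers can build up a long series of bios and only
 * wait on the final one.
 */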
static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
		gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int granularity;
	unsigned int op;
	int alignment;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		/* Make sure bi_size doesn't overflow */
		req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);

		/*
		 * If splitting a request, and the next starting sector would
		 * be misaligned, stop the discard at the previous aligned
		 * sector.
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}
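		/*
		 * Worked example with hypothetical numbers: granularity 8,
		 * alignment 0, sector 0, req_sects capped to 13 while more
		 * sectors remain. end_sect (13) is misaligned, so it is
		 * rounded down to 8 and this bio discards sectors [0, 8),
		 * leaving the next bio to start on a granularity boundary.
		 */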

		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
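/*
 * Example (illustrative sketch, not part of the original file): a caller
 * discarding the first 1 MiB of a device might do
 *
 *	int err = blkdev_issue_discard(bdev, 0, 2048, GFP_NOFS, 0);
 *	if (err)
 *		pr_warn("discard failed: %d\n", err);
 *
 * where 2048 is 1 MiB expressed in 512-byte sectors.
 */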

/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing the data to write
 * @biop: pointer to anchor bio
 *
 * Description:
 *    Generate and issue a number of REQ_OP_WRITE_SAME bios that all carry
 *    the same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;

	while (nr_sects) {
		bio = next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
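		/*
		 * The bio carries a single bvec: one logical block of @page,
		 * which the device repeats across the requested range.
		 */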
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask,
				struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
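/*
 * Example (illustrative sketch): writing a repeating pattern over a range
 * might look like
 *
 *	err = blkdev_issue_write_same(bdev, sector, nr_sects, GFP_KERNEL,
 *				      pattern_page);
 *
 * where pattern_page is a caller-allocated page whose first logical block
 * holds the pattern to repeat.
 */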
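/*
 * Issue REQ_OP_WRITE_ZEROES bios for the given range, splitting at the
 * device's reported write-zeroes limit. Fails with -EOPNOTSUPP when the
 * device does not advertise write-zeroes support.
 */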
static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t bytes = (nr_sects << 9) + PAGE_SIZE - 1;

	return min(bytes >> PAGE_SHIFT, (sector_t)BIO_MAX_PAGES);
}
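/*
 * For example, with 4 KiB pages a single 512-byte sector rounds up to one
 * page, while very large ranges are clamped to BIO_MAX_PAGES.
 */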

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev: blockdev to issue
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @biop: pointer to anchor bio
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  Note that this function may fail with -EOPNOTSUPP if the driver signals
 *  zeroing offload support, but the device fails to process the command (for
 *  some devices there is no non-destructive way to verify whether this
 *  operation is actually supported). In this case the caller should retry
 *  the call to blkdev_issue_zeroout() and the fallback path will be used.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	int bi_size = 0;
	struct bio *bio = *biop;
	unsigned int sz;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
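	/*
	 * Fall through to the explicit zero-writing loop only when the
	 * offload path reported -EOPNOTSUPP and the caller allows a
	 * fallback; any other result is returned as-is.
	 */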
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		goto out;

	ret = 0;
	while (nr_sects != 0) {
		bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
			       gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio->bi_bdev = bdev;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
out:
	return ret;
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device. See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret;
	struct bio *bio = NULL;
	struct blk_plug plug;

	blk_start_plug(&plug);
	ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
			&bio, flags);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
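/*
 * Example (illustrative sketch): zeroing a range while keeping the blocks
 * provisioned might look like
 *
 *	err = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
 *				   BLKDEV_ZERO_NOUNMAP);
 *
 * Passing BLKDEV_ZERO_NOFALLBACK instead makes the call fail with
 * -EOPNOTSUPP rather than fall back to writing zeroes explicitly.
 */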