// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic helper functions
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

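/*
 * Allocate a new bio and, if an existing bio is passed in, chain it to
 * the new one and submit it. Callers can thus build an arbitrarily long
 * chain of bios while only ever holding a reference to the newest one.
 */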
static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
        gfp_t gfp)
{
        struct bio *new = bio_alloc(gfp, nr_pages);

        if (bio) {
                bio_chain(bio, new);
                submit_bio(bio);
        }

        return new;
}

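/*
 * The last bio of the generated chain is returned through @biop; the
 * caller is responsible for submitting it (see blkdev_issue_discard(),
 * which uses submit_bio_wait()).
 */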
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
        sector_t nr_sects, gfp_t gfp_mask, int flags,
        struct bio **biop)
{
        struct request_queue *q = bdev_get_queue(bdev);
        struct bio *bio = *biop;
        unsigned int granularity;
        unsigned int op;
        int alignment;
        sector_t bs_mask;

        if (!q)
                return -ENXIO;

        if (flags & BLKDEV_DISCARD_SECURE) {
                if (!blk_queue_secure_erase(q))
                        return -EOPNOTSUPP;
                op = REQ_OP_SECURE_ERASE;
        } else {
                if (!blk_queue_discard(q))
                        return -EOPNOTSUPP;
                op = REQ_OP_DISCARD;
        }

        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

        /* Zero-sector (unknown) and one-sector granularities are the same. */
        granularity = max(q->limits.discard_granularity >> 9, 1U);
        alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

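        /*
         * Worked example (illustrative): with an 8-sector (4 KiB)
         * granularity and alignment == 4, a split that would end at
         * sector 26 is misaligned (26 % 8 == 2 != 4), so the loop below
         * trims it back to end at sector 20 (20 % 8 == 4), the previous
         * aligned boundary.
         */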
        while (nr_sects) {
                unsigned int req_sects;
                sector_t end_sect, tmp;

                /* Make sure bi_size doesn't overflow */
                req_sects = min_t(sector_t, nr_sects, UINT_MAX >> 9);

                /*
                 * If splitting a request, and the next starting sector would be
                 * misaligned, stop the discard at the previous aligned sector.
                 */
                end_sect = sector + req_sects;
                tmp = end_sect;
                if (req_sects < nr_sects &&
                    sector_div(tmp, granularity) != alignment) {
                        end_sect = end_sect - alignment;
                        sector_div(end_sect, granularity);
                        end_sect = end_sect * granularity + alignment;
                        req_sects = end_sect - sector;
                }

                bio = next_bio(bio, 0, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio_set_dev(bio, bdev);
                bio_set_op_attrs(bio, op, 0);

                bio->bi_iter.bi_size = req_sects << 9;
                nr_sects -= req_sects;
                sector = end_sect;

                /*
                 * We can loop for a long time in here, if someone does
                 * full device discards (like mkfs). Be nice and allow
                 * us to schedule out to avoid softlocking if preempt
                 * is disabled.
                 */
                cond_resched();
        }

        *biop = bio;
        return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
        struct bio *bio = NULL;
        struct blk_plug plug;
        int ret;

        blk_start_plug(&plug);
        ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
                        &bio);
        if (!ret && bio) {
                ret = submit_bio_wait(bio);
                if (ret == -EOPNOTSUPP)
                        ret = 0;
                bio_put(bio);
        }
        blk_finish_plug(&plug);

        return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
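
/*
 * Usage sketch (illustrative only, not part of the original file):
 * discard the first MiB of an already-opened device, where "bdev" is
 * an assumed, valid struct block_device pointer:
 *
 *	int err = blkdev_issue_discard(bdev, 0, 2048, GFP_KERNEL, 0);
 *	if (err)
 *		pr_warn("discard failed: %d\n", err);
 *
 * 2048 sectors x 512 bytes = 1 MiB; both @sector and @nr_sects must be
 * multiples of the logical block size in sectors, or -EINVAL is returned.
 */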

/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data to write
 * @biop: pointer to anchor bio
 *
 * Description:
 *  Generate and issue a number of bios (REQ_OP_WRITE_SAME) with the same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, struct page *page,
                struct bio **biop)
{
        struct request_queue *q = bdev_get_queue(bdev);
        unsigned int max_write_same_sectors;
        struct bio *bio = *biop;
        sector_t bs_mask;

        if (!q)
                return -ENXIO;

        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

        if (!bdev_write_same(bdev))
                return -EOPNOTSUPP;

        /* Ensure that max_write_same_sectors doesn't overflow bi_size */
        max_write_same_sectors = UINT_MAX >> 9;

        while (nr_sects) {
                bio = next_bio(bio, 1, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio_set_dev(bio, bdev);
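                /*
                 * A single bvec describes one logical block of data; the
                 * device replicates it across the entire bi_size range.
                 */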
                bio->bi_vcnt = 1;
                bio->bi_io_vec->bv_page = page;
                bio->bi_io_vec->bv_offset = 0;
                bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
                bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

                if (nr_sects > max_write_same_sectors) {
                        bio->bi_iter.bi_size = max_write_same_sectors << 9;
                        nr_sects -= max_write_same_sectors;
                        sector += max_write_same_sectors;
                } else {
                        bio->bi_iter.bi_size = nr_sects << 9;
                        nr_sects = 0;
                }
                cond_resched();
        }

        *biop = bio;
        return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
                                sector_t nr_sects, gfp_t gfp_mask,
                                struct page *page)
{
        struct bio *bio = NULL;
        struct blk_plug plug;
        int ret;

        blk_start_plug(&plug);
        ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
                        &bio);
        if (ret == 0 && bio) {
                ret = submit_bio_wait(bio);
                bio_put(bio);
        }
        blk_finish_plug(&plug);
        return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
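
/*
 * Usage sketch (illustrative only): replicate a page of zeroes across a
 * range, assuming "bdev" is a valid, opened struct block_device:
 *
 *	int err = blkdev_issue_write_same(bdev, sector, nr_sects,
 *					  GFP_KERNEL, ZERO_PAGE(0));
 *
 * The device must support WRITE SAME (bdev_write_same() non-zero),
 * otherwise -EOPNOTSUPP is returned.
 */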
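
/*
 * Submit a chain of REQ_OP_WRITE_ZEROES bios for the given range, or
 * return -EOPNOTSUPP if the device advertises no WRITE ZEROES support.
 * As elsewhere in this file, the tail bio is returned through @biop.
 */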
static int __blkdev_issue_write_zeroes(struct block_device *bdev,
                sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
                struct bio **biop, unsigned flags)
{
        struct bio *bio = *biop;
        unsigned int max_write_zeroes_sectors;
        struct request_queue *q = bdev_get_queue(bdev);

        if (!q)
                return -ENXIO;

        /* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
        max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

        if (max_write_zeroes_sectors == 0)
                return -EOPNOTSUPP;

        while (nr_sects) {
                bio = next_bio(bio, 0, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio_set_dev(bio, bdev);
                bio->bi_opf = REQ_OP_WRITE_ZEROES;
                if (flags & BLKDEV_ZERO_NOUNMAP)
                        bio->bi_opf |= REQ_NOUNMAP;

                if (nr_sects > max_write_zeroes_sectors) {
                        bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
                        nr_sects -= max_write_zeroes_sectors;
                        sector += max_write_zeroes_sectors;
                } else {
                        bio->bi_iter.bi_size = nr_sects << 9;
                        nr_sects = 0;
                }
                cond_resched();
        }

        *biop = bio;
        return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
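/*
 * Worked example (illustrative): with 4 KiB pages (8 sectors per page),
 * 7 sectors round up to 1 page and 9 sectors to 2 pages; any count above
 * BIO_MAX_PAGES * 8 sectors is clamped to BIO_MAX_PAGES.
 */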
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
        sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

        return min(pages, (sector_t)BIO_MAX_PAGES);
}

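/*
 * Write zeroes the old-fashioned way: a chain of REQ_OP_WRITE bios whose
 * pages all point at ZERO_PAGE(0). Used as the fallback when the device
 * has no hardware zeroing offload.
 */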
static int __blkdev_issue_zero_pages(struct block_device *bdev,
                sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
                struct bio **biop)
{
        struct request_queue *q = bdev_get_queue(bdev);
        struct bio *bio = *biop;
        int bi_size = 0;
        unsigned int sz;

        if (!q)
                return -ENXIO;

        while (nr_sects != 0) {
                bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
                               gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio_set_dev(bio, bdev);
                bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

                while (nr_sects != 0) {
                        sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
                        bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
                        nr_sects -= bi_size >> 9;
                        sector += bi_size >> 9;
                        if (bi_size < sz)
                                break;
                }
                cond_resched();
        }

        *biop = bio;
        return 0;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev: blockdev to issue
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @biop: pointer to anchor bio
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
                unsigned flags)
{
        int ret;
        sector_t bs_mask;

        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

        ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
                        biop, flags);
        if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
                return ret;

        return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
                                         biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device. See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
                sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
        int ret = 0;
        sector_t bs_mask;
        struct bio *bio;
        struct blk_plug plug;
        bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

retry:
        bio = NULL;
        blk_start_plug(&plug);
        if (try_write_zeroes) {
                ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
                                                  gfp_mask, &bio, flags);
        } else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
                ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
                                                gfp_mask, &bio);
        } else {
                /* No zeroing offload support */
                ret = -EOPNOTSUPP;
        }
        if (ret == 0 && bio) {
                ret = submit_bio_wait(bio);
                bio_put(bio);
        }
        blk_finish_plug(&plug);
        if (ret && try_write_zeroes) {
                if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
                        try_write_zeroes = false;
                        goto retry;
                }
                if (!bdev_write_zeroes_sectors(bdev)) {
                        /*
                         * Zeroing offload support was indicated, but the
                         * device reported ILLEGAL REQUEST (for some devices
                         * there is no non-destructive way to verify whether
                         * WRITE ZEROES is actually supported).
                         */
                        ret = -EOPNOTSUPP;
                }
        }

        return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
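
/*
 * Usage sketch (illustrative only): zero a range, preferring the hardware
 * offload but allowing the zero-page fallback, without unmapping:
 *
 *	int err = blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
 *				       BLKDEV_ZERO_NOUNMAP);
 *
 * Passing BLKDEV_ZERO_NOFALLBACK instead would fail with -EOPNOTSUPP on
 * devices without a working WRITE ZEROES implementation.
 */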