// SPDX-License-Identifier: GPL-2.0
/*
 * Generic block device helper functions.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}

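/*
 * Illustrative sketch (not part of this file's API): how a caller is
 * expected to drive blk_next_bio().  Each call chains and submits the
 * previous bio, so only the final bio is left for the caller to wait
 * on.  "work_left", "nr_pages" and the bio setup are placeholders for
 * the caller's own bookkeeping:
 *
 *	struct bio *bio = NULL;
 *	int ret = 0;
 *
 *	while (work_left) {
 *		bio = blk_next_bio(bio, nr_pages, GFP_KERNEL);
 *		... set bi_sector, bio_set_dev(), op and size ...
 *	}
 *	if (bio) {
 *		ret = submit_bio_wait(bio);
 *		bio_put(bio);
 *	}
 */
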
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int op;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

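	/*
	 * Worked example of the alignment check below: with 4096-byte
	 * logical blocks, bdev_logical_block_size(bdev) >> 9 == 8, so
	 * bs_mask == 7 and any start sector or sector count that is not
	 * a multiple of eight 512-byte sectors fails with -EINVAL.
	 */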
	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!nr_sects)
		return -EINVAL;

	while (nr_sects) {
		sector_t req_sects = min_t(sector_t, nr_sects,
				bio_allowed_max_sectors(q));

		WARN_ON_ONCE((req_sects << 9) > UINT_MAX);

		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		sector += req_sects;
		nr_sects -= req_sects;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
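
/*
 * Illustrative sketch (hypothetical caller, error handling trimmed):
 * batching several discard ranges under a single plug with the
 * non-waiting variant above, then waiting once on the final chained bio:
 *
 *	struct bio *bio = NULL;
 *	struct blk_plug plug;
 *	int ret;
 *
 *	blk_start_plug(&plug);
 *	ret = __blkdev_issue_discard(bdev, 0, 2048, GFP_KERNEL, 0, &bio);
 *	if (!ret)
 *		ret = __blkdev_issue_discard(bdev, 4096, 2048, GFP_KERNEL,
 *					     0, &bio);
 *	if (!ret && bio) {
 *		ret = submit_bio_wait(bio);
 *		bio_put(bio);
 *	}
 *	blk_finish_plug(&plug);
 */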

/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
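
/*
 * Illustrative sketch: discarding the first megabyte of a device, i.e.
 * 2048 sectors of 512 bytes starting at sector 0 (the device and error
 * handling are hypothetical):
 *
 *	int ret = blkdev_issue_discard(bdev, 0, 2048, GFP_KERNEL, 0);
 *
 * Note that a 0 return also covers the -EOPNOTSUPP case masked above.
 */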

/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data to write
 * @biop: pointer to anchor bio
 *
 * Description:
 *    Generate and issue a number of REQ_OP_WRITE_SAME bios that all map
 *    the same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask, struct page *page,
				struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = bio_allowed_max_sectors(q);

	while (nr_sects) {
		bio = blk_next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask,
				struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
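
/*
 * Illustrative sketch: replicating one logical block of data across a
 * range.  "pattern_page" is a hypothetical page holding one logical
 * block's worth of data at offset 0:
 *
 *	ret = blkdev_issue_write_same(bdev, sector, nr_sects, GFP_KERNEL,
 *				      pattern_page);
 *
 * The device must advertise WRITE SAME support (bdev_write_same()),
 * otherwise -EOPNOTSUPP is returned.
 */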

static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_PAGES);
}
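
/*
 * Worked example for the helper above, assuming 4 KiB pages:
 * PAGE_SIZE / 512 == 8, so nr_sects in 1..8 maps to one page,
 * nr_sects == 9 maps to two pages, and anything above
 * 8 * BIO_MAX_PAGES is capped at BIO_MAX_PAGES.
 */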

static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = blk_next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
				   gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}
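
/*
 * Note on the inner loop above: bio_add_page() returns the number of
 * bytes actually added, so once the bio is full (bi_size < sz) the loop
 * breaks out and a fresh chained bio is allocated.  For example, zeroing
 * 1 GiB with 4 KiB pages needs 262144 page-sized payloads, i.e. 1024
 * bios if BIO_MAX_PAGES is 256.
 */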

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @biop: pointer to anchor bio
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);