// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic block device helpers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

/*
 * Allocate a new bio; if @bio is non-NULL, chain it to the new bio and
 * submit it, so callers can build up a long chain of bios incrementally.
 */
struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int op;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

	/* The range must be aligned to the logical block size. */
	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	while (nr_sects) {
		unsigned int req_sects = min_t(unsigned int, nr_sects,
				bio_allowed_max_sectors(q));

		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		sector += req_sects;
		nr_sects -= req_sects;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * isn't enabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
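/*
 * Illustrative sketch (not part of this file): a caller that wants to batch
 * a discard with other work can use the __blkdev_issue_discard() form and
 * submit the returned chain itself. "my_bdev" and the range are hypothetical.
 *
 *	struct bio *bio = NULL;
 *	int err;
 *
 *	err = __blkdev_issue_discard(my_bdev, 0, 1024, GFP_KERNEL, 0, &bio);
 *	if (!err && bio) {
 *		err = submit_bio_wait(bio);	// waits on the whole chain
 *		bio_put(bio);
 *	}
 */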
/**
 * blkdev_issue_discard - queue a discard
 * @bdev: blockdev to issue discard for
 * @sector: start sector
 * @nr_sects: number of sectors to discard
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
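/*
 * Example (hypothetical caller, assuming an already-opened bdev): discard
 * the first 1 MiB of the device, i.e. 2048 512-byte sectors.
 *
 *	int err = blkdev_issue_discard(bdev, 0, 2048, GFP_KERNEL, 0);
 *	if (err)
 *		pr_warn("discard failed: %d\n", err);
 */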
/**
 * __blkdev_issue_write_same - generate number of bios with same page
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data to write
 * @biop: pointer to anchor bio
 *
 * Description:
 *     Generate and issue a number of bios (REQ_OP_WRITE_SAME) carrying the
 *     same page as payload.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = bio_allowed_max_sectors(q);

	while (nr_sects) {
		bio = blk_next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}
/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev: target blockdev
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @page: page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask,
				struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
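/*
 * Sketch with hypothetical names: replicate one logical block of data across
 * a range. "pattern_page" is assumed to hold at least one logical block.
 *
 *	int err = blkdev_issue_write_same(bdev, sector, nr_sects, GFP_KERNEL,
 *					  pattern_page);
 *	if (err == -EOPNOTSUPP)
 *		pr_warn("device does not support WRITE SAME\n");
 */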
static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

	while (nr_sects) {
		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}
/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_PAGES);
}
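/*
 * Worked example: with 4 KiB pages there are PAGE_SIZE / 512 = 8 sectors per
 * page, so nr_sects = 24 maps to DIV_ROUND_UP(24, 8) = 3 pages, and
 * nr_sects = 1 still rounds up to 1 page. The result is then clamped to
 * BIO_MAX_PAGES so the allocation always fits in a single bio.
 */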
static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (!q)
		return -ENXIO;

	if (bdev_read_only(bdev))
		return -EPERM;

	while (nr_sects != 0) {
		bio = blk_next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
				   gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			/* Stop filling once the bio is full. */
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}
/**
 * __blkdev_issue_zeroout - generate number of zero-filled write bios
 * @bdev: blockdev to issue
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @biop: pointer to anchor bio
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	/* Try the zeroing offload first, then fall back to writing zeroes. */
	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
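/*
 * Illustrative sketch (hypothetical caller): zero a range without waiting,
 * chaining onto the caller's own bio stream, and refuse the slow fallback:
 *
 *	struct bio *bio = NULL;
 *	int err = __blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_NOFS,
 *					 &bio, BLKDEV_ZERO_NOFALLBACK);
 *	if (!err && bio) {
 *		err = submit_bio_wait(bio);
 *		bio_put(bio);
 *	}
 */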
/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev: blockdev to write
 * @sector: start sector
 * @nr_sects: number of sectors to write
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @flags: controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
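/*
 * Example (hypothetical caller): zero 1 MiB at the start of the device,
 * keeping the blocks provisioned on thinly provisioned targets:
 *
 *	int err = blkdev_issue_zeroout(bdev, 0, 2048, GFP_KERNEL,
 *				       BLKDEV_ZERO_NOUNMAP);
 */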