// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic block layer helpers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
        struct request_queue *q = bdev_get_queue(bdev);
        struct bio *bio = *biop;
        unsigned int op;
        sector_t bs_mask, part_offset = 0;
        if (!q)
                return -ENXIO;

        if (bdev_read_only(bdev))
                return -EPERM;

        if (flags & BLKDEV_DISCARD_SECURE) {
                if (!blk_queue_secure_erase(q))
                        return -EOPNOTSUPP;
                op = REQ_OP_SECURE_ERASE;
        } else {
                if (!blk_queue_discard(q))
                        return -EOPNOTSUPP;
                op = REQ_OP_DISCARD;
        }
        /* In case the discard granularity isn't set by a buggy device driver */
        if (WARN_ON_ONCE(!q->limits.discard_granularity)) {
                char dev_name[BDEVNAME_SIZE];

                bdevname(bdev, dev_name);
                pr_err_ratelimited("%s: Error: discard_granularity is 0.\n", dev_name);
                return -EOPNOTSUPP;
        }
        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

        if (!nr_sects)
                return -EINVAL;

        /* In case the discard request is in a partition */
        if (bdev_is_partition(bdev))
                part_offset = bdev->bd_start_sect;

        while (nr_sects) {
                sector_t granularity_aligned_lba, req_sects;
                sector_t sector_mapped = sector + part_offset;

                granularity_aligned_lba = round_up(sector_mapped,
                        q->limits.discard_granularity >> SECTOR_SHIFT);
                /*
                 * Check whether the discard bio starts at a discard_granularity
                 * aligned LBA,
                 * - If no: set (granularity_aligned_lba - sector_mapped) to
                 *   bi_size of the first split bio, then the second bio will
                 *   start at a discard_granularity aligned LBA on the device.
                 * - If yes: use bio_aligned_discard_max_sectors() as the max
                 *   possible bi_size of the first split bio. Then when this bio
                 *   is split in the device driver, the split ones are very
                 *   likely to be aligned to the discard_granularity of the
                 *   device's queue.
                 */
                if (granularity_aligned_lba == sector_mapped)
                        req_sects = min_t(sector_t, nr_sects,
                                          bio_aligned_discard_max_sectors(q));
                else
                        req_sects = min_t(sector_t, nr_sects,
                                          granularity_aligned_lba - sector_mapped);

                WARN_ON_ONCE((req_sects << 9) > UINT_MAX);
                bio = blk_next_bio(bio, bdev, 0, op, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio->bi_iter.bi_size = req_sects << 9;
                sector += req_sects;
                nr_sects -= req_sects;
                /*
                 * We can loop for a long time in here, if someone does
                 * full device discards (like mkfs). Be nice and allow
                 * us to schedule out to avoid softlocking if preempt
                 * isn't set up properly.
                 */
                cond_resched();
        }

        *biop = bio;
        return 0;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
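
/*
 * Usage note (illustrative sketch, not part of the original file): callers
 * that want to batch several discard ranges under one plug can chain bios
 * through the @biop anchor and submit once at the end, mirroring what
 * blkdev_issue_discard() below does for a single range. The two ranges used
 * here are hypothetical.
 *
 *	struct bio *bio = NULL;
 *	struct blk_plug plug;
 *	int ret;
 *
 *	blk_start_plug(&plug);
 *	ret = __blkdev_issue_discard(bdev, 0, 2048, GFP_KERNEL, 0, &bio);
 *	if (!ret)
 *		ret = __blkdev_issue_discard(bdev, 4096, 2048, GFP_KERNEL,
 *					     0, &bio);
 *	if (!ret && bio) {
 *		ret = submit_bio_wait(bio);
 *		bio_put(bio);
 *	}
 *	blk_finish_plug(&plug);
 */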

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
        struct bio *bio = NULL;
        struct blk_plug plug;
        int ret;

        blk_start_plug(&plug);
        ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
                        &bio);
        if (!ret && bio) {
                ret = submit_bio_wait(bio);
                if (ret == -EOPNOTSUPP)
                        ret = 0;
                bio_put(bio);
        }
        blk_finish_plug(&plug);

        return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);
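
/*
 * Usage note (illustrative sketch, not part of the original file): a typical
 * synchronous caller converts a byte range to 512B sectors; "start" and
 * "len" are hypothetical byte counts, already aligned to the logical block
 * size as required by the -EINVAL check above.
 *
 *	int err = blkdev_issue_discard(bdev, start >> 9, len >> 9,
 *				       GFP_KERNEL, 0);
 */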

/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 * @biop:	pointer to anchor bio
 *
 * Description:
 *  Generate and issue a number of bios (REQ_OP_WRITE_SAME) with the same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
        struct request_queue *q = bdev_get_queue(bdev);
        unsigned int max_write_same_sectors;
        struct bio *bio = *biop;
        sector_t bs_mask;

        if (!q)
                return -ENXIO;

        if (bdev_read_only(bdev))
                return -EPERM;

        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

        if (!bdev_write_same(bdev))
                return -EOPNOTSUPP;
        /* Ensure that max_write_same_sectors doesn't overflow bi_size */
        max_write_same_sectors = bio_allowed_max_sectors(q);

        while (nr_sects) {
                bio = blk_next_bio(bio, bdev, 1, REQ_OP_WRITE_SAME, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                bio->bi_vcnt = 1;
                bio->bi_io_vec->bv_page = page;
                bio->bi_io_vec->bv_offset = 0;
                bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
                if (nr_sects > max_write_same_sectors) {
                        bio->bi_iter.bi_size = max_write_same_sectors << 9;
                        nr_sects -= max_write_same_sectors;
                        sector += max_write_same_sectors;
                } else {
                        bio->bi_iter.bi_size = nr_sects << 9;
                        nr_sects = 0;
                }
                cond_resched();
        }

        *biop = bio;
        return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask,
				struct page *page)
{
        struct bio *bio = NULL;
        struct blk_plug plug;
        int ret;

        blk_start_plug(&plug);
        ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
                        &bio);
        if (ret == 0 && bio) {
                ret = submit_bio_wait(bio);
                bio_put(bio);
        }
        blk_finish_plug(&plug);
        return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);
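
/*
 * Usage note (illustrative sketch, not part of the original file):
 * replicating one logical block across a range, e.g. a page of zeroes,
 * assuming the device advertises WRITE SAME support. Callers that just want
 * zeroes normally use blkdev_issue_zeroout() below instead.
 *
 *	if (bdev_write_same(bdev))
 *		err = blkdev_issue_write_same(bdev, sector, nr_sects,
 *					      GFP_KERNEL, ZERO_PAGE(0));
 */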

static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
        struct bio *bio = *biop;
        unsigned int max_write_zeroes_sectors;
        struct request_queue *q = bdev_get_queue(bdev);

        if (!q)
                return -ENXIO;

        if (bdev_read_only(bdev))
                return -EPERM;

        /* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
        max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

        if (max_write_zeroes_sectors == 0)
                return -EOPNOTSUPP;

        while (nr_sects) {
                bio = blk_next_bio(bio, bdev, 0, REQ_OP_WRITE_ZEROES, gfp_mask);
                bio->bi_iter.bi_sector = sector;
                if (flags & BLKDEV_ZERO_NOUNMAP)
                        bio->bi_opf |= REQ_NOUNMAP;

                if (nr_sects > max_write_zeroes_sectors) {
                        bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
                        nr_sects -= max_write_zeroes_sectors;
                        sector += max_write_zeroes_sectors;
                } else {
                        bio->bi_iter.bi_size = nr_sects << 9;
                        nr_sects = 0;
                }
                cond_resched();
        }

        *biop = bio;
        return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
        sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

        return min(pages, (sector_t)BIO_MAX_VECS);
}
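
/*
 * Worked example (illustrative): with 4 KiB pages there are
 * PAGE_SIZE / 512 = 8 sectors per page, so nr_sects = 1 yields
 * DIV_ROUND_UP(1, 8) = 1 page, nr_sects = 17 yields DIV_ROUND_UP(17, 8) = 3
 * pages, and anything larger than BIO_MAX_VECS * 8 sectors is clamped to
 * BIO_MAX_VECS pages.
 */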

static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
        struct request_queue *q = bdev_get_queue(bdev);
        struct bio *bio = *biop;
        int bi_size = 0;
        unsigned int sz;

        if (!q)
                return -ENXIO;

        if (bdev_read_only(bdev))
                return -EPERM;
        while (nr_sects != 0) {
                bio = blk_next_bio(bio, bdev, __blkdev_sectors_to_bio_pages(nr_sects),
                                   REQ_OP_WRITE, gfp_mask);
                bio->bi_iter.bi_sector = sector;

                while (nr_sects != 0) {
                        sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
                        bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
                        nr_sects -= bi_size >> 9;
                        sector += bi_size >> 9;
                        if (bi_size < sz)
                                break;
                }
                cond_resched();
        }

        *biop = bio;
        return 0;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
        int ret;
        sector_t bs_mask;

        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

        ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
                        biop, flags);
        if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
                return ret;

        return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
                                         biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);
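
/*
 * Usage note (illustrative sketch, not part of the original file): passing
 * BLKDEV_ZERO_NOFALLBACK turns a missing zeroing offload into -EOPNOTSUPP
 * instead of falling back to writing ZERO_PAGE(0), per the description
 * above.
 *
 *	struct bio *bio = NULL;
 *	int err = __blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
 *					 &bio, BLKDEV_ZERO_NOFALLBACK);
 *	if (!err && bio) {
 *		err = submit_bio_wait(bio);
 *		bio_put(bio);
 *	}
 */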

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device. See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
        int ret = 0;
        sector_t bs_mask;
        struct bio *bio;
        struct blk_plug plug;
        bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

        bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
        if ((sector | nr_sects) & bs_mask)
                return -EINVAL;

retry:
        bio = NULL;
        blk_start_plug(&plug);
        if (try_write_zeroes) {
                ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
                                                  gfp_mask, &bio, flags);
        } else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
                ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
                                                gfp_mask, &bio);
        } else {
                /* No zeroing offload support */
                ret = -EOPNOTSUPP;
        }
        if (ret == 0 && bio) {
                ret = submit_bio_wait(bio);
                bio_put(bio);
        }
        blk_finish_plug(&plug);
        if (ret && try_write_zeroes) {
                if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
                        try_write_zeroes = false;
                        goto retry;
                }
                if (!bdev_write_zeroes_sectors(bdev)) {
                        /*
                         * Zeroing offload support was indicated, but the
                         * device reported ILLEGAL REQUEST (for some devices
                         * there is no non-destructive way to verify whether
                         * WRITE ZEROES is actually supported).
                         */
                        ret = -EOPNOTSUPP;
                }
        }

        return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);
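
/*
 * Usage note (illustrative sketch, not part of the original file): zeroing a
 * whole device while keeping provisioned space allocated, via
 * BLKDEV_ZERO_NOUNMAP as documented in __blkdev_issue_zeroout().
 *
 *	int err = blkdev_issue_zeroout(bdev, 0, bdev_nr_sectors(bdev),
 *				       GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
 */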