]>
Commit | Line | Data |
---|---|---|
8c16567d | 1 | // SPDX-License-Identifier: GPL-2.0 |
1da177e4 | 2 | /* |
0fe23479 | 3 | * Copyright (C) 2001 Jens Axboe <[email protected]> |
1da177e4 LT |
4 | */ |
5 | #include <linux/mm.h> | |
6 | #include <linux/swap.h> | |
7 | #include <linux/bio.h> | |
8 | #include <linux/blkdev.h> | |
a27bb332 | 9 | #include <linux/uio.h> |
852c788f | 10 | #include <linux/iocontext.h> |
1da177e4 LT |
11 | #include <linux/slab.h> |
12 | #include <linux/init.h> | |
13 | #include <linux/kernel.h> | |
630d9c47 | 14 | #include <linux/export.h> |
1da177e4 LT |
15 | #include <linux/mempool.h> |
16 | #include <linux/workqueue.h> | |
852c788f | 17 | #include <linux/cgroup.h> |
b4c5875d | 18 | #include <linux/highmem.h> |
a892c8d5 | 19 | #include <linux/blk-crypto.h> |
49d1ec85 | 20 | #include <linux/xarray.h> |
1da177e4 | 21 | |
55782138 | 22 | #include <trace/events/block.h> |
9e234eea | 23 | #include "blk.h" |
67b42d0b | 24 | #include "blk-rq-qos.h" |
672fdcf0 | 25 | #include "blk-cgroup.h" |
0bfc2455 | 26 | |
b99182c5 | 27 | #define ALLOC_CACHE_THRESHOLD 16 |
42b2b2fb | 28 | #define ALLOC_CACHE_MAX 256 |
b99182c5 | 29 | |
be4d234d | 30 | struct bio_alloc_cache { |
fcade2ce | 31 | struct bio *free_list; |
b99182c5 | 32 | struct bio *free_list_irq; |
be4d234d | 33 | unsigned int nr; |
b99182c5 | 34 | unsigned int nr_irq; |
be4d234d JA |
35 | }; |
36 | ||
de76fd89 | 37 | static struct biovec_slab { |
6ac0b715 CH |
38 | int nr_vecs; |
39 | char *name; | |
40 | struct kmem_cache *slab; | |
de76fd89 CH |
41 | } bvec_slabs[] __read_mostly = { |
42 | { .nr_vecs = 16, .name = "biovec-16" }, | |
43 | { .nr_vecs = 64, .name = "biovec-64" }, | |
44 | { .nr_vecs = 128, .name = "biovec-128" }, | |
a8affc03 | 45 | { .nr_vecs = BIO_MAX_VECS, .name = "biovec-max" }, |
1da177e4 | 46 | }; |
6ac0b715 | 47 | |
7a800a20 CH |
48 | static struct biovec_slab *biovec_slab(unsigned short nr_vecs) |
49 | { | |
50 | switch (nr_vecs) { | |
51 | /* smaller bios use inline vecs */ | |
52 | case 5 ... 16: | |
53 | return &bvec_slabs[0]; | |
54 | case 17 ... 64: | |
55 | return &bvec_slabs[1]; | |
56 | case 65 ... 128: | |
57 | return &bvec_slabs[2]; | |
a8affc03 | 58 | case 129 ... BIO_MAX_VECS: |
7a800a20 CH |
59 | return &bvec_slabs[3]; |
60 | default: | |
61 | BUG(); | |
62 | return NULL; | |
63 | } | |
64 | } | |
1da177e4 | 65 | |
1da177e4 LT |
66 | /* |
67 | * fs_bio_set is the bio_set containing bio and iovec memory pools used by | |
68 | * IO code that does not need private memory pools. | |
69 | */ | |
f4f8154a | 70 | struct bio_set fs_bio_set; |
3f86a82a | 71 | EXPORT_SYMBOL(fs_bio_set); |
1da177e4 | 72 | |
bb799ca0 JA |
73 | /* |
74 | * Our slab pool management | |
75 | */ | |
76 | struct bio_slab { | |
77 | struct kmem_cache *slab; | |
78 | unsigned int slab_ref; | |
79 | unsigned int slab_size; | |
80 | char name[8]; | |
81 | }; | |
82 | static DEFINE_MUTEX(bio_slab_lock); | |
49d1ec85 | 83 | static DEFINE_XARRAY(bio_slabs); |
bb799ca0 | 84 | |
49d1ec85 | 85 | static struct bio_slab *create_bio_slab(unsigned int size) |
bb799ca0 | 86 | { |
49d1ec85 | 87 | struct bio_slab *bslab = kzalloc(sizeof(*bslab), GFP_KERNEL); |
bb799ca0 | 88 | |
49d1ec85 ML |
89 | if (!bslab) |
90 | return NULL; | |
bb799ca0 | 91 | |
49d1ec85 ML |
92 | snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size); |
93 | bslab->slab = kmem_cache_create(bslab->name, size, | |
1a7e76e4 CH |
94 | ARCH_KMALLOC_MINALIGN, |
95 | SLAB_HWCACHE_ALIGN | SLAB_TYPESAFE_BY_RCU, NULL); | |
49d1ec85 ML |
96 | if (!bslab->slab) |
97 | goto fail_alloc_slab; | |
bb799ca0 | 98 | |
49d1ec85 ML |
99 | bslab->slab_ref = 1; |
100 | bslab->slab_size = size; | |
bb799ca0 | 101 | |
49d1ec85 ML |
102 | if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL))) |
103 | return bslab; | |
bb799ca0 | 104 | |
49d1ec85 | 105 | kmem_cache_destroy(bslab->slab); |
bb799ca0 | 106 | |
49d1ec85 ML |
107 | fail_alloc_slab: |
108 | kfree(bslab); | |
109 | return NULL; | |
110 | } | |
bb799ca0 | 111 | |
49d1ec85 ML |
112 | static inline unsigned int bs_bio_slab_size(struct bio_set *bs) |
113 | { | |
9f180e31 | 114 | return bs->front_pad + sizeof(struct bio) + bs->back_pad; |
49d1ec85 | 115 | } |
bb799ca0 | 116 | |
49d1ec85 ML |
117 | static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs) |
118 | { | |
119 | unsigned int size = bs_bio_slab_size(bs); | |
120 | struct bio_slab *bslab; | |
bb799ca0 | 121 | |
49d1ec85 ML |
122 | mutex_lock(&bio_slab_lock); |
123 | bslab = xa_load(&bio_slabs, size); | |
124 | if (bslab) | |
125 | bslab->slab_ref++; | |
126 | else | |
127 | bslab = create_bio_slab(size); | |
bb799ca0 | 128 | mutex_unlock(&bio_slab_lock); |
49d1ec85 ML |
129 | |
130 | if (bslab) | |
131 | return bslab->slab; | |
132 | return NULL; | |
bb799ca0 JA |
133 | } |
134 | ||
135 | static void bio_put_slab(struct bio_set *bs) | |
136 | { | |
137 | struct bio_slab *bslab = NULL; | |
49d1ec85 | 138 | unsigned int slab_size = bs_bio_slab_size(bs); |
bb799ca0 JA |
139 | |
140 | mutex_lock(&bio_slab_lock); | |
141 | ||
49d1ec85 | 142 | bslab = xa_load(&bio_slabs, slab_size); |
bb799ca0 JA |
143 | if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n")) |
144 | goto out; | |
145 | ||
49d1ec85 ML |
146 | WARN_ON_ONCE(bslab->slab != bs->bio_slab); |
147 | ||
bb799ca0 JA |
148 | WARN_ON(!bslab->slab_ref); |
149 | ||
150 | if (--bslab->slab_ref) | |
151 | goto out; | |
152 | ||
49d1ec85 ML |
153 | xa_erase(&bio_slabs, slab_size); |
154 | ||
bb799ca0 | 155 | kmem_cache_destroy(bslab->slab); |
49d1ec85 | 156 | kfree(bslab); |
bb799ca0 JA |
157 | |
158 | out: | |
159 | mutex_unlock(&bio_slab_lock); | |
160 | } | |
161 | ||
7a800a20 | 162 | void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs) |
7ba1ba12 | 163 | { |
9e8c0d0d | 164 | BUG_ON(nr_vecs > BIO_MAX_VECS); |
ed996a52 | 165 | |
a8affc03 | 166 | if (nr_vecs == BIO_MAX_VECS) |
9f060e22 | 167 | mempool_free(bv, pool); |
7a800a20 CH |
168 | else if (nr_vecs > BIO_INLINE_VECS) |
169 | kmem_cache_free(biovec_slab(nr_vecs)->slab, bv); | |
bb799ca0 | 170 | } |
bb799ca0 | 171 | |
f2c3eb9b CH |
172 | /* |
173 | * Make the first allocation restricted and don't dump info on allocation | |
174 | * failures, since we'll fall back to the mempool in case of failure. | |
175 | */ | |
176 | static inline gfp_t bvec_alloc_gfp(gfp_t gfp) | |
177 | { | |
178 | return (gfp & ~(__GFP_DIRECT_RECLAIM | __GFP_IO)) | | |
179 | __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN; | |
bb799ca0 JA |
180 | } |
181 | ||
7a800a20 CH |
182 | struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs, |
183 | gfp_t gfp_mask) | |
1da177e4 | 184 | { |
7a800a20 | 185 | struct biovec_slab *bvs = biovec_slab(*nr_vecs); |
1da177e4 | 186 | |
7a800a20 | 187 | if (WARN_ON_ONCE(!bvs)) |
7ff9345f | 188 | return NULL; |
7ff9345f JA |
189 | |
190 | /* | |
7a800a20 CH |
191 | * Upgrade the nr_vecs request to take full advantage of the allocation. |
192 | * We also rely on this in the bvec_free path. | |
7ff9345f | 193 | */ |
7a800a20 | 194 | *nr_vecs = bvs->nr_vecs; |
7ff9345f | 195 | |
7ff9345f | 196 | /* |
f007a3d6 CH |
197 | * Try a slab allocation first for all smaller allocations. If that |
198 | * fails and __GFP_DIRECT_RECLAIM is set retry with the mempool. | |
a8affc03 | 199 | * The mempool is sized to handle up to BIO_MAX_VECS entries. |
7ff9345f | 200 | */ |
a8affc03 | 201 | if (*nr_vecs < BIO_MAX_VECS) { |
f007a3d6 | 202 | struct bio_vec *bvl; |
1da177e4 | 203 | |
f2c3eb9b | 204 | bvl = kmem_cache_alloc(bvs->slab, bvec_alloc_gfp(gfp_mask)); |
7a800a20 | 205 | if (likely(bvl) || !(gfp_mask & __GFP_DIRECT_RECLAIM)) |
f007a3d6 | 206 | return bvl; |
a8affc03 | 207 | *nr_vecs = BIO_MAX_VECS; |
7ff9345f JA |
208 | } |
209 | ||
f007a3d6 | 210 | return mempool_alloc(pool, gfp_mask); |
1da177e4 LT |
211 | } |
212 | ||
9ae3b3f5 | 213 | void bio_uninit(struct bio *bio) |
1da177e4 | 214 | { |
db9819c7 CH |
215 | #ifdef CONFIG_BLK_CGROUP |
216 | if (bio->bi_blkg) { | |
217 | blkg_put(bio->bi_blkg); | |
218 | bio->bi_blkg = NULL; | |
219 | } | |
220 | #endif | |
ece841ab JT |
221 | if (bio_integrity(bio)) |
222 | bio_integrity_free(bio); | |
a892c8d5 ST |
223 | |
224 | bio_crypt_free_ctx(bio); | |
4254bba1 | 225 | } |
9ae3b3f5 | 226 | EXPORT_SYMBOL(bio_uninit); |
7ba1ba12 | 227 | |
4254bba1 KO |
228 | static void bio_free(struct bio *bio) |
229 | { | |
230 | struct bio_set *bs = bio->bi_pool; | |
066ff571 | 231 | void *p = bio; |
4254bba1 | 232 | |
066ff571 | 233 | WARN_ON_ONCE(!bs); |
4254bba1 | 234 | |
066ff571 CH |
235 | bio_uninit(bio); |
236 | bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs); | |
237 | mempool_free(p - bs->front_pad, &bs->bio_pool); | |
3676347a PO |
238 | } |
239 | ||
9ae3b3f5 JA |
240 | /* |
241 | * Users of this function have their own bio allocation. Subsequently, | |
242 | * they must remember to pair any call to bio_init() with bio_uninit() | |
243 | * when IO has completed, or when the bio is released. | |
244 | */ | |
49add496 | 245 | void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table, |
16458cf3 | 246 | unsigned short max_vecs, blk_opf_t opf) |
1da177e4 | 247 | { |
da521626 | 248 | bio->bi_next = NULL; |
49add496 CH |
249 | bio->bi_bdev = bdev; |
250 | bio->bi_opf = opf; | |
da521626 JA |
251 | bio->bi_flags = 0; |
252 | bio->bi_ioprio = 0; | |
44981351 | 253 | bio->bi_write_hint = 0; |
da521626 JA |
254 | bio->bi_status = 0; |
255 | bio->bi_iter.bi_sector = 0; | |
256 | bio->bi_iter.bi_size = 0; | |
257 | bio->bi_iter.bi_idx = 0; | |
258 | bio->bi_iter.bi_bvec_done = 0; | |
259 | bio->bi_end_io = NULL; | |
260 | bio->bi_private = NULL; | |
261 | #ifdef CONFIG_BLK_CGROUP | |
262 | bio->bi_blkg = NULL; | |
263 | bio->bi_issue.value = 0; | |
49add496 CH |
264 | if (bdev) |
265 | bio_associate_blkg(bio); | |
da521626 JA |
266 | #ifdef CONFIG_BLK_CGROUP_IOCOST |
267 | bio->bi_iocost_cost = 0; | |
268 | #endif | |
269 | #endif | |
270 | #ifdef CONFIG_BLK_INLINE_ENCRYPTION | |
271 | bio->bi_crypt_context = NULL; | |
272 | #endif | |
273 | #ifdef CONFIG_BLK_DEV_INTEGRITY | |
274 | bio->bi_integrity = NULL; | |
275 | #endif | |
276 | bio->bi_vcnt = 0; | |
277 | ||
c4cf5261 | 278 | atomic_set(&bio->__bi_remaining, 1); |
dac56212 | 279 | atomic_set(&bio->__bi_cnt, 1); |
3e08773c | 280 | bio->bi_cookie = BLK_QC_T_NONE; |
3a83f467 | 281 | |
3a83f467 | 282 | bio->bi_max_vecs = max_vecs; |
da521626 JA |
283 | bio->bi_io_vec = table; |
284 | bio->bi_pool = NULL; | |
1da177e4 | 285 | } |
a112a71d | 286 | EXPORT_SYMBOL(bio_init); |
1da177e4 | 287 | |
f44b48c7 KO |
288 | /** |
289 | * bio_reset - reinitialize a bio | |
290 | * @bio: bio to reset | |
a7c50c94 CH |
291 | * @bdev: block device to use the bio for |
292 | * @opf: operation and flags for bio | |
f44b48c7 KO |
293 | * |
294 | * Description: | |
295 | * After calling bio_reset(), @bio will be in the same state as a freshly | |
296 | * allocated bio returned bio bio_alloc_bioset() - the only fields that are | |
297 | * preserved are the ones that are initialized by bio_alloc_bioset(). See | |
298 | * comment in struct bio. | |
299 | */ | |
16458cf3 | 300 | void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf) |
f44b48c7 | 301 | { |
9ae3b3f5 | 302 | bio_uninit(bio); |
f44b48c7 | 303 | memset(bio, 0, BIO_RESET_BYTES); |
c4cf5261 | 304 | atomic_set(&bio->__bi_remaining, 1); |
a7c50c94 | 305 | bio->bi_bdev = bdev; |
78e34374 CH |
306 | if (bio->bi_bdev) |
307 | bio_associate_blkg(bio); | |
a7c50c94 | 308 | bio->bi_opf = opf; |
f44b48c7 KO |
309 | } |
310 | EXPORT_SYMBOL(bio_reset); | |
311 | ||
38f8baae | 312 | static struct bio *__bio_chain_endio(struct bio *bio) |
196d38bc | 313 | { |
4246a0b6 CH |
314 | struct bio *parent = bio->bi_private; |
315 | ||
3edf5346 | 316 | if (bio->bi_status && !parent->bi_status) |
4e4cbee9 | 317 | parent->bi_status = bio->bi_status; |
196d38bc | 318 | bio_put(bio); |
38f8baae CH |
319 | return parent; |
320 | } | |
321 | ||
322 | static void bio_chain_endio(struct bio *bio) | |
323 | { | |
324 | bio_endio(__bio_chain_endio(bio)); | |
196d38bc KO |
325 | } |
326 | ||
327 | /** | |
328 | * bio_chain - chain bio completions | |
1051a902 | 329 | * @bio: the target bio |
5b874af6 | 330 | * @parent: the parent bio of @bio |
196d38bc KO |
331 | * |
332 | * The caller won't have a bi_end_io called when @bio completes - instead, | |
333 | * @parent's bi_end_io won't be called until both @parent and @bio have | |
334 | * completed; the chained bio will also be freed when it completes. | |
335 | * | |
336 | * The caller must not set bi_private or bi_end_io in @bio. | |
337 | */ | |
338 | void bio_chain(struct bio *bio, struct bio *parent) | |
339 | { | |
340 | BUG_ON(bio->bi_private || bio->bi_end_io); | |
341 | ||
342 | bio->bi_private = parent; | |
343 | bio->bi_end_io = bio_chain_endio; | |
c4cf5261 | 344 | bio_inc_remaining(parent); |
196d38bc KO |
345 | } |
346 | EXPORT_SYMBOL(bio_chain); | |
347 | ||
0a3140ea | 348 | struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev, |
16458cf3 | 349 | unsigned int nr_pages, blk_opf_t opf, gfp_t gfp) |
3b005bf6 | 350 | { |
07888c66 | 351 | struct bio *new = bio_alloc(bdev, nr_pages, opf, gfp); |
0a3140ea | 352 | |
3b005bf6 CH |
353 | if (bio) { |
354 | bio_chain(bio, new); | |
355 | submit_bio(bio); | |
356 | } | |
357 | ||
358 | return new; | |
359 | } | |
360 | EXPORT_SYMBOL_GPL(blk_next_bio); | |
361 | ||
df2cb6da KO |
362 | static void bio_alloc_rescue(struct work_struct *work) |
363 | { | |
364 | struct bio_set *bs = container_of(work, struct bio_set, rescue_work); | |
365 | struct bio *bio; | |
366 | ||
367 | while (1) { | |
368 | spin_lock(&bs->rescue_lock); | |
369 | bio = bio_list_pop(&bs->rescue_list); | |
370 | spin_unlock(&bs->rescue_lock); | |
371 | ||
372 | if (!bio) | |
373 | break; | |
374 | ||
ed00aabd | 375 | submit_bio_noacct(bio); |
df2cb6da KO |
376 | } |
377 | } | |
378 | ||
379 | static void punt_bios_to_rescuer(struct bio_set *bs) | |
380 | { | |
381 | struct bio_list punt, nopunt; | |
382 | struct bio *bio; | |
383 | ||
47e0fb46 N |
384 | if (WARN_ON_ONCE(!bs->rescue_workqueue)) |
385 | return; | |
df2cb6da KO |
386 | /* |
387 | * In order to guarantee forward progress we must punt only bios that | |
388 | * were allocated from this bio_set; otherwise, if there was a bio on | |
389 | * there for a stacking driver higher up in the stack, processing it | |
390 | * could require allocating bios from this bio_set, and doing that from | |
391 | * our own rescuer would be bad. | |
392 | * | |
393 | * Since bio lists are singly linked, pop them all instead of trying to | |
394 | * remove from the middle of the list: | |
395 | */ | |
396 | ||
397 | bio_list_init(&punt); | |
398 | bio_list_init(&nopunt); | |
399 | ||
f5fe1b51 | 400 | while ((bio = bio_list_pop(¤t->bio_list[0]))) |
df2cb6da | 401 | bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); |
f5fe1b51 | 402 | current->bio_list[0] = nopunt; |
df2cb6da | 403 | |
f5fe1b51 N |
404 | bio_list_init(&nopunt); |
405 | while ((bio = bio_list_pop(¤t->bio_list[1]))) | |
406 | bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); | |
407 | current->bio_list[1] = nopunt; | |
df2cb6da KO |
408 | |
409 | spin_lock(&bs->rescue_lock); | |
410 | bio_list_merge(&bs->rescue_list, &punt); | |
411 | spin_unlock(&bs->rescue_lock); | |
412 | ||
413 | queue_work(bs->rescue_workqueue, &bs->rescue_work); | |
414 | } | |
415 | ||
b99182c5 PB |
416 | static void bio_alloc_irq_cache_splice(struct bio_alloc_cache *cache) |
417 | { | |
418 | unsigned long flags; | |
419 | ||
420 | /* cache->free_list must be empty */ | |
421 | if (WARN_ON_ONCE(cache->free_list)) | |
422 | return; | |
423 | ||
424 | local_irq_save(flags); | |
425 | cache->free_list = cache->free_list_irq; | |
426 | cache->free_list_irq = NULL; | |
427 | cache->nr += cache->nr_irq; | |
428 | cache->nr_irq = 0; | |
429 | local_irq_restore(flags); | |
430 | } | |
431 | ||
0df71650 | 432 | static struct bio *bio_alloc_percpu_cache(struct block_device *bdev, |
16458cf3 | 433 | unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp, |
0df71650 MS |
434 | struct bio_set *bs) |
435 | { | |
436 | struct bio_alloc_cache *cache; | |
437 | struct bio *bio; | |
438 | ||
439 | cache = per_cpu_ptr(bs->cache, get_cpu()); | |
440 | if (!cache->free_list) { | |
b99182c5 PB |
441 | if (READ_ONCE(cache->nr_irq) >= ALLOC_CACHE_THRESHOLD) |
442 | bio_alloc_irq_cache_splice(cache); | |
443 | if (!cache->free_list) { | |
444 | put_cpu(); | |
445 | return NULL; | |
446 | } | |
0df71650 MS |
447 | } |
448 | bio = cache->free_list; | |
449 | cache->free_list = bio->bi_next; | |
450 | cache->nr--; | |
451 | put_cpu(); | |
452 | ||
453 | bio_init(bio, bdev, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs, opf); | |
454 | bio->bi_pool = bs; | |
455 | return bio; | |
456 | } | |
457 | ||
1da177e4 LT |
458 | /** |
459 | * bio_alloc_bioset - allocate a bio for I/O | |
609be106 CH |
460 | * @bdev: block device to allocate the bio for (can be %NULL) |
461 | * @nr_vecs: number of bvecs to pre-allocate | |
462 | * @opf: operation and flags for bio | |
519c8e9f | 463 | * @gfp_mask: the GFP_* mask given to the slab allocator |
db18efac | 464 | * @bs: the bio_set to allocate from. |
1da177e4 | 465 | * |
3175199a | 466 | * Allocate a bio from the mempools in @bs. |
3f86a82a | 467 | * |
3175199a CH |
468 | * If %__GFP_DIRECT_RECLAIM is set then bio_alloc will always be able to |
469 | * allocate a bio. This is due to the mempool guarantees. To make this work, | |
470 | * callers must never allocate more than 1 bio at a time from the general pool. | |
471 | * Callers that need to allocate more than 1 bio must always submit the | |
472 | * previously allocated bio for IO before attempting to allocate a new one. | |
473 | * Failure to do so can cause deadlocks under memory pressure. | |
3f86a82a | 474 | * |
3175199a CH |
475 | * Note that when running under submit_bio_noacct() (i.e. any block driver), |
476 | * bios are not submitted until after you return - see the code in | |
477 | * submit_bio_noacct() that converts recursion into iteration, to prevent | |
478 | * stack overflows. | |
df2cb6da | 479 | * |
3175199a CH |
480 | * This would normally mean allocating multiple bios under submit_bio_noacct() |
481 | * would be susceptible to deadlocks, but we have | |
482 | * deadlock avoidance code that resubmits any blocked bios from a rescuer | |
483 | * thread. | |
df2cb6da | 484 | * |
3175199a CH |
485 | * However, we do not guarantee forward progress for allocations from other |
486 | * mempools. Doing multiple allocations from the same mempool under | |
487 | * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad | |
488 | * for per bio allocations. | |
df2cb6da | 489 | * |
3175199a | 490 | * Returns: Pointer to new bio on success, NULL on failure. |
3f86a82a | 491 | */ |
609be106 | 492 | struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs, |
16458cf3 | 493 | blk_opf_t opf, gfp_t gfp_mask, |
7a88fa19 | 494 | struct bio_set *bs) |
1da177e4 | 495 | { |
df2cb6da | 496 | gfp_t saved_gfp = gfp_mask; |
451a9ebf TH |
497 | struct bio *bio; |
498 | void *p; | |
499 | ||
609be106 CH |
500 | /* should not use nobvec bioset for nr_vecs > 0 */ |
501 | if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_vecs > 0)) | |
3175199a | 502 | return NULL; |
df2cb6da | 503 | |
0df71650 MS |
504 | if (opf & REQ_ALLOC_CACHE) { |
505 | if (bs->cache && nr_vecs <= BIO_INLINE_VECS) { | |
506 | bio = bio_alloc_percpu_cache(bdev, nr_vecs, opf, | |
507 | gfp_mask, bs); | |
508 | if (bio) | |
509 | return bio; | |
510 | /* | |
511 | * No cached bio available, bio returned below marked with | |
512 | * REQ_ALLOC_CACHE to particpate in per-cpu alloc cache. | |
513 | */ | |
514 | } else { | |
515 | opf &= ~REQ_ALLOC_CACHE; | |
516 | } | |
517 | } | |
518 | ||
3175199a CH |
519 | /* |
520 | * submit_bio_noacct() converts recursion to iteration; this means if | |
521 | * we're running beneath it, any bios we allocate and submit will not be | |
522 | * submitted (and thus freed) until after we return. | |
523 | * | |
524 | * This exposes us to a potential deadlock if we allocate multiple bios | |
525 | * from the same bio_set() while running underneath submit_bio_noacct(). | |
526 | * If we were to allocate multiple bios (say a stacking block driver | |
527 | * that was splitting bios), we would deadlock if we exhausted the | |
528 | * mempool's reserve. | |
529 | * | |
530 | * We solve this, and guarantee forward progress, with a rescuer | |
531 | * workqueue per bio_set. If we go to allocate and there are bios on | |
532 | * current->bio_list, we first try the allocation without | |
533 | * __GFP_DIRECT_RECLAIM; if that fails, we punt those bios we would be | |
534 | * blocking to the rescuer workqueue before we retry with the original | |
535 | * gfp_flags. | |
536 | */ | |
537 | if (current->bio_list && | |
538 | (!bio_list_empty(¤t->bio_list[0]) || | |
539 | !bio_list_empty(¤t->bio_list[1])) && | |
540 | bs->rescue_workqueue) | |
541 | gfp_mask &= ~__GFP_DIRECT_RECLAIM; | |
542 | ||
543 | p = mempool_alloc(&bs->bio_pool, gfp_mask); | |
544 | if (!p && gfp_mask != saved_gfp) { | |
545 | punt_bios_to_rescuer(bs); | |
546 | gfp_mask = saved_gfp; | |
8aa6ba2f | 547 | p = mempool_alloc(&bs->bio_pool, gfp_mask); |
3f86a82a | 548 | } |
451a9ebf TH |
549 | if (unlikely(!p)) |
550 | return NULL; | |
759aa12f PB |
551 | if (!mempool_is_saturated(&bs->bio_pool)) |
552 | opf &= ~REQ_ALLOC_CACHE; | |
1da177e4 | 553 | |
3175199a | 554 | bio = p + bs->front_pad; |
609be106 | 555 | if (nr_vecs > BIO_INLINE_VECS) { |
3175199a | 556 | struct bio_vec *bvl = NULL; |
34053979 | 557 | |
609be106 | 558 | bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask); |
df2cb6da KO |
559 | if (!bvl && gfp_mask != saved_gfp) { |
560 | punt_bios_to_rescuer(bs); | |
561 | gfp_mask = saved_gfp; | |
609be106 | 562 | bvl = bvec_alloc(&bs->bvec_pool, &nr_vecs, gfp_mask); |
df2cb6da | 563 | } |
34053979 IM |
564 | if (unlikely(!bvl)) |
565 | goto err_free; | |
a38352e0 | 566 | |
49add496 | 567 | bio_init(bio, bdev, bvl, nr_vecs, opf); |
609be106 | 568 | } else if (nr_vecs) { |
49add496 | 569 | bio_init(bio, bdev, bio->bi_inline_vecs, BIO_INLINE_VECS, opf); |
3175199a | 570 | } else { |
49add496 | 571 | bio_init(bio, bdev, NULL, 0, opf); |
1da177e4 | 572 | } |
3f86a82a KO |
573 | |
574 | bio->bi_pool = bs; | |
1da177e4 | 575 | return bio; |
34053979 IM |
576 | |
577 | err_free: | |
8aa6ba2f | 578 | mempool_free(p, &bs->bio_pool); |
34053979 | 579 | return NULL; |
1da177e4 | 580 | } |
a112a71d | 581 | EXPORT_SYMBOL(bio_alloc_bioset); |
1da177e4 | 582 | |
3175199a | 583 | /** |
066ff571 CH |
584 | * bio_kmalloc - kmalloc a bio |
585 | * @nr_vecs: number of bio_vecs to allocate | |
3175199a | 586 | * @gfp_mask: the GFP_* mask given to the slab allocator |
3175199a | 587 | * |
066ff571 CH |
588 | * Use kmalloc to allocate a bio (including bvecs). The bio must be initialized |
589 | * using bio_init() before use. To free a bio returned from this function use | |
590 | * kfree() after calling bio_uninit(). A bio returned from this function can | |
591 | * be reused by calling bio_uninit() before calling bio_init() again. | |
592 | * | |
593 | * Note that unlike bio_alloc() or bio_alloc_bioset() allocations from this | |
340e1347 | 594 | * function are not backed by a mempool can fail. Do not use this function |
066ff571 | 595 | * for allocations in the file system I/O path. |
3175199a CH |
596 | * |
597 | * Returns: Pointer to new bio on success, NULL on failure. | |
598 | */ | |
066ff571 | 599 | struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask) |
3175199a CH |
600 | { |
601 | struct bio *bio; | |
602 | ||
066ff571 | 603 | if (nr_vecs > UIO_MAXIOV) |
3175199a | 604 | return NULL; |
066ff571 | 605 | return kmalloc(struct_size(bio, bi_inline_vecs, nr_vecs), gfp_mask); |
3175199a CH |
606 | } |
607 | EXPORT_SYMBOL(bio_kmalloc); | |
608 | ||
649f070e | 609 | void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start) |
1da177e4 | 610 | { |
7988613b KO |
611 | struct bio_vec bv; |
612 | struct bvec_iter iter; | |
1da177e4 | 613 | |
649f070e | 614 | __bio_for_each_segment(bv, bio, iter, start) |
ab6c340e | 615 | memzero_bvec(&bv); |
1da177e4 | 616 | } |
649f070e | 617 | EXPORT_SYMBOL(zero_fill_bio_iter); |
1da177e4 | 618 | |
83c9c547 ML |
619 | /** |
620 | * bio_truncate - truncate the bio to small size of @new_size | |
621 | * @bio: the bio to be truncated | |
622 | * @new_size: new size for truncating the bio | |
623 | * | |
624 | * Description: | |
625 | * Truncate the bio to new size of @new_size. If bio_op(bio) is | |
626 | * REQ_OP_READ, zero the truncated part. This function should only | |
627 | * be used for handling corner cases, such as bio eod. | |
628 | */ | |
4f7ab09a | 629 | static void bio_truncate(struct bio *bio, unsigned new_size) |
85a8ce62 ML |
630 | { |
631 | struct bio_vec bv; | |
632 | struct bvec_iter iter; | |
633 | unsigned int done = 0; | |
634 | bool truncated = false; | |
635 | ||
636 | if (new_size >= bio->bi_iter.bi_size) | |
637 | return; | |
638 | ||
83c9c547 | 639 | if (bio_op(bio) != REQ_OP_READ) |
85a8ce62 ML |
640 | goto exit; |
641 | ||
642 | bio_for_each_segment(bv, bio, iter) { | |
643 | if (done + bv.bv_len > new_size) { | |
644 | unsigned offset; | |
645 | ||
646 | if (!truncated) | |
647 | offset = new_size - done; | |
648 | else | |
649 | offset = 0; | |
3ee859e3 OH |
650 | zero_user(bv.bv_page, bv.bv_offset + offset, |
651 | bv.bv_len - offset); | |
85a8ce62 ML |
652 | truncated = true; |
653 | } | |
654 | done += bv.bv_len; | |
655 | } | |
656 | ||
657 | exit: | |
658 | /* | |
659 | * Don't touch bvec table here and make it really immutable, since | |
660 | * fs bio user has to retrieve all pages via bio_for_each_segment_all | |
661 | * in its .end_bio() callback. | |
662 | * | |
663 | * It is enough to truncate bio by updating .bi_size since we can make | |
664 | * correct bvec with the updated .bi_size for drivers. | |
665 | */ | |
666 | bio->bi_iter.bi_size = new_size; | |
667 | } | |
668 | ||
29125ed6 CH |
669 | /** |
670 | * guard_bio_eod - truncate a BIO to fit the block device | |
671 | * @bio: bio to truncate | |
672 | * | |
673 | * This allows us to do IO even on the odd last sectors of a device, even if the | |
674 | * block size is some multiple of the physical sector size. | |
675 | * | |
676 | * We'll just truncate the bio to the size of the device, and clear the end of | |
677 | * the buffer head manually. Truly out-of-range accesses will turn into actual | |
678 | * I/O errors, this only handles the "we need to be able to do I/O at the final | |
679 | * sector" case. | |
680 | */ | |
681 | void guard_bio_eod(struct bio *bio) | |
682 | { | |
309dca30 | 683 | sector_t maxsector = bdev_nr_sectors(bio->bi_bdev); |
29125ed6 CH |
684 | |
685 | if (!maxsector) | |
686 | return; | |
687 | ||
688 | /* | |
689 | * If the *whole* IO is past the end of the device, | |
690 | * let it through, and the IO layer will turn it into | |
691 | * an EIO. | |
692 | */ | |
693 | if (unlikely(bio->bi_iter.bi_sector >= maxsector)) | |
694 | return; | |
695 | ||
696 | maxsector -= bio->bi_iter.bi_sector; | |
697 | if (likely((bio->bi_iter.bi_size >> 9) <= maxsector)) | |
698 | return; | |
699 | ||
700 | bio_truncate(bio, maxsector << 9); | |
701 | } | |
702 | ||
b99182c5 PB |
703 | static int __bio_alloc_cache_prune(struct bio_alloc_cache *cache, |
704 | unsigned int nr) | |
be4d234d JA |
705 | { |
706 | unsigned int i = 0; | |
707 | struct bio *bio; | |
708 | ||
fcade2ce JA |
709 | while ((bio = cache->free_list) != NULL) { |
710 | cache->free_list = bio->bi_next; | |
be4d234d JA |
711 | cache->nr--; |
712 | bio_free(bio); | |
713 | if (++i == nr) | |
714 | break; | |
715 | } | |
b99182c5 PB |
716 | return i; |
717 | } | |
718 | ||
719 | static void bio_alloc_cache_prune(struct bio_alloc_cache *cache, | |
720 | unsigned int nr) | |
721 | { | |
722 | nr -= __bio_alloc_cache_prune(cache, nr); | |
723 | if (!READ_ONCE(cache->free_list)) { | |
724 | bio_alloc_irq_cache_splice(cache); | |
725 | __bio_alloc_cache_prune(cache, nr); | |
726 | } | |
be4d234d JA |
727 | } |
728 | ||
729 | static int bio_cpu_dead(unsigned int cpu, struct hlist_node *node) | |
730 | { | |
731 | struct bio_set *bs; | |
732 | ||
733 | bs = hlist_entry_safe(node, struct bio_set, cpuhp_dead); | |
734 | if (bs->cache) { | |
735 | struct bio_alloc_cache *cache = per_cpu_ptr(bs->cache, cpu); | |
736 | ||
737 | bio_alloc_cache_prune(cache, -1U); | |
738 | } | |
739 | return 0; | |
740 | } | |
741 | ||
742 | static void bio_alloc_cache_destroy(struct bio_set *bs) | |
743 | { | |
744 | int cpu; | |
745 | ||
746 | if (!bs->cache) | |
747 | return; | |
748 | ||
749 | cpuhp_state_remove_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead); | |
750 | for_each_possible_cpu(cpu) { | |
751 | struct bio_alloc_cache *cache; | |
752 | ||
753 | cache = per_cpu_ptr(bs->cache, cpu); | |
754 | bio_alloc_cache_prune(cache, -1U); | |
755 | } | |
756 | free_percpu(bs->cache); | |
605f7415 | 757 | bs->cache = NULL; |
be4d234d JA |
758 | } |
759 | ||
f25cf75a PB |
760 | static inline void bio_put_percpu_cache(struct bio *bio) |
761 | { | |
762 | struct bio_alloc_cache *cache; | |
763 | ||
764 | cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu()); | |
e516c3fc PB |
765 | if (READ_ONCE(cache->nr_irq) + cache->nr > ALLOC_CACHE_MAX) |
766 | goto out_free; | |
b99182c5 | 767 | |
c9f5f3aa | 768 | if (in_task()) { |
e516c3fc | 769 | bio_uninit(bio); |
f25cf75a | 770 | bio->bi_next = cache->free_list; |
c9f5f3aa | 771 | /* Not necessary but helps not to iopoll already freed bios */ |
11eb695f | 772 | bio->bi_bdev = NULL; |
f25cf75a PB |
773 | cache->free_list = bio; |
774 | cache->nr++; | |
e516c3fc PB |
775 | } else if (in_hardirq()) { |
776 | lockdep_assert_irqs_disabled(); | |
f25cf75a | 777 | |
e516c3fc | 778 | bio_uninit(bio); |
b99182c5 PB |
779 | bio->bi_next = cache->free_list_irq; |
780 | cache->free_list_irq = bio; | |
781 | cache->nr_irq++; | |
e516c3fc PB |
782 | } else { |
783 | goto out_free; | |
b99182c5 | 784 | } |
f25cf75a | 785 | put_cpu(); |
e516c3fc PB |
786 | return; |
787 | out_free: | |
788 | put_cpu(); | |
789 | bio_free(bio); | |
f25cf75a PB |
790 | } |
791 | ||
1da177e4 LT |
792 | /** |
793 | * bio_put - release a reference to a bio | |
794 | * @bio: bio to release reference to | |
795 | * | |
796 | * Description: | |
797 | * Put a reference to a &struct bio, either one you have gotten with | |
9b10f6a9 | 798 | * bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it. |
1da177e4 LT |
799 | **/ |
800 | void bio_put(struct bio *bio) | |
801 | { | |
be4d234d | 802 | if (unlikely(bio_flagged(bio, BIO_REFFED))) { |
9e8c0d0d | 803 | BUG_ON(!atomic_read(&bio->__bi_cnt)); |
be4d234d JA |
804 | if (!atomic_dec_and_test(&bio->__bi_cnt)) |
805 | return; | |
806 | } | |
f25cf75a PB |
807 | if (bio->bi_opf & REQ_ALLOC_CACHE) |
808 | bio_put_percpu_cache(bio); | |
809 | else | |
be4d234d | 810 | bio_free(bio); |
1da177e4 | 811 | } |
a112a71d | 812 | EXPORT_SYMBOL(bio_put); |
1da177e4 | 813 | |
a0e8de79 | 814 | static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp) |
59d276fe | 815 | { |
b7c44ed9 | 816 | bio_set_flag(bio, BIO_CLONED); |
ca474b73 | 817 | bio->bi_ioprio = bio_src->bi_ioprio; |
44981351 | 818 | bio->bi_write_hint = bio_src->bi_write_hint; |
59d276fe | 819 | bio->bi_iter = bio_src->bi_iter; |
20bd723e | 820 | |
7ecc56c6 CH |
821 | if (bio->bi_bdev) { |
822 | if (bio->bi_bdev == bio_src->bi_bdev && | |
823 | bio_flagged(bio_src, BIO_REMAPPED)) | |
824 | bio_set_flag(bio, BIO_REMAPPED); | |
825 | bio_clone_blkg_association(bio, bio_src); | |
826 | } | |
56b4b5ab CH |
827 | |
828 | if (bio_crypt_clone(bio, bio_src, gfp) < 0) | |
829 | return -ENOMEM; | |
830 | if (bio_integrity(bio_src) && | |
831 | bio_integrity_clone(bio, bio_src, gfp) < 0) | |
832 | return -ENOMEM; | |
833 | return 0; | |
59d276fe | 834 | } |
59d276fe KO |
835 | |
836 | /** | |
abfc426d CH |
837 | * bio_alloc_clone - clone a bio that shares the original bio's biovec |
838 | * @bdev: block_device to clone onto | |
a0e8de79 CH |
839 | * @bio_src: bio to clone from |
840 | * @gfp: allocation priority | |
841 | * @bs: bio_set to allocate from | |
59d276fe | 842 | * |
a0e8de79 CH |
843 | * Allocate a new bio that is a clone of @bio_src. The caller owns the returned |
844 | * bio, but not the actual data it points to. | |
845 | * | |
846 | * The caller must ensure that the return bio is not freed before @bio_src. | |
59d276fe | 847 | */ |
abfc426d CH |
848 | struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src, |
849 | gfp_t gfp, struct bio_set *bs) | |
59d276fe | 850 | { |
a0e8de79 | 851 | struct bio *bio; |
59d276fe | 852 | |
abfc426d | 853 | bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs); |
a0e8de79 | 854 | if (!bio) |
59d276fe KO |
855 | return NULL; |
856 | ||
a0e8de79 CH |
857 | if (__bio_clone(bio, bio_src, gfp) < 0) { |
858 | bio_put(bio); | |
56b4b5ab CH |
859 | return NULL; |
860 | } | |
a0e8de79 | 861 | bio->bi_io_vec = bio_src->bi_io_vec; |
59d276fe | 862 | |
a0e8de79 | 863 | return bio; |
59d276fe | 864 | } |
abfc426d | 865 | EXPORT_SYMBOL(bio_alloc_clone); |
59d276fe | 866 | |
a0e8de79 | 867 | /** |
abfc426d CH |
868 | * bio_init_clone - clone a bio that shares the original bio's biovec |
869 | * @bdev: block_device to clone onto | |
a0e8de79 CH |
870 | * @bio: bio to clone into |
871 | * @bio_src: bio to clone from | |
872 | * @gfp: allocation priority | |
873 | * | |
874 | * Initialize a new bio in caller provided memory that is a clone of @bio_src. | |
875 | * The caller owns the returned bio, but not the actual data it points to. | |
876 | * | |
877 | * The caller must ensure that @bio_src is not freed before @bio. | |
878 | */ | |
abfc426d CH |
879 | int bio_init_clone(struct block_device *bdev, struct bio *bio, |
880 | struct bio *bio_src, gfp_t gfp) | |
a0e8de79 CH |
881 | { |
882 | int ret; | |
883 | ||
abfc426d | 884 | bio_init(bio, bdev, bio_src->bi_io_vec, 0, bio_src->bi_opf); |
a0e8de79 CH |
885 | ret = __bio_clone(bio, bio_src, gfp); |
886 | if (ret) | |
887 | bio_uninit(bio); | |
888 | return ret; | |
889 | } | |
abfc426d | 890 | EXPORT_SYMBOL(bio_init_clone); |
a0e8de79 | 891 | |
9a6083be CH |
892 | /** |
893 | * bio_full - check if the bio is full | |
894 | * @bio: bio to check | |
895 | * @len: length of one segment to be added | |
896 | * | |
897 | * Return true if @bio is full and one segment with @len bytes can't be | |
898 | * added to the bio, otherwise return false | |
899 | */ | |
900 | static inline bool bio_full(struct bio *bio, unsigned len) | |
901 | { | |
902 | if (bio->bi_vcnt >= bio->bi_max_vecs) | |
903 | return true; | |
904 | if (bio->bi_iter.bi_size > UINT_MAX - len) | |
905 | return true; | |
906 | return false; | |
907 | } | |
908 | ||
858c708d CH |
909 | static bool bvec_try_merge_page(struct bio_vec *bv, struct page *page, |
910 | unsigned int len, unsigned int off, bool *same_page) | |
5919482e | 911 | { |
d8166519 MWO |
912 | size_t bv_end = bv->bv_offset + bv->bv_len; |
913 | phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1; | |
5919482e ML |
914 | phys_addr_t page_addr = page_to_phys(page); |
915 | ||
916 | if (vec_end_addr + 1 != page_addr + off) | |
917 | return false; | |
918 | if (xen_domain() && !xen_biovec_phys_mergeable(bv, page)) | |
919 | return false; | |
49580e69 LG |
920 | if (!zone_device_pages_have_same_pgmap(bv->bv_page, page)) |
921 | return false; | |
52d52d1c | 922 | |
ff896738 | 923 | *same_page = ((vec_end_addr & PAGE_MASK) == page_addr); |
858c708d CH |
924 | if (!*same_page) { |
925 | if (IS_ENABLED(CONFIG_KMSAN)) | |
926 | return false; | |
927 | if (bv->bv_page + bv_end / PAGE_SIZE != page + off / PAGE_SIZE) | |
928 | return false; | |
929 | } | |
0eca8b6f | 930 | |
0eca8b6f | 931 | bv->bv_len += len; |
0eca8b6f | 932 | return true; |
9774b391 CH |
933 | } |
934 | ||
e4581105 CH |
935 | /* |
936 | * Try to merge a page into a segment, while obeying the hardware segment | |
937 | * size limit. This is not for normal read/write bios, but for passthrough | |
938 | * or Zone Append operations that we can't split. | |
939 | */ | |
7c8998f7 | 940 | bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv, |
ae42f0b3 CH |
941 | struct page *page, unsigned len, unsigned offset, |
942 | bool *same_page) | |
489fbbcb ML |
943 | { |
944 | unsigned long mask = queue_segment_boundary(q); | |
945 | phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset; | |
946 | phys_addr_t addr2 = page_to_phys(page) + offset + len - 1; | |
947 | ||
948 | if ((addr1 | mask) != (addr2 | mask)) | |
949 | return false; | |
3f034c37 | 950 | if (len > queue_max_segment_size(q) - bv->bv_len) |
489fbbcb | 951 | return false; |
858c708d | 952 | return bvec_try_merge_page(bv, page, len, offset, same_page); |
489fbbcb ML |
953 | } |
954 | ||
1da177e4 | 955 | /** |
e4581105 CH |
956 | * bio_add_hw_page - attempt to add a page to a bio with hw constraints |
957 | * @q: the target queue | |
958 | * @bio: destination bio | |
959 | * @page: page to add | |
960 | * @len: vec entry length | |
961 | * @offset: vec entry offset | |
962 | * @max_sectors: maximum number of sectors that can be added | |
963 | * @same_page: return if the segment has been merged inside the same page | |
c66a14d0 | 964 | * |
e4581105 CH |
965 | * Add a page to a bio while respecting the hardware max_sectors, max_segment |
966 | * and gap limitations. | |
1da177e4 | 967 | */ |
e4581105 | 968 | int bio_add_hw_page(struct request_queue *q, struct bio *bio, |
19047087 | 969 | struct page *page, unsigned int len, unsigned int offset, |
e4581105 | 970 | unsigned int max_sectors, bool *same_page) |
1da177e4 | 971 | { |
6ef02df1 CH |
972 | unsigned int max_size = max_sectors << SECTOR_SHIFT; |
973 | ||
e4581105 | 974 | if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) |
1da177e4 LT |
975 | return 0; |
976 | ||
6ef02df1 CH |
977 | len = min3(len, max_size, queue_max_segment_size(q)); |
978 | if (len > max_size - bio->bi_iter.bi_size) | |
1da177e4 LT |
979 | return 0; |
980 | ||
80cfd548 | 981 | if (bio->bi_vcnt > 0) { |
ae42f0b3 CH |
982 | struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1]; |
983 | ||
984 | if (bvec_try_merge_hw_page(q, bv, page, len, offset, | |
858c708d CH |
985 | same_page)) { |
986 | bio->bi_iter.bi_size += len; | |
384209cd | 987 | return len; |
858c708d | 988 | } |
320ea869 | 989 | |
cd1d83e2 CH |
990 | if (bio->bi_vcnt >= |
991 | min(bio->bi_max_vecs, queue_max_segments(q))) | |
992 | return 0; | |
993 | ||
320ea869 CH |
994 | /* |
995 | * If the queue doesn't support SG gaps and adding this segment | |
996 | * would create a gap, disallow it. | |
997 | */ | |
ae42f0b3 | 998 | if (bvec_gap_to_prev(&q->limits, bv, offset)) |
320ea869 | 999 | return 0; |
80cfd548 JA |
1000 | } |
1001 | ||
d58cdfae | 1002 | bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, offset); |
fcbf6a08 | 1003 | bio->bi_vcnt++; |
dcdca753 | 1004 | bio->bi_iter.bi_size += len; |
1da177e4 LT |
1005 | return len; |
1006 | } | |
19047087 | 1007 | |
e4581105 CH |
1008 | /** |
1009 | * bio_add_pc_page - attempt to add page to passthrough bio | |
1010 | * @q: the target queue | |
1011 | * @bio: destination bio | |
1012 | * @page: page to add | |
1013 | * @len: vec entry length | |
1014 | * @offset: vec entry offset | |
1015 | * | |
1016 | * Attempt to add a page to the bio_vec maplist. This can fail for a | |
1017 | * number of reasons, such as the bio being full or target block device | |
1018 | * limitations. The target block device must allow bio's up to PAGE_SIZE, | |
1019 | * so it is always possible to add a single page to an empty bio. | |
1020 | * | |
1021 | * This should only be used by passthrough bios. | |
1022 | */ | |
19047087 ML |
1023 | int bio_add_pc_page(struct request_queue *q, struct bio *bio, |
1024 | struct page *page, unsigned int len, unsigned int offset) | |
1025 | { | |
d1916c86 | 1026 | bool same_page = false; |
e4581105 CH |
1027 | return bio_add_hw_page(q, bio, page, len, offset, |
1028 | queue_max_hw_sectors(q), &same_page); | |
19047087 | 1029 | } |
a112a71d | 1030 | EXPORT_SYMBOL(bio_add_pc_page); |
6e68af66 | 1031 | |
ae29333f JT |
1032 | /** |
1033 | * bio_add_zone_append_page - attempt to add page to zone-append bio | |
1034 | * @bio: destination bio | |
1035 | * @page: page to add | |
1036 | * @len: vec entry length | |
1037 | * @offset: vec entry offset | |
1038 | * | |
1039 | * Attempt to add a page to the bio_vec maplist of a bio that will be submitted | |
1040 | * for a zone-append request. This can fail for a number of reasons, such as the | |
1041 | * bio being full or the target block device is not a zoned block device or | |
1042 | * other limitations of the target block device. The target block device must | |
1043 | * allow bio's up to PAGE_SIZE, so it is always possible to add a single page | |
1044 | * to an empty bio. | |
1045 | * | |
1046 | * Returns: number of bytes added to the bio, or 0 in case of a failure. | |
1047 | */ | |
1048 | int bio_add_zone_append_page(struct bio *bio, struct page *page, | |
1049 | unsigned int len, unsigned int offset) | |
1050 | { | |
3caee463 | 1051 | struct request_queue *q = bdev_get_queue(bio->bi_bdev); |
ae29333f JT |
1052 | bool same_page = false; |
1053 | ||
1054 | if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND)) | |
1055 | return 0; | |
1056 | ||
edd1dbc8 | 1057 | if (WARN_ON_ONCE(!bdev_is_zoned(bio->bi_bdev))) |
ae29333f JT |
1058 | return 0; |
1059 | ||
1060 | return bio_add_hw_page(q, bio, page, len, offset, | |
1061 | queue_max_zone_append_sectors(q), &same_page); | |
1062 | } | |
1063 | EXPORT_SYMBOL_GPL(bio_add_zone_append_page); | |
1064 | ||
0aa69fd3 | 1065 | /** |
551879a4 | 1066 | * __bio_add_page - add page(s) to a bio in a new segment |
0aa69fd3 | 1067 | * @bio: destination bio |
551879a4 ML |
1068 | * @page: start page to add |
1069 | * @len: length of the data to add, may cross pages | |
1070 | * @off: offset of the data relative to @page, may cross pages | |
0aa69fd3 CH |
1071 | * |
1072 | * Add the data at @page + @off to @bio as a new bvec. The caller must ensure | |
1073 | * that @bio has space for another bvec. | |
1074 | */ | |
1075 | void __bio_add_page(struct bio *bio, struct page *page, | |
1076 | unsigned int len, unsigned int off) | |
1077 | { | |
0aa69fd3 | 1078 | WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); |
79d08f89 | 1079 | WARN_ON_ONCE(bio_full(bio, len)); |
0aa69fd3 | 1080 | |
d58cdfae | 1081 | bvec_set_page(&bio->bi_io_vec[bio->bi_vcnt], page, len, off); |
c66a14d0 | 1082 | bio->bi_iter.bi_size += len; |
0aa69fd3 CH |
1083 | bio->bi_vcnt++; |
1084 | } | |
1085 | EXPORT_SYMBOL_GPL(__bio_add_page); | |
1086 | ||
1087 | /** | |
551879a4 | 1088 | * bio_add_page - attempt to add page(s) to bio |
0aa69fd3 | 1089 | * @bio: destination bio |
551879a4 ML |
1090 | * @page: start page to add |
1091 | * @len: vec entry length, may cross pages | |
1092 | * @offset: vec entry offset relative to @page, may cross pages | |
0aa69fd3 | 1093 | * |
551879a4 | 1094 | * Attempt to add page(s) to the bio_vec maplist. This will only fail |
0aa69fd3 CH |
1095 | * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio. |
1096 | */ | |
1097 | int bio_add_page(struct bio *bio, struct page *page, | |
1098 | unsigned int len, unsigned int offset) | |
1099 | { | |
ff896738 CH |
1100 | bool same_page = false; |
1101 | ||
939e1a37 CH |
1102 | if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) |
1103 | return 0; | |
61369905 CH |
1104 | if (bio->bi_iter.bi_size > UINT_MAX - len) |
1105 | return 0; | |
939e1a37 | 1106 | |
0eca8b6f | 1107 | if (bio->bi_vcnt > 0 && |
858c708d CH |
1108 | bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1], |
1109 | page, len, offset, &same_page)) { | |
1110 | bio->bi_iter.bi_size += len; | |
0eca8b6f | 1111 | return len; |
858c708d | 1112 | } |
0eca8b6f | 1113 | |
80232b52 | 1114 | if (bio->bi_vcnt >= bio->bi_max_vecs) |
0eca8b6f CH |
1115 | return 0; |
1116 | __bio_add_page(bio, page, len, offset); | |
c66a14d0 | 1117 | return len; |
1da177e4 | 1118 | } |
a112a71d | 1119 | EXPORT_SYMBOL(bio_add_page); |
1da177e4 | 1120 | |
7a150f1e JT |
1121 | void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len, |
1122 | size_t off) | |
1123 | { | |
1124 | WARN_ON_ONCE(len > UINT_MAX); | |
1125 | WARN_ON_ONCE(off > UINT_MAX); | |
1126 | __bio_add_page(bio, &folio->page, len, off); | |
1127 | } | |
1128 | ||
85f5a74c MWO |
1129 | /** |
1130 | * bio_add_folio - Attempt to add part of a folio to a bio. | |
1131 | * @bio: BIO to add to. | |
1132 | * @folio: Folio to add. | |
1133 | * @len: How many bytes from the folio to add. | |
1134 | * @off: First byte in this folio to add. | |
1135 | * | |
1136 | * Filesystems that use folios can call this function instead of calling | |
1137 | * bio_add_page() for each page in the folio. If @off is bigger than | |
1138 | * PAGE_SIZE, this function can create a bio_vec that starts in a page | |
1139 | * after the bv_page. BIOs do not support folios that are 4GiB or larger. | |
1140 | * | |
1141 | * Return: Whether the addition was successful. | |
1142 | */ | |
1143 | bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len, | |
1144 | size_t off) | |
1145 | { | |
1146 | if (len > UINT_MAX || off > UINT_MAX) | |
455a844d | 1147 | return false; |
85f5a74c MWO |
1148 | return bio_add_page(bio, &folio->page, len, off) > 0; |
1149 | } | |
cd57b771 | 1150 | EXPORT_SYMBOL(bio_add_folio); |
85f5a74c | 1151 | |
c809084a | 1152 | void __bio_release_pages(struct bio *bio, bool mark_dirty) |
7321ecbf | 1153 | { |
1b151e24 MWO |
1154 | struct folio_iter fi; |
1155 | ||
1156 | bio_for_each_folio_all(fi, bio) { | |
1157 | struct page *page; | |
38b43539 | 1158 | size_t nr_pages; |
7321ecbf | 1159 | |
1b151e24 MWO |
1160 | if (mark_dirty) { |
1161 | folio_lock(fi.folio); | |
1162 | folio_mark_dirty(fi.folio); | |
1163 | folio_unlock(fi.folio); | |
1164 | } | |
1165 | page = folio_page(fi.folio, fi.offset / PAGE_SIZE); | |
38b43539 TB |
1166 | nr_pages = (fi.offset + fi.length - 1) / PAGE_SIZE - |
1167 | fi.offset / PAGE_SIZE + 1; | |
1b151e24 MWO |
1168 | do { |
1169 | bio_release_page(bio, page++); | |
38b43539 | 1170 | } while (--nr_pages != 0); |
d241a95f | 1171 | } |
7321ecbf | 1172 | } |
c809084a | 1173 | EXPORT_SYMBOL_GPL(__bio_release_pages); |
7321ecbf | 1174 | |
1bb6b810 | 1175 | void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter) |
6d0c48ae | 1176 | { |
fa5fa8ec PB |
1177 | size_t size = iov_iter_count(iter); |
1178 | ||
7a800a20 | 1179 | WARN_ON_ONCE(bio->bi_max_vecs); |
c42bca92 | 1180 | |
fa5fa8ec PB |
1181 | if (bio_op(bio) == REQ_OP_ZONE_APPEND) { |
1182 | struct request_queue *q = bdev_get_queue(bio->bi_bdev); | |
1183 | size_t max_sectors = queue_max_zone_append_sectors(q); | |
1184 | ||
1185 | size = min(size, max_sectors << SECTOR_SHIFT); | |
1186 | } | |
1187 | ||
c42bca92 | 1188 | bio->bi_vcnt = iter->nr_segs; |
c42bca92 PB |
1189 | bio->bi_io_vec = (struct bio_vec *)iter->bvec; |
1190 | bio->bi_iter.bi_bvec_done = iter->iov_offset; | |
fa5fa8ec | 1191 | bio->bi_iter.bi_size = size; |
977be012 | 1192 | bio_set_flag(bio, BIO_CLONED); |
7de55b7d | 1193 | } |
c42bca92 | 1194 | |
c58c0074 KB |
1195 | static int bio_iov_add_page(struct bio *bio, struct page *page, |
1196 | unsigned int len, unsigned int offset) | |
1197 | { | |
1198 | bool same_page = false; | |
1199 | ||
61369905 CH |
1200 | if (WARN_ON_ONCE(bio->bi_iter.bi_size > UINT_MAX - len)) |
1201 | return -EIO; | |
1202 | ||
0eca8b6f | 1203 | if (bio->bi_vcnt > 0 && |
858c708d CH |
1204 | bvec_try_merge_page(&bio->bi_io_vec[bio->bi_vcnt - 1], |
1205 | page, len, offset, &same_page)) { | |
1206 | bio->bi_iter.bi_size += len; | |
0eca8b6f CH |
1207 | if (same_page) |
1208 | bio_release_page(bio, page); | |
c58c0074 KB |
1209 | return 0; |
1210 | } | |
0eca8b6f | 1211 | __bio_add_page(bio, page, len, offset); |
c58c0074 KB |
1212 | return 0; |
1213 | } | |
1214 | ||
1215 | static int bio_iov_add_zone_append_page(struct bio *bio, struct page *page, | |
1216 | unsigned int len, unsigned int offset) | |
1217 | { | |
1218 | struct request_queue *q = bdev_get_queue(bio->bi_bdev); | |
1219 | bool same_page = false; | |
1220 | ||
1221 | if (bio_add_hw_page(q, bio, page, len, offset, | |
1222 | queue_max_zone_append_sectors(q), &same_page) != len) | |
1223 | return -EINVAL; | |
1224 | if (same_page) | |
a7e689dd | 1225 | bio_release_page(bio, page); |
c58c0074 KB |
1226 | return 0; |
1227 | } | |
1228 | ||
576ed913 CH |
1229 | #define PAGE_PTRS_PER_BVEC (sizeof(struct bio_vec) / sizeof(struct page *)) |
1230 | ||
2cefe4db | 1231 | /** |
17d51b10 | 1232 | * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio |
2cefe4db KO |
1233 | * @bio: bio to add pages to |
1234 | * @iter: iov iterator describing the region to be mapped | |
1235 | * | |
a7e689dd DH |
1236 | * Extracts pages from *iter and appends them to @bio's bvec array. The pages |
1237 | * will have to be cleaned up in the way indicated by the BIO_PAGE_PINNED flag. | |
1238 | * For a multi-segment *iter, this function only adds pages from the next | |
1239 | * non-empty segment of the iov iterator. | |
2cefe4db | 1240 | */ |
17d51b10 | 1241 | static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) |
2cefe4db | 1242 | { |
f62e52d1 | 1243 | iov_iter_extraction_t extraction_flags = 0; |
576ed913 CH |
1244 | unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt; |
1245 | unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt; | |
2cefe4db KO |
1246 | struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt; |
1247 | struct page **pages = (struct page **)bv; | |
576ed913 | 1248 | ssize_t size, left; |
e97424fd | 1249 | unsigned len, i = 0; |
168145f6 | 1250 | size_t offset; |
325347d9 | 1251 | int ret = 0; |
576ed913 CH |
1252 | |
1253 | /* | |
1254 | * Move page array up in the allocated memory for the bio vecs as far as | |
1255 | * possible so that we can start filling biovecs from the beginning | |
1256 | * without overwriting the temporary page array. | |
c58c0074 | 1257 | */ |
576ed913 CH |
1258 | BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2); |
1259 | pages += entries_left * (PAGE_PTRS_PER_BVEC - 1); | |
2cefe4db | 1260 | |
5e3e3f2e | 1261 | if (bio->bi_bdev && blk_queue_pci_p2pdma(bio->bi_bdev->bd_disk->queue)) |
f62e52d1 | 1262 | extraction_flags |= ITER_ALLOW_P2PDMA; |
5e3e3f2e | 1263 | |
b1a000d3 KB |
1264 | /* |
1265 | * Each segment in the iov is required to be a block size multiple. | |
1266 | * However, we may not be able to get the entire segment if it spans | |
1267 | * more pages than bi_max_vecs allows, so we have to ALIGN_DOWN the | |
1268 | * result to ensure the bio's total size is correct. The remainder of | |
1269 | * the iov data will be picked up in the next bio iteration. | |
1270 | */ | |
a7e689dd DH |
1271 | size = iov_iter_extract_pages(iter, &pages, |
1272 | UINT_MAX - bio->bi_iter.bi_size, | |
1273 | nr_pages, extraction_flags, &offset); | |
480cb846 AV |
1274 | if (unlikely(size <= 0)) |
1275 | return size ? size : -EFAULT; | |
1276 | ||
1277 | nr_pages = DIV_ROUND_UP(offset + size, PAGE_SIZE); | |
1278 | ||
168145f6 KO |
1279 | if (bio->bi_bdev) { |
1280 | size_t trim = size & (bdev_logical_block_size(bio->bi_bdev) - 1); | |
1281 | iov_iter_revert(iter, trim); | |
1282 | size -= trim; | |
1283 | } | |
480cb846 | 1284 | |
480cb846 AV |
1285 | if (unlikely(!size)) { |
1286 | ret = -EFAULT; | |
e97424fd KB |
1287 | goto out; |
1288 | } | |
2cefe4db | 1289 | |
576ed913 CH |
1290 | for (left = size, i = 0; left > 0; left -= len, i++) { |
1291 | struct page *page = pages[i]; | |
2cefe4db | 1292 | |
576ed913 | 1293 | len = min_t(size_t, PAGE_SIZE - offset, left); |
34cdb8c8 | 1294 | if (bio_op(bio) == REQ_OP_ZONE_APPEND) { |
c58c0074 KB |
1295 | ret = bio_iov_add_zone_append_page(bio, page, len, |
1296 | offset); | |
e97424fd | 1297 | if (ret) |
34cdb8c8 | 1298 | break; |
34cdb8c8 KB |
1299 | } else |
1300 | bio_iov_add_page(bio, page, len, offset); | |
45691804 | 1301 | |
576ed913 | 1302 | offset = 0; |
2cefe4db KO |
1303 | } |
1304 | ||
480cb846 | 1305 | iov_iter_revert(iter, left); |
e97424fd KB |
1306 | out: |
1307 | while (i < nr_pages) | |
a7e689dd | 1308 | bio_release_page(bio, pages[i++]); |
e97424fd | 1309 | |
325347d9 | 1310 | return ret; |
2cefe4db | 1311 | } |
17d51b10 MW |
1312 | |
1313 | /** | |
6d0c48ae | 1314 | * bio_iov_iter_get_pages - add user or kernel pages to a bio |
17d51b10 | 1315 | * @bio: bio to add pages to |
6d0c48ae JA |
1316 | * @iter: iov iterator describing the region to be added |
1317 | * | |
1318 | * This takes either an iterator pointing to user memory, or one pointing to | |
1319 | * kernel pages (BVEC iterator). If we're adding user pages, we pin them and | |
1320 | * map them into the kernel. On IO completion, the caller should put those | |
c42bca92 PB |
1321 | * pages. For bvec based iterators bio_iov_iter_get_pages() uses the provided |
1322 | * bvecs rather than copying them. Hence anyone issuing kiocb based IO needs | |
1323 | * to ensure the bvecs and pages stay referenced until the submitted I/O is | |
1324 | * completed by a call to ->ki_complete() or returns with an error other than | |
1325 | * -EIOCBQUEUED. The caller needs to check if the bio is flagged BIO_NO_PAGE_REF | |
1326 | * on IO completion. If it isn't, then pages should be released. | |
17d51b10 | 1327 | * |
17d51b10 | 1328 | * The function tries, but does not guarantee, to pin as many pages as |
5cd3ddc1 | 1329 | * fit into the bio, or are requested in @iter, whatever is smaller. If |
6d0c48ae JA |
1330 | * MM encounters an error pinning the requested pages, it stops. Error |
1331 | * is returned only if 0 pages could be pinned. | |
17d51b10 MW |
1332 | */ |
1333 | int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) | |
1334 | { | |
c42bca92 | 1335 | int ret = 0; |
14eacf12 | 1336 | |
939e1a37 CH |
1337 | if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) |
1338 | return -EIO; | |
1339 | ||
c42bca92 | 1340 | if (iov_iter_is_bvec(iter)) { |
fa5fa8ec PB |
1341 | bio_iov_bvec_set(bio, iter); |
1342 | iov_iter_advance(iter, bio->bi_iter.bi_size); | |
1343 | return 0; | |
c42bca92 | 1344 | } |
17d51b10 | 1345 | |
a7e689dd DH |
1346 | if (iov_iter_extract_will_pin(iter)) |
1347 | bio_set_flag(bio, BIO_PAGE_PINNED); | |
17d51b10 | 1348 | do { |
c58c0074 | 1349 | ret = __bio_iov_iter_get_pages(bio, iter); |
79d08f89 | 1350 | } while (!ret && iov_iter_count(iter) && !bio_full(bio, 0)); |
17d51b10 | 1351 | |
14eacf12 | 1352 | return bio->bi_vcnt ? 0 : ret; |
17d51b10 | 1353 | } |
29b2a3aa | 1354 | EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages); |
2cefe4db | 1355 | |
4246a0b6 | 1356 | static void submit_bio_wait_endio(struct bio *bio) |
9e882242 | 1357 | { |
65e53aab | 1358 | complete(bio->bi_private); |
9e882242 KO |
1359 | } |
1360 | ||
1361 | /** | |
1362 | * submit_bio_wait - submit a bio, and wait until it completes | |
9e882242 KO |
1363 | * @bio: The &struct bio which describes the I/O |
1364 | * | |
1365 | * Simple wrapper around submit_bio(). Returns 0 on success, or the error from | |
1366 | * bio_endio() on failure. | |
3d289d68 JK |
1367 | * |
1368 | * WARNING: Unlike to how submit_bio() is usually used, this function does not | |
1369 | * result in bio reference to be consumed. The caller must drop the reference | |
1370 | * on his own. | |
9e882242 | 1371 | */ |
4e49ea4a | 1372 | int submit_bio_wait(struct bio *bio) |
9e882242 | 1373 | { |
309dca30 CH |
1374 | DECLARE_COMPLETION_ONSTACK_MAP(done, |
1375 | bio->bi_bdev->bd_disk->lockdep_map); | |
9e882242 | 1376 | |
65e53aab | 1377 | bio->bi_private = &done; |
9e882242 | 1378 | bio->bi_end_io = submit_bio_wait_endio; |
1eff9d32 | 1379 | bio->bi_opf |= REQ_SYNC; |
4e49ea4a | 1380 | submit_bio(bio); |
0eb4db47 | 1381 | blk_wait_io(&done); |
9e882242 | 1382 | |
65e53aab | 1383 | return blk_status_to_errno(bio->bi_status); |
9e882242 KO |
1384 | } |
1385 | EXPORT_SYMBOL(submit_bio_wait); | |
1386 | ||
d4aa57a1 | 1387 | void __bio_advance(struct bio *bio, unsigned bytes) |
054bdf64 KO |
1388 | { |
1389 | if (bio_integrity(bio)) | |
1390 | bio_integrity_advance(bio, bytes); | |
1391 | ||
a892c8d5 | 1392 | bio_crypt_advance(bio, bytes); |
4550dd6c | 1393 | bio_advance_iter(bio, &bio->bi_iter, bytes); |
054bdf64 | 1394 | } |
d4aa57a1 | 1395 | EXPORT_SYMBOL(__bio_advance); |
054bdf64 | 1396 | |
ee4b4e22 JA |
1397 | void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter, |
1398 | struct bio *src, struct bvec_iter *src_iter) | |
1399 | { | |
1400 | while (src_iter->bi_size && dst_iter->bi_size) { | |
1401 | struct bio_vec src_bv = bio_iter_iovec(src, *src_iter); | |
1402 | struct bio_vec dst_bv = bio_iter_iovec(dst, *dst_iter); | |
1403 | unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len); | |
1404 | void *src_buf = bvec_kmap_local(&src_bv); | |
1405 | void *dst_buf = bvec_kmap_local(&dst_bv); | |
1406 | ||
1407 | memcpy(dst_buf, src_buf, bytes); | |
1408 | ||
1409 | kunmap_local(dst_buf); | |
1410 | kunmap_local(src_buf); | |
1411 | ||
1412 | bio_advance_iter_single(src, src_iter, bytes); | |
1413 | bio_advance_iter_single(dst, dst_iter, bytes); | |
1414 | } | |
1415 | } | |
1416 | EXPORT_SYMBOL(bio_copy_data_iter); | |
1417 | ||
38a72dac | 1418 | /** |
45db54d5 KO |
1419 | * bio_copy_data - copy contents of data buffers from one bio to another |
1420 | * @src: source bio | |
1421 | * @dst: destination bio | |
38a72dac KO |
1422 | * |
1423 | * Stops when it reaches the end of either @src or @dst - that is, copies | |
1424 | * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios). | |
1425 | */ | |
1426 | void bio_copy_data(struct bio *dst, struct bio *src) | |
1427 | { | |
45db54d5 KO |
1428 | struct bvec_iter src_iter = src->bi_iter; |
1429 | struct bvec_iter dst_iter = dst->bi_iter; | |
1430 | ||
ee4b4e22 | 1431 | bio_copy_data_iter(dst, &dst_iter, src, &src_iter); |
38a72dac | 1432 | } |
16ac3d63 KO |
1433 | EXPORT_SYMBOL(bio_copy_data); |
1434 | ||
491221f8 | 1435 | void bio_free_pages(struct bio *bio) |
1dfa0f68 CH |
1436 | { |
1437 | struct bio_vec *bvec; | |
6dc4f100 | 1438 | struct bvec_iter_all iter_all; |
1dfa0f68 | 1439 | |
2b070cfe | 1440 | bio_for_each_segment_all(bvec, bio, iter_all) |
1dfa0f68 CH |
1441 | __free_page(bvec->bv_page); |
1442 | } | |
491221f8 | 1443 | EXPORT_SYMBOL(bio_free_pages); |
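/*
 * Illustrative sketch (not part of this file): a bounce-buffer style
 * completion that uses bio_copy_data() and bio_free_pages().  The bounce bio
 * carries private pages; on a successful read their contents are copied back
 * into the original bio before it is completed.  Names and the bi_private
 * wiring are hypothetical.
 */
static void example_bounce_end_io(struct bio *bounce)
{
	struct bio *orig = bounce->bi_private;

	if (!bounce->bi_status && bio_data_dir(orig) == READ)
		bio_copy_data(orig, bounce);	/* dst first, then src */

	orig->bi_status = bounce->bi_status;
	bio_free_pages(bounce);		/* frees the bounce pages */
	bio_put(bounce);
	bio_endio(orig);
}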
1dfa0f68 | 1444 | |
1da177e4 LT |
1445 | /* |
1446 | * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions | |
1447 | * for performing direct-IO in BIOs. | |
1448 | * | |
1b151e24 | 1449 | * The problem is that we cannot run folio_mark_dirty() from interrupt context |
1da177e4 LT |
1450 | * because the required locks are not interrupt-safe. So what we can do is to |
1451 | * mark the pages dirty _before_ performing IO. And in interrupt context, | |
1452 | * check that the pages are still dirty. If so, fine. If not, redirty them | |
1453 | * in process context. | |
1454 | * | |
1da177e4 LT |
1455 | * Note that this code is very hard to test under normal circumstances because |
1456 | * direct-io pins the pages with get_user_pages(). This makes | |
1457 | * is_page_cache_freeable() return false, and the VM will not clean the pages.
0d5c3eba | 1458 | * But other code (e.g. flusher threads) could clean the pages if they are mapped in the
1da177e4 LT |
1459 | * pagecache. |
1460 | * | |
1461 | * Simply disabling the call to bio_set_pages_dirty() is a good way to test the | |
1462 | * deferred bio dirtying paths. | |
1463 | */ | |
1464 | ||
1465 | /* | |
1466 | * bio_set_pages_dirty() will mark all the bio's pages as dirty. | |
1467 | */ | |
1468 | void bio_set_pages_dirty(struct bio *bio) | |
1469 | { | |
1b151e24 | 1470 | struct folio_iter fi; |
1da177e4 | 1471 | |
1b151e24 MWO |
1472 | bio_for_each_folio_all(fi, bio) { |
1473 | folio_lock(fi.folio); | |
1474 | folio_mark_dirty(fi.folio); | |
1475 | folio_unlock(fi.folio); | |
1da177e4 LT |
1476 | } |
1477 | } | |
7ba37927 | 1478 | EXPORT_SYMBOL_GPL(bio_set_pages_dirty); |
1da177e4 | 1479 | |
1da177e4 LT |
1480 | /* |
1481 | * bio_check_pages_dirty() will check that all the BIO's pages are still dirty. | |
1482 | * If they are, then fine. If, however, some pages are clean then they must | |
1483 | * have been written out during the direct-IO read. So we take another ref on | |
24d5493f | 1484 | * the BIO and re-dirty the pages in process context. |
1da177e4 LT |
1485 | * |
1486 | * It is expected that bio_check_pages_dirty() will wholly own the BIO from | |
fd363244 DH |
1487 | * here on. It will unpin each page and will run one bio_put() against the |
1488 | * BIO. | |
1da177e4 LT |
1489 | */ |
1490 | ||
65f27f38 | 1491 | static void bio_dirty_fn(struct work_struct *work); |
1da177e4 | 1492 | |
65f27f38 | 1493 | static DECLARE_WORK(bio_dirty_work, bio_dirty_fn); |
1da177e4 LT |
1494 | static DEFINE_SPINLOCK(bio_dirty_lock); |
1495 | static struct bio *bio_dirty_list; | |
1496 | ||
1497 | /* | |
1498 | * This runs in process context | |
1499 | */ | |
65f27f38 | 1500 | static void bio_dirty_fn(struct work_struct *work) |
1da177e4 | 1501 | { |
24d5493f | 1502 | struct bio *bio, *next; |
1da177e4 | 1503 | |
24d5493f CH |
1504 | spin_lock_irq(&bio_dirty_lock); |
1505 | next = bio_dirty_list; | |
1da177e4 | 1506 | bio_dirty_list = NULL; |
24d5493f | 1507 | spin_unlock_irq(&bio_dirty_lock); |
1da177e4 | 1508 | |
24d5493f CH |
1509 | while ((bio = next) != NULL) { |
1510 | next = bio->bi_private; | |
1da177e4 | 1511 | |
d241a95f | 1512 | bio_release_pages(bio, true); |
1da177e4 | 1513 | bio_put(bio); |
1da177e4 LT |
1514 | } |
1515 | } | |
1516 | ||
1517 | void bio_check_pages_dirty(struct bio *bio) | |
1518 | { | |
1b151e24 | 1519 | struct folio_iter fi; |
24d5493f | 1520 | unsigned long flags; |
1da177e4 | 1521 | |
1b151e24 MWO |
1522 | bio_for_each_folio_all(fi, bio) { |
1523 | if (!folio_test_dirty(fi.folio)) | |
24d5493f | 1524 | goto defer; |
1da177e4 LT |
1525 | } |
1526 | ||
d241a95f | 1527 | bio_release_pages(bio, false); |
24d5493f CH |
1528 | bio_put(bio); |
1529 | return; | |
1530 | defer: | |
1531 | spin_lock_irqsave(&bio_dirty_lock, flags); | |
1532 | bio->bi_private = bio_dirty_list; | |
1533 | bio_dirty_list = bio; | |
1534 | spin_unlock_irqrestore(&bio_dirty_lock, flags); | |
1535 | schedule_work(&bio_dirty_work); | |
1da177e4 | 1536 | } |
7ba37927 | 1537 | EXPORT_SYMBOL_GPL(bio_check_pages_dirty); |
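/*
 * Illustrative sketch (not part of this file): the completion side of the
 * protocol described above, as a direct-I/O read path might use it.  Pages
 * were dirtied with bio_set_pages_dirty() before submission; the names are
 * hypothetical.
 */
static void example_dio_end_io(struct bio *bio)
{
	if (bio_data_dir(bio) == READ) {
		/* re-dirties clean pages, unpins them and puts the bio */
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}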
1da177e4 | 1538 | |
c4cf5261 JA |
1539 | static inline bool bio_remaining_done(struct bio *bio) |
1540 | { | |
1541 | /* | |
1542 | * If we're not chaining, then ->__bi_remaining is always 1 and | |
1543 | * we always end io on the first invocation. | |
1544 | */ | |
1545 | if (!bio_flagged(bio, BIO_CHAIN)) | |
1546 | return true; | |
1547 | ||
1548 | BUG_ON(atomic_read(&bio->__bi_remaining) <= 0); | |
1549 | ||
326e1dbb | 1550 | if (atomic_dec_and_test(&bio->__bi_remaining)) { |
b7c44ed9 | 1551 | bio_clear_flag(bio, BIO_CHAIN); |
c4cf5261 | 1552 | return true; |
326e1dbb | 1553 | } |
c4cf5261 JA |
1554 | |
1555 | return false; | |
1556 | } | |
1557 | ||
1da177e4 LT |
1558 | /** |
1559 | * bio_endio - end I/O on a bio | |
1560 | * @bio: bio | |
1da177e4 LT |
1561 | * |
1562 | * Description: | |
4246a0b6 CH |
1563 | * bio_endio() will end I/O on the whole bio. bio_endio() is the preferred |
1564 | * way to end I/O on a bio. No one should call bi_end_io() directly on a | |
1565 | * bio unless they own it and thus know that it has an end_io function. | |
fbbaf700 N |
1566 | * |
1567 | * bio_endio() can be called several times on a bio that has been chained | |
1568 | * using bio_chain(). The ->bi_end_io() function will only be called the | |
60b6a7e6 | 1569 | * last time. |
1da177e4 | 1570 | **/ |
4246a0b6 | 1571 | void bio_endio(struct bio *bio) |
1da177e4 | 1572 | { |
ba8c6967 | 1573 | again: |
2b885517 | 1574 | if (!bio_remaining_done(bio)) |
ba8c6967 | 1575 | return; |
7c20f116 CH |
1576 | if (!bio_integrity_endio(bio)) |
1577 | return; | |
1da177e4 | 1578 | |
aa1b46dc | 1579 | rq_qos_done_bio(bio); |
67b42d0b | 1580 | |
60b6a7e6 | 1581 | if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) { |
3caee463 | 1582 | trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio); |
60b6a7e6 EH |
1583 | bio_clear_flag(bio, BIO_TRACE_COMPLETION); |
1584 | } | |
1585 | ||
ba8c6967 CH |
1586 | /* |
1587 | * Need to have a real endio function for chained bios, otherwise | |
1588 | * various corner cases will break (like stacking block devices that | |
1589 | * save/restore bi_end_io) - however, we want to avoid unbounded | |
1590 | * recursion and blowing the stack. Tail call optimization would | |
1591 | * handle this, but compiling with frame pointers also disables | |
1592 | * gcc's sibling call optimization. | |
1593 | */ | |
1594 | if (bio->bi_end_io == bio_chain_endio) { | |
1595 | bio = __bio_chain_endio(bio); | |
1596 | goto again; | |
196d38bc | 1597 | } |
ba8c6967 | 1598 | |
9e234eea | 1599 | blk_throtl_bio_endio(bio); |
b222dd2f SL |
1600 | /* release cgroup info */ |
1601 | bio_uninit(bio); | |
ba8c6967 CH |
1602 | if (bio->bi_end_io) |
1603 | bio->bi_end_io(bio); | |
1da177e4 | 1604 | } |
a112a71d | 1605 | EXPORT_SYMBOL(bio_endio); |
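/*
 * Illustrative sketch (not part of this file): a stacking-driver style
 * completion handler that propagates status from a privately cloned bio back
 * to the original and then ends it with bio_endio().  The bi_private wiring
 * and names are hypothetical.
 */
static void example_clone_end_io(struct bio *clone)
{
	struct bio *orig = clone->bi_private;

	if (clone->bi_status && !orig->bi_status)
		orig->bi_status = clone->bi_status;
	bio_put(clone);
	bio_endio(orig);	/* ->bi_end_io of the original runs here */
}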
1da177e4 | 1606 | |
20d0189b KO |
1607 | /** |
1608 | * bio_split - split a bio | |
1609 | * @bio: bio to split | |
1610 | * @sectors: number of sectors to split from the front of @bio | |
1611 | * @gfp: gfp mask | |
1612 | * @bs: bio set to allocate from | |
1613 | * | |
1614 | * Allocates and returns a new bio which represents @sectors from the start of | |
1615 | * @bio, and updates @bio to represent the remaining sectors. | |
1616 | * | |
f3f5da62 | 1617 | * Unless this is a discard request, the newly allocated bio will point
dad77584 BVA |
1618 | * to @bio's bi_io_vec. It is the caller's responsibility to ensure that |
1619 | * neither @bio nor @bs are freed before the split bio. | |
20d0189b KO |
1620 | */ |
1621 | struct bio *bio_split(struct bio *bio, int sectors, | |
1622 | gfp_t gfp, struct bio_set *bs) | |
1623 | { | |
f341a4d3 | 1624 | struct bio *split; |
20d0189b KO |
1625 | |
1626 | BUG_ON(sectors <= 0); | |
1627 | BUG_ON(sectors >= bio_sectors(bio)); | |
1628 | ||
0512a75b KB |
1629 | /* Zone append commands cannot be split */ |
1630 | if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND)) | |
1631 | return NULL; | |
1632 | ||
abfc426d | 1633 | split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs); |
20d0189b KO |
1634 | if (!split) |
1635 | return NULL; | |
1636 | ||
1637 | split->bi_iter.bi_size = sectors << 9; | |
1638 | ||
1639 | if (bio_integrity(split)) | |
fbd08e76 | 1640 | bio_integrity_trim(split); |
20d0189b KO |
1641 | |
1642 | bio_advance(bio, split->bi_iter.bi_size); | |
1643 | ||
fbbaf700 | 1644 | if (bio_flagged(bio, BIO_TRACE_COMPLETION)) |
20d59023 | 1645 | bio_set_flag(split, BIO_TRACE_COMPLETION); |
fbbaf700 | 1646 | |
20d0189b KO |
1647 | return split; |
1648 | } | |
1649 | EXPORT_SYMBOL(bio_split); | |
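/*
 * Illustrative sketch (not part of this file): the usual split-and-chain
 * pattern.  The front @sectors are carved off into a new bio that is chained
 * back to the remainder, so the original completes only after both pieces
 * have finished.  example_submit_split() and its bio_set are hypothetical;
 * a stacking driver would use submit_bio_noacct() instead of submit_bio().
 */
static void example_submit_split(struct bio *bio, int sectors,
				 struct bio_set *bs)
{
	if (sectors < bio_sectors(bio)) {
		struct bio *split = bio_split(bio, sectors, GFP_NOIO, bs);

		if (split) {
			bio_chain(split, bio);	/* @bio now also waits for @split */
			submit_bio(split);
		}
	}
	submit_bio(bio);
}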
1650 | ||
6678d83f KO |
1651 | /** |
1652 | * bio_trim - trim a bio | |
1653 | * @bio: bio to trim | |
1654 | * @offset: number of sectors to trim from the front of @bio | |
1655 | * @size: size we want to trim @bio to, in sectors | |
e83502ca CK |
1656 | * |
1657 | * This function is typically used for bios that are cloned and submitted | |
1658 | * to the underlying device in parts. | |
6678d83f | 1659 | */ |
e83502ca | 1660 | void bio_trim(struct bio *bio, sector_t offset, sector_t size) |
6678d83f | 1661 | { |
e83502ca | 1662 | if (WARN_ON_ONCE(offset > BIO_MAX_SECTORS || size > BIO_MAX_SECTORS || |
8535c018 | 1663 | offset + size > bio_sectors(bio))) |
e83502ca | 1664 | return; |
6678d83f KO |
1665 | |
1666 | size <<= 9; | |
4f024f37 | 1667 | if (offset == 0 && size == bio->bi_iter.bi_size) |
6678d83f KO |
1668 | return; |
1669 | ||
6678d83f | 1670 | bio_advance(bio, offset << 9); |
4f024f37 | 1671 | bio->bi_iter.bi_size = size; |
376a78ab DM |
1672 | |
1673 | if (bio_integrity(bio)) | |
fbd08e76 | 1674 | bio_integrity_trim(bio); |
6678d83f KO |
1675 | } |
1676 | EXPORT_SYMBOL_GPL(bio_trim); | |
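/*
 * Illustrative sketch (not part of this file): submitting only a sub-range of
 * an existing bio by cloning it and trimming the clone, as the comment above
 * describes.  Names are hypothetical; @offset and @nr_sects are in sectors
 * and must stay within bio_sectors(@bio).
 */
static struct bio *example_clone_subrange(struct bio *bio, sector_t offset,
					  sector_t nr_sects, struct bio_set *bs)
{
	struct bio *clone = bio_alloc_clone(bio->bi_bdev, bio, GFP_NOIO, bs);

	if (clone)
		bio_trim(clone, offset, nr_sects);
	return clone;
}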
1677 | ||
1da177e4 LT |
1678 | /* |
1679 | * create memory pools for biovec's in a bio_set. | |
1680 | * use the global biovec slabs created for general use. | |
1681 | */ | |
8aa6ba2f | 1682 | int biovec_init_pool(mempool_t *pool, int pool_entries) |
1da177e4 | 1683 | { |
7a800a20 | 1684 | struct biovec_slab *bp = bvec_slabs + ARRAY_SIZE(bvec_slabs) - 1; |
1da177e4 | 1685 | |
8aa6ba2f | 1686 | return mempool_init_slab_pool(pool, pool_entries, bp->slab); |
1da177e4 LT |
1687 | } |
1688 | ||
917a38c7 KO |
1689 | /* |
1690 | * bioset_exit - exit a bioset initialized with bioset_init() | |
1691 | * | |
1692 | * May be called on a zeroed but uninitialized bioset (i.e. allocated with | |
1693 | * kzalloc()). | |
1694 | */ | |
1695 | void bioset_exit(struct bio_set *bs) | |
1da177e4 | 1696 | { |
be4d234d | 1697 | bio_alloc_cache_destroy(bs); |
df2cb6da KO |
1698 | if (bs->rescue_workqueue) |
1699 | destroy_workqueue(bs->rescue_workqueue); | |
917a38c7 | 1700 | bs->rescue_workqueue = NULL; |
df2cb6da | 1701 | |
8aa6ba2f KO |
1702 | mempool_exit(&bs->bio_pool); |
1703 | mempool_exit(&bs->bvec_pool); | |
9f060e22 | 1704 | |
7878cba9 | 1705 | bioset_integrity_free(bs); |
917a38c7 KO |
1706 | if (bs->bio_slab) |
1707 | bio_put_slab(bs); | |
1708 | bs->bio_slab = NULL; | |
1709 | } | |
1710 | EXPORT_SYMBOL(bioset_exit); | |
1da177e4 | 1711 | |
917a38c7 KO |
1712 | /** |
1713 | * bioset_init - Initialize a bio_set | |
dad08527 | 1714 | * @bs: pool to initialize |
917a38c7 KO |
1715 | * @pool_size: Number of bio and bio_vecs to cache in the mempool |
1716 | * @front_pad: Number of bytes to allocate in front of the returned bio | |
1717 | * @flags: Flags to modify behavior, currently %BIOSET_NEED_BVECS | |
1718 | * and %BIOSET_NEED_RESCUER | |
1719 | * | |
dad08527 KO |
1720 | * Description: |
1721 | * Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller | |
1722 | * to ask for a number of bytes to be allocated in front of the bio. | |
1723 | * Front pad allocation is useful for embedding the bio inside | |
1724 | * another structure, to avoid allocating extra data to go with the bio. | |
1725 | * Note that the bio must always be embedded at the END of that structure,
1726 | * or things will break badly. | |
1727 | * If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated | |
abfc426d CH |
1728 | * for allocating iovecs. This pool is not needed e.g. for bio_init_clone(). |
1729 | * If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used | |
1730 | * to dispatch queued requests when the mempool runs out of space. | |
dad08527 | 1731 | * |
917a38c7 KO |
1732 | */ |
1733 | int bioset_init(struct bio_set *bs, | |
1734 | unsigned int pool_size, | |
1735 | unsigned int front_pad, | |
1736 | int flags) | |
1737 | { | |
917a38c7 | 1738 | bs->front_pad = front_pad; |
9f180e31 ML |
1739 | if (flags & BIOSET_NEED_BVECS) |
1740 | bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec); | |
1741 | else | |
1742 | bs->back_pad = 0; | |
917a38c7 KO |
1743 | |
1744 | spin_lock_init(&bs->rescue_lock); | |
1745 | bio_list_init(&bs->rescue_list); | |
1746 | INIT_WORK(&bs->rescue_work, bio_alloc_rescue); | |
1747 | ||
49d1ec85 | 1748 | bs->bio_slab = bio_find_or_create_slab(bs); |
917a38c7 KO |
1749 | if (!bs->bio_slab) |
1750 | return -ENOMEM; | |
1751 | ||
1752 | if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab)) | |
1753 | goto bad; | |
1754 | ||
1755 | if ((flags & BIOSET_NEED_BVECS) && | |
1756 | biovec_init_pool(&bs->bvec_pool, pool_size)) | |
1757 | goto bad; | |
1758 | ||
be4d234d JA |
1759 | if (flags & BIOSET_NEED_RESCUER) { |
1760 | bs->rescue_workqueue = alloc_workqueue("bioset", | |
1761 | WQ_MEM_RECLAIM, 0); | |
1762 | if (!bs->rescue_workqueue) | |
1763 | goto bad; | |
1764 | } | |
1765 | if (flags & BIOSET_PERCPU_CACHE) { | |
1766 | bs->cache = alloc_percpu(struct bio_alloc_cache); | |
1767 | if (!bs->cache) | |
1768 | goto bad; | |
1769 | cpuhp_state_add_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead); | |
1770 | } | |
917a38c7 KO |
1771 | |
1772 | return 0; | |
1773 | bad: | |
1774 | bioset_exit(bs); | |
1775 | return -ENOMEM; | |
1776 | } | |
1777 | EXPORT_SYMBOL(bioset_init); | |
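/*
 * Illustrative sketch (not part of this file): using front_pad to embed a bio
 * at the end of a driver-private structure, as the description above
 * requires.  struct example_io and all other names are hypothetical.
 */
struct example_io {
	void		*private_data;
	struct bio	bio;		/* must be the last member */
};

static struct bio_set example_bio_set;

static int example_init(void)
{
	return bioset_init(&example_bio_set, BIO_POOL_SIZE,
			   offsetof(struct example_io, bio),
			   BIOSET_NEED_BVECS);
}

static struct example_io *example_io_alloc(struct block_device *bdev,
					   unsigned short nr_vecs,
					   blk_opf_t opf)
{
	struct bio *bio = bio_alloc_bioset(bdev, nr_vecs, opf, GFP_NOIO,
					   &example_bio_set);

	if (!bio)
		return NULL;
	return container_of(bio, struct example_io, bio);
}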
1778 | ||
de76fd89 | 1779 | static int __init init_bio(void) |
1da177e4 LT |
1780 | { |
1781 | int i; | |
1782 | ||
a3df2e45 JA |
1783 | BUILD_BUG_ON(BIO_FLAG_LAST > 8 * sizeof_field(struct bio, bi_flags)); |
1784 | ||
7878cba9 | 1785 | bio_integrity_init(); |
1da177e4 | 1786 | |
de76fd89 CH |
1787 | for (i = 0; i < ARRAY_SIZE(bvec_slabs); i++) { |
1788 | struct biovec_slab *bvs = bvec_slabs + i; | |
a7fcd37c | 1789 | |
de76fd89 CH |
1790 | bvs->slab = kmem_cache_create(bvs->name, |
1791 | bvs->nr_vecs * sizeof(struct bio_vec), 0, | |
1792 | SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); | |
1da177e4 | 1793 | } |
1da177e4 | 1794 | |
be4d234d JA |
1795 | cpuhp_setup_state_multi(CPUHP_BIO_DEAD, "block/bio:dead", NULL, |
1796 | bio_cpu_dead); | |
1797 | ||
12c5b70c JA |
1798 | if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, |
1799 | BIOSET_NEED_BVECS | BIOSET_PERCPU_CACHE)) | |
1da177e4 LT |
1800 | panic("bio: can't allocate bios\n"); |
1801 | ||
f4f8154a | 1802 | if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE)) |
a91a2785 MP |
1803 | panic("bio: can't create integrity pool\n"); |
1804 | ||
1da177e4 LT |
1805 | return 0; |
1806 | } | |
1da177e4 | 1807 | subsys_initcall(init_bio); |