Commit | Line | Data |
---|---|---|
8c16567d | 1 | // SPDX-License-Identifier: GPL-2.0 |
1da177e4 | 2 | /* |
0fe23479 | 3 | * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk> |
1da177e4 LT | 4 | */ |
5 | #include <linux/mm.h> | |
6 | #include <linux/swap.h> | |
7 | #include <linux/bio.h> | |
8 | #include <linux/blkdev.h> | |
a27bb332 | 9 | #include <linux/uio.h> |
852c788f | 10 | #include <linux/iocontext.h> |
1da177e4 LT | 11 | #include <linux/slab.h> |
12 | #include <linux/init.h> | |
13 | #include <linux/kernel.h> | |
630d9c47 | 14 | #include <linux/export.h> |
1da177e4 LT | 15 | #include <linux/mempool.h> |
16 | #include <linux/workqueue.h> | |
852c788f | 17 | #include <linux/cgroup.h> |
08e18eab | 18 | #include <linux/blk-cgroup.h> |
b4c5875d | 19 | #include <linux/highmem.h> |
de6a78b6 | 20 | #include <linux/sched/sysctl.h> |
a892c8d5 | 21 | #include <linux/blk-crypto.h> |
49d1ec85 | 22 | #include <linux/xarray.h> |
1da177e4 | 23 | |
55782138 | 24 | #include <trace/events/block.h> |
9e234eea | 25 | #include "blk.h" |
67b42d0b | 26 | #include "blk-rq-qos.h" |
0bfc2455 | 27 | |
be4d234d JA | 28 | struct bio_alloc_cache { |
29 | struct bio_list free_list; | |
30 | unsigned int nr; | |
31 | }; | |
32 | ||
de76fd89 | 33 | static struct biovec_slab { |
6ac0b715 CH | 34 | int nr_vecs; |
35 | char *name; | |
36 | struct kmem_cache *slab; | |
de76fd89 CH | 37 | } bvec_slabs[] __read_mostly = { |
38 | { .nr_vecs = 16, .name = "biovec-16" }, | |
39 | { .nr_vecs = 64, .name = "biovec-64" }, | |
40 | { .nr_vecs = 128, .name = "biovec-128" }, | |
a8affc03 | 41 | { .nr_vecs = BIO_MAX_VECS, .name = "biovec-max" }, |
1da177e4 | 42 | }; |
6ac0b715 | 43 | |
7a800a20 CH | 44 | static struct biovec_slab *biovec_slab(unsigned short nr_vecs) |
45 | { | |
46 | switch (nr_vecs) { | |
47 | /* smaller bios use inline vecs */ | |
48 | case 5 ... 16: | |
49 | return &bvec_slabs[0]; | |
50 | case 17 ... 64: | |
51 | return &bvec_slabs[1]; | |
52 | case 65 ... 128: | |
53 | return &bvec_slabs[2]; | |
a8affc03 | 54 | case 129 ... BIO_MAX_VECS: |
7a800a20 CH | 55 | return &bvec_slabs[3]; |
56 | default: | |
57 | BUG(); | |
58 | return NULL; | |
59 | } | |
60 | } | |
1da177e4 | 61 | |
1da177e4 LT | 62 | /* |
63 | * fs_bio_set is the bio_set containing bio and iovec memory pools used by | |
64 | * IO code that does not need private memory pools. | |
65 | */ | |
f4f8154a | 66 | struct bio_set fs_bio_set; |
3f86a82a | 67 | EXPORT_SYMBOL(fs_bio_set); |
1da177e4 | 68 | |
bb799ca0 JA | 69 | /* |
70 | * Our slab pool management | |
71 | */ | |
72 | struct bio_slab { | |
73 | struct kmem_cache *slab; | |
74 | unsigned int slab_ref; | |
75 | unsigned int slab_size; | |
76 | char name[8]; | |
77 | }; | |
78 | static DEFINE_MUTEX(bio_slab_lock); | |
49d1ec85 | 79 | static DEFINE_XARRAY(bio_slabs); |
bb799ca0 | 80 | |
49d1ec85 | 81 | static struct bio_slab *create_bio_slab(unsigned int size) |
bb799ca0 | 82 | { |
49d1ec85 | 83 | struct bio_slab *bslab = kzalloc(sizeof(*bslab), GFP_KERNEL); |
bb799ca0 | 84 | |
49d1ec85 ML |
85 | if (!bslab) |
86 | return NULL; | |
bb799ca0 | 87 | |
49d1ec85 ML |
88 | snprintf(bslab->name, sizeof(bslab->name), "bio-%d", size); |
89 | bslab->slab = kmem_cache_create(bslab->name, size, | |
1a7e76e4 CH |
90 | ARCH_KMALLOC_MINALIGN, |
91 | SLAB_HWCACHE_ALIGN | SLAB_TYPESAFE_BY_RCU, NULL); | |
49d1ec85 ML |
92 | if (!bslab->slab) |
93 | goto fail_alloc_slab; | |
bb799ca0 | 94 | |
49d1ec85 ML |
95 | bslab->slab_ref = 1; |
96 | bslab->slab_size = size; | |
bb799ca0 | 97 | |
49d1ec85 ML |
98 | if (!xa_err(xa_store(&bio_slabs, size, bslab, GFP_KERNEL))) |
99 | return bslab; | |
bb799ca0 | 100 | |
49d1ec85 | 101 | kmem_cache_destroy(bslab->slab); |
bb799ca0 | 102 | |
49d1ec85 ML |
103 | fail_alloc_slab: |
104 | kfree(bslab); | |
105 | return NULL; | |
106 | } | |
bb799ca0 | 107 | |
49d1ec85 ML |
108 | static inline unsigned int bs_bio_slab_size(struct bio_set *bs) |
109 | { | |
9f180e31 | 110 | return bs->front_pad + sizeof(struct bio) + bs->back_pad; |
49d1ec85 | 111 | } |
bb799ca0 | 112 | |
49d1ec85 ML |
113 | static struct kmem_cache *bio_find_or_create_slab(struct bio_set *bs) |
114 | { | |
115 | unsigned int size = bs_bio_slab_size(bs); | |
116 | struct bio_slab *bslab; | |
bb799ca0 | 117 | |
49d1ec85 ML |
118 | mutex_lock(&bio_slab_lock); |
119 | bslab = xa_load(&bio_slabs, size); | |
120 | if (bslab) | |
121 | bslab->slab_ref++; | |
122 | else | |
123 | bslab = create_bio_slab(size); | |
bb799ca0 | 124 | mutex_unlock(&bio_slab_lock); |
49d1ec85 ML |
125 | |
126 | if (bslab) | |
127 | return bslab->slab; | |
128 | return NULL; | |
bb799ca0 JA |
129 | } |
130 | ||
131 | static void bio_put_slab(struct bio_set *bs) | |
132 | { | |
133 | struct bio_slab *bslab = NULL; | |
49d1ec85 | 134 | unsigned int slab_size = bs_bio_slab_size(bs); |
bb799ca0 JA |
135 | |
136 | mutex_lock(&bio_slab_lock); | |
137 | ||
49d1ec85 | 138 | bslab = xa_load(&bio_slabs, slab_size); |
bb799ca0 JA |
139 | if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n")) |
140 | goto out; | |
141 | ||
49d1ec85 ML |
142 | WARN_ON_ONCE(bslab->slab != bs->bio_slab); |
143 | ||
bb799ca0 JA |
144 | WARN_ON(!bslab->slab_ref); |
145 | ||
146 | if (--bslab->slab_ref) | |
147 | goto out; | |
148 | ||
49d1ec85 ML |
149 | xa_erase(&bio_slabs, slab_size); |
150 | ||
bb799ca0 | 151 | kmem_cache_destroy(bslab->slab); |
49d1ec85 | 152 | kfree(bslab); |
bb799ca0 JA |
153 | |
154 | out: | |
155 | mutex_unlock(&bio_slab_lock); | |
156 | } | |
157 | ||
7a800a20 | 158 | void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs) |
7ba1ba12 | 159 | { |
9e8c0d0d | 160 | BUG_ON(nr_vecs > BIO_MAX_VECS); |
ed996a52 | 161 | |
a8affc03 | 162 | if (nr_vecs == BIO_MAX_VECS) |
9f060e22 | 163 | mempool_free(bv, pool); |
7a800a20 CH |
164 | else if (nr_vecs > BIO_INLINE_VECS) |
165 | kmem_cache_free(biovec_slab(nr_vecs)->slab, bv); | |
bb799ca0 | 166 | } |
bb799ca0 | 167 | |
f2c3eb9b CH |
168 | /* |
169 | * Make the first allocation restricted and don't dump info on allocation | |
170 | * failures, since we'll fall back to the mempool in case of failure. | |
171 | */ | |
172 | static inline gfp_t bvec_alloc_gfp(gfp_t gfp) | |
173 | { | |
174 | return (gfp & ~(__GFP_DIRECT_RECLAIM | __GFP_IO)) | | |
175 | __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN; | |
bb799ca0 JA |
176 | } |
177 | ||
7a800a20 CH |
178 | struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs, |
179 | gfp_t gfp_mask) | |
1da177e4 | 180 | { |
7a800a20 | 181 | struct biovec_slab *bvs = biovec_slab(*nr_vecs); |
1da177e4 | 182 | |
7a800a20 | 183 | if (WARN_ON_ONCE(!bvs)) |
7ff9345f | 184 | return NULL; |
7ff9345f JA |
185 | |
186 | /* | |
7a800a20 CH |
187 | * Upgrade the nr_vecs request to take full advantage of the allocation. |
188 | * We also rely on this in the bvec_free path. | |
7ff9345f | 189 | */ |
7a800a20 | 190 | *nr_vecs = bvs->nr_vecs; |
7ff9345f | 191 | |
7ff9345f | 192 | /* |
f007a3d6 CH |
193 | * Try a slab allocation first for all smaller allocations. If that |
194 | * fails and __GFP_DIRECT_RECLAIM is set retry with the mempool. | |
a8affc03 | 195 | * The mempool is sized to handle up to BIO_MAX_VECS entries. |
7ff9345f | 196 | */ |
a8affc03 | 197 | if (*nr_vecs < BIO_MAX_VECS) { |
f007a3d6 | 198 | struct bio_vec *bvl; |
1da177e4 | 199 | |
f2c3eb9b | 200 | bvl = kmem_cache_alloc(bvs->slab, bvec_alloc_gfp(gfp_mask)); |
7a800a20 | 201 | if (likely(bvl) || !(gfp_mask & __GFP_DIRECT_RECLAIM)) |
f007a3d6 | 202 | return bvl; |
a8affc03 | 203 | *nr_vecs = BIO_MAX_VECS; |
7ff9345f JA |
204 | } |
205 | ||
f007a3d6 | 206 | return mempool_alloc(pool, gfp_mask); |
1da177e4 LT |
207 | } |
208 | ||
9ae3b3f5 | 209 | void bio_uninit(struct bio *bio) |
1da177e4 | 210 | { |
db9819c7 CH |
211 | #ifdef CONFIG_BLK_CGROUP |
212 | if (bio->bi_blkg) { | |
213 | blkg_put(bio->bi_blkg); | |
214 | bio->bi_blkg = NULL; | |
215 | } | |
216 | #endif | |
ece841ab JT |
217 | if (bio_integrity(bio)) |
218 | bio_integrity_free(bio); | |
a892c8d5 ST |
219 | |
220 | bio_crypt_free_ctx(bio); | |
4254bba1 | 221 | } |
9ae3b3f5 | 222 | EXPORT_SYMBOL(bio_uninit); |
7ba1ba12 | 223 | |
4254bba1 KO |
224 | static void bio_free(struct bio *bio) |
225 | { | |
226 | struct bio_set *bs = bio->bi_pool; | |
227 | void *p; | |
228 | ||
9ae3b3f5 | 229 | bio_uninit(bio); |
4254bba1 KO |
230 | |
231 | if (bs) { | |
7a800a20 | 232 | bvec_free(&bs->bvec_pool, bio->bi_io_vec, bio->bi_max_vecs); |
4254bba1 KO |
233 | |
234 | /* | |
235 | * If we have front padding, adjust the bio pointer before freeing | |
236 | */ | |
237 | p = bio; | |
bb799ca0 JA |
238 | p -= bs->front_pad; |
239 | ||
8aa6ba2f | 240 | mempool_free(p, &bs->bio_pool); |
4254bba1 KO |
241 | } else { |
242 | /* Bio was allocated by bio_kmalloc() */ | |
243 | kfree(bio); | |
244 | } | |
3676347a PO |
245 | } |
246 | ||
9ae3b3f5 JA | 247 | /* |
248 | * Users of this function have their own bio allocation. Subsequently, | |
249 | * they must remember to pair any call to bio_init() with bio_uninit() | |
250 | * when IO has completed, or when the bio is released. | |
251 | */ | |
3a83f467 ML |
252 | void bio_init(struct bio *bio, struct bio_vec *table, |
253 | unsigned short max_vecs) | |
1da177e4 | 254 | { |
da521626 JA |
255 | bio->bi_next = NULL; |
256 | bio->bi_bdev = NULL; | |
257 | bio->bi_opf = 0; | |
258 | bio->bi_flags = 0; | |
259 | bio->bi_ioprio = 0; | |
260 | bio->bi_write_hint = 0; | |
261 | bio->bi_status = 0; | |
262 | bio->bi_iter.bi_sector = 0; | |
263 | bio->bi_iter.bi_size = 0; | |
264 | bio->bi_iter.bi_idx = 0; | |
265 | bio->bi_iter.bi_bvec_done = 0; | |
266 | bio->bi_end_io = NULL; | |
267 | bio->bi_private = NULL; | |
268 | #ifdef CONFIG_BLK_CGROUP | |
269 | bio->bi_blkg = NULL; | |
270 | bio->bi_issue.value = 0; | |
271 | #ifdef CONFIG_BLK_CGROUP_IOCOST | |
272 | bio->bi_iocost_cost = 0; | |
273 | #endif | |
274 | #endif | |
275 | #ifdef CONFIG_BLK_INLINE_ENCRYPTION | |
276 | bio->bi_crypt_context = NULL; | |
277 | #endif | |
278 | #ifdef CONFIG_BLK_DEV_INTEGRITY | |
279 | bio->bi_integrity = NULL; | |
280 | #endif | |
281 | bio->bi_vcnt = 0; | |
282 | ||
c4cf5261 | 283 | atomic_set(&bio->__bi_remaining, 1); |
dac56212 | 284 | atomic_set(&bio->__bi_cnt, 1); |
3e08773c | 285 | bio->bi_cookie = BLK_QC_T_NONE; |
3a83f467 | 286 | |
3a83f467 | 287 | bio->bi_max_vecs = max_vecs; |
da521626 JA |
288 | bio->bi_io_vec = table; |
289 | bio->bi_pool = NULL; | |
1da177e4 | 290 | } |
a112a71d | 291 | EXPORT_SYMBOL(bio_init); |
1da177e4 | 292 | |
f44b48c7 KO | 293 | /** |
294 | * bio_reset - reinitialize a bio | |
295 | * @bio: bio to reset | |
296 | * | |
297 | * Description: | |
298 | * After calling bio_reset(), @bio will be in the same state as a freshly | |
299 | * allocated bio returned by bio_alloc_bioset() - the only fields that are |
300 | * preserved are the ones that are initialized by bio_alloc_bioset(). See | |
301 | * comment in struct bio. | |
302 | */ | |
303 | void bio_reset(struct bio *bio) | |
304 | { | |
9ae3b3f5 | 305 | bio_uninit(bio); |
f44b48c7 | 306 | memset(bio, 0, BIO_RESET_BYTES); |
c4cf5261 | 307 | atomic_set(&bio->__bi_remaining, 1); |
f44b48c7 KO |
308 | } |
309 | EXPORT_SYMBOL(bio_reset); | |
310 | ||
38f8baae | 311 | static struct bio *__bio_chain_endio(struct bio *bio) |
196d38bc | 312 | { |
4246a0b6 CH |
313 | struct bio *parent = bio->bi_private; |
314 | ||
3edf5346 | 315 | if (bio->bi_status && !parent->bi_status) |
4e4cbee9 | 316 | parent->bi_status = bio->bi_status; |
196d38bc | 317 | bio_put(bio); |
38f8baae CH |
318 | return parent; |
319 | } | |
320 | ||
321 | static void bio_chain_endio(struct bio *bio) | |
322 | { | |
323 | bio_endio(__bio_chain_endio(bio)); | |
196d38bc KO |
324 | } |
325 | ||
326 | /** | |
327 | * bio_chain - chain bio completions | |
1051a902 | 328 | * @bio: the target bio |
5b874af6 | 329 | * @parent: the parent bio of @bio |
196d38bc KO | 330 | * |
331 | * The caller won't have a bi_end_io called when @bio completes - instead, | |
332 | * @parent's bi_end_io won't be called until both @parent and @bio have | |
333 | * completed; the chained bio will also be freed when it completes. | |
334 | * | |
335 | * The caller must not set bi_private or bi_end_io in @bio. | |
336 | */ | |
337 | void bio_chain(struct bio *bio, struct bio *parent) | |
338 | { | |
339 | BUG_ON(bio->bi_private || bio->bi_end_io); | |
340 | ||
341 | bio->bi_private = parent; | |
342 | bio->bi_end_io = bio_chain_endio; | |
c4cf5261 | 343 | bio_inc_remaining(parent); |
196d38bc KO |
344 | } |
345 | EXPORT_SYMBOL(bio_chain); | |
346 | ||
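For context, a minimal sketch (not part of bio.c) of the split-and-chain pattern that bio_chain() enables. The `my_split_and_submit()` name and the 1 MiB split size are hypothetical; each split is submitted before the next allocation, as the bio_alloc_bioset() documentation below requires.

```c
/* Illustrative sketch: submit a large bio in chunks, chaining each split
 * to the original so the parent only completes once every piece has.
 */
static void my_split_and_submit(struct bio *bio)
{
	while (bio_sectors(bio) > 2048) {		/* hypothetical 1 MiB limit */
		struct bio *split = bio_split(bio, 2048, GFP_NOIO, &fs_bio_set);

		bio_chain(split, bio);			/* parent completes last */
		submit_bio_noacct(split);
	}
	submit_bio_noacct(bio);
}
```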
df2cb6da KO |
347 | static void bio_alloc_rescue(struct work_struct *work) |
348 | { | |
349 | struct bio_set *bs = container_of(work, struct bio_set, rescue_work); | |
350 | struct bio *bio; | |
351 | ||
352 | while (1) { | |
353 | spin_lock(&bs->rescue_lock); | |
354 | bio = bio_list_pop(&bs->rescue_list); | |
355 | spin_unlock(&bs->rescue_lock); | |
356 | ||
357 | if (!bio) | |
358 | break; | |
359 | ||
ed00aabd | 360 | submit_bio_noacct(bio); |
df2cb6da KO |
361 | } |
362 | } | |
363 | ||
364 | static void punt_bios_to_rescuer(struct bio_set *bs) | |
365 | { | |
366 | struct bio_list punt, nopunt; | |
367 | struct bio *bio; | |
368 | ||
47e0fb46 N |
369 | if (WARN_ON_ONCE(!bs->rescue_workqueue)) |
370 | return; | |
df2cb6da KO |
371 | /* |
372 | * In order to guarantee forward progress we must punt only bios that | |
373 | * were allocated from this bio_set; otherwise, if there was a bio on | |
374 | * there for a stacking driver higher up in the stack, processing it | |
375 | * could require allocating bios from this bio_set, and doing that from | |
376 | * our own rescuer would be bad. | |
377 | * | |
378 | * Since bio lists are singly linked, pop them all instead of trying to | |
379 | * remove from the middle of the list: | |
380 | */ | |
381 | ||
382 | bio_list_init(&punt); | |
383 | bio_list_init(&nopunt); | |
384 | ||
f5fe1b51 | 385 | while ((bio = bio_list_pop(¤t->bio_list[0]))) |
df2cb6da | 386 | bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); |
f5fe1b51 | 387 | current->bio_list[0] = nopunt; |
df2cb6da | 388 | |
f5fe1b51 N |
389 | bio_list_init(&nopunt); |
390 | while ((bio = bio_list_pop(¤t->bio_list[1]))) | |
391 | bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio); | |
392 | current->bio_list[1] = nopunt; | |
df2cb6da KO |
393 | |
394 | spin_lock(&bs->rescue_lock); | |
395 | bio_list_merge(&bs->rescue_list, &punt); | |
396 | spin_unlock(&bs->rescue_lock); | |
397 | ||
398 | queue_work(bs->rescue_workqueue, &bs->rescue_work); | |
399 | } | |
400 | ||
1da177e4 LT | 401 | /** |
402 | * bio_alloc_bioset - allocate a bio for I/O | |
519c8e9f | 403 | * @gfp_mask: the GFP_* mask given to the slab allocator |
1da177e4 | 404 | * @nr_iovecs: number of iovecs to pre-allocate |
db18efac | 405 | * @bs: the bio_set to allocate from. |
1da177e4 | 406 | * |
3175199a | 407 | * Allocate a bio from the mempools in @bs. |
3f86a82a | 408 | * |
3175199a CH | 409 | * If %__GFP_DIRECT_RECLAIM is set then bio_alloc will always be able to |
410 | * allocate a bio. This is due to the mempool guarantees. To make this work, | |
411 | * callers must never allocate more than 1 bio at a time from the general pool. | |
412 | * Callers that need to allocate more than 1 bio must always submit the | |
413 | * previously allocated bio for IO before attempting to allocate a new one. | |
414 | * Failure to do so can cause deadlocks under memory pressure. | |
3f86a82a | 415 | * |
3175199a CH | 416 | * Note that when running under submit_bio_noacct() (i.e. any block driver), |
417 | * bios are not submitted until after you return - see the code in | |
418 | * submit_bio_noacct() that converts recursion into iteration, to prevent | |
419 | * stack overflows. | |
df2cb6da | 420 | * |
3175199a CH | 421 | * This would normally mean allocating multiple bios under submit_bio_noacct() |
422 | * would be susceptible to deadlocks, but we have | |
423 | * deadlock avoidance code that resubmits any blocked bios from a rescuer | |
424 | * thread. | |
df2cb6da | 425 | * |
3175199a CH | 426 | * However, we do not guarantee forward progress for allocations from other |
427 | * mempools. Doing multiple allocations from the same mempool under | |
428 | * submit_bio_noacct() should be avoided - instead, use bio_set's front_pad | |
429 | * for per bio allocations. | |
df2cb6da | 430 | * |
3175199a | 431 | * Returns: Pointer to new bio on success, NULL on failure. |
3f86a82a | 432 | */ |
0f2e6ab8 | 433 | struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned short nr_iovecs, |
7a88fa19 | 434 | struct bio_set *bs) |
1da177e4 | 435 | { |
df2cb6da | 436 | gfp_t saved_gfp = gfp_mask; |
451a9ebf TH |
437 | struct bio *bio; |
438 | void *p; | |
439 | ||
3175199a CH |
440 | /* should not use nobvec bioset for nr_iovecs > 0 */ |
441 | if (WARN_ON_ONCE(!mempool_initialized(&bs->bvec_pool) && nr_iovecs > 0)) | |
442 | return NULL; | |
df2cb6da | 443 | |
3175199a CH |
444 | /* |
445 | * submit_bio_noacct() converts recursion to iteration; this means if | |
446 | * we're running beneath it, any bios we allocate and submit will not be | |
447 | * submitted (and thus freed) until after we return. | |
448 | * | |
449 | * This exposes us to a potential deadlock if we allocate multiple bios | |
450 | * from the same bio_set() while running underneath submit_bio_noacct(). | |
451 | * If we were to allocate multiple bios (say a stacking block driver | |
452 | * that was splitting bios), we would deadlock if we exhausted the | |
453 | * mempool's reserve. | |
454 | * | |
455 | * We solve this, and guarantee forward progress, with a rescuer | |
456 | * workqueue per bio_set. If we go to allocate and there are bios on | |
457 | * current->bio_list, we first try the allocation without | |
458 | * __GFP_DIRECT_RECLAIM; if that fails, we punt those bios we would be | |
459 | * blocking to the rescuer workqueue before we retry with the original | |
460 | * gfp_flags. | |
461 | */ | |
462 | if (current->bio_list && | |
463 | (!bio_list_empty(¤t->bio_list[0]) || | |
464 | !bio_list_empty(¤t->bio_list[1])) && | |
465 | bs->rescue_workqueue) | |
466 | gfp_mask &= ~__GFP_DIRECT_RECLAIM; | |
467 | ||
468 | p = mempool_alloc(&bs->bio_pool, gfp_mask); | |
469 | if (!p && gfp_mask != saved_gfp) { | |
470 | punt_bios_to_rescuer(bs); | |
471 | gfp_mask = saved_gfp; | |
8aa6ba2f | 472 | p = mempool_alloc(&bs->bio_pool, gfp_mask); |
3f86a82a | 473 | } |
451a9ebf TH |
474 | if (unlikely(!p)) |
475 | return NULL; | |
1da177e4 | 476 | |
3175199a CH |
477 | bio = p + bs->front_pad; |
478 | if (nr_iovecs > BIO_INLINE_VECS) { | |
3175199a | 479 | struct bio_vec *bvl = NULL; |
34053979 | 480 | |
7a800a20 | 481 | bvl = bvec_alloc(&bs->bvec_pool, &nr_iovecs, gfp_mask); |
df2cb6da KO |
482 | if (!bvl && gfp_mask != saved_gfp) { |
483 | punt_bios_to_rescuer(bs); | |
484 | gfp_mask = saved_gfp; | |
7a800a20 | 485 | bvl = bvec_alloc(&bs->bvec_pool, &nr_iovecs, gfp_mask); |
df2cb6da | 486 | } |
34053979 IM |
487 | if (unlikely(!bvl)) |
488 | goto err_free; | |
a38352e0 | 489 | |
7a800a20 | 490 | bio_init(bio, bvl, nr_iovecs); |
3f86a82a | 491 | } else if (nr_iovecs) { |
3175199a CH |
492 | bio_init(bio, bio->bi_inline_vecs, BIO_INLINE_VECS); |
493 | } else { | |
494 | bio_init(bio, NULL, 0); | |
1da177e4 | 495 | } |
3f86a82a KO |
496 | |
497 | bio->bi_pool = bs; | |
1da177e4 | 498 | return bio; |
34053979 IM |
499 | |
500 | err_free: | |
8aa6ba2f | 501 | mempool_free(p, &bs->bio_pool); |
34053979 | 502 | return NULL; |
1da177e4 | 503 | } |
a112a71d | 504 | EXPORT_SYMBOL(bio_alloc_bioset); |
1da177e4 | 505 | |
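As a hedged illustration of the front_pad advice above (not part of bio.c): a driver-private bio_set can embed per-bio state in front of the bio itself instead of doing nested mempool allocations. `struct my_per_bio_data`, `my_bio_set`, and `my_alloc_bio()` are hypothetical, and `bioset_init()` is assumed to have been called during driver initialization.

```c
struct my_per_bio_data {
	void *my_state;			/* hypothetical per-bio driver state */
	struct bio bio;			/* must be last: inline bvecs follow */
};

static struct bio_set my_bio_set;	/* assumed set up elsewhere with:
					 * bioset_init(&my_bio_set, 64,
					 *	offsetof(struct my_per_bio_data, bio),
					 *	BIOSET_NEED_BVECS);
					 */

static struct bio *my_alloc_bio(unsigned short nr_vecs)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, &my_bio_set);
	struct my_per_bio_data *data;

	if (!bio)
		return NULL;
	data = container_of(bio, struct my_per_bio_data, bio);
	data->my_state = NULL;
	return bio;
}
```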
3175199a CH | 506 | /** |
507 | * bio_kmalloc - kmalloc a bio for I/O | |
508 | * @gfp_mask: the GFP_* mask given to the slab allocator | |
509 | * @nr_iovecs: number of iovecs to pre-allocate | |
510 | * | |
511 | * Use kmalloc to allocate and initialize a bio. | |
512 | * | |
513 | * Returns: Pointer to new bio on success, NULL on failure. | |
514 | */ | |
0f2e6ab8 | 515 | struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs) |
3175199a CH |
516 | { |
517 | struct bio *bio; | |
518 | ||
519 | if (nr_iovecs > UIO_MAXIOV) | |
520 | return NULL; | |
521 | ||
522 | bio = kmalloc(struct_size(bio, bi_inline_vecs, nr_iovecs), gfp_mask); | |
523 | if (unlikely(!bio)) | |
524 | return NULL; | |
525 | bio_init(bio, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs); | |
526 | bio->bi_pool = NULL; | |
527 | return bio; | |
528 | } | |
529 | EXPORT_SYMBOL(bio_kmalloc); | |
530 | ||
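A hedged sketch of the bio_kmalloc() path (not part of bio.c): a one-off bio with no bio_set behind it, which bio_put() later frees through kfree() as bio_free() above shows. `my_read_page_sync()` and its arguments are hypothetical.

```c
static int my_read_page_sync(struct block_device *bdev, sector_t sector,
			     struct page *page)
{
	struct bio *bio = bio_kmalloc(GFP_KERNEL, 1);
	int ret;

	if (!bio)
		return -ENOMEM;
	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_READ;
	bio->bi_iter.bi_sector = sector;
	__bio_add_page(bio, page, PAGE_SIZE, 0);

	ret = submit_bio_wait(bio);		/* synchronous for simplicity */
	bio_put(bio);				/* bi_pool is NULL -> kfree() */
	return ret;
}
```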
6f822e1b | 531 | void zero_fill_bio(struct bio *bio) |
1da177e4 | 532 | { |
7988613b KO |
533 | struct bio_vec bv; |
534 | struct bvec_iter iter; | |
1da177e4 | 535 | |
ab6c340e CH |
536 | bio_for_each_segment(bv, bio, iter) |
537 | memzero_bvec(&bv); | |
1da177e4 | 538 | } |
6f822e1b | 539 | EXPORT_SYMBOL(zero_fill_bio); |
1da177e4 | 540 | |
83c9c547 ML | 541 | /** |
542 | * bio_truncate - truncate the bio down to @new_size | |
543 | * @bio: the bio to be truncated | |
544 | * @new_size: new size for truncating the bio | |
545 | * | |
546 | * Description: | |
547 | * Truncate the bio to new size of @new_size. If bio_op(bio) is | |
548 | * REQ_OP_READ, zero the truncated part. This function should only | |
549 | * be used for handling corner cases, such as bio eod. | |
550 | */ | |
4f7ab09a | 551 | static void bio_truncate(struct bio *bio, unsigned new_size) |
85a8ce62 ML |
552 | { |
553 | struct bio_vec bv; | |
554 | struct bvec_iter iter; | |
555 | unsigned int done = 0; | |
556 | bool truncated = false; | |
557 | ||
558 | if (new_size >= bio->bi_iter.bi_size) | |
559 | return; | |
560 | ||
83c9c547 | 561 | if (bio_op(bio) != REQ_OP_READ) |
85a8ce62 ML |
562 | goto exit; |
563 | ||
564 | bio_for_each_segment(bv, bio, iter) { | |
565 | if (done + bv.bv_len > new_size) { | |
566 | unsigned offset; | |
567 | ||
568 | if (!truncated) | |
569 | offset = new_size - done; | |
570 | else | |
571 | offset = 0; | |
572 | zero_user(bv.bv_page, offset, bv.bv_len - offset); | |
573 | truncated = true; | |
574 | } | |
575 | done += bv.bv_len; | |
576 | } | |
577 | ||
578 | exit: | |
579 | /* | |
580 | * Don't touch bvec table here and make it really immutable, since | |
581 | * fs bio user has to retrieve all pages via bio_for_each_segment_all | |
582 | * in its .end_bio() callback. | |
583 | * | |
584 | * It is enough to truncate bio by updating .bi_size since we can make | |
585 | * correct bvec with the updated .bi_size for drivers. | |
586 | */ | |
587 | bio->bi_iter.bi_size = new_size; | |
588 | } | |
589 | ||
29125ed6 CH | 590 | /** |
591 | * guard_bio_eod - truncate a BIO to fit the block device | |
592 | * @bio: bio to truncate | |
593 | * | |
594 | * This allows us to do IO even on the odd last sectors of a device, even if the | |
595 | * block size is some multiple of the physical sector size. | |
596 | * | |
597 | * We'll just truncate the bio to the size of the device, and clear the end of | |
598 | * the buffer head manually. Truly out-of-range accesses will turn into actual | |
599 | * I/O errors; this only handles the "we need to be able to do I/O at the final | |
600 | * sector" case. | |
601 | */ | |
602 | void guard_bio_eod(struct bio *bio) | |
603 | { | |
309dca30 | 604 | sector_t maxsector = bdev_nr_sectors(bio->bi_bdev); |
29125ed6 CH |
605 | |
606 | if (!maxsector) | |
607 | return; | |
608 | ||
609 | /* | |
610 | * If the *whole* IO is past the end of the device, | |
611 | * let it through, and the IO layer will turn it into | |
612 | * an EIO. | |
613 | */ | |
614 | if (unlikely(bio->bi_iter.bi_sector >= maxsector)) | |
615 | return; | |
616 | ||
617 | maxsector -= bio->bi_iter.bi_sector; | |
618 | if (likely((bio->bi_iter.bi_size >> 9) <= maxsector)) | |
619 | return; | |
620 | ||
621 | bio_truncate(bio, maxsector << 9); | |
622 | } | |
623 | ||
be4d234d JA | 624 | #define ALLOC_CACHE_MAX 512 |
625 | #define ALLOC_CACHE_SLACK 64 | |
626 | ||
627 | static void bio_alloc_cache_prune(struct bio_alloc_cache *cache, | |
628 | unsigned int nr) | |
629 | { | |
630 | unsigned int i = 0; | |
631 | struct bio *bio; | |
632 | ||
633 | while ((bio = bio_list_pop(&cache->free_list)) != NULL) { | |
634 | cache->nr--; | |
635 | bio_free(bio); | |
636 | if (++i == nr) | |
637 | break; | |
638 | } | |
639 | } | |
640 | ||
641 | static int bio_cpu_dead(unsigned int cpu, struct hlist_node *node) | |
642 | { | |
643 | struct bio_set *bs; | |
644 | ||
645 | bs = hlist_entry_safe(node, struct bio_set, cpuhp_dead); | |
646 | if (bs->cache) { | |
647 | struct bio_alloc_cache *cache = per_cpu_ptr(bs->cache, cpu); | |
648 | ||
649 | bio_alloc_cache_prune(cache, -1U); | |
650 | } | |
651 | return 0; | |
652 | } | |
653 | ||
654 | static void bio_alloc_cache_destroy(struct bio_set *bs) | |
655 | { | |
656 | int cpu; | |
657 | ||
658 | if (!bs->cache) | |
659 | return; | |
660 | ||
661 | cpuhp_state_remove_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead); | |
662 | for_each_possible_cpu(cpu) { | |
663 | struct bio_alloc_cache *cache; | |
664 | ||
665 | cache = per_cpu_ptr(bs->cache, cpu); | |
666 | bio_alloc_cache_prune(cache, -1U); | |
667 | } | |
668 | free_percpu(bs->cache); | |
669 | } | |
670 | ||
1da177e4 LT | 671 | /** |
672 | * bio_put - release a reference to a bio | |
673 | * @bio: bio to release reference to | |
674 | * | |
675 | * Description: | |
676 | * Put a reference to a &struct bio, either one you have gotten with | |
9b10f6a9 | 677 | * bio_alloc, bio_get or bio_clone_*. The last put of a bio will free it. |
1da177e4 LT | 678 | **/ |
679 | void bio_put(struct bio *bio) | |
680 | { | |
be4d234d | 681 | if (unlikely(bio_flagged(bio, BIO_REFFED))) { |
9e8c0d0d | 682 | BUG_ON(!atomic_read(&bio->__bi_cnt)); |
be4d234d JA |
683 | if (!atomic_dec_and_test(&bio->__bi_cnt)) |
684 | return; | |
685 | } | |
dac56212 | 686 | |
be4d234d JA |
687 | if (bio_flagged(bio, BIO_PERCPU_CACHE)) { |
688 | struct bio_alloc_cache *cache; | |
689 | ||
690 | bio_uninit(bio); | |
691 | cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu()); | |
692 | bio_list_add_head(&cache->free_list, bio); | |
693 | if (++cache->nr > ALLOC_CACHE_MAX + ALLOC_CACHE_SLACK) | |
694 | bio_alloc_cache_prune(cache, ALLOC_CACHE_SLACK); | |
695 | put_cpu(); | |
696 | } else { | |
697 | bio_free(bio); | |
dac56212 | 698 | } |
1da177e4 | 699 | } |
a112a71d | 700 | EXPORT_SYMBOL(bio_put); |
1da177e4 | 701 | |
59d276fe KO | 702 | /** |
703 | * __bio_clone_fast - clone a bio that shares the original bio's biovec | |
704 | * @bio: destination bio | |
705 | * @bio_src: bio to clone | |
706 | * | |
707 | * Clone a &bio. Caller will own the returned bio, but not | |
708 | * the actual data it points to. Reference count of returned | |
709 | * bio will be one. | |
710 | * | |
711 | * Caller must ensure that @bio_src is not freed before @bio. | |
712 | */ | |
713 | void __bio_clone_fast(struct bio *bio, struct bio *bio_src) | |
714 | { | |
7a800a20 | 715 | WARN_ON_ONCE(bio->bi_pool && bio->bi_max_vecs); |
59d276fe KO |
716 | |
717 | /* | |
309dca30 | 718 | * most users will be overriding ->bi_bdev with a new target, |
59d276fe KO |
719 | * so we don't set nor calculate new physical/hw segment counts here |
720 | */ | |
309dca30 | 721 | bio->bi_bdev = bio_src->bi_bdev; |
b7c44ed9 | 722 | bio_set_flag(bio, BIO_CLONED); |
111be883 SL |
723 | if (bio_flagged(bio_src, BIO_THROTTLED)) |
724 | bio_set_flag(bio, BIO_THROTTLED); | |
46bbf653 CH |
725 | if (bio_flagged(bio_src, BIO_REMAPPED)) |
726 | bio_set_flag(bio, BIO_REMAPPED); | |
1eff9d32 | 727 | bio->bi_opf = bio_src->bi_opf; |
ca474b73 | 728 | bio->bi_ioprio = bio_src->bi_ioprio; |
cb6934f8 | 729 | bio->bi_write_hint = bio_src->bi_write_hint; |
59d276fe KO |
730 | bio->bi_iter = bio_src->bi_iter; |
731 | bio->bi_io_vec = bio_src->bi_io_vec; | |
20bd723e | 732 | |
db6638d7 | 733 | bio_clone_blkg_association(bio, bio_src); |
e439bedf | 734 | blkcg_bio_issue_init(bio); |
59d276fe KO |
735 | } |
736 | EXPORT_SYMBOL(__bio_clone_fast); | |
737 | ||
738 | /** | |
739 | * bio_clone_fast - clone a bio that shares the original bio's biovec | |
740 | * @bio: bio to clone | |
741 | * @gfp_mask: allocation priority | |
742 | * @bs: bio_set to allocate from | |
743 | * | |
744 | * Like __bio_clone_fast, only also allocates the returned bio | |
745 | */ | |
746 | struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs) | |
747 | { | |
748 | struct bio *b; | |
749 | ||
750 | b = bio_alloc_bioset(gfp_mask, 0, bs); | |
751 | if (!b) | |
752 | return NULL; | |
753 | ||
754 | __bio_clone_fast(b, bio); | |
755 | ||
07560151 EB |
756 | if (bio_crypt_clone(b, bio, gfp_mask) < 0) |
757 | goto err_put; | |
a892c8d5 | 758 | |
07560151 EB |
759 | if (bio_integrity(bio) && |
760 | bio_integrity_clone(b, bio, gfp_mask) < 0) | |
761 | goto err_put; | |
59d276fe KO |
762 | |
763 | return b; | |
07560151 EB |
764 | |
765 | err_put: | |
766 | bio_put(b); | |
767 | return NULL; | |
59d276fe KO |
768 | } |
769 | EXPORT_SYMBOL(bio_clone_fast); | |
770 | ||
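A hedged sketch of how a stacking driver might use bio_clone_fast() (not part of bio.c): the clone shares the original's biovec, gets remapped to a lower device, and completes the original from its end_io. `my_lower_bdev`, `my_offset`, and both function names are hypothetical.

```c
static void my_clone_endio(struct bio *clone)
{
	struct bio *orig = clone->bi_private;

	orig->bi_status = clone->bi_status;	/* propagate the result */
	bio_put(clone);
	bio_endio(orig);
}

static void my_remap_submit(struct bio *orig, struct block_device *my_lower_bdev,
			    sector_t my_offset)
{
	struct bio *clone = bio_clone_fast(orig, GFP_NOIO, &fs_bio_set);

	if (!clone) {
		bio_io_error(orig);
		return;
	}
	bio_set_dev(clone, my_lower_bdev);
	clone->bi_iter.bi_sector += my_offset;
	clone->bi_private = orig;
	clone->bi_end_io = my_clone_endio;
	submit_bio_noacct(clone);
}
```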
5cbd28e3 CH |
771 | const char *bio_devname(struct bio *bio, char *buf) |
772 | { | |
309dca30 | 773 | return bdevname(bio->bi_bdev, buf); |
5cbd28e3 CH |
774 | } |
775 | EXPORT_SYMBOL(bio_devname); | |
776 | ||
9a6083be CH | 777 | /** |
778 | * bio_full - check if the bio is full | |
779 | * @bio: bio to check | |
780 | * @len: length of one segment to be added | |
781 | * | |
782 | * Return true if @bio is full and one segment with @len bytes can't be | |
783 | * added to the bio, otherwise return false | |
784 | */ | |
785 | static inline bool bio_full(struct bio *bio, unsigned len) | |
786 | { | |
787 | if (bio->bi_vcnt >= bio->bi_max_vecs) | |
788 | return true; | |
789 | if (bio->bi_iter.bi_size > UINT_MAX - len) | |
790 | return true; | |
791 | return false; | |
792 | } | |
793 | ||
5919482e ML |
794 | static inline bool page_is_mergeable(const struct bio_vec *bv, |
795 | struct page *page, unsigned int len, unsigned int off, | |
ff896738 | 796 | bool *same_page) |
5919482e | 797 | { |
d8166519 MWO |
798 | size_t bv_end = bv->bv_offset + bv->bv_len; |
799 | phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1; | |
5919482e ML |
800 | phys_addr_t page_addr = page_to_phys(page); |
801 | ||
802 | if (vec_end_addr + 1 != page_addr + off) | |
803 | return false; | |
804 | if (xen_domain() && !xen_biovec_phys_mergeable(bv, page)) | |
805 | return false; | |
52d52d1c | 806 | |
ff896738 | 807 | *same_page = ((vec_end_addr & PAGE_MASK) == page_addr); |
d8166519 MWO |
808 | if (*same_page) |
809 | return true; | |
810 | return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE); | |
5919482e ML |
811 | } |
812 | ||
9774b391 CH |
813 | /** |
814 | * __bio_try_merge_page - try appending data to an existing bvec. | |
815 | * @bio: destination bio | |
816 | * @page: start page to add | |
817 | * @len: length of the data to add | |
818 | * @off: offset of the data relative to @page | |
819 | * @same_page: return if the segment has been merged inside the same page | |
820 | * | |
821 | * Try to add the data at @page + @off to the last bvec of @bio. This is a | |
822 | * useful optimisation for file systems with a block size smaller than the | |
823 | * page size. | |
824 | * | |
825 | * Warn if (@len, @off) crosses pages in case that @same_page is true. | |
826 | * | |
827 | * Return %true on success or %false on failure. | |
828 | */ | |
829 | static bool __bio_try_merge_page(struct bio *bio, struct page *page, | |
830 | unsigned int len, unsigned int off, bool *same_page) | |
831 | { | |
832 | if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) | |
833 | return false; | |
834 | ||
835 | if (bio->bi_vcnt > 0) { | |
836 | struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1]; | |
837 | ||
838 | if (page_is_mergeable(bv, page, len, off, same_page)) { | |
839 | if (bio->bi_iter.bi_size > UINT_MAX - len) { | |
840 | *same_page = false; | |
841 | return false; | |
842 | } | |
843 | bv->bv_len += len; | |
844 | bio->bi_iter.bi_size += len; | |
845 | return true; | |
846 | } | |
847 | } | |
848 | return false; | |
849 | } | |
850 | ||
e4581105 CH | 851 | /* |
852 | * Try to merge a page into a segment, while obeying the hardware segment | |
853 | * size limit. This is not for normal read/write bios, but for passthrough | |
854 | * or Zone Append operations that we can't split. | |
855 | */ | |
856 | static bool bio_try_merge_hw_seg(struct request_queue *q, struct bio *bio, | |
857 | struct page *page, unsigned len, | |
858 | unsigned offset, bool *same_page) | |
489fbbcb | 859 | { |
384209cd | 860 | struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1]; |
489fbbcb ML |
861 | unsigned long mask = queue_segment_boundary(q); |
862 | phys_addr_t addr1 = page_to_phys(bv->bv_page) + bv->bv_offset; | |
863 | phys_addr_t addr2 = page_to_phys(page) + offset + len - 1; | |
864 | ||
865 | if ((addr1 | mask) != (addr2 | mask)) | |
866 | return false; | |
489fbbcb ML |
867 | if (bv->bv_len + len > queue_max_segment_size(q)) |
868 | return false; | |
384209cd | 869 | return __bio_try_merge_page(bio, page, len, offset, same_page); |
489fbbcb ML |
870 | } |
871 | ||
1da177e4 | 872 | /** |
e4581105 CH | 873 | * bio_add_hw_page - attempt to add a page to a bio with hw constraints |
874 | * @q: the target queue | |
875 | * @bio: destination bio | |
876 | * @page: page to add | |
877 | * @len: vec entry length | |
878 | * @offset: vec entry offset | |
879 | * @max_sectors: maximum number of sectors that can be added | |
880 | * @same_page: return if the segment has been merged inside the same page | |
c66a14d0 | 881 | * |
e4581105 CH | 882 | * Add a page to a bio while respecting the hardware max_sectors, max_segment |
883 | * and gap limitations. | |
1da177e4 | 884 | */ |
e4581105 | 885 | int bio_add_hw_page(struct request_queue *q, struct bio *bio, |
19047087 | 886 | struct page *page, unsigned int len, unsigned int offset, |
e4581105 | 887 | unsigned int max_sectors, bool *same_page) |
1da177e4 | 888 | { |
1da177e4 LT |
889 | struct bio_vec *bvec; |
890 | ||
e4581105 | 891 | if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED))) |
1da177e4 LT |
892 | return 0; |
893 | ||
e4581105 | 894 | if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors) |
1da177e4 LT |
895 | return 0; |
896 | ||
80cfd548 | 897 | if (bio->bi_vcnt > 0) { |
e4581105 | 898 | if (bio_try_merge_hw_seg(q, bio, page, len, offset, same_page)) |
384209cd | 899 | return len; |
320ea869 CH |
900 | |
901 | /* | |
902 | * If the queue doesn't support SG gaps and adding this segment | |
903 | * would create a gap, disallow it. | |
904 | */ | |
384209cd | 905 | bvec = &bio->bi_io_vec[bio->bi_vcnt - 1]; |
320ea869 CH |
906 | if (bvec_gap_to_prev(q, bvec, offset)) |
907 | return 0; | |
80cfd548 JA |
908 | } |
909 | ||
79d08f89 | 910 | if (bio_full(bio, len)) |
1da177e4 LT |
911 | return 0; |
912 | ||
14ccb66b | 913 | if (bio->bi_vcnt >= queue_max_segments(q)) |
489fbbcb ML |
914 | return 0; |
915 | ||
fcbf6a08 ML |
916 | bvec = &bio->bi_io_vec[bio->bi_vcnt]; |
917 | bvec->bv_page = page; | |
918 | bvec->bv_len = len; | |
919 | bvec->bv_offset = offset; | |
920 | bio->bi_vcnt++; | |
dcdca753 | 921 | bio->bi_iter.bi_size += len; |
1da177e4 LT |
922 | return len; |
923 | } | |
19047087 | 924 | |
e4581105 CH | 925 | /** |
926 | * bio_add_pc_page - attempt to add page to passthrough bio | |
927 | * @q: the target queue | |
928 | * @bio: destination bio | |
929 | * @page: page to add | |
930 | * @len: vec entry length | |
931 | * @offset: vec entry offset | |
932 | * | |
933 | * Attempt to add a page to the bio_vec maplist. This can fail for a | |
934 | * number of reasons, such as the bio being full or target block device | |
935 | * limitations. The target block device must allow bios up to PAGE_SIZE, | |
936 | * so it is always possible to add a single page to an empty bio. | |
937 | * | |
938 | * This should only be used by passthrough bios. | |
939 | */ | |
19047087 ML |
940 | int bio_add_pc_page(struct request_queue *q, struct bio *bio, |
941 | struct page *page, unsigned int len, unsigned int offset) | |
942 | { | |
d1916c86 | 943 | bool same_page = false; |
e4581105 CH |
944 | return bio_add_hw_page(q, bio, page, len, offset, |
945 | queue_max_hw_sectors(q), &same_page); | |
19047087 | 946 | } |
a112a71d | 947 | EXPORT_SYMBOL(bio_add_pc_page); |
6e68af66 | 948 | |
ae29333f JT | 949 | /** |
950 | * bio_add_zone_append_page - attempt to add page to zone-append bio | |
951 | * @bio: destination bio | |
952 | * @page: page to add | |
953 | * @len: vec entry length | |
954 | * @offset: vec entry offset | |
955 | * | |
956 | * Attempt to add a page to the bio_vec maplist of a bio that will be submitted | |
957 | * for a zone-append request. This can fail for a number of reasons, such as | |
958 | * the bio being full, the target block device not being a zoned block device, | |
959 | * or other limitations of the target block device. The target block device must | |
960 | * allow bios up to PAGE_SIZE, so it is always possible to add a single page | |
961 | * to an empty bio. | |
962 | * | |
963 | * Returns: number of bytes added to the bio, or 0 in case of a failure. | |
964 | */ | |
965 | int bio_add_zone_append_page(struct bio *bio, struct page *page, | |
966 | unsigned int len, unsigned int offset) | |
967 | { | |
3caee463 | 968 | struct request_queue *q = bdev_get_queue(bio->bi_bdev); |
ae29333f JT |
969 | bool same_page = false; |
970 | ||
971 | if (WARN_ON_ONCE(bio_op(bio) != REQ_OP_ZONE_APPEND)) | |
972 | return 0; | |
973 | ||
974 | if (WARN_ON_ONCE(!blk_queue_is_zoned(q))) | |
975 | return 0; | |
976 | ||
977 | return bio_add_hw_page(q, bio, page, len, offset, | |
978 | queue_max_zone_append_sectors(q), &same_page); | |
979 | } | |
980 | EXPORT_SYMBOL_GPL(bio_add_zone_append_page); | |
981 | ||
0aa69fd3 | 982 | /** |
551879a4 | 983 | * __bio_add_page - add page(s) to a bio in a new segment |
0aa69fd3 | 984 | * @bio: destination bio |
551879a4 ML | 985 | * @page: start page to add |
986 | * @len: length of the data to add, may cross pages | |
987 | * @off: offset of the data relative to @page, may cross pages | |
0aa69fd3 CH | 988 | * |
989 | * Add the data at @page + @off to @bio as a new bvec. The caller must ensure | |
990 | * that @bio has space for another bvec. | |
991 | */ | |
992 | void __bio_add_page(struct bio *bio, struct page *page, | |
993 | unsigned int len, unsigned int off) | |
994 | { | |
995 | struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt]; | |
c66a14d0 | 996 | |
0aa69fd3 | 997 | WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); |
79d08f89 | 998 | WARN_ON_ONCE(bio_full(bio, len)); |
0aa69fd3 CH |
999 | |
1000 | bv->bv_page = page; | |
1001 | bv->bv_offset = off; | |
1002 | bv->bv_len = len; | |
c66a14d0 | 1003 | |
c66a14d0 | 1004 | bio->bi_iter.bi_size += len; |
0aa69fd3 | 1005 | bio->bi_vcnt++; |
b8e24a93 JW |
1006 | |
1007 | if (!bio_flagged(bio, BIO_WORKINGSET) && unlikely(PageWorkingset(page))) | |
1008 | bio_set_flag(bio, BIO_WORKINGSET); | |
0aa69fd3 CH |
1009 | } |
1010 | EXPORT_SYMBOL_GPL(__bio_add_page); | |
1011 | ||
1012 | /** | |
551879a4 | 1013 | * bio_add_page - attempt to add page(s) to bio |
0aa69fd3 | 1014 | * @bio: destination bio |
551879a4 ML | 1015 | * @page: start page to add |
1016 | * @len: vec entry length, may cross pages | |
1017 | * @offset: vec entry offset relative to @page, may cross pages | |
0aa69fd3 | 1018 | * |
551879a4 | 1019 | * Attempt to add page(s) to the bio_vec maplist. This will only fail |
0aa69fd3 CH | 1020 | * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio. |
1021 | */ | |
1022 | int bio_add_page(struct bio *bio, struct page *page, | |
1023 | unsigned int len, unsigned int offset) | |
1024 | { | |
ff896738 CH |
1025 | bool same_page = false; |
1026 | ||
1027 | if (!__bio_try_merge_page(bio, page, len, offset, &same_page)) { | |
79d08f89 | 1028 | if (bio_full(bio, len)) |
0aa69fd3 CH |
1029 | return 0; |
1030 | __bio_add_page(bio, page, len, offset); | |
1031 | } | |
c66a14d0 | 1032 | return len; |
1da177e4 | 1033 | } |
a112a71d | 1034 | EXPORT_SYMBOL(bio_add_page); |
1da177e4 | 1035 | |
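A hedged sketch of the return-value contract (not part of bio.c): bio_add_page() returns the added length, or 0 once the bio is full, so callers typically loop until it fails and then submit what they have. `my_pages` and `my_fill_bio()` are hypothetical.

```c
static unsigned int my_fill_bio(struct bio *bio, struct page **my_pages,
				unsigned int nr_pages)
{
	unsigned int i;

	for (i = 0; i < nr_pages; i++)
		if (bio_add_page(bio, my_pages[i], PAGE_SIZE, 0) != PAGE_SIZE)
			break;		/* bio is full: submit it and retry later */
	return i;			/* number of pages actually added */
}
```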
c809084a | 1036 | void __bio_release_pages(struct bio *bio, bool mark_dirty) |
7321ecbf CH |
1037 | { |
1038 | struct bvec_iter_all iter_all; | |
1039 | struct bio_vec *bvec; | |
7321ecbf | 1040 | |
d241a95f CH |
1041 | bio_for_each_segment_all(bvec, bio, iter_all) { |
1042 | if (mark_dirty && !PageCompound(bvec->bv_page)) | |
1043 | set_page_dirty_lock(bvec->bv_page); | |
7321ecbf | 1044 | put_page(bvec->bv_page); |
d241a95f | 1045 | } |
7321ecbf | 1046 | } |
c809084a | 1047 | EXPORT_SYMBOL_GPL(__bio_release_pages); |
7321ecbf | 1048 | |
1bb6b810 | 1049 | void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter) |
6d0c48ae | 1050 | { |
fa5fa8ec PB |
1051 | size_t size = iov_iter_count(iter); |
1052 | ||
7a800a20 | 1053 | WARN_ON_ONCE(bio->bi_max_vecs); |
c42bca92 | 1054 | |
fa5fa8ec PB |
1055 | if (bio_op(bio) == REQ_OP_ZONE_APPEND) { |
1056 | struct request_queue *q = bdev_get_queue(bio->bi_bdev); | |
1057 | size_t max_sectors = queue_max_zone_append_sectors(q); | |
1058 | ||
1059 | size = min(size, max_sectors << SECTOR_SHIFT); | |
1060 | } | |
1061 | ||
c42bca92 | 1062 | bio->bi_vcnt = iter->nr_segs; |
c42bca92 PB |
1063 | bio->bi_io_vec = (struct bio_vec *)iter->bvec; |
1064 | bio->bi_iter.bi_bvec_done = iter->iov_offset; | |
fa5fa8ec | 1065 | bio->bi_iter.bi_size = size; |
ed97ce5e | 1066 | bio_set_flag(bio, BIO_NO_PAGE_REF); |
977be012 | 1067 | bio_set_flag(bio, BIO_CLONED); |
7de55b7d | 1068 | } |
c42bca92 | 1069 | |
d9cf3bd5 PB |
1070 | static void bio_put_pages(struct page **pages, size_t size, size_t off) |
1071 | { | |
1072 | size_t i, nr = DIV_ROUND_UP(size + (off & ~PAGE_MASK), PAGE_SIZE); | |
1073 | ||
1074 | for (i = 0; i < nr; i++) | |
1075 | put_page(pages[i]); | |
1076 | } | |
1077 | ||
576ed913 CH |
1078 | #define PAGE_PTRS_PER_BVEC (sizeof(struct bio_vec) / sizeof(struct page *)) |
1079 | ||
2cefe4db | 1080 | /** |
17d51b10 | 1081 | * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio |
2cefe4db KO | 1082 | * @bio: bio to add pages to |
1083 | * @iter: iov iterator describing the region to be mapped | |
1084 | * | |
17d51b10 | 1085 | * Pins pages from *iter and appends them to @bio's bvec array. The |
2cefe4db | 1086 | * pages will have to be released using put_page() when done. |
17d51b10 | 1087 | * For multi-segment *iter, this function only adds pages from the |
3cf14889 | 1088 | * next non-empty segment of the iov iterator. |
2cefe4db | 1089 | */ |
17d51b10 | 1090 | static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) |
2cefe4db | 1091 | { |
576ed913 CH |
1092 | unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt; |
1093 | unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt; | |
2cefe4db KO |
1094 | struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt; |
1095 | struct page **pages = (struct page **)bv; | |
45691804 | 1096 | bool same_page = false; |
576ed913 CH |
1097 | ssize_t size, left; |
1098 | unsigned len, i; | |
b403ea24 | 1099 | size_t offset; |
576ed913 CH |
1100 | |
1101 | /* | |
1102 | * Move page array up in the allocated memory for the bio vecs as far as | |
1103 | * possible so that we can start filling biovecs from the beginning | |
1104 | * without overwriting the temporary page array. | |
1105 | */ | |
1106 | BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2); | |
1107 | pages += entries_left * (PAGE_PTRS_PER_BVEC - 1); | |
2cefe4db | 1108 | |
35c820e7 | 1109 | size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset); |
2cefe4db KO |
1110 | if (unlikely(size <= 0)) |
1111 | return size ? size : -EFAULT; | |
2cefe4db | 1112 | |
576ed913 CH |
1113 | for (left = size, i = 0; left > 0; left -= len, i++) { |
1114 | struct page *page = pages[i]; | |
2cefe4db | 1115 | |
576ed913 | 1116 | len = min_t(size_t, PAGE_SIZE - offset, left); |
45691804 CH |
1117 | |
1118 | if (__bio_try_merge_page(bio, page, len, offset, &same_page)) { | |
1119 | if (same_page) | |
1120 | put_page(page); | |
1121 | } else { | |
d9cf3bd5 PB |
1122 | if (WARN_ON_ONCE(bio_full(bio, len))) { |
1123 | bio_put_pages(pages + i, left, offset); | |
1124 | return -EINVAL; | |
1125 | } | |
45691804 CH |
1126 | __bio_add_page(bio, page, len, offset); |
1127 | } | |
576ed913 | 1128 | offset = 0; |
2cefe4db KO |
1129 | } |
1130 | ||
2cefe4db KO |
1131 | iov_iter_advance(iter, size); |
1132 | return 0; | |
1133 | } | |
17d51b10 | 1134 | |
0512a75b KB |
1135 | static int __bio_iov_append_get_pages(struct bio *bio, struct iov_iter *iter) |
1136 | { | |
1137 | unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt; | |
1138 | unsigned short entries_left = bio->bi_max_vecs - bio->bi_vcnt; | |
3caee463 | 1139 | struct request_queue *q = bdev_get_queue(bio->bi_bdev); |
0512a75b KB |
1140 | unsigned int max_append_sectors = queue_max_zone_append_sectors(q); |
1141 | struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt; | |
1142 | struct page **pages = (struct page **)bv; | |
1143 | ssize_t size, left; | |
1144 | unsigned len, i; | |
1145 | size_t offset; | |
4977d121 | 1146 | int ret = 0; |
0512a75b KB |
1147 | |
1148 | if (WARN_ON_ONCE(!max_append_sectors)) | |
1149 | return 0; | |
1150 | ||
1151 | /* | |
1152 | * Move page array up in the allocated memory for the bio vecs as far as | |
1153 | * possible so that we can start filling biovecs from the beginning | |
1154 | * without overwriting the temporary page array. | |
1155 | */ | |
1156 | BUILD_BUG_ON(PAGE_PTRS_PER_BVEC < 2); | |
1157 | pages += entries_left * (PAGE_PTRS_PER_BVEC - 1); | |
1158 | ||
1159 | size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset); | |
1160 | if (unlikely(size <= 0)) | |
1161 | return size ? size : -EFAULT; | |
1162 | ||
1163 | for (left = size, i = 0; left > 0; left -= len, i++) { | |
1164 | struct page *page = pages[i]; | |
1165 | bool same_page = false; | |
1166 | ||
1167 | len = min_t(size_t, PAGE_SIZE - offset, left); | |
1168 | if (bio_add_hw_page(q, bio, page, len, offset, | |
4977d121 | 1169 | max_append_sectors, &same_page) != len) { |
d9cf3bd5 | 1170 | bio_put_pages(pages + i, left, offset); |
4977d121 NA |
1171 | ret = -EINVAL; |
1172 | break; | |
1173 | } | |
0512a75b KB |
1174 | if (same_page) |
1175 | put_page(page); | |
1176 | offset = 0; | |
1177 | } | |
1178 | ||
4977d121 NA |
1179 | iov_iter_advance(iter, size - left); |
1180 | return ret; | |
0512a75b KB |
1181 | } |
1182 | ||
17d51b10 | 1183 | /** |
6d0c48ae | 1184 | * bio_iov_iter_get_pages - add user or kernel pages to a bio |
17d51b10 | 1185 | * @bio: bio to add pages to |
6d0c48ae JA | 1186 | * @iter: iov iterator describing the region to be added |
1187 | * | |
1188 | * This takes either an iterator pointing to user memory, or one pointing to | |
1189 | * kernel pages (BVEC iterator). If we're adding user pages, we pin them and | |
1190 | * map them into the kernel. On IO completion, the caller should put those | |
c42bca92 PB |
1191 | * pages. For bvec based iterators bio_iov_iter_get_pages() uses the provided |
1192 | * bvecs rather than copying them. Hence anyone issuing kiocb based IO needs | |
1193 | * to ensure the bvecs and pages stay referenced until the submitted I/O is | |
1194 | * completed by a call to ->ki_complete() or returns with an error other than | |
1195 | * -EIOCBQUEUED. The caller needs to check if the bio is flagged BIO_NO_PAGE_REF | |
1196 | * on IO completion. If it isn't, then pages should be released. | |
17d51b10 | 1197 | * |
17d51b10 | 1198 | * The function tries, but does not guarantee, to pin as many pages as |
5cd3ddc1 | 1199 | * fit into the bio, or are requested in @iter, whatever is smaller. If |
6d0c48ae JA |
1200 | * MM encounters an error pinning the requested pages, it stops. Error |
1201 | * is returned only if 0 pages could be pinned. | |
0cf41e5e PB |
1202 | * |
1203 | * It's intended for direct IO, so doesn't do PSI tracking, the caller is | |
1204 | * responsible for setting BIO_WORKINGSET if necessary. | |
17d51b10 MW |
1205 | */ |
1206 | int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter) | |
1207 | { | |
c42bca92 | 1208 | int ret = 0; |
14eacf12 | 1209 | |
c42bca92 | 1210 | if (iov_iter_is_bvec(iter)) { |
fa5fa8ec PB |
1211 | bio_iov_bvec_set(bio, iter); |
1212 | iov_iter_advance(iter, bio->bi_iter.bi_size); | |
1213 | return 0; | |
c42bca92 | 1214 | } |
17d51b10 MW |
1215 | |
1216 | do { | |
86004515 | 1217 | if (bio_op(bio) == REQ_OP_ZONE_APPEND) |
0512a75b | 1218 | ret = __bio_iov_append_get_pages(bio, iter); |
86004515 CH |
1219 | else |
1220 | ret = __bio_iov_iter_get_pages(bio, iter); | |
79d08f89 | 1221 | } while (!ret && iov_iter_count(iter) && !bio_full(bio, 0)); |
17d51b10 | 1222 | |
0cf41e5e PB |
1223 | /* don't account direct I/O as memory stall */ |
1224 | bio_clear_flag(bio, BIO_WORKINGSET); | |
14eacf12 | 1225 | return bio->bi_vcnt ? 0 : ret; |
17d51b10 | 1226 | } |
29b2a3aa | 1227 | EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages); |
2cefe4db | 1228 | |
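A hedged sketch of a caller (not part of bio.c): a synchronous, direct-I/O style read that pins the pages described by an iov_iter and releases them afterwards; bio_release_pages() is a no-op when BIO_NO_PAGE_REF is set for bvec iterators. `my_dio_read_sync()` is hypothetical and omits the splitting and looping a real implementation needs.

```c
static int my_dio_read_sync(struct block_device *bdev, sector_t sector,
			    struct iov_iter *iter)
{
	unsigned short nr_vecs = bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS);
	struct bio *bio = bio_alloc(GFP_KERNEL, nr_vecs);
	int ret;

	if (!bio)
		return -ENOMEM;
	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_READ;
	bio->bi_iter.bi_sector = sector;

	ret = bio_iov_iter_get_pages(bio, iter);
	if (!ret) {
		ret = submit_bio_wait(bio);
		bio_release_pages(bio, true);	/* reads dirty the pages */
	}
	bio_put(bio);
	return ret;
}
```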
4246a0b6 | 1229 | static void submit_bio_wait_endio(struct bio *bio) |
9e882242 | 1230 | { |
65e53aab | 1231 | complete(bio->bi_private); |
9e882242 KO |
1232 | } |
1233 | ||
1234 | /** | |
1235 | * submit_bio_wait - submit a bio, and wait until it completes | |
9e882242 KO | 1236 | * @bio: The &struct bio which describes the I/O |
1237 | * | |
1238 | * Simple wrapper around submit_bio(). Returns 0 on success, or the error from | |
1239 | * bio_endio() on failure. | |
3d289d68 JK | 1240 | * |
1241 | * WARNING: Unlike how submit_bio() is usually used, this function does not |
1242 | * result in the bio reference being consumed. The caller must drop the |
1243 | * reference on its own. | |
9e882242 | 1244 | */ |
4e49ea4a | 1245 | int submit_bio_wait(struct bio *bio) |
9e882242 | 1246 | { |
309dca30 CH |
1247 | DECLARE_COMPLETION_ONSTACK_MAP(done, |
1248 | bio->bi_bdev->bd_disk->lockdep_map); | |
de6a78b6 | 1249 | unsigned long hang_check; |
9e882242 | 1250 | |
65e53aab | 1251 | bio->bi_private = &done; |
9e882242 | 1252 | bio->bi_end_io = submit_bio_wait_endio; |
1eff9d32 | 1253 | bio->bi_opf |= REQ_SYNC; |
4e49ea4a | 1254 | submit_bio(bio); |
de6a78b6 ML |
1255 | |
1256 | /* Prevent hang_check timer from firing at us during very long I/O */ | |
1257 | hang_check = sysctl_hung_task_timeout_secs; | |
1258 | if (hang_check) | |
1259 | while (!wait_for_completion_io_timeout(&done, | |
1260 | hang_check * (HZ/2))) | |
1261 | ; | |
1262 | else | |
1263 | wait_for_completion_io(&done); | |
9e882242 | 1264 | |
65e53aab | 1265 | return blk_status_to_errno(bio->bi_status); |
9e882242 KO |
1266 | } |
1267 | EXPORT_SYMBOL(submit_bio_wait); | |
1268 | ||
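A hedged sketch of synchronous use (not part of bio.c), modelled on how a cache flush can be issued and waited for; note the bio reference is not consumed, so the caller drops it with bio_put(). `my_flush_device()` is hypothetical.

```c
static int my_flush_device(struct block_device *bdev)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 0);	/* no data payload */
	int ret;

	if (!bio)
		return -ENOMEM;
	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	ret = submit_bio_wait(bio);
	bio_put(bio);					/* reference still ours */
	return ret;
}
```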
d4aa57a1 | 1269 | void __bio_advance(struct bio *bio, unsigned bytes) |
054bdf64 KO |
1270 | { |
1271 | if (bio_integrity(bio)) | |
1272 | bio_integrity_advance(bio, bytes); | |
1273 | ||
a892c8d5 | 1274 | bio_crypt_advance(bio, bytes); |
4550dd6c | 1275 | bio_advance_iter(bio, &bio->bi_iter, bytes); |
054bdf64 | 1276 | } |
d4aa57a1 | 1277 | EXPORT_SYMBOL(__bio_advance); |
054bdf64 | 1278 | |
45db54d5 KO |
1279 | void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter, |
1280 | struct bio *src, struct bvec_iter *src_iter) | |
16ac3d63 | 1281 | { |
45db54d5 | 1282 | while (src_iter->bi_size && dst_iter->bi_size) { |
f8b679a0 CH |
1283 | struct bio_vec src_bv = bio_iter_iovec(src, *src_iter); |
1284 | struct bio_vec dst_bv = bio_iter_iovec(dst, *dst_iter); | |
1285 | unsigned int bytes = min(src_bv.bv_len, dst_bv.bv_len); | |
1286 | void *src_buf; | |
1287 | ||
1288 | src_buf = bvec_kmap_local(&src_bv); | |
1289 | memcpy_to_bvec(&dst_bv, src_buf); | |
1290 | kunmap_local(src_buf); | |
6e6e811d | 1291 | |
22b56c29 PB |
1292 | bio_advance_iter_single(src, src_iter, bytes); |
1293 | bio_advance_iter_single(dst, dst_iter, bytes); | |
16ac3d63 KO |
1294 | } |
1295 | } | |
38a72dac KO |
1296 | EXPORT_SYMBOL(bio_copy_data_iter); |
1297 | ||
1298 | /** | |
45db54d5 KO |
1299 | * bio_copy_data - copy contents of data buffers from one bio to another |
1300 | * @src: source bio | |
1301 | * @dst: destination bio | |
38a72dac KO |
1302 | * |
1303 | * Stops when it reaches the end of either @src or @dst - that is, copies | |
1304 | * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios). | |
1305 | */ | |
1306 | void bio_copy_data(struct bio *dst, struct bio *src) | |
1307 | { | |
45db54d5 KO |
1308 | struct bvec_iter src_iter = src->bi_iter; |
1309 | struct bvec_iter dst_iter = dst->bi_iter; | |
1310 | ||
1311 | bio_copy_data_iter(dst, &dst_iter, src, &src_iter); | |
38a72dac | 1312 | } |
16ac3d63 KO |
1313 | EXPORT_SYMBOL(bio_copy_data); |
1314 | ||
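A hedged sketch (not part of bio.c) of the classic bounce-write use of bio_copy_data(): before submitting a bounce bio whose own pages were added with bio_add_page(), copy the original payload into it. Both iterators are still at the start here, so the whole payload is covered. `my_prepare_bounce_write()` is hypothetical.

```c
static void my_prepare_bounce_write(struct bio *bounce, struct bio *orig)
{
	bio_copy_data(bounce, orig);		/* dst = bounce, src = orig */
	bounce->bi_opf = orig->bi_opf;
	bounce->bi_iter.bi_sector = orig->bi_iter.bi_sector;
}
```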
491221f8 | 1315 | void bio_free_pages(struct bio *bio) |
1dfa0f68 CH |
1316 | { |
1317 | struct bio_vec *bvec; | |
6dc4f100 | 1318 | struct bvec_iter_all iter_all; |
1dfa0f68 | 1319 | |
2b070cfe | 1320 | bio_for_each_segment_all(bvec, bio, iter_all) |
1dfa0f68 CH |
1321 | __free_page(bvec->bv_page); |
1322 | } | |
491221f8 | 1323 | EXPORT_SYMBOL(bio_free_pages); |
1dfa0f68 | 1324 | |
1da177e4 LT |
1325 | /* |
1326 | * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions | |
1327 | * for performing direct-IO in BIOs. | |
1328 | * | |
1329 | * The problem is that we cannot run set_page_dirty() from interrupt context | |
1330 | * because the required locks are not interrupt-safe. So what we can do is to | |
1331 | * mark the pages dirty _before_ performing IO. And in interrupt context, | |
1332 | * check that the pages are still dirty. If so, fine. If not, redirty them | |
1333 | * in process context. | |
1334 | * | |
1335 | * We special-case compound pages here: normally this means reads into hugetlb | |
1336 | * pages. The logic in here doesn't really work right for compound pages | |
1337 | * because the VM does not uniformly chase down the head page in all cases. | |
1338 | * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't | |
1339 | * handle them at all. So we skip compound pages here at an early stage. | |
1340 | * | |
1341 | * Note that this code is very hard to test under normal circumstances because | |
1342 | * direct-io pins the pages with get_user_pages(). This makes | |
1343 | * is_page_cache_freeable return false, and the VM will not clean the pages. | |
0d5c3eba | 1344 | * But other code (eg, flusher threads) could clean the pages if they are mapped |
1da177e4 LT |
1345 | * pagecache. |
1346 | * | |
1347 | * Simply disabling the call to bio_set_pages_dirty() is a good way to test the | |
1348 | * deferred bio dirtying paths. | |
1349 | */ | |
1350 | ||
1351 | /* | |
1352 | * bio_set_pages_dirty() will mark all the bio's pages as dirty. | |
1353 | */ | |
1354 | void bio_set_pages_dirty(struct bio *bio) | |
1355 | { | |
cb34e057 | 1356 | struct bio_vec *bvec; |
6dc4f100 | 1357 | struct bvec_iter_all iter_all; |
1da177e4 | 1358 | |
2b070cfe | 1359 | bio_for_each_segment_all(bvec, bio, iter_all) { |
3bb50983 CH |
1360 | if (!PageCompound(bvec->bv_page)) |
1361 | set_page_dirty_lock(bvec->bv_page); | |
1da177e4 LT |
1362 | } |
1363 | } | |
1364 | ||
1da177e4 LT |
1365 | /* |
1366 | * bio_check_pages_dirty() will check that all the BIO's pages are still dirty. | |
1367 | * If they are, then fine. If, however, some pages are clean then they must | |
1368 | * have been written out during the direct-IO read. So we take another ref on | |
24d5493f | 1369 | * the BIO and re-dirty the pages in process context. |
1da177e4 LT |
1370 | * |
1371 | * It is expected that bio_check_pages_dirty() will wholly own the BIO from | |
ea1754a0 KS |
1372 | * here on. It will run one put_page() against each page and will run one |
1373 | * bio_put() against the BIO. | |
1da177e4 LT |
1374 | */ |
1375 | ||
65f27f38 | 1376 | static void bio_dirty_fn(struct work_struct *work); |
1da177e4 | 1377 | |
65f27f38 | 1378 | static DECLARE_WORK(bio_dirty_work, bio_dirty_fn); |
1da177e4 LT |
1379 | static DEFINE_SPINLOCK(bio_dirty_lock); |
1380 | static struct bio *bio_dirty_list; | |
1381 | ||
1382 | /* | |
1383 | * This runs in process context | |
1384 | */ | |
65f27f38 | 1385 | static void bio_dirty_fn(struct work_struct *work) |
1da177e4 | 1386 | { |
24d5493f | 1387 | struct bio *bio, *next; |
1da177e4 | 1388 | |
24d5493f CH |
1389 | spin_lock_irq(&bio_dirty_lock); |
1390 | next = bio_dirty_list; | |
1da177e4 | 1391 | bio_dirty_list = NULL; |
24d5493f | 1392 | spin_unlock_irq(&bio_dirty_lock); |
1da177e4 | 1393 | |
24d5493f CH |
1394 | while ((bio = next) != NULL) { |
1395 | next = bio->bi_private; | |
1da177e4 | 1396 | |
d241a95f | 1397 | bio_release_pages(bio, true); |
1da177e4 | 1398 | bio_put(bio); |
1da177e4 LT |
1399 | } |
1400 | } | |
1401 | ||
1402 | void bio_check_pages_dirty(struct bio *bio) | |
1403 | { | |
cb34e057 | 1404 | struct bio_vec *bvec; |
24d5493f | 1405 | unsigned long flags; |
6dc4f100 | 1406 | struct bvec_iter_all iter_all; |
1da177e4 | 1407 | |
2b070cfe | 1408 | bio_for_each_segment_all(bvec, bio, iter_all) { |
24d5493f CH |
1409 | if (!PageDirty(bvec->bv_page) && !PageCompound(bvec->bv_page)) |
1410 | goto defer; | |
1da177e4 LT |
1411 | } |
1412 | ||
d241a95f | 1413 | bio_release_pages(bio, false); |
24d5493f CH |
1414 | bio_put(bio); |
1415 | return; | |
1416 | defer: | |
1417 | spin_lock_irqsave(&bio_dirty_lock, flags); | |
1418 | bio->bi_private = bio_dirty_list; | |
1419 | bio_dirty_list = bio; | |
1420 | spin_unlock_irqrestore(&bio_dirty_lock, flags); | |
1421 | schedule_work(&bio_dirty_work); | |
1da177e4 LT |
1422 | } |
1423 | ||
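As an editorial illustration (not part of bio.c), a minimal sketch of how a direct-IO read path is typically wired to bio_set_pages_dirty() and bio_check_pages_dirty(); the my_* names and the use of ->bi_private to carry the flag are assumptions for illustration only.

static void my_dio_read_end_io(struct bio *bio)
{
	bool should_dirty = bio->bi_private != NULL;

	if (should_dirty) {
		/*
		 * Re-checks dirtiness, releases the pages and puts the bio,
		 * possibly deferring to bio_dirty_work as described above.
		 * Note that it may reuse ->bi_private for the deferral list.
		 */
		bio_check_pages_dirty(bio);
	} else {
		bio_release_pages(bio, false);
		bio_put(bio);
	}
}

static void my_submit_dio_read(struct bio *bio, bool should_dirty)
{
	bio->bi_private = (void *)(unsigned long)should_dirty;
	bio->bi_end_io = my_dio_read_end_io;
	if (should_dirty)
		bio_set_pages_dirty(bio);	/* dirty before the IO starts */
	submit_bio(bio);
}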
c4cf5261 JA |
1424 | static inline bool bio_remaining_done(struct bio *bio) |
1425 | { | |
1426 | /* | |
1427 | * If we're not chaining, then ->__bi_remaining is always 1 and | |
1428 | * we always end io on the first invocation. | |
1429 | */ | |
1430 | if (!bio_flagged(bio, BIO_CHAIN)) | |
1431 | return true; | |
1432 | ||
1433 | BUG_ON(atomic_read(&bio->__bi_remaining) <= 0); | |
1434 | ||
326e1dbb | 1435 | if (atomic_dec_and_test(&bio->__bi_remaining)) { |
b7c44ed9 | 1436 | bio_clear_flag(bio, BIO_CHAIN); |
c4cf5261 | 1437 | return true; |
326e1dbb | 1438 | } |
c4cf5261 JA |
1439 | |
1440 | return false; | |
1441 | } | |
1442 | ||
1da177e4 LT |
1443 | /** |
1444 | * bio_endio - end I/O on a bio | |
1445 | * @bio: bio | |
1da177e4 LT |
1446 | * |
1447 | * Description: | |
4246a0b6 CH |
1448 | * bio_endio() will end I/O on the whole bio. bio_endio() is the preferred |
1449 | * way to end I/O on a bio. No one should call bi_end_io() directly on a | |
1450 | * bio unless they own it and thus know that it has an end_io function. | |
fbbaf700 N |
1451 | * |
1452 | * bio_endio() can be called several times on a bio that has been chained | |
1453 | * using bio_chain(). The ->bi_end_io() function will only be called the | |
60b6a7e6 | 1454 | * last time. |
1da177e4 | 1455 | **/ |
4246a0b6 | 1456 | void bio_endio(struct bio *bio) |
1da177e4 | 1457 | { |
ba8c6967 | 1458 | again: |
2b885517 | 1459 | if (!bio_remaining_done(bio)) |
ba8c6967 | 1460 | return; |
7c20f116 CH |
1461 | if (!bio_integrity_endio(bio)) |
1462 | return; | |
1da177e4 | 1463 | |
a647a524 | 1464 | if (bio->bi_bdev && bio_flagged(bio, BIO_TRACKED)) |
3caee463 | 1465 | rq_qos_done_bio(bdev_get_queue(bio->bi_bdev), bio); |
67b42d0b | 1466 | |
60b6a7e6 | 1467 | if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) { |
3caee463 | 1468 | trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio); |
60b6a7e6 EH |
1469 | bio_clear_flag(bio, BIO_TRACE_COMPLETION); |
1470 | } | |
1471 | ||
ba8c6967 CH |
1472 | /* |
1473 | * Need to have a real endio function for chained bios, otherwise | |
1474 | * various corner cases will break (like stacking block devices that | |
1475 | * save/restore bi_end_io) - however, we want to avoid unbounded | |
1476 | * recursion and blowing the stack. Tail call optimization would | |
1477 | * handle this, but compiling with frame pointers also disables | |
1478 | * gcc's sibling call optimization. | |
1479 | */ | |
1480 | if (bio->bi_end_io == bio_chain_endio) { | |
1481 | bio = __bio_chain_endio(bio); | |
1482 | goto again; | |
196d38bc | 1483 | } |
ba8c6967 | 1484 | |
9e234eea | 1485 | blk_throtl_bio_endio(bio); |
b222dd2f SL |
1486 | /* release cgroup info */ |
1487 | bio_uninit(bio); | |
ba8c6967 CH |
1488 | if (bio->bi_end_io) |
1489 | bio->bi_end_io(bio); | |
1da177e4 | 1490 | } |
a112a71d | 1491 | EXPORT_SYMBOL(bio_endio); |
1da177e4 | 1492 | |
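For illustration only (not from this file), a stacking-style completion handler of the kind the description above has in mind; the clone's ->bi_private is assumed to have been pointed at the original bio at submission time, and my_clone_end_io() is a hypothetical name.

static void my_clone_end_io(struct bio *clone)
{
	struct bio *orig = clone->bi_private;

	orig->bi_status = clone->bi_status;	/* propagate any error */
	bio_put(clone);
	bio_endio(orig);	/* the owner's ->bi_end_io runs from here */
}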
20d0189b KO |
1493 | /** |
1494 | * bio_split - split a bio | |
1495 | * @bio: bio to split | |
1496 | * @sectors: number of sectors to split from the front of @bio | |
1497 | * @gfp: gfp mask | |
1498 | * @bs: bio set to allocate from | |
1499 | * | |
1500 | * Allocates and returns a new bio which represents @sectors from the start of | |
1501 | * @bio, and updates @bio to represent the remaining sectors. | |
1502 | * | |
f3f5da62 | 1503 | * Unless this is a discard request the newly allocated bio will point |
dad77584 BVA |
1504 | * to @bio's bi_io_vec. It is the caller's responsibility to ensure that |
1505 | * neither @bio nor @bs is freed before the split bio. | |
20d0189b KO |
1506 | */ |
1507 | struct bio *bio_split(struct bio *bio, int sectors, | |
1508 | gfp_t gfp, struct bio_set *bs) | |
1509 | { | |
f341a4d3 | 1510 | struct bio *split; |
20d0189b KO |
1511 | |
1512 | BUG_ON(sectors <= 0); | |
1513 | BUG_ON(sectors >= bio_sectors(bio)); | |
1514 | ||
0512a75b KB |
1515 | /* Zone append commands cannot be split */ |
1516 | if (WARN_ON_ONCE(bio_op(bio) == REQ_OP_ZONE_APPEND)) | |
1517 | return NULL; | |
1518 | ||
f9d03f96 | 1519 | split = bio_clone_fast(bio, gfp, bs); |
20d0189b KO |
1520 | if (!split) |
1521 | return NULL; | |
1522 | ||
1523 | split->bi_iter.bi_size = sectors << 9; | |
1524 | ||
1525 | if (bio_integrity(split)) | |
fbd08e76 | 1526 | bio_integrity_trim(split); |
20d0189b KO |
1527 | |
1528 | bio_advance(bio, split->bi_iter.bi_size); | |
1529 | ||
fbbaf700 | 1530 | if (bio_flagged(bio, BIO_TRACE_COMPLETION)) |
20d59023 | 1531 | bio_set_flag(split, BIO_TRACE_COMPLETION); |
fbbaf700 | 1532 | |
20d0189b KO |
1533 | return split; |
1534 | } | |
1535 | EXPORT_SYMBOL(bio_split); | |
1536 | ||
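A hedged sketch of the usual split-and-chain pattern built on bio_split(); the helper name is hypothetical, but stacking drivers and the block core use the same shape: the front piece is returned for processing while the remainder is chained and resubmitted.

static struct bio *my_split_front(struct bio *bio, unsigned int max_sectors)
{
	struct bio *split;

	if (bio_sectors(bio) <= max_sectors)
		return bio;

	split = bio_split(bio, max_sectors, GFP_NOIO, &fs_bio_set);
	if (!split)
		return NULL;

	bio_chain(split, bio);		/* @bio now completes after @split */
	submit_bio_noacct(bio);		/* requeue the remainder */
	return split;
}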
6678d83f KO |
1537 | /** |
1538 | * bio_trim - trim a bio | |
1539 | * @bio: bio to trim | |
1540 | * @offset: number of sectors to trim from the front of @bio | |
1541 | * @size: size we want to trim @bio to, in sectors | |
e83502ca CK |
1542 | * |
1543 | * This function is typically used for bios that are cloned and submitted | |
1544 | * to the underlying device in parts. | |
6678d83f | 1545 | */ |
e83502ca | 1546 | void bio_trim(struct bio *bio, sector_t offset, sector_t size) |
6678d83f | 1547 | { |
e83502ca CK |
1548 | if (WARN_ON_ONCE(offset > BIO_MAX_SECTORS || size > BIO_MAX_SECTORS || |
1549 | offset + size > bio->bi_iter.bi_size)) | |
1550 | return; | |
6678d83f KO |
1551 | |
1552 | size <<= 9; | |
4f024f37 | 1553 | if (offset == 0 && size == bio->bi_iter.bi_size) |
6678d83f KO |
1554 | return; |
1555 | ||
6678d83f | 1556 | bio_advance(bio, offset << 9); |
4f024f37 | 1557 | bio->bi_iter.bi_size = size; |
376a78ab DM |
1558 | |
1559 | if (bio_integrity(bio)) | |
fbd08e76 | 1560 | bio_integrity_trim(bio); |
6678d83f KO |
1561 | } |
1562 | EXPORT_SYMBOL_GPL(bio_trim); | |
1563 | ||
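For illustration (not part of this file), the clone-then-trim use the description refers to; my_clone_range() and the caller-provided bio_set are assumptions.

static struct bio *my_clone_range(struct bio *bio, sector_t offset,
				  sector_t nr_sects, struct bio_set *bs)
{
	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, bs);

	if (!clone)
		return NULL;

	/* Both arguments are in sectors, relative to the clone's start. */
	bio_trim(clone, offset, nr_sects);
	return clone;
}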
1da177e4 LT |
1564 | /* |
1565 | * Create memory pools for biovecs in a bio_set. | |
1566 | * Use the global biovec slabs created for general use. | |
1567 | */ | |
8aa6ba2f | 1568 | int biovec_init_pool(mempool_t *pool, int pool_entries) |
1da177e4 | 1569 | { |
7a800a20 | 1570 | struct biovec_slab *bp = bvec_slabs + ARRAY_SIZE(bvec_slabs) - 1; |
1da177e4 | 1571 | |
8aa6ba2f | 1572 | return mempool_init_slab_pool(pool, pool_entries, bp->slab); |
1da177e4 LT |
1573 | } |
1574 | ||
917a38c7 KO |
1575 | /* |
1576 | * bioset_exit - exit a bioset initialized with bioset_init() | |
1577 | * | |
1578 | * May be called on a zeroed but uninitialized bioset (i.e. allocated with | |
1579 | * kzalloc()). | |
1580 | */ | |
1581 | void bioset_exit(struct bio_set *bs) | |
1da177e4 | 1582 | { |
be4d234d | 1583 | bio_alloc_cache_destroy(bs); |
df2cb6da KO |
1584 | if (bs->rescue_workqueue) |
1585 | destroy_workqueue(bs->rescue_workqueue); | |
917a38c7 | 1586 | bs->rescue_workqueue = NULL; |
df2cb6da | 1587 | |
8aa6ba2f KO |
1588 | mempool_exit(&bs->bio_pool); |
1589 | mempool_exit(&bs->bvec_pool); | |
9f060e22 | 1590 | |
7878cba9 | 1591 | bioset_integrity_free(bs); |
917a38c7 KO |
1592 | if (bs->bio_slab) |
1593 | bio_put_slab(bs); | |
1594 | bs->bio_slab = NULL; | |
1595 | } | |
1596 | EXPORT_SYMBOL(bioset_exit); | |
1da177e4 | 1597 | |
917a38c7 KO |
1598 | /** |
1599 | * bioset_init - Initialize a bio_set | |
dad08527 | 1600 | * @bs: pool to initialize |
917a38c7 KO |
1601 | * @pool_size: Number of bio and bio_vecs to cache in the mempool |
1602 | * @front_pad: Number of bytes to allocate in front of the returned bio | |
1603 | * @flags: Flags to modify behavior, currently %BIOSET_NEED_BVECS, | |
1604 | * %BIOSET_NEED_RESCUER and %BIOSET_PERCPU_CACHE | |
1605 | * | |
dad08527 KO |
1606 | * Description: |
1607 | * Set up a bio_set to be used with bio_alloc_bioset(). Allows the caller | |
1608 | * to ask for a number of bytes to be allocated in front of the bio. | |
1609 | * Front pad allocation is useful for embedding the bio inside | |
1610 | * another structure, to avoid allocating extra data to go with the bio. | |
1611 | * Note that the bio must always be embedded at the END of that structure, | |
1612 | * or things will break badly. | |
1613 | * If %BIOSET_NEED_BVECS is set in @flags, a separate pool will be allocated | |
1614 | * for allocating iovecs. This pool is not needed e.g. for bio_clone_fast(). | |
1615 | * If %BIOSET_NEED_RESCUER is set, a workqueue is created which can be used to | |
1616 | * dispatch queued requests when the mempool runs out of space. | |
1617 | * | |
917a38c7 KO |
1618 | */ |
1619 | int bioset_init(struct bio_set *bs, | |
1620 | unsigned int pool_size, | |
1621 | unsigned int front_pad, | |
1622 | int flags) | |
1623 | { | |
917a38c7 | 1624 | bs->front_pad = front_pad; |
9f180e31 ML |
1625 | if (flags & BIOSET_NEED_BVECS) |
1626 | bs->back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec); | |
1627 | else | |
1628 | bs->back_pad = 0; | |
917a38c7 KO |
1629 | |
1630 | spin_lock_init(&bs->rescue_lock); | |
1631 | bio_list_init(&bs->rescue_list); | |
1632 | INIT_WORK(&bs->rescue_work, bio_alloc_rescue); | |
1633 | ||
49d1ec85 | 1634 | bs->bio_slab = bio_find_or_create_slab(bs); |
917a38c7 KO |
1635 | if (!bs->bio_slab) |
1636 | return -ENOMEM; | |
1637 | ||
1638 | if (mempool_init_slab_pool(&bs->bio_pool, pool_size, bs->bio_slab)) | |
1639 | goto bad; | |
1640 | ||
1641 | if ((flags & BIOSET_NEED_BVECS) && | |
1642 | biovec_init_pool(&bs->bvec_pool, pool_size)) | |
1643 | goto bad; | |
1644 | ||
be4d234d JA |
1645 | if (flags & BIOSET_NEED_RESCUER) { |
1646 | bs->rescue_workqueue = alloc_workqueue("bioset", | |
1647 | WQ_MEM_RECLAIM, 0); | |
1648 | if (!bs->rescue_workqueue) | |
1649 | goto bad; | |
1650 | } | |
1651 | if (flags & BIOSET_PERCPU_CACHE) { | |
1652 | bs->cache = alloc_percpu(struct bio_alloc_cache); | |
1653 | if (!bs->cache) | |
1654 | goto bad; | |
1655 | cpuhp_state_add_instance_nocalls(CPUHP_BIO_DEAD, &bs->cpuhp_dead); | |
1656 | } | |
917a38c7 KO |
1657 | |
1658 | return 0; | |
1659 | bad: | |
1660 | bioset_exit(bs); | |
1661 | return -ENOMEM; | |
1662 | } | |
1663 | EXPORT_SYMBOL(bioset_init); | |
1664 | ||
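A minimal sketch of the front_pad embedding described above, assuming a hypothetical struct my_request that embeds the bio as its last member; my_bio_set and the helper names are assumptions for illustration.

struct my_request {
	void		*private_data;
	struct bio	bio;		/* must be the last member */
};

static struct bio_set my_bio_set;

static int __init my_driver_init(void)
{
	return bioset_init(&my_bio_set, BIO_POOL_SIZE,
			   offsetof(struct my_request, bio),
			   BIOSET_NEED_BVECS);
}

/* Each allocation then yields the containing structure for free: */
static struct my_request *my_alloc_request(unsigned short nr_vecs)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_vecs, &my_bio_set);

	return bio ? container_of(bio, struct my_request, bio) : NULL;
}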
28e89fd9 JA |
1665 | /* |
1666 | * Initialize and set up a new bio_set, based on the settings from | |
1667 | * another bio_set. | |
1668 | */ | |
1669 | int bioset_init_from_src(struct bio_set *bs, struct bio_set *src) | |
1670 | { | |
1671 | int flags; | |
1672 | ||
1673 | flags = 0; | |
1674 | if (src->bvec_pool.min_nr) | |
1675 | flags |= BIOSET_NEED_BVECS; | |
1676 | if (src->rescue_workqueue) | |
1677 | flags |= BIOSET_NEED_RESCUER; | |
1678 | ||
1679 | return bioset_init(bs, src->bio_pool.min_nr, src->front_pad, flags); | |
1680 | } | |
1681 | EXPORT_SYMBOL(bioset_init_from_src); | |
1682 | ||
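A brief, hypothetical use of the above, of the kind a stacking driver might make when it needs a private pool sized like an existing one; my_clone_set and the helper are assumed names.

static struct bio_set my_clone_set;

static int my_setup_clone_set(struct bio_set *parent)
{
	/* inherit pool sizing, bvec pool and rescuer setting from @parent */
	return bioset_init_from_src(&my_clone_set, parent);
}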
be4d234d JA |
1683 | /** |
1684 | * bio_alloc_kiocb - Allocate a bio from bio_set based on kiocb | |
1685 | * @kiocb: kiocb describing the IO | |
0ef47db1 | 1686 | * @nr_vecs: number of iovecs to pre-allocate |
be4d234d JA |
1687 | * @bs: bio_set to allocate from |
1688 | * | |
1689 | * Description: | |
1690 | * Like bio_alloc_bioset(), but pass in the kiocb. The kiocb is only | |
1691 | * used to check if we should dip into the per-cpu bio_set allocation | |
3d5b3fbe JA |
1692 | * cache. The allocation uses GFP_KERNEL internally. On return, the |
1693 | * bio is marked BIO_PERCPU_CACHE, and the final put of the bio | |
1694 | * MUST be done from process context, not hard/soft IRQ. | |
be4d234d JA |
1695 | * |
1696 | */ | |
1697 | struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs, | |
1698 | struct bio_set *bs) | |
1699 | { | |
1700 | struct bio_alloc_cache *cache; | |
1701 | struct bio *bio; | |
1702 | ||
1703 | if (!(kiocb->ki_flags & IOCB_ALLOC_CACHE) || nr_vecs > BIO_INLINE_VECS) | |
1704 | return bio_alloc_bioset(GFP_KERNEL, nr_vecs, bs); | |
1705 | ||
1706 | cache = per_cpu_ptr(bs->cache, get_cpu()); | |
1707 | bio = bio_list_pop(&cache->free_list); | |
1708 | if (bio) { | |
1709 | cache->nr--; | |
1710 | put_cpu(); | |
1711 | bio_init(bio, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs); | |
1712 | bio->bi_pool = bs; | |
1713 | bio_set_flag(bio, BIO_PERCPU_CACHE); | |
1714 | return bio; | |
1715 | } | |
1716 | put_cpu(); | |
1717 | bio = bio_alloc_bioset(GFP_KERNEL, nr_vecs, bs); | |
1718 | bio_set_flag(bio, BIO_PERCPU_CACHE); | |
1719 | return bio; | |
1720 | } | |
1721 | EXPORT_SYMBOL_GPL(bio_alloc_kiocb); | |
1722 | ||
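A sketch of the direct-IO style caller this interface is aimed at; the names are assumptions, and the bio_set is assumed to have been initialized with %BIOSET_PERCPU_CACHE.

static struct bio_set my_dio_pool;	/* bioset_init(..., BIOSET_PERCPU_CACHE) */

static struct bio *my_dio_bio_alloc(struct kiocb *iocb, unsigned short nr_vecs)
{
	/*
	 * Only dips into the per-cpu cache when the iocb carries
	 * IOCB_ALLOC_CACHE and nr_vecs fits the inline vectors; otherwise
	 * this is an ordinary bio_alloc_bioset(GFP_KERNEL, ...) call.
	 */
	return bio_alloc_kiocb(iocb, nr_vecs, &my_dio_pool);
}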
de76fd89 | 1723 | static int __init init_bio(void) |
1da177e4 LT |
1724 | { |
1725 | int i; | |
1726 | ||
7878cba9 | 1727 | bio_integrity_init(); |
1da177e4 | 1728 | |
de76fd89 CH |
1729 | for (i = 0; i < ARRAY_SIZE(bvec_slabs); i++) { |
1730 | struct biovec_slab *bvs = bvec_slabs + i; | |
a7fcd37c | 1731 | |
de76fd89 CH |
1732 | bvs->slab = kmem_cache_create(bvs->name, |
1733 | bvs->nr_vecs * sizeof(struct bio_vec), 0, | |
1734 | SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); | |
1da177e4 | 1735 | } |
1da177e4 | 1736 | |
be4d234d JA |
1737 | cpuhp_setup_state_multi(CPUHP_BIO_DEAD, "block/bio:dead", NULL, |
1738 | bio_cpu_dead); | |
1739 | ||
f4f8154a | 1740 | if (bioset_init(&fs_bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS)) |
1da177e4 LT |
1741 | panic("bio: can't allocate bios\n"); |
1742 | ||
f4f8154a | 1743 | if (bioset_integrity_create(&fs_bio_set, BIO_POOL_SIZE)) |
a91a2785 MP |
1744 | panic("bio: can't create integrity pool\n"); |
1745 | ||
1da177e4 LT |
1746 | return 0; |
1747 | } | |
1da177e4 | 1748 | subsys_initcall(init_bio); |