// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include <trace/events/block.h>

#include "blk.h"

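/*
 * Split a discard bio that exceeds the queue's discard limits, pulling the
 * split point back so the remainder stays aligned to the discard
 * granularity.  Returns the split-off front part, or NULL if no split is
 * needed (or none is possible).
 */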
static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *nsegs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* XXX: warn */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	alignment = (q->limits.discard_alignment >> 9) % granularity;

	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

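/*
 * Split a write-zeroes bio that exceeds the queue's
 * max_write_zeroes_sectors limit.
 */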
static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
		struct bio *bio, struct bio_set *bs, unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_zeroes_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
}

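/*
 * Split a WRITE SAME bio that exceeds the queue's max_write_same_sectors
 * limit.
 */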
static struct bio *blk_bio_write_same_split(struct request_queue *q,
					    struct bio *bio,
					    struct bio_set *bs,
					    unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_same_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

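/*
 * Return the maximum I/O size, in sectors, for a bio starting at the given
 * sector, rounded down to a multiple of the logical block size.
 */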
static inline unsigned get_max_io_size(struct request_queue *q,
				       struct bio *bio)
{
	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
	unsigned mask = queue_logical_block_size(q) - 1;

	/* aligned to logical block size */
	sectors &= ~(mask >> 9);

	return sectors;
}

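/*
 * Walk the bio and split it once adding another bvec would exceed the
 * queue's segment count, segment size or max_sectors limits, or would
 * create an SG gap the hardware cannot handle.  The front and back
 * segment sizes are recorded for later request merging.
 */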
static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *segs)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned seg_size = 0, nsegs = 0, sectors = 0;
	unsigned front_seg_size = bio->bi_seg_front_size;
	bool do_split = true;
	struct bio *new = NULL;
	const unsigned max_sectors = get_max_io_size(q, bio);

	bio_for_each_segment(bv, bio, iter) {
		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
			goto split;

		if (sectors + (bv.bv_len >> 9) > max_sectors) {
			/*
			 * Consider this a new segment if we're splitting in
			 * the middle of this vector.
			 */
			if (nsegs < queue_max_segments(q) &&
			    sectors < max_sectors) {
				nsegs++;
				sectors = max_sectors;
			}
			goto split;
		}

		if (bvprvp && blk_queue_cluster(q)) {
			if (seg_size + bv.bv_len > queue_max_segment_size(q))
				goto new_segment;
			if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
				goto new_segment;
			if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
				goto new_segment;

			seg_size += bv.bv_len;
			bvprv = bv;
			bvprvp = &bvprv;
			sectors += bv.bv_len >> 9;

			continue;
		}
new_segment:
		if (nsegs == queue_max_segments(q))
			goto split;

		if (nsegs == 1 && seg_size > front_seg_size)
			front_seg_size = seg_size;

		nsegs++;
		bvprv = bv;
		bvprvp = &bvprv;
		seg_size = bv.bv_len;
		sectors += bv.bv_len >> 9;
	}

	do_split = false;
split:
	*segs = nsegs;

	if (do_split) {
		new = bio_split(bio, sectors, GFP_NOIO, bs);
		if (new)
			bio = new;
	}

	if (nsegs == 1 && seg_size > front_seg_size)
		front_seg_size = seg_size;
	bio->bi_seg_front_size = front_seg_size;
	if (seg_size > bio->bi_seg_back_size)
		bio->bi_seg_back_size = seg_size;

	return do_split ? new : NULL;
}

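/*
 * Split *bio if it does not fit the queue limits for its operation type.
 * The remainder is resubmitted via generic_make_request() and *bio is
 * updated to point at the front part that fits.
 */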
void blk_queue_split(struct request_queue *q, struct bio **bio)
{
	struct bio *split, *res;
	unsigned nsegs;

	switch (bio_op(*bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		split = blk_bio_discard_split(q, *bio, q->bio_split, &nsegs);
		break;
	case REQ_OP_WRITE_ZEROES:
		split = blk_bio_write_zeroes_split(q, *bio, q->bio_split, &nsegs);
		break;
	case REQ_OP_WRITE_SAME:
		split = blk_bio_write_same_split(q, *bio, q->bio_split, &nsegs);
		break;
	default:
		split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
		break;
	}

	/* physical segments can be figured out during splitting */
	res = split ? split : *bio;
	res->bi_phys_segments = nsegs;
	bio_set_flag(res, BIO_SEG_VALID);

	if (split) {
		/* there is no chance to merge the split bio */
		split->bi_opf |= REQ_NOMERGE;

		bio_chain(split, *bio);
		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
		generic_make_request(*bio);
		*bio = split;
	}
}
EXPORT_SYMBOL(blk_queue_split);

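/*
 * Count the physical segments in a bio chain, honouring the queue's
 * clustering, segment size and boundary limits.  With no_sg_merge set,
 * every bvec counts as its own segment.
 */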
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio,
					     bool no_sg_merge)
{
	struct bio_vec bv, bvprv = { NULL };
	int cluster, prev = 0;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;
	struct bvec_iter iter;

	if (!bio)
		return 0;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	}

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, iter) {
			/*
			 * If SG merging is disabled, each bio vector is
			 * a segment
			 */
			if (no_sg_merge)
				goto new_segment;

			if (prev && cluster) {
				if (seg_size + bv.bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
					goto new_segment;

				seg_size += bv.bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			prev = 1;
			seg_size = bv.bv_len;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}

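/* Recompute the physical segment count of a request from its bio chain. */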
void blk_recalc_rq_segments(struct request *rq)
{
	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
			&rq->q->queue_flags);

	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
			no_sg_merge);
}

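/*
 * Recount the physical segments of a single bio and mark it BIO_SEG_VALID.
 * When SG merging is disabled, the bvec count is a good enough estimate.
 */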
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	unsigned short seg_cnt;

	/* estimate segment number by bi_vcnt for non-cloned bio */
	if (bio_flagged(bio, BIO_CLONED))
		seg_cnt = bio_segments(bio);
	else
		seg_cnt = bio->bi_vcnt;

	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
	    (seg_cnt < queue_max_segments(q)))
		bio->bi_phys_segments = seg_cnt;
	else {
		struct bio *nxt = bio->bi_next;

		bio->bi_next = NULL;
		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
		bio->bi_next = nxt;
	}

	bio_set_flag(bio, BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

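/*
 * Check whether the last segment of @bio and the first segment of @nxt can
 * be folded into one physical segment without violating the queue's
 * segment size and boundary limits.
 */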
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	struct bio_vec end_bv = { NULL }, nxt_bv;

	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	bio_get_last_bvec(bio, &end_bv);
	bio_get_first_bvec(nxt, &nxt_bv);

	if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
		return 1;

	return 0;
}

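/*
 * Add one bvec to the scatterlist, either by extending the current sg
 * entry (when clustering and the queue limits allow it) or by starting a
 * new entry.
 */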
static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec *bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{
	int nbytes = bvec->bv_len;

	if (*sg && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;

		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
			goto new_segment;
		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			sg_unmark_end(*sg);
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = *bvec;
}

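/* Map a single bvec into the first scatterlist entry. */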
static inline int __blk_bvec_map_sg(struct request_queue *q, struct bio_vec bv,
		struct scatterlist *sglist, struct scatterlist **sg)
{
	*sg = sglist;
	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
	return 1;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int cluster = blk_queue_cluster(q), nsegs = 0;

	for_each_bio(bio)
		bio_for_each_segment(bvec, bio, iter)
			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
					     &nsegs, &cluster);

	return nsegs;
}

/*
 * Map a request to a scatterlist and return the number of sg entries set
 * up.  The caller must make sure sg can hold rq->nr_phys_segments entries.
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs = 0;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		nsegs = __blk_bvec_map_sg(q, rq->special_vec, sglist, &sg);
	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
		nsegs = __blk_bvec_map_sg(q, bio_iovec(rq->bio), sglist, &sg);
	else if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

	if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (op_is_write(req_op(rq)))
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg_unmark_end(sg);
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	/*
	 * Something must have gone wrong if the computed number of segments
	 * is bigger than the request's number of physical segments.
	 */
	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

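/*
 * Account for @bio's physical segments when merging it into @req, failing
 * the merge if the queue's segment or integrity limits would be exceeded.
 */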
static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (blk_integrity_merge_bio(q, req, bio) == false)
		goto no_merge;

	/*
	 * This will form the start of a new hw segment. Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req_set_nomerge(q, req);
	return 0;
}

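/*
 * Check whether @bio can be merged at the back of @req without exceeding
 * the queue's size, gap and segment limits.
 */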
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		req_set_nomerge(q, req);
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

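/*
 * Check whether @bio can be merged at the front of @req without exceeding
 * the queue's size, gap and segment limits.
 */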
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
		req_set_nomerge(q, req);
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

/*
 * blk-mq uses req->special to carry normal driver per-request payload; it
 * does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
	struct request_queue *q = req->q;

	return !q->mq_ops && req->special;
}

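/*
 * Try to merge two discard requests by summing their discard segment
 * counts, provided the result stays within the queue's discard limits.
 */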
static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct request *next)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}

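/*
 * Check whether @req and @next can be combined into a single request, and
 * update the merged segment accounting if they can.
 */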
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check if either of the requests is a re-queued request;
	 * we can't merge them if so.
	 */
	if (req_no_special_merge(req) || req_no_special_merge(next))
		return 0;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->rq_flags & RQF_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios. It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
		bio->bi_opf |= ff;
	}
	rq->rq_flags |= RQF_MIXED_MERGE;
}

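/* Drop the in-flight accounting for a request that is being merged away. */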
static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(req->q, cpu, part);
		part_dec_in_flight(req->q, part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
				     struct request *req, struct request *next)
{
	if (!q->mq_ops)
		lockdep_assert_held(q->queue_lock);

	if (!rq_mergeable(req) || !rq_mergeable(next))
		return NULL;

	if (req_op(req) != req_op(next))
		return NULL;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return NULL;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || req_no_special_merge(next))
		return NULL;

	if (req_op(req) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return NULL;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (req->write_hint != next->write_hint)
		return NULL;

	/*
	 * If we are allowed to merge, then append the bio list from next
	 * to rq and release next. merge_requests_fn will have updated the
	 * segment counts; update the sector counts here. Handle DISCARDs
	 * separately, as they have their own settings.
	 */
	if (req_op(req) == REQ_OP_DISCARD) {
		if (!req_attempt_discard_merge(q, req, next))
			return NULL;
	} else if (!ll_merge_requests_fn(q, req, next))
		return NULL;

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding. This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	if (req_op(req) != REQ_OP_DISCARD)
		elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/*
	 * ownership of bio passed from next to req, return 'next' for
	 * the caller to free
	 */
	next->bio = NULL;
	return next;
}

struct request *attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return NULL;
}

struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return NULL;
}

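/*
 * Give the (legacy) elevator a chance to veto the merge, then attempt it
 * and put the request that was merged away.
 */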
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	struct elevator_queue *e = q->elevator;
	struct request *free;

	if (!e->uses_mq && e->type->ops.sq.elevator_allow_rq_merge_fn)
		if (!e->type->ops.sq.elevator_allow_rq_merge_fn(q, rq, next))
			return 0;

	free = attempt_merge(q, rq, next);
	if (free) {
		__blk_put_request(q, free);
		return 1;
	}

	return 0;
}

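/*
 * Check the basic conditions under which @bio may be merged into @rq at
 * all: same operation, same data direction, same device, and compatible
 * integrity profile and write hint.
 */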
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (req_op(rq) != bio_op(bio))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_disk || req_no_special_merge(rq))
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* must be using the same buffer */
	if (req_op(rq) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (rq->write_hint != bio->bi_write_hint)
		return false;

	return true;
}

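/*
 * Decide what kind of merge (discard, back, front or none) is possible
 * between @rq and @bio based on their positions.
 */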
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
	if (req_op(rq) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(rq->q) > 1)
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}