/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include <trace/events/block.h>

#include "blk.h"

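/*
 * Split a discard bio whose size exceeds the queue's discard limits.  The
 * split point is rounded down so that the remainder starts on a
 * discard_granularity boundary.  Returns the split-off front part of the
 * bio, or NULL if no split is needed (or none is possible).  *nsegs is
 * always set to 1.
 */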
static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *nsegs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* XXX: warn */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	alignment = (q->limits.discard_alignment >> 9) % granularity;

	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
		struct bio *bio, struct bio_set *bs, unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_zeroes_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_write_same_split(struct request_queue *q,
					    struct bio *bio,
					    struct bio_set *bs,
					    unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_same_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

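/*
 * Return the maximum number of sectors that may be submitted as a single
 * I/O starting at the bio's current sector, rounded down to the queue's
 * logical block size.
 */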
static inline unsigned get_max_io_size(struct request_queue *q,
				       struct bio *bio)
{
	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
	unsigned mask = queue_logical_block_size(q) - 1;

	/* aligned to logical block size */
	sectors &= ~(mask >> 9);

	return sectors;
}

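/*
 * Walk the bio's bvecs and split it where it would exceed the queue's
 * segment count, segment size, max I/O size, or BIO_MAX_PAGES limits.
 * On return *segs holds the number of physical segments in the
 * (possibly trimmed) bio.  Returns the split-off front part of the bio,
 * or NULL if no split was needed.
 */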
static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *segs)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned seg_size = 0, nsegs = 0, sectors = 0;
	unsigned front_seg_size = bio->bi_seg_front_size;
	bool do_split = true;
	struct bio *new = NULL;
	const unsigned max_sectors = get_max_io_size(q, bio);
	unsigned bvecs = 0;

	bio_for_each_segment(bv, bio, iter) {
		/*
		 * With arbitrary bio size, the incoming bio may be very
		 * big. We have to split the bio into small bios so that
		 * each holds at most BIO_MAX_PAGES bvecs because
		 * bio_clone() can fail to allocate big bvecs.
		 *
		 * It would be better to apply this limit per request
		 * queue in which bio_clone() is involved, instead of
		 * globally. The biggest blocker is the bio_clone() in
		 * bio bounce.
		 *
		 * If the bio is split for this reason, merging of the
		 * resulting bios could still be allowed, but we don't
		 * do that for now to keep the change simple.
		 *
		 * TODO: deal with bio bounce's bio_clone() gracefully
		 * and convert the global limit into a per-queue limit.
		 */
		if (bvecs++ >= BIO_MAX_PAGES)
			goto split;

		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
			goto split;

		if (sectors + (bv.bv_len >> 9) > max_sectors) {
			/*
			 * Consider this a new segment if we're splitting in
			 * the middle of this vector.
			 */
			if (nsegs < queue_max_segments(q) &&
			    sectors < max_sectors) {
				nsegs++;
				sectors = max_sectors;
			}
			if (sectors)
				goto split;
			/* Make this single bvec the first segment */
		}

		if (bvprvp && blk_queue_cluster(q)) {
			if (seg_size + bv.bv_len > queue_max_segment_size(q))
				goto new_segment;
			if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
				goto new_segment;
			if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
				goto new_segment;

			seg_size += bv.bv_len;
			bvprv = bv;
			bvprvp = &bvprv;
			sectors += bv.bv_len >> 9;

			if (nsegs == 1 && seg_size > front_seg_size)
				front_seg_size = seg_size;
			continue;
		}
new_segment:
		if (nsegs == queue_max_segments(q))
			goto split;

		nsegs++;
		bvprv = bv;
		bvprvp = &bvprv;
		seg_size = bv.bv_len;
		sectors += bv.bv_len >> 9;

		if (nsegs == 1 && seg_size > front_seg_size)
			front_seg_size = seg_size;
	}

	do_split = false;
split:
	*segs = nsegs;

	if (do_split) {
		new = bio_split(bio, sectors, GFP_NOIO, bs);
		if (new)
			bio = new;
	}

	bio->bi_seg_front_size = front_seg_size;
	if (seg_size > bio->bi_seg_back_size)
		bio->bi_seg_back_size = seg_size;

	return do_split ? new : NULL;
}

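/*
 * blk_queue_split - split a bio that exceeds the queue limits
 * @q:   the request queue the bio is being submitted to
 * @bio: the bio to check; on return it points at the front part
 * @bs:  bio set used to allocate split bios for the non-data ops
 *
 * If the bio has to be split, the split-off front part is chained to the
 * remainder, the remainder is resubmitted with generic_make_request(),
 * and *@bio is updated to the front part so the caller only has to deal
 * with a bio that fits the queue limits.
 */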
void blk_queue_split(struct request_queue *q, struct bio **bio,
		     struct bio_set *bs)
{
	struct bio *split, *res;
	unsigned nsegs;

	switch (bio_op(*bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		split = blk_bio_discard_split(q, *bio, bs, &nsegs);
		break;
	case REQ_OP_WRITE_ZEROES:
		split = blk_bio_write_zeroes_split(q, *bio, bs, &nsegs);
		break;
	case REQ_OP_WRITE_SAME:
		split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
		break;
	default:
		split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
		break;
	}

	/* physical segments can be figured out during splitting */
	res = split ? split : *bio;
	res->bi_phys_segments = nsegs;
	bio_set_flag(res, BIO_SEG_VALID);

	if (split) {
		/* there is no chance to merge the split bio */
		split->bi_opf |= REQ_NOMERGE;

		bio_chain(split, *bio);
		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
		generic_make_request(*bio);
		*bio = split;
	}
}
EXPORT_SYMBOL(blk_queue_split);

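/*
 * Count the physical segments in a bio chain, honouring the queue's
 * clustering, segment size, and segment boundary limits.  With
 * @no_sg_merge set, every bvec counts as its own segment.  The front/back
 * segment sizes of the first and last bios are updated as a side effect
 * for later use by request merging.
 */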
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio,
					     bool no_sg_merge)
{
	struct bio_vec bv, bvprv = { NULL };
	int cluster, prev = 0;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;
	struct bvec_iter iter;

	if (!bio)
		return 0;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	}

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, iter) {
			/*
			 * If SG merging is disabled, each bio vector is
			 * a segment
			 */
			if (no_sg_merge)
				goto new_segment;

			if (prev && cluster) {
				if (seg_size + bv.bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
					goto new_segment;

				seg_size += bv.bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			prev = 1;
			seg_size = bv.bv_len;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}

void blk_recalc_rq_segments(struct request *rq)
{
	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
			&rq->q->queue_flags);

	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
			no_sg_merge);
}

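/*
 * Recompute and cache bio->bi_phys_segments and set BIO_SEG_VALID.  When
 * SG merging is disabled and the bvec count already fits within the
 * queue's segment limit, the bvec count is used directly instead of
 * walking the bio.
 */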
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	unsigned short seg_cnt;

	/* estimate segment number by bi_vcnt for non-cloned bio */
	if (bio_flagged(bio, BIO_CLONED))
		seg_cnt = bio_segments(bio);
	else
		seg_cnt = bio->bi_vcnt;

	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
	    (seg_cnt < queue_max_segments(q)))
		bio->bi_phys_segments = seg_cnt;
	else {
		struct bio *nxt = bio->bi_next;

		bio->bi_next = NULL;
		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
		bio->bi_next = nxt;
	}

	bio_set_flag(bio, BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

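/*
 * Check whether the last segment of @bio and the first segment of @nxt
 * can be merged into a single physical segment without violating the
 * queue's segment size and boundary limits.
 */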
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	struct bio_vec end_bv = { NULL }, nxt_bv;

	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	bio_get_last_bvec(bio, &end_bv);
	bio_get_first_bvec(nxt, &nxt_bv);

	if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
		return 1;

	return 0;
}

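/*
 * Map one bvec into the scatterlist: either extend the current
 * scatterlist entry when clustering is enabled and the segment limits
 * allow it, or start a new entry.
 */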
static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec *bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{

	int nbytes = bvec->bv_len;

	if (*sg && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;

		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
			goto new_segment;
		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			sg_unmark_end(*sg);
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = *bvec;
}

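/*
 * Map a single bvec into the scatterlist.  Used for requests that carry
 * their data in one bvec, such as a special payload or a WRITE SAME bio.
 */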
static inline int __blk_bvec_map_sg(struct request_queue *q, struct bio_vec bv,
		struct scatterlist *sglist, struct scatterlist **sg)
{
	*sg = sglist;
	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
	return 1;
}

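/*
 * Map every segment of a bio chain into the scatterlist and return the
 * number of scatterlist entries used.
 */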
static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int cluster = blk_queue_cluster(q), nsegs = 0;

	for_each_bio(bio)
		bio_for_each_segment(bvec, bio, iter)
			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
					     &nsegs, &cluster);

	return nsegs;
}

/*
 * map a request to scatterlist, return number of sg entries set up. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs = 0;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		nsegs = __blk_bvec_map_sg(q, rq->special_vec, sglist, &sg);
	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
		nsegs = __blk_bvec_map_sg(q, bio_iovec(rq->bio), sglist, &sg);
	else if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

	if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (op_is_write(req_op(rq)))
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg_unmark_end(sg);
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	/*
	 * Something must have gone wrong if the computed number of
	 * segments is bigger than the number of the request's physical
	 * segments.
	 */
	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

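/*
 * Account for a bio being added to a request: check that the combined
 * segment count and the integrity payload still fit the queue limits,
 * then bump the request's physical segment count.  Returns 1 if the
 * merge is allowed, 0 otherwise.
 */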
static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (blk_integrity_merge_bio(q, req, bio) == false)
		goto no_merge;

	/*
	 * This will form the start of a new hw segment. Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req_set_nomerge(q, req);
	return 0;
}

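/*
 * ll_back_merge_fn() and ll_front_merge_fn() decide whether @bio may be
 * appended to or prepended to @req: merges that would create SG or
 * integrity gaps or exceed the maximum request size are rejected, and
 * the segment accounting is left to ll_new_hw_segment().
 */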
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		req_set_nomerge(q, req);
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{

	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
		req_set_nomerge(q, req);
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

/*
 * blk-mq uses req->special to carry normal driver per-request payload;
 * it does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
	struct request_queue *q = req->q;

	return !q->mq_ops && req->special;
}

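/*
 * Decide whether two adjacent requests may be combined into one: the
 * requests must not be re-queued specials, must not create gaps, and the
 * combined size, segment count, and integrity payload must stay within
 * the queue limits.  The front/back segment bookkeeping is adjusted when
 * the boundary segments of the two requests merge into one.
 */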
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check if either of the requests is a re-queued
	 * request. Can't merge them if they are.
	 */
	if (req_no_special_merge(req) || req_no_special_merge(next))
		return 0;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->rq_flags & RQF_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
		bio->bi_opf |= ff;
	}
	rq->rq_flags |= RQF_MIXED_MERGE;
}

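/*
 * A request is disappearing because it was merged into another one;
 * drop it from the per-partition in-flight accounting.
 */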
static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(cpu, part);
		part_dec_in_flight(part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
				     struct request *req, struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return NULL;

	if (req_op(req) != req_op(next))
		return NULL;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return NULL;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || req_no_special_merge(next))
		return NULL;

	if (req_op(req) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return NULL;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return NULL;

	/*
	 * If failfast settings disagree or either of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/*
	 * ownership of bio passed from next to req, return 'next' for
	 * the caller to free
	 */
	next->bio = NULL;
	return next;
}

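/*
 * Try to merge @rq with the request the elevator places directly after it
 * (attempt_back_merge) or directly before it (attempt_front_merge).
 * Returns the request that was absorbed and should be freed by the
 * caller, or NULL if no merge was performed.
 */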
struct request *attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return NULL;
}

struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return NULL;
}

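/*
 * Try to merge @next into @rq, consulting the legacy (single-queue)
 * elevator's allow_rq_merge hook first.  If the merge succeeds, the
 * absorbed request is released with __blk_put_request() and 1 is
 * returned; otherwise 0 is returned.
 */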
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	struct elevator_queue *e = q->elevator;
	struct request *free;

	if (!e->uses_mq && e->type->ops.sq.elevator_allow_rq_merge_fn)
		if (!e->type->ops.sq.elevator_allow_rq_merge_fn(q, rq, next))
			return 0;

	free = attempt_merge(q, rq, next);
	if (free) {
		__blk_put_request(q, free);
		return 1;
	}

	return 0;
}

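/*
 * Cheap checks for whether @bio could be merged into @rq at all: same
 * operation, same data direction, same device, not a special (prepared)
 * request, and compatible integrity and WRITE SAME attributes.
 */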
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (req_op(rq) != bio_op(bio))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* must be using the same buffer */
	if (req_op(rq) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	return true;
}

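/*
 * Report how @bio relates to @rq: a back merge if it starts right after
 * the request, a front merge if it ends right before it, a discard merge
 * for multi-range discards, or no merge at all.
 */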
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
	if (req_op(rq) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(rq->q) > 1)
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}