/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                                             struct bio *bio)
{
        struct bio_vec *bv, *bvprv = NULL;
        int cluster, i, high, highprv = 1;
        unsigned int seg_size, nr_phys_segs;
        struct bio *fbio, *bbio;

        if (!bio)
                return 0;

        fbio = bio;
        cluster = blk_queue_cluster(q);
        seg_size = 0;
        nr_phys_segs = 0;
        for_each_bio(bio) {
                bio_for_each_segment(bv, bio, i) {
                        /*
                         * the trick here is making sure that a high page is
                         * never considered part of another segment, since that
                         * might change with the bounce page.
                         */
                        high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q);
                        if (high || highprv)
                                goto new_segment;
                        if (cluster) {
                                if (seg_size + bv->bv_len
                                    > queue_max_segment_size(q))
                                        goto new_segment;
                                if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
                                        goto new_segment;
                                if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
                                        goto new_segment;

                                seg_size += bv->bv_len;
                                bvprv = bv;
                                continue;
                        }
new_segment:
                        if (nr_phys_segs == 1 && seg_size >
                            fbio->bi_seg_front_size)
                                fbio->bi_seg_front_size = seg_size;

                        nr_phys_segs++;
                        bvprv = bv;
                        seg_size = bv->bv_len;
                        highprv = high;
                }
                bbio = bio;
        }

        if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
                fbio->bi_seg_front_size = seg_size;
        if (seg_size > bbio->bi_seg_back_size)
                bbio->bi_seg_back_size = seg_size;

        return nr_phys_segs;
}

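/*
 * Worked example (editor's illustration, not part of the original file):
 * with clustering enabled, two physically contiguous 4KiB bvecs are
 * counted above as a single 8KiB physical segment, provided 8KiB does
 * not exceed queue_max_segment_size(q) and the pair does not straddle
 * the queue's segment boundary mask.  If any of those checks fail, or
 * the pages are not contiguous, control falls through to new_segment
 * and two segments are counted instead.
 */
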
void blk_recalc_rq_segments(struct request *rq)
{
        rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
}

void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
        struct bio *nxt = bio->bi_next;

        bio->bi_next = NULL;
        bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
        bio->bi_next = nxt;
        bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
                                   struct bio *nxt)
{
        if (!blk_queue_cluster(q))
                return 0;

        if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
            queue_max_segment_size(q))
                return 0;

        if (!bio_has_data(bio))
                return 1;

        if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
                return 0;

        /*
         * bio and nxt are contiguous in memory; check if the queue allows
         * these two to be merged into one
         */
        if (BIO_SEG_BOUNDARY(q, bio, nxt))
                return 1;

        return 0;
}

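/*
 * Worked example for blk_phys_contig_segment() above (editor's
 * illustration): if the trailing segment of "bio" is 32KiB
 * (bi_seg_back_size) and the leading segment of "nxt" is 40KiB
 * (bi_seg_front_size), joining them yields a 72KiB segment, which is
 * only permitted when queue_max_segment_size(q) is at least 72KiB and
 * the end of "bio" is physically contiguous with the start of "nxt"
 * without crossing the queue's segment boundary.
 */
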
static void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
                     struct scatterlist *sglist, struct bio_vec **bvprv,
                     struct scatterlist **sg, int *nsegs, int *cluster)
{

        int nbytes = bvec->bv_len;

        if (*bvprv && *cluster) {
                if ((*sg)->length + nbytes > queue_max_segment_size(q))
                        goto new_segment;

                if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec))
                        goto new_segment;
                if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec))
                        goto new_segment;

                (*sg)->length += nbytes;
        } else {
new_segment:
                if (!*sg)
                        *sg = sglist;
                else {
                        /*
                         * If the driver previously mapped a shorter
                         * list, we could see a termination bit
                         * prematurely unless it fully inits the sg
                         * table on each mapping. We KNOW that there
                         * must be more entries here or the driver
                         * would be buggy, so force clear the
                         * termination bit to avoid doing a full
                         * sg_init_table() in drivers for each command.
                         */
                        sg_unmark_end(*sg);
                        *sg = sg_next(*sg);
                }

                sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
                (*nsegs)++;
        }
        *bvprv = bvec;
}

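/*
 * Usage sketch (editor's illustration; "sglist" here stands for a
 * hypothetical driver-owned table): the termination-bit handling above
 * assumes the common pattern of initialising the scatterlist once and
 * reusing it for every command, relying on the end marker set by the
 * mapping helpers below:
 *
 *      sg_init_table(sglist, queue_max_segments(q));   // once, at setup
 *      ...
 *      nsegs = blk_rq_map_sg(q, rq, sglist);           // per command
 *
 * A stale end marker left by an earlier, shorter mapping is cleared with
 * sg_unmark_end() before the list is extended.
 */
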
/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
                  struct scatterlist *sglist)
{
        struct bio_vec *bvec, *bvprv;
        struct req_iterator iter;
        struct scatterlist *sg;
        int nsegs, cluster;

        nsegs = 0;
        cluster = blk_queue_cluster(q);

        /*
         * for each bio in rq
         */
        bvprv = NULL;
        sg = NULL;
        rq_for_each_segment(bvec, rq, iter) {
                __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
                                     &nsegs, &cluster);
        } /* segments in rq */


        if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
            (blk_rq_bytes(rq) & q->dma_pad_mask)) {
                unsigned int pad_len =
                        (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

                sg->length += pad_len;
                rq->extra_len += pad_len;
        }

        if (q->dma_drain_size && q->dma_drain_needed(rq)) {
                if (rq->cmd_flags & REQ_WRITE)
                        memset(q->dma_drain_buffer, 0, q->dma_drain_size);

                sg->page_link &= ~0x02;
                sg = sg_next(sg);
                sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
                            q->dma_drain_size,
                            ((unsigned long)q->dma_drain_buffer) &
                            (PAGE_SIZE - 1));
                nsegs++;
                rq->extra_len += q->dma_drain_size;
        }

        if (sg)
                sg_mark_end(sg);

        return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

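/*
 * Usage sketch (editor's illustration; "host" and its fields are
 * hypothetical): a driver typically hands the populated entries to the
 * DMA API using the count returned by blk_rq_map_sg():
 *
 *      nsegs = blk_rq_map_sg(q, rq, host->sglist);
 *      count = dma_map_sg(host->dev, host->sglist, nsegs,
 *                         rq_data_dir(rq) == WRITE ?
 *                         DMA_TO_DEVICE : DMA_FROM_DEVICE);
 *
 * The last populated entry has already been terminated via sg_mark_end().
 */
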
/**
 * blk_bio_map_sg - map a bio to a scatterlist
 * @q: request_queue in question
 * @bio: bio being mapped
 * @sglist: scatterlist being mapped
 *
 * Note:
 *    Caller must make sure sg can hold bio->bi_phys_segments entries
 *
 * Will return the number of sg entries setup
 */
int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
                   struct scatterlist *sglist)
{
        struct bio_vec *bvec, *bvprv;
        struct scatterlist *sg;
        int nsegs, cluster;
        unsigned long i;

        nsegs = 0;
        cluster = blk_queue_cluster(q);

        bvprv = NULL;
        sg = NULL;
        bio_for_each_segment(bvec, bio, i) {
                __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg,
                                     &nsegs, &cluster);
        } /* segments in bio */

        if (sg)
                sg_mark_end(sg);

        BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);
        return nsegs;
}
EXPORT_SYMBOL(blk_bio_map_sg);

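/*
 * Usage sketch (editor's illustration): blk_bio_map_sg() is the
 * single-bio counterpart of blk_rq_map_sg() for callers that work on a
 * bio rather than a whole request; "sglist" and "nr_entries" below are
 * hypothetical:
 *
 *      sg_init_table(sglist, nr_entries);
 *      nsegs = blk_bio_map_sg(q, bio, sglist);
 *
 * As the kernel-doc above notes, the caller must provide room for
 * bio->bi_phys_segments entries; the last used entry is end-marked.
 */
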
static inline int ll_new_hw_segment(struct request_queue *q,
                                    struct request *req,
                                    struct bio *bio)
{
        int nr_phys_segs = bio_phys_segments(q, bio);

        if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
                goto no_merge;

        if (bio_integrity(bio) && blk_integrity_merge_bio(q, req, bio))
                goto no_merge;

        /*
         * This will form the start of a new hw segment.  Bump both
         * counters.
         */
        req->nr_phys_segments += nr_phys_segs;
        return 1;

no_merge:
        req->cmd_flags |= REQ_NOMERGE;
        if (req == q->last_merge)
                q->last_merge = NULL;
        return 0;
}

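/*
 * Worked example (editor's illustration): with queue_max_segments(q) ==
 * 128, a request already using 126 physical segments can only absorb a
 * bio contributing at most 2 more.  Otherwise ll_new_hw_segment() above
 * sets REQ_NOMERGE on the request and clears q->last_merge so the
 * elevator stops offering it as a merge candidate.
 */
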
int ll_back_merge_fn(struct request_queue *q, struct request *req,
                     struct bio *bio)
{
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req)) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
                return 0;
        }
        if (!bio_flagged(req->biotail, BIO_SEG_VALID))
                blk_recount_segments(q, req->biotail);
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);

        return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
                      struct bio *bio)
{
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req)) {
                req->cmd_flags |= REQ_NOMERGE;
                if (req == q->last_merge)
                        q->last_merge = NULL;
                return 0;
        }
        if (!bio_flagged(bio, BIO_SEG_VALID))
                blk_recount_segments(q, bio);
        if (!bio_flagged(req->bio, BIO_SEG_VALID))
                blk_recount_segments(q, req->bio);

        return ll_new_hw_segment(q, req, bio);
}

/*
 * blk-mq uses req->special to carry normal driver per-request payload;
 * it does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
        struct request_queue *q = req->q;

        return !q->mq_ops && req->special;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
                                struct request *next)
{
        int total_phys_segments;
        unsigned int seg_size =
                req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

        /*
         * First check if either of the requests are re-queued
         * requests.  Can't merge them if they are.
         */
        if (req_no_special_merge(req) || req_no_special_merge(next))
                return 0;

        /*
         * Will it become too large?
         */
        if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
            blk_rq_get_max_sectors(req))
                return 0;

        total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
        if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
                if (req->nr_phys_segments == 1)
                        req->bio->bi_seg_front_size = seg_size;
                if (next->nr_phys_segments == 1)
                        next->biotail->bi_seg_back_size = seg_size;
                total_phys_segments--;
        }

        if (total_phys_segments > queue_max_segments(q))
                return 0;

        if (blk_integrity_rq(req) && blk_integrity_merge_rq(q, req, next))
                return 0;

        /* Merge is OK... */
        req->nr_phys_segments = total_phys_segments;
        return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
        unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
        struct bio *bio;

        if (rq->cmd_flags & REQ_MIXED_MERGE)
                return;

        /*
         * @rq will no longer represent mixable attributes for all the
         * contained bios.  It will just track those of the first one.
         * Distribute the attributes to each bio.
         */
        for (bio = rq->bio; bio; bio = bio->bi_next) {
                WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
                             (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
                bio->bi_rw |= ff;
        }
        rq->cmd_flags |= REQ_MIXED_MERGE;
}

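/*
 * Worked example (editor's illustration): if @rq currently carries
 * REQ_FAILFAST_DEV, blk_rq_set_mixed_merge() above ORs that bit into
 * bi_rw of every bio in the request, so the request no longer has to
 * stand for a single failfast setting shared by all of its bios, and it
 * is tagged REQ_MIXED_MERGE exactly once.
 */
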
static void blk_account_io_merge(struct request *req)
{
        if (blk_do_io_stat(req)) {
                struct hd_struct *part;
                int cpu;

                cpu = part_stat_lock();
                part = req->part;

                part_round_stats(cpu, part);
                part_dec_in_flight(part, rq_data_dir(req));

                hd_struct_put(part);
                part_stat_unlock();
        }
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
                         struct request *next)
{
        if (!rq_mergeable(req) || !rq_mergeable(next))
                return 0;

        if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
                return 0;

        /*
         * not contiguous
         */
        if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
                return 0;

        if (rq_data_dir(req) != rq_data_dir(next)
            || req->rq_disk != next->rq_disk
            || req_no_special_merge(next))
                return 0;

        if (req->cmd_flags & REQ_WRITE_SAME &&
            !blk_write_same_mergeable(req->bio, next->bio))
                return 0;

        /*
         * If we are allowed to merge, then append bio list
         * from next to rq and release next. merge_requests_fn
         * will have updated segment counts, update sector
         * counts here.
         */
        if (!ll_merge_requests_fn(q, req, next))
                return 0;

        /*
         * If failfast settings disagree or any of the two is already
         * a mixed merge, mark both as mixed before proceeding.  This
         * makes sure that all involved bios have mixable attributes
         * set properly.
         */
        if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
            (req->cmd_flags & REQ_FAILFAST_MASK) !=
            (next->cmd_flags & REQ_FAILFAST_MASK)) {
                blk_rq_set_mixed_merge(req);
                blk_rq_set_mixed_merge(next);
        }

        /*
         * At this point we have either done a back merge
         * or front merge. We need the smaller start_time of
         * the merged requests to be the current request
         * for accounting purposes.
         */
        if (time_after(req->start_time, next->start_time))
                req->start_time = next->start_time;

        req->biotail->bi_next = next->bio;
        req->biotail = next->biotail;

        req->__data_len += blk_rq_bytes(next);

        elv_merge_requests(q, req, next);

        /*
         * 'next' is going away, so update stats accordingly
         */
        blk_account_io_merge(next);

        req->ioprio = ioprio_best(req->ioprio, next->ioprio);
        if (blk_rq_cpu_valid(next))
                req->cpu = next->cpu;

        /* ownership of bio passed from next to req */
        next->bio = NULL;
        __blk_put_request(q, next);
        return 1;
}

int attempt_back_merge(struct request_queue *q, struct request *rq)
{
        struct request *next = elv_latter_request(q, rq);

        if (next)
                return attempt_merge(q, rq, next);

        return 0;
}

int attempt_front_merge(struct request_queue *q, struct request *rq)
{
        struct request *prev = elv_former_request(q, rq);

        if (prev)
                return attempt_merge(q, prev, rq);

        return 0;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
                          struct request *next)
{
        return attempt_merge(q, rq, next);
}

bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
        if (!rq_mergeable(rq) || !bio_mergeable(bio))
                return false;

        if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
                return false;

        /* different data direction or already started, don't merge */
        if (bio_data_dir(bio) != rq_data_dir(rq))
                return false;

        /* must be same device and not a special request */
        if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
                return false;

        /* only merge integrity protected bio into ditto rq */
        if (bio_integrity(bio) != blk_integrity_rq(rq))
                return false;

        /* must be using the same buffer */
        if (rq->cmd_flags & REQ_WRITE_SAME &&
            !blk_write_same_mergeable(rq->bio, bio))
                return false;

        return true;
}

int blk_try_merge(struct request *rq, struct bio *bio)
{
        if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
                return ELEVATOR_BACK_MERGE;
        else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
                return ELEVATOR_FRONT_MERGE;
        return ELEVATOR_NO_MERGE;
}
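
/*
 * Worked example (editor's illustration): for a request with
 * blk_rq_pos(rq) == 2048 and blk_rq_sectors(rq) == 8, a bio whose
 * bi_iter.bi_sector is 2056 starts exactly where the request ends and is
 * offered as ELEVATOR_BACK_MERGE; an 8-sector bio at sector 2040 ends
 * where the request starts and is offered as ELEVATOR_FRONT_MERGE;
 * anything else yields ELEVATOR_NO_MERGE.
 */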