/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

void blk_recalc_rq_sectors(struct request *rq, int nsect)
{
	if (blk_fs_request(rq) || blk_discard_rq(rq)) {
		rq->hard_sector += nsect;
		rq->hard_nr_sectors -= nsect;

		/*
		 * Move the I/O submission pointers ahead if required.
		 */
		if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
		    (rq->sector <= rq->hard_sector)) {
			rq->sector = rq->hard_sector;
			rq->nr_sectors = rq->hard_nr_sectors;
			rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
			rq->current_nr_sectors = rq->hard_cur_sectors;
			rq->buffer = bio_data(rq->bio);
		}

		/*
		 * if total number of sectors is less than the first segment
		 * size, something has gone terribly wrong
		 */
		if (rq->nr_sectors < rq->current_nr_sectors) {
			printk(KERN_ERR "blk: request botched\n");
			rq->nr_sectors = rq->current_nr_sectors;
		}
	}
}
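
/*
 * Example (editor's illustration, not from the original source): for a
 * 64-sector filesystem request that has just completed its first 8 sectors,
 * the caller passes nsect == 8; hard_sector advances by 8, hard_nr_sectors
 * drops to 56, and if the soft submission pointers now lag behind,
 * sector/nr_sectors/buffer are re-synced to the current bio.
 */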

static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio)
{
	unsigned int phys_size;
	struct bio_vec *bv, *bvprv = NULL;
	int cluster, i, high, highprv = 1;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;

	if (!bio)
		return 0;

	fbio = bio;
	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
	seg_size = 0;
	phys_size = nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, i) {
			/*
			 * the trick here is making sure that a high page is
			 * never considered part of another segment, since that
			 * might change with the bounce page.
			 */
			high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
			if (high || highprv)
				goto new_segment;
			if (cluster) {
				if (seg_size + bv->bv_len > q->max_segment_size)
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
					goto new_segment;

				seg_size += bv->bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			seg_size = bv->bv_len;
			highprv = high;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}
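
/*
 * Note (editor's addition): besides counting physical segments, the loop
 * above records the byte size of the first segment in
 * fbio->bi_seg_front_size and of the last segment in bbio->bi_seg_back_size.
 * blk_phys_contig_segment() and ll_merge_requests_fn() below consult these
 * so that a segment formed across a merge never exceeds q->max_segment_size.
 */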

void blk_recalc_rq_segments(struct request *rq)
{
	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
}

void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	struct bio *nxt = bio->bi_next;

	bio->bi_next = NULL;
	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
	bio->bi_next = nxt;
	bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);
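
/*
 * Typical call pattern (editor's illustration; the same idiom appears in
 * ll_back_merge_fn()/ll_front_merge_fn() below): only recount a bio's
 * physical segments when its cached count has been invalidated.  Guarded
 * out so it is never built.
 */
#if 0
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
#endif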

static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    q->max_segment_size)
		return 0;

	if (!bio_has_data(bio))
		return 1;

	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIO_SEG_BOUNDARY(q, bio, nxt))
		return 1;

	return 0;
}
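
/*
 * Note (editor's addition): blk_phys_contig_segment() is used by
 * ll_merge_requests_fn() below to decide whether the last segment of one
 * request and the first segment of the next would collapse into a single
 * physical segment after a merge.
 */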

/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct bio_vec *bvec, *bvprv;
	struct req_iterator iter;
	struct scatterlist *sg;
	int nsegs, cluster;

	nsegs = 0;
	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);

	/*
	 * for each bio in rq
	 */
	bvprv = NULL;
	sg = NULL;
	rq_for_each_segment(bvec, rq, iter) {
		int nbytes = bvec->bv_len;

		if (bvprv && cluster) {
			if (sg->length + nbytes > q->max_segment_size)
				goto new_segment;

			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
				goto new_segment;
			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
				goto new_segment;

			sg->length += nbytes;
		} else {
new_segment:
			if (!sg)
				sg = sglist;
			else {
				/*
				 * If the driver previously mapped a shorter
				 * list, we could see a termination bit
				 * prematurely unless it fully inits the sg
				 * table on each mapping. We KNOW that there
				 * must be more entries here or the driver
				 * would be buggy, so force clear the
				 * termination bit to avoid doing a full
				 * sg_init_table() in drivers for each command.
				 */
				sg->page_link &= ~0x02;
				sg = sg_next(sg);
			}

			sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
			nsegs++;
		}
		bvprv = bvec;
	} /* segments in rq */

	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (rq->data_len & q->dma_pad_mask)) {
		unsigned int pad_len = (q->dma_pad_mask & ~rq->data_len) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (rq->cmd_flags & REQ_RW)
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg->page_link &= ~0x02;
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);
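
/*
 * Illustrative sketch (editor's addition, not part of blk-merge.c): how a
 * block driver typically consumes blk_rq_map_sg().  The helper name and the
 * device argument are hypothetical; sg_init_table(), blk_rq_map_sg() and
 * dma_map_sg() are the interfaces being demonstrated.  Guarded out so it is
 * never built (it would also need <linux/dma-mapping.h>).
 */
#if 0
static int example_prep_rq_for_dma(struct request_queue *q, struct request *rq,
				   struct device *dev,
				   struct scatterlist *sglist)
{
	int nsegs, count;

	/* sglist must provide at least rq->nr_phys_segments entries */
	sg_init_table(sglist, rq->nr_phys_segments);

	/* collapse the request's bio_vecs into scatterlist entries */
	nsegs = blk_rq_map_sg(q, rq, sglist);

	/* hand the list to the DMA layer, which may coalesce it further */
	count = dma_map_sg(dev, sglist, nsegs,
			   rq_data_dir(rq) == WRITE ? DMA_TO_DEVICE
						    : DMA_FROM_DEVICE);
	if (!count)
		return -EIO;

	return count;
}
#endif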

static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments
	    || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}

	/*
	 * This will form the start of a new hw segment.  Bump the
	 * physical segment counter.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;
}

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	unsigned short max_sectors;

	if (unlikely(blk_pc_request(req)))
		max_sectors = q->max_hw_sectors;
	else
		max_sectors = q->max_sectors;

	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	unsigned short max_sectors;

	if (unlikely(blk_pc_request(req)))
		max_sectors = q->max_hw_sectors;
	else
		max_sectors = q->max_sectors;

	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}
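
/*
 * Context (editor's addition): ll_back_merge_fn() checks whether a bio may be
 * appended to the tail of an existing request, ll_front_merge_fn() whether it
 * may be placed in front of one.  Below is a simplified, hypothetical sketch
 * of a caller dispatching on the elevator's verdict; the real submission path
 * also handles locking, accounting and the actual bio linking.  Guarded out
 * so it is never built.
 */
#if 0
	switch (elv_merge(q, &req, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (ll_back_merge_fn(q, req, bio))
			/* link bio after req->biotail */;
		break;
	case ELEVATOR_FRONT_MERGE:
		if (ll_front_merge_fn(q, req, bio))
			/* link bio before req->bio */;
		break;
	default:
		/* no merge candidate; set up a new request */
		break;
	}
#endif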

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check if either of the requests are re-queued
	 * requests.  Can't merge them if they are.
	 */
	if (req->special || next->special)
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > q->max_phys_segments)
		return 0;

	if (total_phys_segments > q->max_hw_segments)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = disk_map_sector_rcu(req->rq_disk, req->sector);

		part_round_stats(cpu, part);
		part_dec_in_flight(part);

		part_stat_unlock();
	}
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
			 struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	/*
	 * not contiguous
	 */
	if (req->sector + req->nr_sectors != next->sector)
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || next->special)
		return 0;

	if (blk_integrity_rq(req) != blk_integrity_rq(next))
		return 0;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to req and release next. merge_requests_fn
	 * will have updated segment counts; update sector
	 * counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;

	elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/* ownership of bio passed from next to req */
	next->bio = NULL;
	__blk_put_request(q, next);
	return 1;
}

int attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

int attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}
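
/*
 * Note (editor's addition): after a request has grown by absorbing a bio,
 * the submission path can call attempt_back_merge()/attempt_front_merge()
 * to check whether it has now become contiguous with its elevator neighbour
 * (elv_latter_request()/elv_former_request()) and, if so, collapse the two
 * requests via attempt_merge().
 */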