/*
 * MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
 * for the blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe <[email protected]>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/*
 * See Documentation/block/deadline-iosched.txt
 */
static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
                                           by the above parameters. For throughput. */

struct deadline_data {
        /*
         * run time data
         */

        /*
         * requests (deadline_rq s) are present on both sort_list and fifo_list
         */
        struct rb_root sort_list[2];
        struct list_head fifo_list[2];

        /*
         * next in sort order. read, write or both are NULL
         */
        struct request *next_rq[2];
        unsigned int batching;          /* number of sequential requests made */
        unsigned int starved;           /* times reads have starved writes */

        /*
         * settings that change how the i/o scheduler behaves
         */
        int fifo_expire[2];
        int fifo_batch;
        int writes_starved;
        int front_merges;

        spinlock_t lock;
        spinlock_t zone_lock;
        struct list_head dispatch;
};

static inline struct rb_root *
deadline_rb_root(struct deadline_data *dd, struct request *rq)
{
        return &dd->sort_list[rq_data_dir(rq)];
}

/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
        struct rb_node *node = rb_next(&rq->rb_node);

        if (node)
                return rb_entry_rq(node);

        return NULL;
}

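/*
 * Insert rq into the rbtree for its data direction, keyed by start
 * sector (elv_rb_add() sorts on blk_rq_pos()).
 */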
static void
deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
{
        struct rb_root *root = deadline_rb_root(dd, rq);

        elv_rb_add(root, rq);
}

static inline void
deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
{
        const int data_dir = rq_data_dir(rq);

        if (dd->next_rq[data_dir] == rq)
                dd->next_rq[data_dir] = deadline_latter_request(rq);

        elv_rb_del(deadline_rb_root(dd, rq), rq);
}

/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q, struct request *rq)
{
        struct deadline_data *dd = q->elevator->elevator_data;

        list_del_init(&rq->queuelist);

        /*
         * We might not be on the rbtree, if we are doing an insert merge
         */
        if (!RB_EMPTY_NODE(&rq->rb_node))
                deadline_del_rq_rb(dd, rq);

        elv_rqhash_del(q, rq);
        if (q->last_merge == rq)
                q->last_merge = NULL;
}

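/*
 * Called after req has absorbed a bio. Only a front merge changes the
 * request's start sector, which is its key in the sort rbtree, so only
 * that case requires a reposition.
 */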
static void dd_request_merged(struct request_queue *q, struct request *req,
                              enum elv_merge type)
{
        struct deadline_data *dd = q->elevator->elevator_data;

        /*
         * if the merge was a front merge, we need to reposition request
         */
        if (type == ELEVATOR_FRONT_MERGE) {
                elv_rb_del(deadline_rb_root(dd, req), req);
                deadline_add_rq_rb(dd, req);
        }
}

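/*
 * Called when req and next have been merged into a single request; next
 * is about to be freed, so purge all scheduler state that refers to it.
 */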
static void dd_merged_requests(struct request_queue *q, struct request *req,
                               struct request *next)
{
        /*
         * if next expires before rq, assign its expire time to rq
         * and move into next position (next will be deleted) in fifo
         */
        if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
                if (time_before((unsigned long)next->fifo_time,
                                (unsigned long)req->fifo_time)) {
                        list_move(&req->queuelist, &next->queuelist);
                        req->fifo_time = next->fifo_time;
                }
        }

        /*
         * kill knowledge of next, this one is a goner
         */
        deadline_remove_request(q, next);
}

/*
 * remove rq from the sort and fifo lists in preparation for dispatch,
 * remembering the request that follows it in sector order so the next
 * dispatch can continue the sequential batch
 */
static void
deadline_move_request(struct deadline_data *dd, struct request *rq)
{
        const int data_dir = rq_data_dir(rq);

        dd->next_rq[READ] = NULL;
        dd->next_rq[WRITE] = NULL;
        dd->next_rq[data_dir] = deadline_latter_request(rq);

        /*
         * take it off the sort and fifo list
         */
        deadline_remove_request(rq->q, rq);
}

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
        struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);

        /*
         * rq is expired!
         */
        if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
                return 1;

        return 0;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using arrival ordered lists.
 */
static struct request *
deadline_fifo_request(struct deadline_data *dd, int data_dir)
{
        struct request *rq;
        unsigned long flags;

        if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
                return NULL;

        if (list_empty(&dd->fifo_list[data_dir]))
                return NULL;

        rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
        if (data_dir == READ || !blk_queue_is_zoned(rq->q))
                return rq;

        /*
         * Look for a write request that can be dispatched, that is one with
         * an unlocked target zone.
         */
        spin_lock_irqsave(&dd->zone_lock, flags);
        list_for_each_entry(rq, &dd->fifo_list[WRITE], queuelist) {
                if (blk_req_can_dispatch_to_zone(rq))
                        goto out;
        }
        rq = NULL;
out:
        spin_unlock_irqrestore(&dd->zone_lock, flags);

        return rq;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using sector position sorted lists.
 */
static struct request *
deadline_next_request(struct deadline_data *dd, int data_dir)
{
        struct request *rq;
        unsigned long flags;

        if (WARN_ON_ONCE(data_dir != READ && data_dir != WRITE))
                return NULL;

        rq = dd->next_rq[data_dir];
        if (!rq)
                return NULL;

        if (data_dir == READ || !blk_queue_is_zoned(rq->q))
                return rq;

        /*
         * Look for a write request that can be dispatched, that is one with
         * an unlocked target zone.
         */
        spin_lock_irqsave(&dd->zone_lock, flags);
        while (rq) {
                if (blk_req_can_dispatch_to_zone(rq))
                        break;
                rq = deadline_latter_request(rq);
        }
        spin_unlock_irqrestore(&dd->zone_lock, flags);

        return rq;
}

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static struct request *__dd_dispatch_request(struct deadline_data *dd)
{
        struct request *rq, *next_rq;
        bool reads, writes;
        int data_dir;

        if (!list_empty(&dd->dispatch)) {
                rq = list_first_entry(&dd->dispatch, struct request, queuelist);
                list_del_init(&rq->queuelist);
                goto done;
        }

        reads = !list_empty(&dd->fifo_list[READ]);
        writes = !list_empty(&dd->fifo_list[WRITE]);

        /*
         * batches are currently reads XOR writes: at most one of the
         * next_rq[] pointers is non-NULL, namely the one for the
         * direction of the last dispatched request
         */
        rq = deadline_next_request(dd, WRITE);
        if (!rq)
                rq = deadline_next_request(dd, READ);

        if (rq && dd->batching < dd->fifo_batch)
                /* we have a next request and are still entitled to batch */
                goto dispatch_request;

        /*
         * at this point we are not running a batch. select the appropriate
         * data direction (read / write)
         */

        if (reads) {
                BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

                if (deadline_fifo_request(dd, WRITE) &&
                    (dd->starved++ >= dd->writes_starved))
                        goto dispatch_writes;

                data_dir = READ;

                goto dispatch_find_request;
        }

        /*
         * there are either no reads or writes have been starved
         */

        if (writes) {
dispatch_writes:
                BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

                dd->starved = 0;

                data_dir = WRITE;

                goto dispatch_find_request;
        }

        return NULL;

dispatch_find_request:
        /*
         * we are not running a batch, find best request for selected data_dir
         */
        next_rq = deadline_next_request(dd, data_dir);
        if (deadline_check_fifo(dd, data_dir) || !next_rq) {
                /*
                 * A deadline has expired, the last request was in the other
                 * direction, or we have run out of higher-sectored requests.
                 * Start again from the request with the earliest expiry time.
                 */
                rq = deadline_fifo_request(dd, data_dir);
        } else {
                /*
                 * The last req was the same dir and we have a next request in
                 * sort order. No expired requests so continue on from here.
                 */
                rq = next_rq;
        }

        /*
         * For a zoned block device, if we only have writes queued and none of
         * them can be dispatched, rq will be NULL.
         */
        if (!rq)
                return NULL;

        dd->batching = 0;

dispatch_request:
        /*
         * rq is the selected appropriate request.
         */
        dd->batching++;
        deadline_move_request(dd, rq);
done:
        /*
         * If the request needs its target zone locked, do it.
         */
        blk_req_zone_write_lock(rq);
        rq->rq_flags |= RQF_STARTED;
        return rq;
}

/*
 * One confusing aspect here is that we get called for a specific
 * hardware queue, but we may return a request that is not for that
 * hardware queue. This is because mq-deadline has shared state for
 * all hardware queues, in terms of sorting, FIFOs, etc.
 */
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
        struct deadline_data *dd = hctx->queue->elevator->elevator_data;
        struct request *rq;

        spin_lock(&dd->lock);
        rq = __dd_dispatch_request(dd);
        spin_unlock(&dd->lock);

        return rq;
}

static void dd_exit_queue(struct elevator_queue *e)
{
        struct deadline_data *dd = e->elevator_data;

        BUG_ON(!list_empty(&dd->fifo_list[READ]));
        BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

        kfree(dd);
}

/*
 * initialize elevator private data (deadline_data).
 */
static int dd_init_queue(struct request_queue *q, struct elevator_type *e)
{
        struct deadline_data *dd;
        struct elevator_queue *eq;

        eq = elevator_alloc(q, e);
        if (!eq)
                return -ENOMEM;

        dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
        if (!dd) {
                kobject_put(&eq->kobj);
                return -ENOMEM;
        }
        eq->elevator_data = dd;

        INIT_LIST_HEAD(&dd->fifo_list[READ]);
        INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
        dd->sort_list[READ] = RB_ROOT;
        dd->sort_list[WRITE] = RB_ROOT;
        dd->fifo_expire[READ] = read_expire;
        dd->fifo_expire[WRITE] = write_expire;
        dd->writes_starved = writes_starved;
        dd->front_merges = 1;
        dd->fifo_batch = fifo_batch;
        spin_lock_init(&dd->lock);
        spin_lock_init(&dd->zone_lock);
        INIT_LIST_HEAD(&dd->dispatch);

        q->elevator = eq;
        return 0;
}

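/*
 * Check whether bio can be front merged into an existing request: look
 * for a request whose start sector equals bio_end_sector(bio), i.e. one
 * that begins exactly where this bio ends.
 */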
static int dd_request_merge(struct request_queue *q, struct request **rq,
                            struct bio *bio)
{
        struct deadline_data *dd = q->elevator->elevator_data;
        sector_t sector = bio_end_sector(bio);
        struct request *__rq;

        if (!dd->front_merges)
                return ELEVATOR_NO_MERGE;

        __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
        if (__rq) {
                BUG_ON(sector != blk_rq_pos(__rq));

                if (elv_bio_merge_ok(__rq, bio)) {
                        *rq = __rq;
                        return ELEVATOR_FRONT_MERGE;
                }
        }

        return ELEVATOR_NO_MERGE;
}

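/*
 * Attempt to merge bio into a queued request. If the merge empties an
 * adjacent request, blk_mq_sched_try_merge() hands it back through
 * @free, and it is released here after dd->lock has been dropped.
 */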
static bool dd_bio_merge(struct blk_mq_hw_ctx *hctx, struct bio *bio)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;
        struct request *free = NULL;
        bool ret;

        spin_lock(&dd->lock);
        ret = blk_mq_sched_try_merge(q, bio, &free);
        spin_unlock(&dd->lock);

        if (free)
                blk_mq_free_request(free);

        return ret;
}

/*
 * add rq to rbtree and fifo
 */
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
                              bool at_head)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;
        const int data_dir = rq_data_dir(rq);

        /*
         * This may be a requeue of a write request that has locked its
         * target zone. If it is the case, this releases the zone lock.
         */
        blk_req_zone_write_unlock(rq);

        if (blk_mq_sched_try_insert_merge(q, rq))
                return;

        blk_mq_sched_request_inserted(rq);

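        /*
         * Requests inserted at head and passthrough requests bypass the
         * sort/fifo machinery and go directly on the dispatch list.
         */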
        if (at_head || blk_rq_is_passthrough(rq)) {
                if (at_head)
                        list_add(&rq->queuelist, &dd->dispatch);
                else
                        list_add_tail(&rq->queuelist, &dd->dispatch);
        } else {
                deadline_add_rq_rb(dd, rq);

                if (rq_mergeable(rq)) {
                        elv_rqhash_add(q, rq);
                        if (!q->last_merge)
                                q->last_merge = rq;
                }

                /*
                 * set expire time and add to fifo list
                 */
                rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
                list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
        }
}

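/*
 * Insert a list of requests under a single acquisition of dd->lock.
 */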
static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
                               struct list_head *list, bool at_head)
{
        struct request_queue *q = hctx->queue;
        struct deadline_data *dd = q->elevator->elevator_data;

        spin_lock(&dd->lock);
        while (!list_empty(list)) {
                struct request *rq;

                rq = list_first_entry(list, struct request, queuelist);
                list_del_init(&rq->queuelist);
                dd_insert_request(hctx, rq, at_head);
        }
        spin_unlock(&dd->lock);
}

/*
 * Nothing to do here. This is defined only to ensure that the
 * .finish_request method is called upon request completion.
 */
static void dd_prepare_request(struct request *rq, struct bio *bio)
{
}

/*
 * For zoned block devices, write unlock the target zone of
 * completed write requests. Do this while holding the zone lock
 * spinlock so that the zone is never unlocked while deadline_fifo_request()
 * or deadline_next_request() are executing. This function is called for
 * all requests, whether or not these requests complete successfully.
 */
static void dd_finish_request(struct request *rq)
{
        struct request_queue *q = rq->q;

        if (blk_queue_is_zoned(q)) {
                struct deadline_data *dd = q->elevator->elevator_data;
                unsigned long flags;

                spin_lock_irqsave(&dd->zone_lock, flags);
                blk_req_zone_write_unlock(rq);
                spin_unlock_irqrestore(&dd->zone_lock, flags);
        }
}

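/*
 * Called by blk-mq to check whether this scheduler still has requests
 * to dispatch. list_empty_careful() allows the check to be made without
 * taking dd->lock.
 */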
static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
        struct deadline_data *dd = hctx->queue->elevator->elevator_data;

        return !list_empty_careful(&dd->dispatch) ||
                !list_empty_careful(&dd->fifo_list[0]) ||
                !list_empty_careful(&dd->fifo_list[1]);
}

/*
 * sysfs parts below
 */
static ssize_t
deadline_var_show(int var, char *page)
{
        return sprintf(page, "%d\n", var);
}

static void
deadline_var_store(int *var, const char *page)
{
        char *p = (char *) page;

        *var = simple_strtol(p, &p, 10);
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)                            \
static ssize_t __FUNC(struct elevator_queue *e, char *page)             \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data = __VAR;                                             \
        if (__CONV)                                                     \
                __data = jiffies_to_msecs(__data);                      \
        return deadline_var_show(__data, (page));                       \
}
SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)                 \
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{                                                                       \
        struct deadline_data *dd = e->elevator_data;                    \
        int __data;                                                     \
        deadline_var_store(&__data, (page));                            \
        if (__data < (MIN))                                             \
                __data = (MIN);                                         \
        else if (__data > (MAX))                                        \
                __data = (MAX);                                         \
        if (__CONV)                                                     \
                *(__PTR) = msecs_to_jiffies(__data);                    \
        else                                                            \
                *(__PTR) = __data;                                      \
        return count;                                                   \
}
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION

#define DD_ATTR(name) \
        __ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
        DD_ATTR(read_expire),
        DD_ATTR(write_expire),
        DD_ATTR(writes_starved),
        DD_ATTR(front_merges),
        DD_ATTR(fifo_batch),
        __ATTR_NULL
};

#ifdef CONFIG_BLK_DEBUG_FS
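/*
 * debugfs attributes: expose each direction's FIFO list and cached
 * next_rq, the batching and starved counters, and the dispatch list.
 */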
#define DEADLINE_DEBUGFS_DDIR_ATTRS(ddir, name)                         \
static void *deadline_##name##_fifo_start(struct seq_file *m,           \
                                          loff_t *pos)                  \
        __acquires(&dd->lock)                                           \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
                                                                        \
        spin_lock(&dd->lock);                                           \
        return seq_list_start(&dd->fifo_list[ddir], *pos);              \
}                                                                       \
                                                                        \
static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,   \
                                         loff_t *pos)                   \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
                                                                        \
        return seq_list_next(v, &dd->fifo_list[ddir], pos);             \
}                                                                       \
                                                                        \
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)    \
        __releases(&dd->lock)                                           \
{                                                                       \
        struct request_queue *q = m->private;                           \
        struct deadline_data *dd = q->elevator->elevator_data;          \
                                                                        \
        spin_unlock(&dd->lock);                                         \
}                                                                       \
                                                                        \
static const struct seq_operations deadline_##name##_fifo_seq_ops = {   \
        .start  = deadline_##name##_fifo_start,                         \
        .next   = deadline_##name##_fifo_next,                          \
        .stop   = deadline_##name##_fifo_stop,                          \
        .show   = blk_mq_debugfs_rq_show,                               \
};                                                                      \
                                                                        \
static int deadline_##name##_next_rq_show(void *data,                   \
                                          struct seq_file *m)           \
{                                                                       \
        struct request_queue *q = data;                                 \
        struct deadline_data *dd = q->elevator->elevator_data;          \
        struct request *rq = dd->next_rq[ddir];                         \
                                                                        \
        if (rq)                                                         \
                __blk_mq_debugfs_rq_show(m, rq);                        \
        return 0;                                                       \
}
DEADLINE_DEBUGFS_DDIR_ATTRS(READ, read)
DEADLINE_DEBUGFS_DDIR_ATTRS(WRITE, write)
#undef DEADLINE_DEBUGFS_DDIR_ATTRS

static int deadline_batching_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct deadline_data *dd = q->elevator->elevator_data;

        seq_printf(m, "%u\n", dd->batching);
        return 0;
}

static int deadline_starved_show(void *data, struct seq_file *m)
{
        struct request_queue *q = data;
        struct deadline_data *dd = q->elevator->elevator_data;

        seq_printf(m, "%u\n", dd->starved);
        return 0;
}

static void *deadline_dispatch_start(struct seq_file *m, loff_t *pos)
        __acquires(&dd->lock)
{
        struct request_queue *q = m->private;
        struct deadline_data *dd = q->elevator->elevator_data;

        spin_lock(&dd->lock);
        return seq_list_start(&dd->dispatch, *pos);
}

static void *deadline_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct request_queue *q = m->private;
        struct deadline_data *dd = q->elevator->elevator_data;

        return seq_list_next(v, &dd->dispatch, pos);
}

static void deadline_dispatch_stop(struct seq_file *m, void *v)
        __releases(&dd->lock)
{
        struct request_queue *q = m->private;
        struct deadline_data *dd = q->elevator->elevator_data;

        spin_unlock(&dd->lock);
}

static const struct seq_operations deadline_dispatch_seq_ops = {
        .start  = deadline_dispatch_start,
        .next   = deadline_dispatch_next,
        .stop   = deadline_dispatch_stop,
        .show   = blk_mq_debugfs_rq_show,
};

#define DEADLINE_QUEUE_DDIR_ATTRS(name)                                      \
        {#name "_fifo_list", 0400, .seq_ops = &deadline_##name##_fifo_seq_ops}, \
        {#name "_next_rq", 0400, deadline_##name##_next_rq_show}
static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
        DEADLINE_QUEUE_DDIR_ATTRS(read),
        DEADLINE_QUEUE_DDIR_ATTRS(write),
        {"batching", 0400, deadline_batching_show},
        {"starved", 0400, deadline_starved_show},
        {"dispatch", 0400, .seq_ops = &deadline_dispatch_seq_ops},
        {},
};
#undef DEADLINE_QUEUE_DDIR_ATTRS
#endif

static struct elevator_type mq_deadline = {
        .ops.mq = {
                .insert_requests = dd_insert_requests,
                .dispatch_request = dd_dispatch_request,
                .prepare_request = dd_prepare_request,
                .finish_request = dd_finish_request,
                .next_request = elv_rb_latter_request,
                .former_request = elv_rb_former_request,
                .bio_merge = dd_bio_merge,
                .request_merge = dd_request_merge,
                .requests_merged = dd_merged_requests,
                .request_merged = dd_request_merged,
                .has_work = dd_has_work,
                .init_sched = dd_init_queue,
                .exit_sched = dd_exit_queue,
        },

        .uses_mq = true,
#ifdef CONFIG_BLK_DEBUG_FS
        .queue_debugfs_attrs = deadline_queue_debugfs_attrs,
#endif
        .elevator_attrs = deadline_attrs,
        .elevator_name = "mq-deadline",
        .elevator_alias = "deadline",
        .elevator_owner = THIS_MODULE,
};
MODULE_ALIAS("mq-deadline-iosched");

static int __init deadline_init(void)
{
        return elv_register(&mq_deadline);
}

static void __exit deadline_exit(void)
{
        elv_unregister(&mq_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ deadline IO scheduler");