block/mq-deadline.c
// SPDX-License-Identifier: GPL-2.0
/*
 * MQ Deadline i/o scheduler - adaptation of the legacy deadline scheduler,
 * for the blk-mq scheduling framework
 *
 * Copyright (C) 2016 Jens Axboe <[email protected]>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/sbitmap.h>

#include <trace/events/block.h>

#include "elevator.h"
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-mq-sched.h"

/*
 * See Documentation/block/deadline-iosched.rst
 */
static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;    /* max times reads can starve a write */
static const int fifo_batch = 16;       /* # of sequential requests treated as one
					   by the above parameters. For throughput. */

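/*
 * For example, with the defaults above a read is considered expired 500 ms
 * (HZ / 2 jiffies) after insertion and a write after 5 s, and up to 16
 * sequential requests may be dispatched as one batch before the FIFO
 * deadlines are rechecked.
 */
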
enum dd_data_dir {
	DD_READ		= READ,
	DD_WRITE	= WRITE,
};

enum { DD_DIR_COUNT = 2 };

enum dd_prio {
	DD_RT_PRIO	= 0,
	DD_BE_PRIO	= 1,
	DD_IDLE_PRIO	= 2,
	DD_PRIO_MAX	= 2,
};

enum { DD_PRIO_COUNT = 3 };

/* I/O statistics per I/O priority. */
struct io_stats_per_prio {
	local_t inserted;
	local_t merged;
	local_t dispatched;
	local_t completed;
};

/* I/O statistics for all I/O priorities (enum dd_prio). */
struct io_stats {
	struct io_stats_per_prio stats[DD_PRIO_COUNT];
};

/*
 * Deadline scheduler data per I/O priority (enum dd_prio). Requests are
 * present on both sort_list[] and fifo_list[].
 */
struct dd_per_prio {
	struct list_head dispatch;
	struct rb_root sort_list[DD_DIR_COUNT];
	struct list_head fifo_list[DD_DIR_COUNT];
	/* Next request in FIFO order. Read, write or both are NULL. */
	struct request *next_rq[DD_DIR_COUNT];
};

struct deadline_data {
	/*
	 * run time data
	 */

	struct dd_per_prio per_prio[DD_PRIO_COUNT];

	/* Data direction of latest dispatched request. */
	enum dd_data_dir last_dir;
	unsigned int batching;		/* number of sequential requests made */
	unsigned int starved;		/* times reads have starved writes */

	struct io_stats __percpu *stats;

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[DD_DIR_COUNT];
	int fifo_batch;
	int writes_starved;
	int front_merges;
	u32 async_depth;

	spinlock_t lock;
	spinlock_t zone_lock;
};

/* Count one event of type 'event_type' and with I/O priority 'prio' */
#define dd_count(dd, event_type, prio) do {				\
	struct io_stats *io_stats = get_cpu_ptr((dd)->stats);		\
									\
	BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));	\
	BUILD_BUG_ON(!__same_type((prio), enum dd_prio));		\
	local_inc(&io_stats->stats[(prio)].event_type);			\
	put_cpu_ptr(io_stats);						\
} while (0)

/*
 * Returns the total number of dd_count(dd, event_type, prio) calls across all
 * CPUs. No locking or barriers since it is fine if the returned sum is slightly
 * outdated.
 */
#define dd_sum(dd, event_type, prio) ({					\
	unsigned int cpu;						\
	u32 sum = 0;							\
									\
	BUILD_BUG_ON(!__same_type((dd), struct deadline_data *));	\
	BUILD_BUG_ON(!__same_type((prio), enum dd_prio));		\
	for_each_present_cpu(cpu)					\
		sum += local_read(&per_cpu_ptr((dd)->stats, cpu)->	\
				  stats[(prio)].event_type);		\
	sum;								\
})

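/*
 * Usage sketch (illustrative; mirrors calls made elsewhere in this file):
 *
 *	dd_count(dd, inserted, prio);
 *	queued = dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio);
 *
 * dd_count() only touches this CPU's local_t counter, which keeps the hot
 * path cheap; the price is that dd_sum() may return a slightly stale total.
 */
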
/* Maps an I/O priority class to a deadline scheduler priority. */
static const enum dd_prio ioprio_class_to_prio[] = {
	[IOPRIO_CLASS_NONE]	= DD_BE_PRIO,
	[IOPRIO_CLASS_RT]	= DD_RT_PRIO,
	[IOPRIO_CLASS_BE]	= DD_BE_PRIO,
	[IOPRIO_CLASS_IDLE]	= DD_IDLE_PRIO,
};

static inline struct rb_root *
deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq)
{
	return &per_prio->sort_list[rq_data_dir(rq)];
}

/*
 * Returns the I/O priority class (IOPRIO_CLASS_*) that has been assigned to a
 * request.
 */
static u8 dd_rq_ioclass(struct request *rq)
{
	return IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
}

/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
	struct rb_node *node = rb_next(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);

	return NULL;
}

static void
deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
	struct rb_root *root = deadline_rb_root(per_prio, rq);

	elv_rb_add(root, rq);
}

static inline void
deadline_del_rq_rb(struct dd_per_prio *per_prio, struct request *rq)
{
	const enum dd_data_dir data_dir = rq_data_dir(rq);

	if (per_prio->next_rq[data_dir] == rq)
		per_prio->next_rq[data_dir] = deadline_latter_request(rq);

	elv_rb_del(deadline_rb_root(per_prio, rq), rq);
}

/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q,
				    struct dd_per_prio *per_prio,
				    struct request *rq)
{
	list_del_init(&rq->queuelist);

	/*
	 * We might not be on the rbtree, if we are doing an insert merge
	 */
	if (!RB_EMPTY_NODE(&rq->rb_node))
		deadline_del_rq_rb(per_prio, rq);

	elv_rqhash_del(q, rq);
	if (q->last_merge == rq)
		q->last_merge = NULL;
}

static void dd_request_merged(struct request_queue *q, struct request *req,
			      enum elv_merge type)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(req);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (type == ELEVATOR_FRONT_MERGE) {
		elv_rb_del(deadline_rb_root(per_prio, req), req);
		deadline_add_rq_rb(per_prio, req);
	}
}

/*
 * Callback function that is invoked after @next has been merged into @req.
 */
static void dd_merged_requests(struct request_queue *q, struct request *req,
			       struct request *next)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(next);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];

	dd_count(dd, merged, prio);

	/*
	 * if next expires before rq, assign its expire time to rq
	 * and move into next position (next will be deleted) in fifo
	 */
	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
		if (time_before((unsigned long)next->fifo_time,
				(unsigned long)req->fifo_time)) {
			list_move(&req->queuelist, &next->queuelist);
			req->fifo_time = next->fifo_time;
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	deadline_remove_request(q, &dd->per_prio[prio], next);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      struct request *rq)
{
	const enum dd_data_dir data_dir = rq_data_dir(rq);

	per_prio->next_rq[data_dir] = deadline_latter_request(rq);

	/*
	 * take it off the sort and fifo list
	 */
	deadline_remove_request(rq->q, per_prio, rq);
}

/* Number of requests queued for a given priority level. */
static u32 dd_queued(struct deadline_data *dd, enum dd_prio prio)
{
	return dd_sum(dd, inserted, prio) - dd_sum(dd, completed, prio);
}

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&per_prio->fifo_list[data_dir])
 */
static inline int deadline_check_fifo(struct dd_per_prio *per_prio,
				      enum dd_data_dir data_dir)
{
	struct request *rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);

	/*
	 * rq is expired!
	 */
	if (time_after_eq(jiffies, (unsigned long)rq->fifo_time))
		return 1;

	return 0;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using arrival ordered lists.
 */
static struct request *
deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      enum dd_data_dir data_dir)
{
	struct request *rq;
	unsigned long flags;

	if (list_empty(&per_prio->fifo_list[data_dir]))
		return NULL;

	rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
	if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	list_for_each_entry(rq, &per_prio->fifo_list[DD_WRITE], queuelist) {
		if (blk_req_can_dispatch_to_zone(rq))
			goto out;
	}
	rq = NULL;
out:
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}

/*
 * For the specified data direction, return the next request to
 * dispatch using sector position sorted lists.
 */
static struct request *
deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
		      enum dd_data_dir data_dir)
{
	struct request *rq;
	unsigned long flags;

	rq = per_prio->next_rq[data_dir];
	if (!rq)
		return NULL;

	if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
		return rq;

	/*
	 * Look for a write request that can be dispatched, that is one with
	 * an unlocked target zone.
	 */
	spin_lock_irqsave(&dd->zone_lock, flags);
	while (rq) {
		if (blk_req_can_dispatch_to_zone(rq))
			break;
		rq = deadline_latter_request(rq);
	}
	spin_unlock_irqrestore(&dd->zone_lock, flags);

	return rq;
}

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static struct request *__dd_dispatch_request(struct deadline_data *dd,
					     struct dd_per_prio *per_prio)
{
	struct request *rq, *next_rq;
	enum dd_data_dir data_dir;
	enum dd_prio prio;
	u8 ioprio_class;

	lockdep_assert_held(&dd->lock);

	if (!list_empty(&per_prio->dispatch)) {
		rq = list_first_entry(&per_prio->dispatch, struct request,
				      queuelist);
		list_del_init(&rq->queuelist);
		goto done;
	}

	/*
	 * batches are currently reads XOR writes
	 */
	rq = deadline_next_request(dd, per_prio, dd->last_dir);
	if (rq && dd->batching < dd->fifo_batch)
		/* we have a next request and are still entitled to batch */
		goto dispatch_request;

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (!list_empty(&per_prio->fifo_list[DD_READ])) {
		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_READ]));

		if (deadline_fifo_request(dd, per_prio, DD_WRITE) &&
		    (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = DD_READ;

		goto dispatch_find_request;
	}

	/*
	 * there are either no reads or writes have been starved
	 */

	if (!list_empty(&per_prio->fifo_list[DD_WRITE])) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&per_prio->sort_list[DD_WRITE]));

		dd->starved = 0;

		data_dir = DD_WRITE;

		goto dispatch_find_request;
	}

	return NULL;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	next_rq = deadline_next_request(dd, per_prio, data_dir);
	if (deadline_check_fifo(per_prio, data_dir) || !next_rq) {
		/*
		 * A deadline has expired, the last request was in the other
		 * direction, or we have run out of higher-sectored requests.
		 * Start again from the request with the earliest expiry time.
		 */
		rq = deadline_fifo_request(dd, per_prio, data_dir);
	} else {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = next_rq;
	}

	/*
	 * For a zoned block device, if we only have writes queued and none of
	 * them can be dispatched, rq will be NULL.
	 */
	if (!rq)
		return NULL;

	dd->last_dir = data_dir;
	dd->batching = 0;

dispatch_request:
	/*
	 * rq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, per_prio, rq);
done:
	ioprio_class = dd_rq_ioclass(rq);
	prio = ioprio_class_to_prio[ioprio_class];
	dd_count(dd, dispatched, prio);
	/*
	 * If the request needs its target zone locked, do it.
	 */
	blk_req_zone_write_lock(rq);
	rq->rq_flags |= RQF_STARTED;
	return rq;
}

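/*
 * Illustrative walk-through of the logic above: while a batch is in progress
 * (dd->batching < dd->fifo_batch) the next sector-ordered request in
 * dd->last_dir is dispatched. Otherwise reads are preferred, except that
 * writes win once they have been skipped dd->writes_starved times in a row;
 * within the chosen direction, the FIFO head is used when its deadline has
 * expired, else the sector-ordered scan continues.
 */
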
/*
 * Called from blk_mq_run_hw_queue() -> __blk_mq_sched_dispatch_requests().
 *
 * One confusing aspect here is that we get called for a specific
 * hardware queue, but we may return a request that is for a
 * different hardware queue. This is because mq-deadline has shared
 * state for all hardware queues, in terms of sorting, FIFOs, etc.
 */
static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	struct request *rq;
	enum dd_prio prio;

	spin_lock(&dd->lock);
	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		rq = __dd_dispatch_request(dd, &dd->per_prio[prio]);
		if (rq)
			break;
	}
	spin_unlock(&dd->lock);

	return rq;
}

/*
 * Called by __blk_mq_alloc_request(). The shallow_depth value set by this
 * function is used by __blk_mq_get_tag().
 */
static void dd_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{
	struct deadline_data *dd = data->q->elevator->elevator_data;

	/* Do not throttle synchronous reads. */
	if (op_is_sync(op) && !op_is_write(op))
		return;

	/*
	 * Throttle asynchronous requests and writes such that these requests
	 * do not block the allocation of synchronous requests.
	 */
	data->shallow_depth = dd->async_depth;
}

/* Called by blk_mq_update_nr_requests(). */
static void dd_depth_updated(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	struct blk_mq_tags *tags = hctx->sched_tags;

	dd->async_depth = max(1UL, 3 * q->nr_requests / 4);

	sbitmap_queue_min_shallow_depth(tags->bitmap_tags, dd->async_depth);
}

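/*
 * Worked example (values not from the original source): if q->nr_requests is
 * 64, dd->async_depth becomes 3 * 64 / 4 = 48, so asynchronous requests and
 * writes can occupy at most 48 scheduler tags while synchronous reads may
 * use the full depth.
 */
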
/* Called by blk_mq_init_hctx() and blk_mq_init_sched(). */
static int dd_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
	dd_depth_updated(hctx);
	return 0;
}

static void dd_exit_sched(struct elevator_queue *e)
{
	struct deadline_data *dd = e->elevator_data;
	enum dd_prio prio;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		struct dd_per_prio *per_prio = &dd->per_prio[prio];

		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_READ]));
		WARN_ON_ONCE(!list_empty(&per_prio->fifo_list[DD_WRITE]));
		WARN_ONCE(dd_queued(dd, prio) != 0,
			  "statistics for priority %d: i %u m %u d %u c %u\n",
			  prio, dd_sum(dd, inserted, prio),
			  dd_sum(dd, merged, prio),
			  dd_sum(dd, dispatched, prio),
			  dd_sum(dd, completed, prio));
	}

	free_percpu(dd->stats);

	kfree(dd);
}

/*
 * initialize elevator private data (deadline_data).
 */
static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct deadline_data *dd;
	struct elevator_queue *eq;
	enum dd_prio prio;
	int ret = -ENOMEM;

	eq = elevator_alloc(q, e);
	if (!eq)
		return ret;

	dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
	if (!dd)
		goto put_eq;

	eq->elevator_data = dd;

	dd->stats = alloc_percpu_gfp(typeof(*dd->stats),
				     GFP_KERNEL | __GFP_ZERO);
	if (!dd->stats)
		goto free_dd;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
		struct dd_per_prio *per_prio = &dd->per_prio[prio];

		INIT_LIST_HEAD(&per_prio->dispatch);
		INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
		INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
		per_prio->sort_list[DD_READ] = RB_ROOT;
		per_prio->sort_list[DD_WRITE] = RB_ROOT;
	}
	dd->fifo_expire[DD_READ] = read_expire;
	dd->fifo_expire[DD_WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->last_dir = DD_WRITE;
	dd->fifo_batch = fifo_batch;
	spin_lock_init(&dd->lock);
	spin_lock_init(&dd->zone_lock);

	q->elevator = eq;
	return 0;

free_dd:
	kfree(dd);

put_eq:
	kobject_put(&eq->kobj);
	return ret;
}

/*
 * Try to merge @bio into an existing request. If @bio has been merged into
 * an existing request, store the pointer to that request into *@rq.
 */
static int dd_request_merge(struct request_queue *q, struct request **rq,
			    struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];
	sector_t sector = bio_end_sector(bio);
	struct request *__rq;

	if (!dd->front_merges)
		return ELEVATOR_NO_MERGE;

	__rq = elv_rb_find(&per_prio->sort_list[bio_data_dir(bio)], sector);
	if (__rq) {
		BUG_ON(sector != blk_rq_pos(__rq));

		if (elv_bio_merge_ok(__rq, bio)) {
			*rq = __rq;
			if (blk_discard_mergable(__rq))
				return ELEVATOR_DISCARD_MERGE;
			return ELEVATOR_FRONT_MERGE;
		}
	}

	return ELEVATOR_NO_MERGE;
}

/*
 * Attempt to merge a bio into an existing request. This function is called
 * before @bio is associated with a request.
 */
static bool dd_bio_merge(struct request_queue *q, struct bio *bio,
			 unsigned int nr_segs)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *free = NULL;
	bool ret;

	spin_lock(&dd->lock);
	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
	spin_unlock(&dd->lock);

	if (free)
		blk_mq_free_request(free);

	return ret;
}

/*
 * add rq to rbtree and fifo
 */
static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
			      bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;
	const enum dd_data_dir data_dir = rq_data_dir(rq);
	u16 ioprio = req_get_ioprio(rq);
	u8 ioprio_class = IOPRIO_PRIO_CLASS(ioprio);
	struct dd_per_prio *per_prio;
	enum dd_prio prio;
	LIST_HEAD(free);

	lockdep_assert_held(&dd->lock);

	/*
	 * This may be a requeue of a write request that has locked its
	 * target zone. If it is the case, this releases the zone lock.
	 */
	blk_req_zone_write_unlock(rq);

	prio = ioprio_class_to_prio[ioprio_class];
	if (!rq->elv.priv[0]) {
		dd_count(dd, inserted, prio);
		rq->elv.priv[0] = (void *)(uintptr_t)1;
	}

	if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
		blk_mq_free_requests(&free);
		return;
	}

	trace_block_rq_insert(rq);

	per_prio = &dd->per_prio[prio];
	if (at_head) {
		list_add(&rq->queuelist, &per_prio->dispatch);
	} else {
		deadline_add_rq_rb(per_prio, rq);

		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * set expire time and add to fifo list
		 */
		rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
		list_add_tail(&rq->queuelist, &per_prio->fifo_list[data_dir]);
	}
}

/*
 * Called from blk_mq_sched_insert_request() or blk_mq_sched_insert_requests().
 */
static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
			       struct list_head *list, bool at_head)
{
	struct request_queue *q = hctx->queue;
	struct deadline_data *dd = q->elevator->elevator_data;

	spin_lock(&dd->lock);
	while (!list_empty(list)) {
		struct request *rq;

		rq = list_first_entry(list, struct request, queuelist);
		list_del_init(&rq->queuelist);
		dd_insert_request(hctx, rq, at_head);
	}
	spin_unlock(&dd->lock);
}

/* Callback from inside blk_mq_rq_ctx_init(). */
static void dd_prepare_request(struct request *rq)
{
	rq->elv.priv[0] = NULL;
}

/*
 * Callback from inside blk_mq_free_request().
 *
 * For zoned block devices, write unlock the target zone of
 * completed write requests. Do this while holding the zone lock
 * spinlock so that the zone is never unlocked while deadline_fifo_request()
 * or deadline_next_request() are executing. This function is called for
 * all requests, whether or not these requests complete successfully.
 *
 * For a zoned block device, __dd_dispatch_request() may have stopped
 * dispatching requests if all the queued requests are write requests directed
 * at zones that are already locked due to on-going write requests. To ensure
 * write request dispatch progress in this case, mark the queue as needing a
 * restart to ensure that the queue is run again after completion of the
 * request and zones being unlocked.
 */
static void dd_finish_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct deadline_data *dd = q->elevator->elevator_data;
	const u8 ioprio_class = dd_rq_ioclass(rq);
	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
	struct dd_per_prio *per_prio = &dd->per_prio[prio];

	/*
	 * The block layer core may call dd_finish_request() without having
	 * called dd_insert_requests(). Skip requests that bypassed I/O
	 * scheduling. See also blk_mq_request_bypass_insert().
	 */
	if (!rq->elv.priv[0])
		return;

	dd_count(dd, completed, prio);

	if (blk_queue_is_zoned(q)) {
		unsigned long flags;

		spin_lock_irqsave(&dd->zone_lock, flags);
		blk_req_zone_write_unlock(rq);
		if (!list_empty(&per_prio->fifo_list[DD_WRITE]))
			blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
		spin_unlock_irqrestore(&dd->zone_lock, flags);
	}
}

static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
{
	return !list_empty_careful(&per_prio->dispatch) ||
		!list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
		!list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
}

static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
{
	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
	enum dd_prio prio;

	for (prio = 0; prio <= DD_PRIO_MAX; prio++)
		if (dd_has_work_for_prio(&dd->per_prio[prio]))
			return true;

	return false;
}

/*
 * sysfs parts below
 */
#define SHOW_INT(__FUNC, __VAR)						\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct deadline_data *dd = e->elevator_data;			\
									\
	return sysfs_emit(page, "%d\n", __VAR);				\
}
#define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
SHOW_INT(deadline_front_merges_show, dd->front_merges);
SHOW_INT(deadline_async_depth_show, dd->async_depth);
SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
#undef SHOW_INT
#undef SHOW_JIFFIES

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data, __ret;						\
									\
	__ret = kstrtoint(page, 0, &__data);				\
	if (__ret < 0)							\
		return __ret;						\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	*(__PTR) = __CONV(__data);					\
	return count;							\
}
#define STORE_INT(__FUNC, __PTR, MIN, MAX)				\
	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, )
#define STORE_JIFFIES(__FUNC, __PTR, MIN, MAX)				\
	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
#undef STORE_FUNCTION
#undef STORE_INT
#undef STORE_JIFFIES

#define DD_ATTR(name) \
	__ATTR(name, 0644, deadline_##name##_show, deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
	DD_ATTR(read_expire),
	DD_ATTR(write_expire),
	DD_ATTR(writes_starved),
	DD_ATTR(front_merges),
	DD_ATTR(async_depth),
	DD_ATTR(fifo_batch),
	__ATTR_NULL
};

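/*
 * These attributes are exposed under /sys/block/<dev>/queue/iosched/.
 * Example shell session (hypothetical device name):
 *
 *	# cat /sys/block/sda/queue/iosched/read_expire
 *	500
 *	# echo 1000 > /sys/block/sda/queue/iosched/read_expire
 */
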
#ifdef CONFIG_BLK_DEBUG_FS
#define DEADLINE_DEBUGFS_DDIR_ATTRS(prio, data_dir, name)		\
static void *deadline_##name##_fifo_start(struct seq_file *m,		\
					  loff_t *pos)			\
	__acquires(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	spin_lock(&dd->lock);						\
	return seq_list_start(&per_prio->fifo_list[data_dir], *pos);	\
}									\
									\
static void *deadline_##name##_fifo_next(struct seq_file *m, void *v,	\
					 loff_t *pos)			\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	return seq_list_next(v, &per_prio->fifo_list[data_dir], pos);	\
}									\
									\
static void deadline_##name##_fifo_stop(struct seq_file *m, void *v)	\
	__releases(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_unlock(&dd->lock);						\
}									\
									\
static const struct seq_operations deadline_##name##_fifo_seq_ops = {	\
	.start	= deadline_##name##_fifo_start,				\
	.next	= deadline_##name##_fifo_next,				\
	.stop	= deadline_##name##_fifo_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
};									\
									\
static int deadline_##name##_next_rq_show(void *data,			\
					  struct seq_file *m)		\
{									\
	struct request_queue *q = data;					\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
	struct request *rq = per_prio->next_rq[data_dir];		\
									\
	if (rq)								\
		__blk_mq_debugfs_rq_show(m, rq);			\
	return 0;							\
}

DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_READ, read0);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_RT_PRIO, DD_WRITE, write0);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_READ, read1);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_BE_PRIO, DD_WRITE, write1);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_READ, read2);
DEADLINE_DEBUGFS_DDIR_ATTRS(DD_IDLE_PRIO, DD_WRITE, write2);
#undef DEADLINE_DEBUGFS_DDIR_ATTRS

static int deadline_batching_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->batching);
	return 0;
}

static int deadline_starved_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->starved);
	return 0;
}

static int dd_async_depth_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u\n", dd->async_depth);
	return 0;
}

static int dd_queued_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u %u %u\n", dd_queued(dd, DD_RT_PRIO),
		   dd_queued(dd, DD_BE_PRIO),
		   dd_queued(dd, DD_IDLE_PRIO));
	return 0;
}

/* Number of requests owned by the block driver for a given priority. */
static u32 dd_owned_by_driver(struct deadline_data *dd, enum dd_prio prio)
{
	return dd_sum(dd, dispatched, prio) + dd_sum(dd, merged, prio)
		- dd_sum(dd, completed, prio);
}

static int dd_owned_by_driver_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	struct deadline_data *dd = q->elevator->elevator_data;

	seq_printf(m, "%u %u %u\n", dd_owned_by_driver(dd, DD_RT_PRIO),
		   dd_owned_by_driver(dd, DD_BE_PRIO),
		   dd_owned_by_driver(dd, DD_IDLE_PRIO));
	return 0;
}

#define DEADLINE_DISPATCH_ATTR(prio)					\
static void *deadline_dispatch##prio##_start(struct seq_file *m,	\
					     loff_t *pos)		\
	__acquires(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	spin_lock(&dd->lock);						\
	return seq_list_start(&per_prio->dispatch, *pos);		\
}									\
									\
static void *deadline_dispatch##prio##_next(struct seq_file *m,		\
					    void *v, loff_t *pos)	\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
	struct dd_per_prio *per_prio = &dd->per_prio[prio];		\
									\
	return seq_list_next(v, &per_prio->dispatch, pos);		\
}									\
									\
static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v)	\
	__releases(&dd->lock)						\
{									\
	struct request_queue *q = m->private;				\
	struct deadline_data *dd = q->elevator->elevator_data;		\
									\
	spin_unlock(&dd->lock);						\
}									\
									\
static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
	.start	= deadline_dispatch##prio##_start,			\
	.next	= deadline_dispatch##prio##_next,			\
	.stop	= deadline_dispatch##prio##_stop,			\
	.show	= blk_mq_debugfs_rq_show,				\
}

DEADLINE_DISPATCH_ATTR(0);
DEADLINE_DISPATCH_ATTR(1);
DEADLINE_DISPATCH_ATTR(2);
#undef DEADLINE_DISPATCH_ATTR

#define DEADLINE_QUEUE_DDIR_ATTRS(name)					\
	{#name "_fifo_list", 0400,					\
	 .seq_ops = &deadline_##name##_fifo_seq_ops}
#define DEADLINE_NEXT_RQ_ATTR(name)					\
	{#name "_next_rq", 0400, deadline_##name##_next_rq_show}
static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
	DEADLINE_QUEUE_DDIR_ATTRS(read0),
	DEADLINE_QUEUE_DDIR_ATTRS(write0),
	DEADLINE_QUEUE_DDIR_ATTRS(read1),
	DEADLINE_QUEUE_DDIR_ATTRS(write1),
	DEADLINE_QUEUE_DDIR_ATTRS(read2),
	DEADLINE_QUEUE_DDIR_ATTRS(write2),
	DEADLINE_NEXT_RQ_ATTR(read0),
	DEADLINE_NEXT_RQ_ATTR(write0),
	DEADLINE_NEXT_RQ_ATTR(read1),
	DEADLINE_NEXT_RQ_ATTR(write1),
	DEADLINE_NEXT_RQ_ATTR(read2),
	DEADLINE_NEXT_RQ_ATTR(write2),
	{"batching", 0400, deadline_batching_show},
	{"starved", 0400, deadline_starved_show},
	{"async_depth", 0400, dd_async_depth_show},
	{"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
	{"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
	{"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
	{"owned_by_driver", 0400, dd_owned_by_driver_show},
	{"queued", 0400, dd_queued_show},
	{},
};
#undef DEADLINE_QUEUE_DDIR_ATTRS
#endif

static struct elevator_type mq_deadline = {
	.ops = {
		.depth_updated		= dd_depth_updated,
		.limit_depth		= dd_limit_depth,
		.insert_requests	= dd_insert_requests,
		.dispatch_request	= dd_dispatch_request,
		.prepare_request	= dd_prepare_request,
		.finish_request		= dd_finish_request,
		.next_request		= elv_rb_latter_request,
		.former_request		= elv_rb_former_request,
		.bio_merge		= dd_bio_merge,
		.request_merge		= dd_request_merge,
		.requests_merged	= dd_merged_requests,
		.request_merged		= dd_request_merged,
		.has_work		= dd_has_work,
		.init_sched		= dd_init_sched,
		.exit_sched		= dd_exit_sched,
		.init_hctx		= dd_init_hctx,
	},

#ifdef CONFIG_BLK_DEBUG_FS
	.queue_debugfs_attrs = deadline_queue_debugfs_attrs,
#endif
	.elevator_attrs = deadline_attrs,
	.elevator_name = "mq-deadline",
	.elevator_alias = "deadline",
	.elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
	.elevator_owner = THIS_MODULE,
};
MODULE_ALIAS("mq-deadline-iosched");

static int __init deadline_init(void)
{
	return elv_register(&mq_deadline);
}

static void __exit deadline_exit(void)
{
	elv_unregister(&mq_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe, Damien Le Moal and Bart Van Assche");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ deadline IO scheduler");