// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-rq-qos.h"

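/*
 * debugfs attributes for blk-mq. With debugfs mounted at /sys/kernel/debug
 * (its usual location), the per-queue files created below typically show up
 * under /sys/kernel/debug/block/<disk>/, with one hctx<N>/ subdirectory per
 * hardware queue and one cpu<N>/ subdirectory per software context.
 */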
static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
	if (stat->nr_samples) {
		seq_printf(m, "samples=%d, mean=%llu, min=%llu, max=%llu",
			   stat->nr_samples, stat->mean, stat->min, stat->max);
	} else {
		seq_puts(m, "samples=0");
	}
}

static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int bucket;

	for (bucket = 0; bucket < (BLK_MQ_POLL_STATS_BKTS / 2); bucket++) {
		seq_printf(m, "read (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket]);
		seq_puts(m, "\n");

		seq_printf(m, "write (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket + 1]);
		seq_puts(m, "\n");
	}
	return 0;
}

static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
	.start = queue_requeue_list_start,
	.next = queue_requeue_list_next,
	.stop = queue_requeue_list_stop,
	.show = blk_mq_debugfs_rq_show,
};

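/*
 * Print the names of the bits set in @flags, separated by '|'. Bits that have
 * no entry in @flag_name are printed as their bit number instead.
 */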
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	bool sep = false;
	int i;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}

static int queue_pm_only_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
	return 0;
}

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(STOPPED),
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NONROT),
	QUEUE_FLAG_NAME(IO_STAT),
	QUEUE_FLAG_NAME(DISCARD),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(ADD_RANDOM),
	QUEUE_FLAG_NAME(SECERASE),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(DEAD),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(STABLE_WRITES),
	QUEUE_FLAG_NAME(POLL),
	QUEUE_FLAG_NAME(WC),
	QUEUE_FLAG_NAME(FUA),
	QUEUE_FLAG_NAME(DAX),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(POLL_STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(QUIESCED),
	QUEUE_FLAG_NAME(PCI_P2PDMA),
	QUEUE_FLAG_NAME(ZONE_RESETALL),
	QUEUE_FLAG_NAME(RQ_ALLOC_TIME),
	QUEUE_FLAG_NAME(HCTX_ACTIVE),
	QUEUE_FLAG_NAME(NOWAIT),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}

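/*
 * The "state" attribute is also writable. The accepted commands are "run"
 * (run the hardware queues), "start" (restart stopped hardware queues) and
 * "kick" (kick the requeue list), e.g. "echo kick > state" from the queue's
 * debugfs directory.
 */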
static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed after blk_cleanup_queue() has called
	 * blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set to avoid
	 * triggering a use-after-free.
	 */
	if (blk_queue_dead(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}

static int queue_write_hint_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]);

	return 0;
}

static ssize_t queue_write_hint_store(void *data, const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		q->write_hints[i] = 0;

	return count;
}

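/* Per-queue attributes, created directly in the request queue's debugfs directory. */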
static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{ "poll_stat", 0400, queue_poll_stat_show },
	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
	{ "pm_only", 0600, queue_pm_only_show, NULL },
	{ "state", 0600, queue_state_show, queue_state_write },
	{ "write_hints", 0600, queue_write_hint_show, queue_write_hint_store },
	{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
	{ },
};

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
	HCTX_STATE_NAME(INACTIVE),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_QUEUE_SHARED),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
	HCTX_FLAG_NAME(STACKING),
	HCTX_FLAG_NAME(TAG_HCTX_SHARED),
};
#undef HCTX_FLAG_NAME

static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOWAIT),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(POLLED),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
	RQF_NAME(STARTED),
	RQF_NAME(SOFTBARRIER),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(MQ_INFLIGHT),
	RQF_NAME(DONTPREP),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(ELVPRIV),
	RQF_NAME(IO_STAT),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
	RQF_NAME(ZONE_WRITE_LOCKED),
	RQF_NAME(MQ_POLL_SLEPT),
	RQF_NAME(ELV),
};
#undef RQF_NAME

static const char *const blk_mq_rq_state_name_array[] = {
	[MQ_RQ_IDLE]		= "idle",
	[MQ_RQ_IN_FLIGHT]	= "in_flight",
	[MQ_RQ_COMPLETE]	= "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
	if (WARN_ON_ONCE((unsigned int)rq_state >=
			 ARRAY_SIZE(blk_mq_rq_state_name_array)))
		return "(?)";
	return blk_mq_rq_state_name_array[rq_state];
}

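/*
 * Print a one-line summary of @rq: operation, cmd_flags, rq_flags, state and
 * (driver/internal) tags. Drivers that implement the optional ->show_rq()
 * callback get a chance to append driver-specific information.
 */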
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const unsigned int op = req_op(rq);
	const char *op_str = blk_op_str(op);

	seq_printf(m, "%p {.op=", rq);
	if (strcmp(op_str, "UNKNOWN") == 0)
		seq_printf(m, "%u", op);
	else
		seq_printf(m, "%s", op_str);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
		       ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start = hctx_dispatch_start,
	.next = hctx_dispatch_next,
	.stop = hctx_dispatch_stop,
	.show = blk_mq_debugfs_rq_show,
};

struct show_busy_params {
	struct seq_file		*m;
	struct blk_mq_hw_ctx	*hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
 * keep iterating requests.
 */
static bool hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
{
	const struct show_busy_params *params = data;

	if (rq->mq_hctx == params->hctx)
		__blk_mq_debugfs_rq_show(params->m, rq);

	return true;
}

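/*
 * "busy": walk the tag set and show every allocated request that is assigned
 * to this hardware queue.
 */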
static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };

	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);

	return 0;
}

static const char *const hctx_types[] = {
	[HCTX_TYPE_DEFAULT]	= "default",
	[HCTX_TYPE_READ]	= "read",
	[HCTX_TYPE_POLL]	= "poll",
};

static int hctx_type_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
	seq_printf(m, "%s\n", hctx_types[hctx->type]);
	return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}

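/*
 * Dump a struct blk_mq_tags: tag counts, the number of active queues, and the
 * state of the normal and reserved tag bitmaps.
 */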
static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   atomic_read(&tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(&tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(&tags->breserved_tags, m);
	}
}

static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_run_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->run);
	return 0;
}

static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
			      loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->run = 0;
	return count;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", __blk_mq_active_requests(hctx));
	return 0;
}

static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%u\n", hctx->dispatch_busy);
	return 0;
}

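/*
 * Generate seq_file operations that walk one of the per-CPU software context
 * request lists (ctx->rq_lists[type]) under ctx->lock.
 */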
#define CTX_RQ_SEQ_OPS(name, type)					\
static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
	__acquires(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_lock(&ctx->lock);						\
	return seq_list_start(&ctx->rq_lists[type], *pos);		\
}									\
									\
static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v,	\
				       loff_t *pos)			\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	return seq_list_next(v, &ctx->rq_lists[type], pos);		\
}									\
									\
static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v)	\
	__releases(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_unlock(&ctx->lock);					\
}									\
									\
static const struct seq_operations ctx_##name##_rq_list_seq_ops = {	\
	.start = ctx_##name##_rq_list_start,				\
	.next = ctx_##name##_rq_list_next,				\
	.stop = ctx_##name##_rq_list_stop,				\
	.show = blk_mq_debugfs_rq_show,					\
}

CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);

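/*
 * A single set of file operations backs every attribute: attributes that only
 * provide .seq_ops are opened with seq_open(), while attributes with a .show
 * callback go through single_open(). The attribute itself is stored in the
 * file's inode and the object it describes in the parent directory's inode.
 */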
static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

	return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	/*
	 * Attributes that only implement .seq_ops are read-only and 'attr' is
	 * the same as 'data' in this case.
	 */
	if (attr == data || !attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}

static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);

	return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
	.open = blk_mq_debugfs_open,
	.read = seq_read,
	.write = blk_mq_debugfs_write,
	.llseek = seq_lseek,
	.release = blk_mq_debugfs_release,
};

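/* Per-hardware-queue attributes, created under the queue's hctx<N>/ directory. */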
static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"run", 0600, hctx_run_show, hctx_run_write},
	{"active", 0400, hctx_active_show},
	{"dispatch_busy", 0400, hctx_dispatch_busy_show},
	{"type", 0400, hctx_type_show},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
	{"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
	{"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
	{},
};

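/*
 * Create one debugfs file per attribute under @parent and stash @data (the
 * queue, hctx, ctx or rqos being described) in the parent's inode so that
 * blk_mq_debugfs_open() can retrieve it later.
 */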
static void debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	if (IS_ERR_OR_NULL(parent))
		return;

	d_inode(parent)->i_private = data;

	for (; attr->name; attr++)
		debugfs_create_file(attr->name, attr->mode, parent,
				    (void *)attr, &blk_mq_debugfs_fops);
}

void blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);

	/*
	 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
	 * didn't exist yet (because we don't know what to name the directory
	 * until the queue is registered to a gendisk).
	 */
	if (q->elevator && !q->sched_debugfs_dir)
		blk_mq_debugfs_register_sched(q);

	/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir)
			blk_mq_debugfs_register_hctx(q, hctx);
		if (q->elevator && !hctx->sched_debugfs_dir)
			blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	if (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;

		while (rqos) {
			blk_mq_debugfs_register_rqos(rqos);
			rqos = rqos->next;
		}
	}
}

void blk_mq_debugfs_unregister(struct request_queue *q)
{
	q->sched_debugfs_dir = NULL;
}

static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);

	debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
}

void blk_mq_debugfs_register_hctx(struct request_queue *q,
				  struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);

	debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);

	hctx_for_each_ctx(hctx, ctx, i)
		blk_mq_debugfs_register_ctx(hctx, ctx);
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_register_hctx(q, hctx);
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}

void blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	/*
	 * If the parent directory has not been created yet, return; we will be
	 * called again later on and the directory/files will be created then.
	 */
	if (!q->debugfs_dir)
		return;

	if (!e->queue_debugfs_attrs)
		return;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);

	debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}

static const char *rq_qos_id_to_name(enum rq_qos_id id)
{
	switch (id) {
	case RQ_QOS_WBT:
		return "wbt";
	case RQ_QOS_LATENCY:
		return "latency";
	case RQ_QOS_COST:
		return "cost";
	case RQ_QOS_IOPRIO:
		return "ioprio";
	}
	return "unknown";
}

void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
	debugfs_remove_recursive(rqos->debugfs_dir);
	rqos->debugfs_dir = NULL;
}

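/*
 * Register the debugfs attributes of an rq_qos policy (e.g. wbt) under the
 * queue's rqos/<policy name>/ directory, creating the shared "rqos" directory
 * on first use.
 */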
void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
	struct request_queue *q = rqos->q;
	const char *dir_name = rq_qos_id_to_name(rqos->id);

	if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
		return;

	if (!q->rqos_debugfs_dir)
		q->rqos_debugfs_dir = debugfs_create_dir("rqos",
							 q->debugfs_dir);

	rqos->debugfs_dir = debugfs_create_dir(dir_name,
					       rqos->q->rqos_debugfs_dir);

	debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
}

void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
{
	debugfs_remove_recursive(q->rqos_debugfs_dir);
	q->rqos_debugfs_dir = NULL;
}

void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
					struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	/*
	 * If the parent debugfs directory has not been created yet, return;
	 * we will be called again later on with the appropriate parent debugfs
	 * directory from blk_register_queue().
	 */
	if (!hctx->debugfs_dir)
		return;

	if (!e->hctx_debugfs_attrs)
		return;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	debugfs_create_files(hctx->sched_debugfs_dir, hctx,
			     e->hctx_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}