// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-wbt.h"

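/*
 * Each file under /sys/block/<disk>/queue/ is backed by one of these
 * entries: ->show() formats the current value into a PAGE_SIZE buffer,
 * ->store() parses a value written from user space.
 */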
struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

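/*
 * Most queue attributes round-trip a single unsigned long; these two
 * helpers implement that common case. queue_var_store() rejects values
 * that do not fit in an unsigned int, since most queue limits are 32-bit.
 */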
static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

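/*
 * Signed 64-bit variant, needed by attributes such as wbt_lat_usec
 * where a value of -1 is meaningful and must survive parsing.
 */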
static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

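/*
 * nr_requests: how many requests may be allocated for this queue.
 * Updates are routed to the legacy or blk-mq implementation depending
 * on the queue type, and are clamped to at least BLKDEV_MIN_RQ,
 * e.g. "echo 256 > /sys/block/<disk>/queue/nr_requests".
 */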
static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!q->request_fn && !q->mq_ops)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	if (q->request_fn)
		err = blk_update_nr_requests(q, nr);
	else
		err = blk_mq_update_nr_requests(q, nr);

	if (err)
		return err;

	return ret;
}

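/*
 * read_ahead_kb: the readahead window is kept in pages in the backing
 * device info, so these handlers convert between kilobytes and pages
 * with PAGE_SHIFT - 10 (10 being log2 of 1KB).
 */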
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info->ra_pages <<
					(PAGE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	if (ret < 0)
		return ret;

	q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);

	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	if (blk_queue_cluster(q))
		return queue_var_show(queue_max_segment_size(q), (page));

	return queue_var_show(PAGE_SIZE, (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

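/*
 * discard_max_bytes is the only writable discard limit: user space may
 * lower it below the hardware maximum (discard_max_hw_bytes), but the
 * value must be aligned to discard_granularity and is silently capped
 * at max_hw_discard_sectors.
 */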
static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
					 q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}

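/*
 * Generate show/store pairs for simple boolean queue flags. @neg
 * inverts the exposed polarity: "rotational" is the negation of
 * QUEUE_FLAG_NONROT, so non-rotational devices report 0 there.
 */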
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_show_##name(struct request_queue *q, char *page)			\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	if (val)							\
		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
	else								\
		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}

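/*
 * nomerges is a three-state knob: 0 enables all merging, 1 keeps only
 * the simple one-shot merge with the last request (QUEUE_FLAG_NOXMERGES),
 * and 2 disables merging entirely (QUEUE_FLAG_NOMERGES).
 */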
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

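/*
 * rq_affinity: 0 = no completion steering, 1 = complete in the group
 * of the submitting CPU (QUEUE_FLAG_SAME_COMP), 2 = force completion
 * on the exact submitting CPU (QUEUE_FLAG_SAME_FORCE). The show side
 * reconstructs that value as set << force.
 */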
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (val == 2) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}

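/*
 * io_poll_delay: -1 selects classic polling (spin until completion),
 * 0 enables adaptive hybrid polling where the kernel estimates a sleep
 * time, and a positive value sleeps for that fixed time before polling;
 * see blk_mq_poll_hybrid_sleep(). Exposed in usec, stored in nsec.
 */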
static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	int val;

	if (q->poll_nsec == -1)
		val = -1;
	else
		val = q->poll_nsec / 1000;

	return sprintf(page, "%d\n", val);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				size_t count)
{
	int err, val;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	err = kstrtoint(page, 10, &val);
	if (err < 0)
		return err;

	if (val == -1)
		q->poll_nsec = -1;
	else
		q->poll_nsec = val * 1000;

	return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	unsigned long poll_on;
	ssize_t ret;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	ret = queue_var_store(&poll_on, page, count);
	if (ret < 0)
		return ret;

	if (poll_on)
		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_POLL, q);

	return ret;
}

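/*
 * wbt_lat_usec: target latency for writeback throttling, exposed in
 * usec but stored in nsec. Writing -1 restores the device-type default
 * via wbt_default_latency_nsec(). The first write initializes wbt if
 * needed, and any manual write moves enable_state to WBT_STATE_ON_MANUAL
 * so a later switch back to defaults does not override the user's value.
 */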
static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!q->rq_wb)
		return -EINVAL;

	return sprintf(page, "%llu\n", div_u64(q->rq_wb->min_lat_nsec, 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_wb *rwb;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rwb = q->rq_wb;
	if (!rwb) {
		ret = wbt_init(q);
		if (ret)
			return ret;
	}

	rwb = q->rq_wb;
	if (val == -1)
		rwb->min_lat_nsec = wbt_default_latency_nsec(q);
	else if (val >= 0)
		rwb->min_lat_nsec = val * 1000ULL;

	if (rwb->enable_state == WBT_STATE_ON_DEFAULT)
		rwb->enable_state = WBT_STATE_ON_MANUAL;

	wbt_update_limits(rwb);
	return count;
}

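/*
 * write_cache: reports and flips QUEUE_FLAG_WC, i.e. whether the device
 * is treated as write back (flushes required for durability) or write
 * through. "none" is accepted as a synonym for write through.
 */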
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	if (set)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);

	return count;
}

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}

static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = 0644 },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = 0644 },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = 0644 },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = 0444 },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
	.attr = {.name = "max_segments", .mode = 0444 },
	.show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_discard_segments_entry = {
	.attr = {.name = "max_discard_segments", .mode = 0444 },
	.show = queue_max_discard_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
	.attr = {.name = "max_integrity_segments", .mode = 0444 },
	.show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = 0444 },
	.show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = 0644 },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = 0444 },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_chunk_sectors_entry = {
	.attr = {.name = "chunk_sectors", .mode = 0444 },
	.show = queue_chunk_sectors_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = 0444 },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = 0444 },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = 0444 },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_hw_entry = {
	.attr = {.name = "discard_max_hw_bytes", .mode = 0444 },
	.show = queue_discard_max_hw_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = 0644 },
	.show = queue_discard_max_show,
	.store = queue_discard_max_store,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = 0444 },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
	.attr = {.name = "write_same_max_bytes", .mode = 0444 },
	.show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_write_zeroes_max_entry = {
	.attr = {.name = "write_zeroes_max_bytes", .mode = 0444 },
	.show = queue_write_zeroes_max_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = 0644 },
	.show = queue_show_nonrot,
	.store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_zoned_entry = {
	.attr = {.name = "zoned", .mode = 0444 },
	.show = queue_zoned_show,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = 0644 },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = 0644 },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = 0644 },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = 0644 },
	.show = queue_show_random,
	.store = queue_store_random,
};

static struct queue_sysfs_entry queue_poll_entry = {
	.attr = {.name = "io_poll", .mode = 0644 },
	.show = queue_poll_show,
	.store = queue_poll_store,
};

static struct queue_sysfs_entry queue_poll_delay_entry = {
	.attr = {.name = "io_poll_delay", .mode = 0644 },
	.show = queue_poll_delay_show,
	.store = queue_poll_delay_store,
};

static struct queue_sysfs_entry queue_wc_entry = {
	.attr = {.name = "write_cache", .mode = 0644 },
	.show = queue_wc_show,
	.store = queue_wc_store,
};

static struct queue_sysfs_entry queue_fua_entry = {
	.attr = {.name = "fua", .mode = 0444 },
	.show = queue_fua_show,
};

static struct queue_sysfs_entry queue_dax_entry = {
	.attr = {.name = "dax", .mode = 0444 },
	.show = queue_dax_show,
};

static struct queue_sysfs_entry queue_wb_lat_entry = {
	.attr = {.name = "wbt_lat_usec", .mode = 0644 },
	.show = queue_wb_lat_show,
	.store = queue_wb_lat_store,
};

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static struct queue_sysfs_entry throtl_sample_time_entry = {
	.attr = {.name = "throttle_sample_time", .mode = 0644 },
	.show = blk_throtl_sample_time_show,
	.store = blk_throtl_sample_time_store,
};
#endif

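/*
 * Every attribute that should appear under /sys/block/<disk>/queue/
 * must be listed in this NULL-terminated table; it is wired up as the
 * default_attrs of blk_queue_ktype below.
 */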
static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&throtl_sample_time_entry.attr,
#endif
	NULL,
};

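/*
 * show/store entry points for the kobject sysfs_ops: both serialize on
 * q->sysfs_lock and bail out with -ENOENT once the queue is dying, so
 * attribute code never races with queue teardown.
 */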
#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

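/*
 * The queue structure is freed via call_rcu() so that any remaining
 * RCU-protected lookups drop out before the memory is reused.
 */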
static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}

/**
 * __blk_release_queue - release a request queue when it is no longer needed
 * @work: pointer to the release_work member of the request queue to be released
 *
 * Description:
 *     blk_release_queue is the counterpart of blk_init_queue(). It should be
 *     called when a request queue is being released; typically when a block
 *     device is being de-registered. Its primary task is to free the queue
 *     itself.
 *
 * Notes:
 *     The low level driver must have finished any outstanding requests first
 *     via blk_cleanup_queue().
 *
 *     Although blk_release_queue() may be called with preemption disabled,
 *     __blk_release_queue() may sleep.
 */
static void __blk_release_queue(struct work_struct *work)
{
	struct request_queue *q = container_of(work, typeof(*q), release_work);

	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
		blk_stat_remove_callback(q, q->poll_cb);
	blk_stat_free_callback(q->poll_cb);

	blk_free_queue_stats(q->stats);

	blk_exit_rl(q, &q->root_rl);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	if (!q->mq_ops) {
		if (q->exit_rq_fn)
			q->exit_rq_fn(q, q->fq->flush_rq);
		blk_free_flush_queue(q->fq);
	} else {
		blk_mq_release(q);
	}

	blk_trace_shutdown(q);

	if (q->mq_ops)
		blk_mq_debugfs_unregister(q);

	bioset_exit(&q->bio_split);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	INIT_WORK(&q->release_work, __blk_release_queue);
	schedule_work(&q->release_work);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	WARN_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags),
		  "%s is registering an already registered queue\n",
		  kobject_name(&dev->kobj));
	queue_flag_set_unlocked(QUEUE_FLAG_REGISTERED, q);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices. Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved. To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
		blk_queue_bypass_end(q);
	}

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	/* Prevent changes through sysfs until registration is completed. */
	mutex_lock(&q->sysfs_lock);

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		goto unlock;
	}

	if (q->mq_ops) {
		__blk_mq_register_dev(dev, q);
		blk_mq_debugfs_register(q);
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

	wbt_enable_default(q);

	blk_throtl_register_queue(q);

	if (q->request_fn || (q->mq_ops && q->elevator)) {
		ret = elv_register_queue(q);
		if (ret) {
			mutex_unlock(&q->sysfs_lock);
			kobject_uevent(&q->kobj, KOBJ_REMOVE);
			kobject_del(&q->kobj);
			blk_trace_remove_sysfs(dev);
			kobject_put(&dev->kobj);
			return ret;
		}
	}
	ret = 0;
unlock:
	mutex_unlock(&q->sysfs_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(blk_register_queue);

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);

	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);

	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (q->mq_ops)
		blk_mq_unregister_dev(disk_to_dev(disk), q);
	mutex_unlock(&q->sysfs_lock);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));

	wbt_exit(q);

	mutex_lock(&q->sysfs_lock);
	if (q->request_fn || (q->mq_ops && q->elevator))
		elv_unregister_queue(q);
	mutex_unlock(&q->sysfs_lock);

	kobject_put(&disk_to_dev(disk)->kobj);
}
8324aa91 | 975 | } |