// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-wbt.h"

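/*
 * Each queue sysfs file is backed by one of these entries: an attribute
 * plus optional show/store callbacks that operate on the request_queue.
 */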
struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

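/*
 * Helpers shared by most attributes: print an unsigned long, or parse one
 * from user input, rejecting values that do not fit in an unsigned int.
 */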
static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}

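/*
 * nr_requests is only meaningful for blk-mq queues; clamp the new value to
 * at least BLKDEV_MIN_RQ and update the tag-set depth to match.
 */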
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!queue_is_mq(q))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(q, nr);
	if (err)
		return err;

	return ret;
}

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info->ra_pages <<
					(PAGE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}

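/*
 * read_ahead_kb is stored internally in pages; convert to and from
 * kilobytes via PAGE_SHIFT - 10.
 */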
static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	if (ret < 0)
		return ret;

	q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);

	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segment_size(q), (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

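/*
 * discard_max_bytes may be lowered at runtime: the new value must be
 * aligned to the discard granularity and is capped by the hardware limit.
 */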
static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

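/*
 * max_sectors_kb must stay between one page and the (device-capped)
 * hardware maximum; keep backing_dev_info->io_pages in sync as well.
 */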
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
					 q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(&q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(&q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}

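/*
 * Generate show/store handlers for simple boolean queue flags; "neg"
 * inverts the value so that, e.g., "rotational" maps onto QUEUE_FLAG_NONROT.
 */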
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_show_##name(struct request_queue *q, char *page)			\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	if (val)							\
		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
	else								\
		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_nr_zones(q), page);
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

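/*
 * nomerges: 0 enables all merging, 1 disables only the more expensive
 * extended merge lookups, 2 disables merging entirely.
 */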
static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	return ret;
}

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

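/*
 * rq_affinity: 0 turns completion steering off, 1 completes a request in
 * the submitting CPU's group, 2 forces completion onto the exact
 * submitting CPU. Only meaningful on SMP builds.
 */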
static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
#endif
	return ret;
}

static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	int val;

	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
		val = BLK_MQ_POLL_CLASSIC;
	else
		val = q->poll_nsec / 1000;

	return sprintf(page, "%d\n", val);
}

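/*
 * io_poll_delay: -1 selects classic (busy-loop) polling, 0 lets the kernel
 * pick a hybrid sleep delay automatically, and a positive value is a fixed
 * hybrid delay in usec (stored internally in nsec).
 */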
static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				size_t count)
{
	int err, val;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	err = kstrtoint(page, 10, &val);
	if (err < 0)
		return err;

	if (val == BLK_MQ_POLL_CLASSIC)
		q->poll_nsec = BLK_MQ_POLL_CLASSIC;
	else if (val >= 0)
		q->poll_nsec = val * 1000;
	else
		return -EINVAL;

	return count;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	unsigned long poll_on;
	ssize_t ret;

	if (!q->tag_set || q->tag_set->nr_maps <= HCTX_TYPE_POLL ||
	    !q->tag_set->map[HCTX_TYPE_POLL].nr_queues)
		return -EINVAL;

	ret = queue_var_store(&poll_on, page, count);
	if (ret < 0)
		return ret;

	if (poll_on)
		blk_queue_flag_set(QUEUE_FLAG_POLL, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_POLL, q);

	return ret;
}

static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
				  size_t count)
{
	unsigned int val;
	int err;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	blk_queue_rq_timeout(q, msecs_to_jiffies(val));

	return count;
}

static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!wbt_rq_qos(q))
		return -EINVAL;

	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

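/*
 * wbt_lat_usec: -1 restores the default write-back throttling latency
 * target, 0 and up set it explicitly (in usec). Changing it may enable or
 * disable wbt outright, so the queue is frozen and quiesced around the
 * update.
 */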
static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(q);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		return count;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);
	wbt_update_limits(q);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return count;
}

static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

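/*
 * write_cache accepts "write back" or "write through" ("none" is treated
 * as write through) and toggles QUEUE_FLAG_WC to match.
 */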
static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	if (set)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);

	return count;
}

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}

static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = 0644 },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = 0644 },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = 0644 },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = 0444 },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
	.attr = {.name = "max_segments", .mode = 0444 },
	.show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_discard_segments_entry = {
	.attr = {.name = "max_discard_segments", .mode = 0444 },
	.show = queue_max_discard_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
	.attr = {.name = "max_integrity_segments", .mode = 0444 },
	.show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = 0444 },
	.show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = 0644 },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = 0444 },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_chunk_sectors_entry = {
	.attr = {.name = "chunk_sectors", .mode = 0444 },
	.show = queue_chunk_sectors_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = 0444 },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = 0444 },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = 0444 },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_hw_entry = {
	.attr = {.name = "discard_max_hw_bytes", .mode = 0444 },
	.show = queue_discard_max_hw_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = 0644 },
	.show = queue_discard_max_show,
	.store = queue_discard_max_store,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = 0444 },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
	.attr = {.name = "write_same_max_bytes", .mode = 0444 },
	.show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_write_zeroes_max_entry = {
	.attr = {.name = "write_zeroes_max_bytes", .mode = 0444 },
	.show = queue_write_zeroes_max_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = 0644 },
	.show = queue_show_nonrot,
	.store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_zoned_entry = {
	.attr = {.name = "zoned", .mode = 0444 },
	.show = queue_zoned_show,
};

static struct queue_sysfs_entry queue_nr_zones_entry = {
	.attr = {.name = "nr_zones", .mode = 0444 },
	.show = queue_nr_zones_show,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = 0644 },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = 0644 },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = 0644 },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = 0644 },
	.show = queue_show_random,
	.store = queue_store_random,
};

static struct queue_sysfs_entry queue_poll_entry = {
	.attr = {.name = "io_poll", .mode = 0644 },
	.show = queue_poll_show,
	.store = queue_poll_store,
};

static struct queue_sysfs_entry queue_poll_delay_entry = {
	.attr = {.name = "io_poll_delay", .mode = 0644 },
	.show = queue_poll_delay_show,
	.store = queue_poll_delay_store,
};

static struct queue_sysfs_entry queue_wc_entry = {
	.attr = {.name = "write_cache", .mode = 0644 },
	.show = queue_wc_show,
	.store = queue_wc_store,
};

static struct queue_sysfs_entry queue_fua_entry = {
	.attr = {.name = "fua", .mode = 0444 },
	.show = queue_fua_show,
};

static struct queue_sysfs_entry queue_dax_entry = {
	.attr = {.name = "dax", .mode = 0444 },
	.show = queue_dax_show,
};

static struct queue_sysfs_entry queue_io_timeout_entry = {
	.attr = {.name = "io_timeout", .mode = 0644 },
	.show = queue_io_timeout_show,
	.store = queue_io_timeout_store,
};

static struct queue_sysfs_entry queue_wb_lat_entry = {
	.attr = {.name = "wbt_lat_usec", .mode = 0644 },
	.show = queue_wb_lat_show,
	.store = queue_wb_lat_store,
};

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
static struct queue_sysfs_entry throtl_sample_time_entry = {
	.attr = {.name = "throttle_sample_time", .mode = 0644 },
	.show = blk_throtl_sample_time_show,
	.store = blk_throtl_sample_time_store,
};
#endif

static struct attribute *queue_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
	&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&throtl_sample_time_entry.attr,
#endif
	NULL,
};

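/*
 * Hide io_timeout for queues that cannot time out requests (no blk-mq ops
 * or no ->timeout handler).
 */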
static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
				  int n)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	if (attr == &queue_io_timeout_entry.attr &&
		(!q->mq_ops || !q->mq_ops->timeout))
		return 0;

	return attr->mode;
}

static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

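/*
 * Generic show/store dispatchers: serialize against sysfs_lock and refuse
 * access once the queue is dying.
 */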
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dying(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}

/**
 * __blk_release_queue - release a request queue
 * @work: pointer to the release_work member of the request queue to be
 *	released
 *
 * Description:
 *     This function is called when a block device is being unregistered. The
 *     process of releasing a request queue starts with blk_cleanup_queue(),
 *     which sets the appropriate flags and then calls blk_put_queue(), which
 *     decrements the reference counter of the request queue. Once the
 *     reference counter of the request queue reaches zero,
 *     blk_release_queue() is called to release all allocated resources of
 *     the request queue.
 */
static void __blk_release_queue(struct work_struct *work)
{
	struct request_queue *q = container_of(work, typeof(*q), release_work);

	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
		blk_stat_remove_callback(q, q->poll_cb);
	blk_stat_free_callback(q->poll_cb);

	if (!blk_queue_dead(q)) {
		/*
		 * Last reference was dropped without having called
		 * blk_cleanup_queue().
		 */
		WARN_ONCE(blk_queue_init_done(q),
			  "request queue %p has been registered but blk_cleanup_queue() has not been called for that queue\n",
			  q);
		blk_exit_queue(q);
	}

	WARN(blk_queue_root_blkg(q),
	     "request queue %p is being released but it has not yet been removed from the blkcg controller\n",
	     q);

	blk_free_queue_stats(q->stats);

	blk_queue_free_zone_bitmaps(q);

	if (queue_is_mq(q))
		blk_mq_release(q);

	blk_trace_shutdown(q);

	if (queue_is_mq(q))
		blk_mq_debugfs_unregister(q);

	bioset_exit(&q->bio_split);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

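/*
 * Defer the real release work to a workqueue so that it can sleep even if
 * the final kobject_put() happens from atomic context.
 */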
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	INIT_WORK(&q->release_work, __blk_release_queue);
	schedule_work(&q->release_work);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show = queue_attr_show,
	.store = queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops = &queue_sysfs_ops,
	.release = blk_release_queue,
};

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	WARN_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags),
		  "%s is registering an already registered queue\n",
		  kobject_name(&dev->kobj));
	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices. Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved. To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
	}

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	/* Prevent changes through sysfs until registration is completed. */
	mutex_lock(&q->sysfs_lock);

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		goto unlock;
	}

	ret = sysfs_create_group(&q->kobj, &queue_attr_group);
	if (ret) {
		blk_trace_remove_sysfs(dev);
		kobject_del(&q->kobj);
		kobject_put(&dev->kobj);
		goto unlock;
	}

	if (queue_is_mq(q)) {
		__blk_mq_register_dev(dev, q);
		blk_mq_debugfs_register(q);
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

	wbt_enable_default(q);

	blk_throtl_register_queue(q);

	if (q->elevator) {
		ret = elv_register_queue(q);
		if (ret) {
			mutex_unlock(&q->sysfs_lock);
			kobject_uevent(&q->kobj, KOBJ_REMOVE);
			kobject_del(&q->kobj);
			blk_trace_remove_sysfs(dev);
			kobject_put(&dev->kobj);
			return ret;
		}
	}
	ret = 0;
unlock:
	mutex_unlock(&q->sysfs_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(blk_register_queue);

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is
 * called after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);

	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);

	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_unregister_dev(disk_to_dev(disk), q);
	mutex_unlock(&q->sysfs_lock);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));

	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		elv_unregister_queue(q);
	mutex_unlock(&q->sysfs_lock);

	kobject_put(&disk_to_dev(disk)->kobj);
}