// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-wbt.h"
#include "blk-throttle.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};
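
/*
 * Each queue_sysfs_entry backs one attribute file under
 * /sys/block/<disk>/queue/: ->show() formats the current value into a
 * PAGE_SIZE buffer, and ->store() parses a value written by the user and
 * returns the number of bytes consumed or a negative errno.
 */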

static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}
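
/*
 * Note that queue_var_store() returns the byte count on success, so the
 * store handlers below can pass its result straight back to sysfs; values
 * above UINT_MAX are rejected because most queue limits are 32-bit
 * internally.
 */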

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, page);
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!queue_is_mq(q))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(q, nr);
	if (err)
		return err;

	return ret;
}
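
/*
 * nr_requests is clamped below by BLKDEV_MIN_RQ and applies only to blk-mq
 * queues; e.g. to deepen the per-queue request pool:
 *
 *	echo 256 > /sys/block/<disk>/queue/nr_requests
 */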

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb;

	if (!q->disk)
		return -EINVAL;
	ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
	return queue_var_show(ra_kb, page);
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret;

	if (!q->disk)
		return -EINVAL;
	ret = queue_var_store(&ra_kb, page, count);
	if (ret < 0)
		return ret;
	q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
	return ret;
}
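
/*
 * read_ahead_kb is stored as a page count: with 4K pages, PAGE_SHIFT - 10
 * is 2, so 128 pages shows as 512 KB, and writing 512 stores 128 pages.
 */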

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), page);
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, page);
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segment_size(q), page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}
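
/*
 * discard_max_bytes accepts a byte count that must be aligned to the
 * discard granularity; it is converted to sectors (>> 9) and capped at the
 * hardware limit reported in discard_max_hw_bytes. Writing 0 effectively
 * disables discards on the queue.
 */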

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}

static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
						 char *page)
{
	return queue_var_show(queue_zone_write_granularity(q), page);
}

static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
	unsigned long long max_sectors = q->limits.max_zone_append_sectors;

	return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
					 q->limits.max_dev_sectors >> 1);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(&q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	if (q->disk)
		q->disk->bdi->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(&q->queue_lock);

	return ret;
}
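
/*
 * max_sectors_kb must lie between one page and max_hw_sectors_kb (further
 * capped by the optional max_dev_sectors limit), e.g.:
 *
 *	echo 512 > /sys/block/<disk>/queue/max_sectors_kb
 */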

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, page);
}

static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.virt_boundary_mask, page);
}

#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_##name##_show(struct request_queue *q, char *page)		\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	if (val)							\
		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
	else								\
		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
#undef QUEUE_SYSFS_BIT_FNS
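
/*
 * As an example of the expansion: QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1)
 * generates a queue_nonrot_show()/queue_nonrot_store() pair backed by
 * QUEUE_FLAG_NONROT. Because neg is 1 there, the exported "rotational"
 * value is the inverse of the flag: it reads 0 when QUEUE_FLAG_NONROT is
 * set (e.g. on SSDs), and writing 0 sets the flag.
 */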

static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}

static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_nr_zones(q), page);
}

static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_open_zones(q), page);
}

static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_active_zones(q), page);
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	return ret;
}
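
/*
 * nomerges is a two-level knob: 0 enables all merging, 1 sets
 * QUEUE_FLAG_NOXMERGES (only simple one-hit cache merges are attempted),
 * and 2 sets QUEUE_FLAG_NOMERGES (no merging at all), e.g.:
 *
 *	echo 2 > /sys/block/<disk>/queue/nomerges
 */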

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
#endif
	return ret;
}
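
/*
 * rq_affinity encodes two flags as one value: 0 clears both, 1 completes
 * requests on a CPU in the same group as the submitter
 * (QUEUE_FLAG_SAME_COMP), and 2 additionally forces completion on the
 * exact submitting CPU (QUEUE_FLAG_SAME_FORCE). The show side
 * reconstructs this as set << force.
 */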

static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	int val;

	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
		val = BLK_MQ_POLL_CLASSIC;
	else
		val = q->poll_nsec / 1000;

	return sprintf(page, "%d\n", val);
}

static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				size_t count)
{
	int err, val;

	if (!q->mq_ops || !q->mq_ops->poll)
		return -EINVAL;

	err = kstrtoint(page, 10, &val);
	if (err < 0)
		return err;

	if (val == BLK_MQ_POLL_CLASSIC)
		q->poll_nsec = BLK_MQ_POLL_CLASSIC;
	else if (val >= 0)
		q->poll_nsec = val * 1000;
	else
		return -EINVAL;

	return count;
}
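
/*
 * io_poll_delay: -1 (BLK_MQ_POLL_CLASSIC) selects classic busy polling,
 * 0 selects hybrid polling with a kernel-estimated sleep, and a positive
 * value is a fixed hybrid-poll sleep in microseconds (stored internally
 * in nanoseconds).
 */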

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return -EINVAL;
	pr_info_ratelimited("writes to the poll attribute are ignored.\n");
	pr_info_ratelimited("please use driver specific parameters instead.\n");
	return count;
}

static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
				  size_t count)
{
	unsigned int val;
	int err;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	blk_queue_rq_timeout(q, msecs_to_jiffies(val));

	return count;
}
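
/*
 * io_timeout is exposed in milliseconds and converted to jiffies, e.g.
 * giving requests 60 seconds to complete:
 *
 *	echo 60000 > /sys/block/<disk>/queue/io_timeout
 */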

static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!wbt_rq_qos(q))
		return -EINVAL;

	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}

static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(q);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		return count;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return count;
}
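
/*
 * wbt_lat_usec: 0 disables writeback throttling, -1 restores the default
 * latency target (e.g. 2 ms for non-rotational devices), and a positive
 * value sets the target in microseconds. The queue is frozen and quiesced
 * around the update because wbt may be fully enabled or disabled by it.
 */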

static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}

static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	int set = -1;

	if (!strncmp(page, "write back", 10))
		set = 1;
	else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4))
		set = 0;

	if (set == -1)
		return -EINVAL;

	if (set)
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);

	return count;
}

static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}

static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}

#define QUEUE_RO_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0444 },	\
	.show	= _prefix##_show,			\
};

#define QUEUE_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0644 },	\
	.show	= _prefix##_show,			\
	.store	= _prefix##_store,			\
};
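
/*
 * For instance, QUEUE_RW_ENTRY(queue_requests, "nr_requests") below
 * expands to:
 *
 *	static struct queue_sysfs_entry queue_requests_entry = {
 *		.attr	= { .name = "nr_requests", .mode = 0644 },
 *		.show	= queue_requests_show,
 *		.store	= queue_requests_store,
 *	};
 */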

QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
#endif

/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};

QUEUE_RW_ENTRY(queue_nonrot, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");

static struct attribute *queue_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&elv_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_zone_append_max_entry.attr,
	&queue_zone_write_granularity_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_max_open_zones_entry.attr,
	&queue_max_active_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_stable_writes_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_wb_lat_entry.attr,
	&queue_poll_delay_entry.attr,
	&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&blk_throtl_sample_time_entry.attr,
#endif
	&queue_virt_boundary_mask_entry.attr,
	NULL,
};

static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
				  int n)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	if (attr == &queue_io_timeout_entry.attr &&
		(!q->mq_ops || !q->mq_ops->timeout))
		return 0;

	if ((attr == &queue_max_open_zones_entry.attr ||
	     attr == &queue_max_active_zones_entry.attr) &&
	    !blk_queue_is_zoned(q))
		return 0;

	return attr->mode;
}

static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
	struct request_queue *q = container_of(rcu_head, struct request_queue,
					       rcu_head);
	kmem_cache_free(blk_requestq_cachep, q);
}

/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
static void blk_exit_queue(struct request_queue *q)
{
	/*
	 * Since the I/O scheduler exit code may access cgroup information,
	 * perform I/O scheduler exit before disassociating from the block
	 * cgroup controller.
	 */
	if (q->elevator) {
		ioc_clear_queue(q);
		__elevator_exit(q, q->elevator);
	}

	/*
	 * Remove all references to @q from the block cgroup controller before
	 * restoring @q->queue_lock to avoid that restoring this pointer causes
	 * e.g. blkcg_print_blkgs() to crash.
	 */
	blkcg_exit_queue(q);
}

/**
 * blk_release_queue - releases all allocated resources of the request_queue
 * @kobj: pointer to a kobject, whose container is a request_queue
 *
 * This function releases all allocated resources of the request queue.
 *
 * The struct request_queue refcount is incremented with blk_get_queue() and
 * decremented with blk_put_queue(). Once the refcount reaches 0 this function
 * is called.
 *
 * For drivers that have a request_queue on a gendisk and added with
 * __device_add_disk() the refcount to request_queue will reach 0 with
 * the last put_disk() called by the driver. For drivers which don't use
 * __device_add_disk() this happens with blk_cleanup_queue().
 *
 * Drivers exist which depend on the release of the request_queue to be
 * synchronous, it should not be deferred.
 *
 * Context: can sleep
 */
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	might_sleep();

	if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
		blk_stat_remove_callback(q, q->poll_cb);
	blk_stat_free_callback(q->poll_cb);

	blk_free_queue_stats(q->stats);

	if (queue_is_mq(q)) {
		struct blk_mq_hw_ctx *hctx;
		int i;

		cancel_delayed_work_sync(&q->requeue_work);

		queue_for_each_hw_ctx(q, hctx, i)
			cancel_delayed_work_sync(&hctx->run_work);
	}

	blk_exit_queue(q);

	blk_queue_free_zone_bitmaps(q);

	if (queue_is_mq(q))
		blk_mq_release(q);

	blk_trace_shutdown(q);
	mutex_lock(&q->debugfs_mutex);
	debugfs_remove_recursive(q->debugfs_dir);
	mutex_unlock(&q->debugfs_mutex);

	if (queue_is_mq(q))
		blk_mq_debugfs_unregister(q);

	bioset_exit(&q->bio_split);

	ida_simple_remove(&blk_queue_ida, q->id);
	call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.release	= blk_release_queue,
};

/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	mutex_lock(&q->sysfs_dir_lock);

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		goto unlock;
	}

	ret = sysfs_create_group(&q->kobj, &queue_attr_group);
	if (ret) {
		blk_trace_remove_sysfs(dev);
		kobject_del(&q->kobj);
		kobject_put(&dev->kobj);
		goto unlock;
	}

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
					    blk_debugfs_root);
	mutex_unlock(&q->debugfs_mutex);

	if (queue_is_mq(q)) {
		__blk_mq_register_dev(dev, q);
		blk_mq_debugfs_register(q);
	}

	mutex_lock(&q->sysfs_lock);

	ret = disk_register_independent_access_ranges(disk, NULL);
	if (ret)
		goto put_dev;

	if (q->elevator) {
		ret = elv_register_queue(q, false);
		if (ret)
			goto put_dev;
	}

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(q);
	blk_throtl_register_queue(q);

	/* Now everything is ready and send out KOBJ_ADD uevent */
	kobject_uevent(&q->kobj, KOBJ_ADD);
	if (q->elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);

	ret = 0;
unlock:
	mutex_unlock(&q->sysfs_dir_lock);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices. Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved. To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
	}

	return ret;

put_dev:
	disk_unregister_independent_access_ranges(disk);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(dev);
	kobject_put(&dev->kobj);

	return ret;
}

/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	mutex_lock(&q->sysfs_dir_lock);
	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_unregister_dev(disk_to_dev(disk), q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));

	mutex_lock(&q->sysfs_lock);
	if (q->elevator)
		elv_unregister_queue(q);
	disk_unregister_independent_access_ranges(disk);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);

	kobject_put(&disk_to_dev(disk)->kobj);
}