/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/blk-mq.h>
#include <linux/blk-cgroup.h>

#include "blk.h"
#include "blk-mq.h"

struct queue_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct request_queue *, char *);
        ssize_t (*store)(struct request_queue *, const char *, size_t);
};

static ssize_t
queue_var_show(unsigned long var, char *page)
{
        return sprintf(page, "%lu\n", var);
}

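/*
 * Parse a decimal sysfs input into an unsigned long, rejecting values
 * that do not fit in an unsigned int.
 */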
static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
        int err;
        unsigned long v;

        err = kstrtoul(page, 10, &v);
        if (err || v > UINT_MAX)
                return -EINVAL;

        *var = v;

        return count;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->nr_requests, (page));
}

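/*
 * Adjust the number of requests available to the queue.  The value is
 * clamped to at least BLKDEV_MIN_RQ and applied through the legacy
 * (request_fn) or blk-mq update helper, whichever the queue uses.
 */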
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long nr;
        int ret, err;

        if (!q->request_fn && !q->mq_ops)
                return -EINVAL;

        ret = queue_var_store(&nr, page, count);
        if (ret < 0)
                return ret;

        if (nr < BLKDEV_MIN_RQ)
                nr = BLKDEV_MIN_RQ;

        if (q->request_fn)
                err = blk_update_nr_requests(q, nr);
        else
                err = blk_mq_update_nr_requests(q, nr);

        if (err)
                return err;

        return ret;
}

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
        unsigned long ra_kb = q->backing_dev_info.ra_pages <<
                                        (PAGE_CACHE_SHIFT - 10);

        return queue_var_show(ra_kb, (page));
}

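/*
 * read_ahead_kb is exposed in kilobytes but stored as a page count in the
 * queue's backing_dev_info, hence the PAGE_CACHE_SHIFT - 10 conversions.
 */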
static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long ra_kb;
        ssize_t ret = queue_var_store(&ra_kb, page, count);

        if (ret < 0)
                return ret;

        q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);

        return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
        int max_sectors_kb = queue_max_sectors(q) >> 1;

        return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
        if (blk_queue_cluster(q))
                return queue_var_show(queue_max_segment_size(q), (page));

        return queue_var_show(PAGE_CACHE_SIZE, (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
        unsigned long long val;

        /* cast before shifting so a large sector count cannot overflow */
        val = (unsigned long long)q->limits.max_hw_discard_sectors << 9;
        return sprintf(page, "%llu\n", val);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)q->limits.max_discard_sectors << 9);
}

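/*
 * discard_max_bytes is written as a byte count.  It must be aligned to the
 * discard granularity and is converted to 512-byte sectors, then capped at
 * the hardware limit before being stored.
 */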
static ssize_t queue_discard_max_store(struct request_queue *q,
                                       const char *page, size_t count)
{
        unsigned long max_discard;
        ssize_t ret = queue_var_store(&max_discard, page, count);

        if (ret < 0)
                return ret;

        if (max_discard & (q->limits.discard_granularity - 1))
                return -EINVAL;

        max_discard >>= 9;
        if (max_discard > UINT_MAX)
                return -EINVAL;

        if (max_discard > q->limits.max_hw_discard_sectors)
                max_discard = q->limits.max_hw_discard_sectors;

        q->limits.max_discard_sectors = max_discard;
        return ret;
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_discard_zeroes_data(q), page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
        return sprintf(page, "%llu\n",
                       (unsigned long long)q->limits.max_write_same_sectors << 9);
}

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long max_sectors_kb,
                max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
                        page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
        ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

        if (ret < 0)
                return ret;

        max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb, (unsigned long)
                                         q->limits.max_dev_sectors >> 1);

        if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
                return -EINVAL;

        spin_lock_irq(q->queue_lock);
        q->limits.max_sectors = max_sectors_kb << 1;
        spin_unlock_irq(q->queue_lock);

        return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
        int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

        return queue_var_show(max_hw_sectors_kb, (page));
}

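/*
 * Generate show/store helpers for sysfs attributes that map directly to a
 * single queue flag.  "neg" inverts the exported sense, e.g. "rotational"
 * is the negation of QUEUE_FLAG_NONROT.
 */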
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)                            \
static ssize_t                                                          \
queue_show_##name(struct request_queue *q, char *page)                 \
{                                                                       \
        int bit;                                                        \
        bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);             \
        return queue_var_show(neg ? !bit : bit, page);                  \
}                                                                       \
static ssize_t                                                          \
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{                                                                       \
        unsigned long val;                                              \
        ssize_t ret;                                                    \
        ret = queue_var_store(&val, page, count);                       \
        if (ret < 0)                                                    \
                return ret;                                             \
        if (neg)                                                        \
                val = !val;                                             \
                                                                        \
        spin_lock_irq(q->queue_lock);                                   \
        if (val)                                                        \
                queue_flag_set(QUEUE_FLAG_##flag, q);                   \
        else                                                            \
                queue_flag_clear(QUEUE_FLAG_##flag, q);                 \
        spin_unlock_irq(q->queue_lock);                                 \
        return ret;                                                     \
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
        return queue_var_show((blk_queue_nomerges(q) << 1) |
                               blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
                                    size_t count)
{
        unsigned long nm;
        ssize_t ret = queue_var_store(&nm, page, count);

        if (ret < 0)
                return ret;

        spin_lock_irq(q->queue_lock);
        queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
        queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
        if (nm == 2)
                queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        else if (nm)
                queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
        spin_unlock_irq(q->queue_lock);

        return ret;
}

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
        bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
        bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

        return queue_var_show(set << force, page);
}

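/*
 * rq_affinity: writing 2 sets QUEUE_FLAG_SAME_COMP and QUEUE_FLAG_SAME_FORCE,
 * 1 sets only QUEUE_FLAG_SAME_COMP, and 0 clears both.  On non-SMP builds the
 * store always returns -EINVAL.
 */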
static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
        ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
        unsigned long val;

        ret = queue_var_store(&val, page, count);
        if (ret < 0)
                return ret;

        spin_lock_irq(q->queue_lock);
        if (val == 2) {
                queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 1) {
                queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
                queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        } else if (val == 0) {
                queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
                queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
        }
        spin_unlock_irq(q->queue_lock);
#endif
        return ret;
}

static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
        return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}

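/* Polling can only be enabled on blk-mq queues whose driver implements ->poll. */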
static ssize_t queue_poll_store(struct request_queue *q, const char *page,
                                size_t count)
{
        unsigned long poll_on;
        ssize_t ret;

        if (!q->mq_ops || !q->mq_ops->poll)
                return -EINVAL;

        ret = queue_var_store(&poll_on, page, count);
        if (ret < 0)
                return ret;

        spin_lock_irq(q->queue_lock);
        if (poll_on)
                queue_flag_set(QUEUE_FLAG_POLL, q);
        else
                queue_flag_clear(QUEUE_FLAG_POLL, q);
        spin_unlock_irq(q->queue_lock);

        return ret;
}

static struct queue_sysfs_entry queue_requests_entry = {
        .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
        .show = queue_requests_show,
        .store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
        .attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
        .show = queue_ra_show,
        .store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
        .attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
        .show = queue_max_sectors_show,
        .store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
        .attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
        .show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
        .attr = {.name = "max_segments", .mode = S_IRUGO },
        .show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
        .attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
        .show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
        .attr = {.name = "max_segment_size", .mode = S_IRUGO },
        .show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
        .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
        .show = elv_iosched_show,
        .store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
        .attr = {.name = "hw_sector_size", .mode = S_IRUGO },
        .show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
        .attr = {.name = "logical_block_size", .mode = S_IRUGO },
        .show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
        .attr = {.name = "physical_block_size", .mode = S_IRUGO },
        .show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
        .attr = {.name = "minimum_io_size", .mode = S_IRUGO },
        .show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
        .attr = {.name = "optimal_io_size", .mode = S_IRUGO },
        .show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
        .attr = {.name = "discard_granularity", .mode = S_IRUGO },
        .show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_hw_entry = {
        .attr = {.name = "discard_max_hw_bytes", .mode = S_IRUGO },
        .show = queue_discard_max_hw_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
        .attr = {.name = "discard_max_bytes", .mode = S_IRUGO | S_IWUSR },
        .show = queue_discard_max_show,
        .store = queue_discard_max_store,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
        .attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
        .show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
        .attr = {.name = "write_same_max_bytes", .mode = S_IRUGO },
        .show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
        .attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
        .show = queue_show_nonrot,
        .store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
        .attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
        .show = queue_nomerges_show,
        .store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
        .attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
        .show = queue_rq_affinity_show,
        .store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
        .attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
        .show = queue_show_iostats,
        .store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
        .attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
        .show = queue_show_random,
        .store = queue_store_random,
};

static struct queue_sysfs_entry queue_poll_entry = {
        .attr = {.name = "io_poll", .mode = S_IRUGO | S_IWUSR },
        .show = queue_poll_show,
        .store = queue_poll_store,
};

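/* Attributes exported in the "queue" sysfs directory of each registered disk. */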
static struct attribute *default_attrs[] = {
        &queue_requests_entry.attr,
        &queue_ra_entry.attr,
        &queue_max_hw_sectors_entry.attr,
        &queue_max_sectors_entry.attr,
        &queue_max_segments_entry.attr,
        &queue_max_integrity_segments_entry.attr,
        &queue_max_segment_size_entry.attr,
        &queue_iosched_entry.attr,
        &queue_hw_sector_size_entry.attr,
        &queue_logical_block_size_entry.attr,
        &queue_physical_block_size_entry.attr,
        &queue_io_min_entry.attr,
        &queue_io_opt_entry.attr,
        &queue_discard_granularity_entry.attr,
        &queue_discard_max_entry.attr,
        &queue_discard_max_hw_entry.attr,
        &queue_discard_zeroes_data_entry.attr,
        &queue_write_same_max_entry.attr,
        &queue_nonrot_entry.attr,
        &queue_nomerges_entry.attr,
        &queue_rq_affinity_entry.attr,
        &queue_iostats_entry.attr,
        &queue_random_entry.attr,
        &queue_poll_entry.attr,
        NULL,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

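/*
 * All attribute accesses are serialized by q->sysfs_lock and fail with
 * -ENOENT once the queue has been marked dying.
 */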
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);
        ssize_t res;

        if (!entry->show)
                return -EIO;
        mutex_lock(&q->sysfs_lock);
        if (blk_queue_dying(q)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
        res = entry->show(q, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
                 const char *page, size_t length)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct request_queue *q;
        ssize_t res;

        if (!entry->store)
                return -EIO;

        q = container_of(kobj, struct request_queue, kobj);
        mutex_lock(&q->sysfs_lock);
        if (blk_queue_dying(q)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
        res = entry->store(q, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
        struct request_queue *q = container_of(rcu_head, struct request_queue,
                                               rcu_head);
        kmem_cache_free(blk_requestq_cachep, q);
}

/**
 * blk_release_queue: - release a &struct request_queue when it is no longer needed
 * @kobj: the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Note:
 *     The low level driver must have finished any outstanding requests first
 *     via blk_cleanup_queue().
 **/
static void blk_release_queue(struct kobject *kobj)
{
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);

        bdi_exit(&q->backing_dev_info);
        blkcg_exit_queue(q);

        if (q->elevator) {
                spin_lock_irq(q->queue_lock);
                ioc_clear_queue(q);
                spin_unlock_irq(q->queue_lock);
                elevator_exit(q->elevator);
        }

        blk_exit_rl(&q->root_rl);

        if (q->queue_tags)
                __blk_queue_free_tags(q);

        if (!q->mq_ops)
                blk_free_flush_queue(q->fq);
        else
                blk_mq_release(q);

        blk_trace_shutdown(q);

        if (q->bio_split)
                bioset_free(q->bio_split);

        ida_simple_remove(&blk_queue_ida, q->id);
        call_rcu(&q->rcu_head, blk_free_queue_rcu);
}

static const struct sysfs_ops queue_sysfs_ops = {
        .show = queue_attr_show,
        .store = queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
        .sysfs_ops = &queue_sysfs_ops,
        .default_attrs = default_attrs,
        .release = blk_release_queue,
};

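/*
 * Register the queue kobject as "queue" under the disk's device, mark the
 * queue fully initialized, and register the elevator attributes for
 * request_fn-based queues.
 */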
int blk_register_queue(struct gendisk *disk)
{
        int ret;
        struct device *dev = disk_to_dev(disk);
        struct request_queue *q = disk->queue;

        if (WARN_ON(!q))
                return -ENXIO;

        /*
         * SCSI probing may synchronously create and destroy a lot of
         * request_queues for non-existent devices.  Shutting down a fully
         * functional queue takes measurable wallclock time as RCU grace
         * periods are involved.  To avoid excessive latency in these
         * cases, a request_queue starts out in a degraded mode which is
         * faster to shut down and is made fully functional here as
         * request_queues for non-existent devices never get registered.
         */
        if (!blk_queue_init_done(q)) {
                queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q);
                percpu_ref_switch_to_percpu(&q->q_usage_counter);
                blk_queue_bypass_end(q);
        }

        ret = blk_trace_init_sysfs(dev);
        if (ret)
                return ret;

        ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
        if (ret < 0) {
                blk_trace_remove_sysfs(dev);
                return ret;
        }

        kobject_uevent(&q->kobj, KOBJ_ADD);

        if (q->mq_ops)
                blk_mq_register_disk(disk);

        if (!q->request_fn)
                return 0;

        ret = elv_register_queue(q);
        if (ret) {
                kobject_uevent(&q->kobj, KOBJ_REMOVE);
                kobject_del(&q->kobj);
                blk_trace_remove_sysfs(dev);
                kobject_put(&dev->kobj);
                return ret;
        }

        return 0;
}

void blk_unregister_queue(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;

        if (WARN_ON(!q))
                return;

        if (q->mq_ops)
                blk_mq_unregister_disk(disk);

        if (q->request_fn)
                elv_unregister_queue(q);

        kobject_uevent(&q->kobj, KOBJ_REMOVE);
        kobject_del(&q->kobj);
        blk_trace_remove_sysfs(disk_to_dev(disk));
        kobject_put(&disk_to_dev(disk)->kobj);
}