/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>

#include "blk.h"
#include "blk-cgroup.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

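/*
 * queue_var_show()/queue_var_store() are the common helpers behind most
 * attributes below: they format one unsigned long per sysfs page and
 * parse a decimal value back, rejecting anything above UINT_MAX.
 */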
static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = strict_strtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}

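/*
 * Writing nr_requests resizes the queue's request lists.  Congestion
 * state is only tracked against the root blkcg's request_list for now,
 * but the full/empty state and any sleepers are updated for every
 * request_list hanging off the queue.
 */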
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	struct request_list *rl;
	unsigned long nr;
	int ret;

	if (!q->request_fn)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	spin_lock_irq(q->queue_lock);
	q->nr_requests = nr;
	blk_queue_congestion_threshold(q);

	/* congestion isn't cgroup aware and follows root blkcg for now */
	rl = &q->root_rl;

	if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_SYNC);
	else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_SYNC);

	if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_ASYNC);
	else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_ASYNC);

	blk_queue_for_each_rl(rl, q) {
		if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
			blk_set_rl_full(rl, BLK_RW_SYNC);
		} else {
			blk_clear_rl_full(rl, BLK_RW_SYNC);
			wake_up(&rl->wait[BLK_RW_SYNC]);
		}

		if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
			blk_set_rl_full(rl, BLK_RW_ASYNC);
		} else {
			blk_clear_rl_full(rl, BLK_RW_ASYNC);
			wake_up(&rl->wait[BLK_RW_ASYNC]);
		}
	}

	spin_unlock_irq(q->queue_lock);
	return ret;
}

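/*
 * read_ahead_kb is stored as a page count in backing_dev_info, so the
 * show/store pair below converts between kilobytes and pages with a
 * PAGE_CACHE_SHIFT - 10 shift.
 */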
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
					(PAGE_CACHE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	if (ret < 0)
		return ret;

	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);

	return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	if (blk_queue_cluster(q))
		return queue_var_show(queue_max_segment_size(q), (page));

	return queue_var_show(PAGE_CACHE_SIZE, (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_discard_zeroes_data(q), page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_same_sectors << 9);
}

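/*
 * The soft max_sectors_kb limit is clamped to [one page, max_hw_sectors_kb]:
 * it can be lowered at runtime but never raised beyond what the hardware
 * and driver reported at probe time.
 */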
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (ret < 0)
		return ret;

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}

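/*
 * QUEUE_SYSFS_BIT_FNS() generates a queue_show_<name>/queue_store_<name>
 * pair for a single queue flag.  With neg set, the exported value is the
 * inverse of the flag, which is how "rotational" reports the opposite of
 * QUEUE_FLAG_NONROT.
 */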
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_show_##name(struct request_queue *q, char *page)			\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (neg)							\
		val = !val;						\
									\
	spin_lock_irq(q->queue_lock);					\
	if (val)							\
		queue_flag_set(QUEUE_FLAG_##flag, q);			\
	else								\
		queue_flag_clear(QUEUE_FLAG_##flag, q);			\
	spin_unlock_irq(q->queue_lock);					\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS

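/*
 * nomerges exposes two flags as one value: 0 allows all merging, 1 sets
 * only QUEUE_FLAG_NOXMERGES (no extended back/front merge lookups), and
 * 2 sets QUEUE_FLAG_NOMERGES as well, disabling merging entirely.
 */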
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

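/*
 * rq_affinity uses the same 0/1/2 encoding: 1 completes requests on the
 * submitting CPU's group (QUEUE_FLAG_SAME_COMP), 2 forces completion on
 * the exact submitting CPU (QUEUE_FLAG_SAME_FORCE as well), e.g.
 * "echo 2 > /sys/block/<dev>/queue/rq_affinity".
 */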
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#if defined(CONFIG_USE_GENERIC_SMP_HELPERS)
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	spin_lock_irq(q->queue_lock);
	if (val == 2) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}

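/*
 * The attribute table: each entry wires one file under
 * /sys/block/<dev>/queue to a show/store pair above.  Entries without a
 * store hook are read-only (mode S_IRUGO).
 */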
static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
	.attr = {.name = "max_segments", .mode = S_IRUGO },
	.show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
	.attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
	.show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = S_IRUGO },
	.show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = S_IRUGO },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = S_IRUGO },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = S_IRUGO },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO },
	.show = queue_discard_max_show,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_write_same_max_entry = {
	.attr = {.name = "write_same_max_bytes", .mode = S_IRUGO },
	.show = queue_write_same_max_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_nonrot,
	.store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_random,
	.store = queue_store_random,
};

static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	NULL,
};

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

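/*
 * show/store dispatchers: both take q->sysfs_lock and bail out with
 * -ENOENT if the queue has been marked dead, so attribute handlers never
 * run against a queue that is going away.
 */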
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dead(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (blk_queue_dead(q)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

/**
 * blk_release_queue: - release a &struct request_queue when it is no longer needed
 * @kobj: the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Caveat:
 *     Hopefully the low level driver will have finished any
 *     outstanding requests first...
 **/
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);

	blk_sync_queue(q);

	blkcg_exit_queue(q);

	if (q->elevator) {
		spin_lock_irq(q->queue_lock);
		ioc_clear_queue(q);
		spin_unlock_irq(q->queue_lock);
		elevator_exit(q->elevator);
	}

	blk_exit_rl(&q->root_rl);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	blk_trace_shutdown(q);

	bdi_destroy(&q->backing_dev_info);

	ida_simple_remove(&blk_queue_ida, q->id);
	kmem_cache_free(blk_requestq_cachep, q);
}

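/*
 * blk_queue_ktype glues everything together: default_attrs populates the
 * "queue" directory, queue_sysfs_ops routes reads/writes through the
 * dispatchers above, and blk_release_queue runs on the final kobject_put().
 */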
static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};

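/*
 * blk_register_queue - expose a disk's request queue under
 * /sys/block/<disk>/queue.  Ends the initial bypass period started at
 * queue allocation, then adds the queue kobject, blktrace attributes
 * and, for request_fn based (i.e. elevator using) drivers, the elevator
 * directory.
 */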
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	/*
	 * Initialization must be complete by now.  Finish the initial
	 * bypass from queue allocation.
	 */
	blk_queue_bypass_end(q);

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		return ret;
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

	if (!q->request_fn)
		return 0;

	ret = elv_register_queue(q);
	if (ret) {
		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		blk_trace_remove_sysfs(dev);
		kobject_put(&dev->kobj);
		return ret;
	}

	return 0;
}

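/*
 * blk_unregister_queue - undo blk_register_queue: tear down the elevator
 * directory (if any), emit KOBJ_REMOVE, and drop the references taken at
 * registration time.
 */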
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	if (q->request_fn)
		elv_unregister_queue(q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));
	kobject_put(&disk_to_dev(disk)->kobj);
}