/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>

#include "blk.h"

struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};

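/*
 * Each attribute under /sys/block/<dev>/queue is described by a
 * queue_sysfs_entry; its ->show/->store callbacks are dispatched by
 * queue_attr_show() and queue_attr_store() below, under q->sysfs_lock.
 * The two helpers that follow handle the common decimal in/out format.
 */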
static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

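/*
 * "nr_requests" sets the depth of the request allocation pool.  Changing
 * it requires re-evaluating the congestion and "queue full" state of
 * both the sync and async request lists, since the current allocation
 * counts may now sit on the other side of the on/off thresholds.
 */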
static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, (page));
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	struct request_list *rl = &q->rq;
	unsigned long nr;
	int ret;

	if (!q->request_fn)
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	spin_lock_irq(q->queue_lock);
	q->nr_requests = nr;
	blk_queue_congestion_threshold(q);

	if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_SYNC);
	else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_SYNC);

	if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_ASYNC);
	else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_ASYNC);

	if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
		blk_set_queue_full(q, BLK_RW_SYNC);
	} else {
		blk_clear_queue_full(q, BLK_RW_SYNC);
		wake_up(&rl->wait[BLK_RW_SYNC]);
	}

	if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
		blk_set_queue_full(q, BLK_RW_ASYNC);
	} else {
		blk_clear_queue_full(q, BLK_RW_ASYNC);
		wake_up(&rl->wait[BLK_RW_ASYNC]);
	}
	spin_unlock_irq(q->queue_lock);
	return ret;
}

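/*
 * "read_ahead_kb": ra_pages is kept in units of pages, so shifting by
 * (PAGE_CACHE_SHIFT - 10) converts between pages and kilobytes.
 */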
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb = q->backing_dev_info.ra_pages <<
					(PAGE_CACHE_SHIFT - 10);

	return queue_var_show(ra_kb, (page));
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);

	return ret;
}

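/*
 * Sector-based limits are exported in kilobytes: with 512-byte sectors,
 * ">> 1" converts sectors to KB and "<< 1" converts back.
 */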
static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, (page));
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), (page));
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, (page));
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	if (blk_queue_cluster(q))
		return queue_var_show(queue_max_segment_size(q), (page));

	return queue_var_show(PAGE_CACHE_SIZE, (page));
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_discard_zeroes_data(q), page);
}

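/*
 * "max_sectors_kb" may be lowered (and raised again) at runtime, but
 * never below one page or above the hardware limit; "max_hw_sectors_kb"
 * itself is read-only.
 */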
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
		return -EINVAL;

	spin_lock_irq(q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	spin_unlock_irq(q->queue_lock);

	return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, (page));
}

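/*
 * QUEUE_SYSFS_BIT_FNS() stamps out a show/store pair for a single queue
 * flag.  A non-zero @neg inverts the exported value, which is how
 * QUEUE_FLAG_NONROT ends up presented as "rotational".
 */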
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_show_##name(struct request_queue *q, char *page)			\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_store_##name(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (neg)							\
		val = !val;						\
									\
	spin_lock_irq(q->queue_lock);					\
	if (val)							\
		queue_flag_set(QUEUE_FLAG_##flag, q);			\
	else								\
		queue_flag_clear(QUEUE_FLAG_##flag, q);			\
	spin_unlock_irq(q->queue_lock);					\
	return ret;							\
}

QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
#undef QUEUE_SYSFS_BIT_FNS

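/*
 * "nomerges" is a three-state knob: 0 leaves all merging enabled, 1 sets
 * only NOXMERGES (skip the more expensive extended merge lookups), and 2
 * sets NOMERGES to disable merging entirely.
 */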
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
	spin_unlock_irq(q->queue_lock);

	return ret;
}

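/*
 * "rq_affinity": QUEUE_FLAG_SAME_COMP steers request completions back to
 * the CPU (group) that submitted the request.  The store path is only
 * compiled in when the generic SMP IPI helpers are available.
 */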
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);

	return queue_var_show(set, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#if defined(CONFIG_USE_GENERIC_SMP_HELPERS)
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	spin_lock_irq(q->queue_lock);
	if (val)
		queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
	else
		queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
	spin_unlock_irq(q->queue_lock);
#endif
	return ret;
}

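/* One queue_sysfs_entry per file under /sys/block/<dev>/queue/. */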
static struct queue_sysfs_entry queue_requests_entry = {
	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
	.show = queue_requests_show,
	.store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_ra_show,
	.store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
	.show = queue_max_sectors_show,
	.store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
	.show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
	.attr = {.name = "max_segments", .mode = S_IRUGO },
	.show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_integrity_segments_entry = {
	.attr = {.name = "max_integrity_segments", .mode = S_IRUGO },
	.show = queue_max_integrity_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
	.attr = {.name = "max_segment_size", .mode = S_IRUGO },
	.show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
	.show = elv_iosched_show,
	.store = elv_iosched_store,
};

/* Legacy name: "hw_sector_size" reports the logical block size. */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
	.attr = {.name = "logical_block_size", .mode = S_IRUGO },
	.show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
	.attr = {.name = "physical_block_size", .mode = S_IRUGO },
	.show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
	.attr = {.name = "minimum_io_size", .mode = S_IRUGO },
	.show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
	.attr = {.name = "optimal_io_size", .mode = S_IRUGO },
	.show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
	.attr = {.name = "discard_granularity", .mode = S_IRUGO },
	.show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
	.attr = {.name = "discard_max_bytes", .mode = S_IRUGO },
	.show = queue_discard_max_show,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
	.attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
	.show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
	.attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_nonrot,
	.store = queue_store_nonrot,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
	.attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
	.show = queue_nomerges_show,
	.store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
	.attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
	.show = queue_rq_affinity_show,
	.store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
	.attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_iostats,
	.store = queue_store_iostats,
};

static struct queue_sysfs_entry queue_random_entry = {
	.attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR },
	.show = queue_show_random,
	.store = queue_store_random,
};

static struct attribute *default_attrs[] = {
	&queue_requests_entry.attr,
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_iosched_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_iostats_entry.attr,
	&queue_random_entry.attr,
	NULL,
};

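/*
 * All attribute access is serialized by q->sysfs_lock, and both paths
 * return -ENOENT once the queue has been marked QUEUE_FLAG_DEAD.
 */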
#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		    const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct request_queue *q;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	q = container_of(kobj, struct request_queue, kobj);
	mutex_lock(&q->sysfs_lock);
	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
		mutex_unlock(&q->sysfs_lock);
		return -ENOENT;
	}
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

/**
 * blk_release_queue - release a &struct request_queue when it is no longer needed
 * @kobj: the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the counterpart of blk_init_queue() or
 *     blk_queue_make_request().  It is called when a request queue is
 *     being released; typically when a block device is being
 *     de-registered.  Currently, its primary task is to free all the
 *     &struct request structures that were allocated to the queue and
 *     the queue itself.
 *
 * Caveat:
 *     Hopefully the low level driver will have finished any
 *     outstanding requests first...
 **/
static void blk_release_queue(struct kobject *kobj)
{
	struct request_queue *q =
		container_of(kobj, struct request_queue, kobj);
	struct request_list *rl = &q->rq;

	blk_sync_queue(q);

	if (rl->rq_pool)
		mempool_destroy(rl->rq_pool);

	if (q->queue_tags)
		__blk_queue_free_tags(q);

	blk_trace_shutdown(q);

	bdi_destroy(&q->backing_dev_info);
	kmem_cache_free(blk_requestq_cachep, q);
}

static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
	.sysfs_ops	= &queue_sysfs_ops,
	.default_attrs	= default_attrs,
	.release	= blk_release_queue,
};

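/*
 * blk_register_queue() parents the queue kobject under the disk's device
 * kobject (the "queue" directory) and, for request-based drivers, also
 * registers the elevator attributes; blk_unregister_queue() undoes both.
 */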
int blk_register_queue(struct gendisk *disk)
{
	int ret;
	struct device *dev = disk_to_dev(disk);
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return -ENXIO;

	ret = blk_trace_init_sysfs(dev);
	if (ret)
		return ret;

	ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
	if (ret < 0) {
		blk_trace_remove_sysfs(dev);
		return ret;
	}

	kobject_uevent(&q->kobj, KOBJ_ADD);

	if (!q->request_fn)
		return 0;

	ret = elv_register_queue(q);
	if (ret) {
		kobject_uevent(&q->kobj, KOBJ_REMOVE);
		kobject_del(&q->kobj);
		blk_trace_remove_sysfs(dev);
		kobject_put(&dev->kobj);
		return ret;
	}

	return 0;
}

void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	if (q->request_fn)
		elv_unregister_queue(q);

	kobject_uevent(&q->kobj, KOBJ_REMOVE);
	kobject_del(&q->kobj);
	blk_trace_remove_sysfs(disk_to_dev(disk));
	kobject_put(&disk_to_dev(disk)->kobj);
}