/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <[email protected]>
 *
 * Copyright (C) 2008 Fabio Checconi <[email protected]>
 *                    Paolo Valente <[email protected]>
 *
 * Copyright (C) 2009 Vivek Goyal <[email protected]>
 *                    Nauman Rafique <[email protected]>
 *
 * For policy-specific per-blkcg data:
 * Copyright (C) 2015 Paolo Valente <[email protected]>
 *                    Arianna Avanzini <[email protected]>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/blk-cgroup.h>
#include "blk.h"

#define MAX_KEY_LEN 100

/*
 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 * policy [un]register operations including cgroup file additions /
 * removals.  Putting cgroup file registration outside blkcg_pol_mutex
 * allows grabbing it from cgroup callbacks.
 */
static DEFINE_MUTEX(blkcg_pol_register_mutex);
static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root;
EXPORT_SYMBOL_GPL(blkcg_root);

struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static LIST_HEAD(all_blkcgs);           /* protected by blkcg_pol_mutex */
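
/*
 * Test whether @pol has been activated on @q.  The matching bit in
 * q->blkcg_pols is set by blkcg_activate_policy() below.
 */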
static bool blkcg_policy_enabled(struct request_queue *q,
                                 const struct blkcg_policy *pol)
{
        return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
        int i;

        if (!blkg)
                return;

        for (i = 0; i < BLKCG_MAX_POLS; i++)
                if (blkg->pd[i])
                        blkcg_policy[i]->pd_free_fn(blkg->pd[i]);

        if (blkg->blkcg != &blkcg_root)
                blk_exit_rl(&blkg->rl);

        blkg_rwstat_exit(&blkg->stat_ios);
        blkg_rwstat_exit(&blkg->stat_bytes);
        kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
                                   gfp_t gfp_mask)
{
        struct blkcg_gq *blkg;
        int i;

        /* alloc and init base part */
        blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
        if (!blkg)
                return NULL;

        if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
            blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
                goto err_free;

        blkg->q = q;
        INIT_LIST_HEAD(&blkg->q_node);
        blkg->blkcg = blkcg;
        atomic_set(&blkg->refcnt, 1);

        /* root blkg uses @q->root_rl, init rl only for !root blkgs */
        if (blkcg != &blkcg_root) {
                if (blk_init_rl(&blkg->rl, q, gfp_mask))
                        goto err_free;
                blkg->rl.blkg = blkg;
        }

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
                struct blkg_policy_data *pd;

                if (!blkcg_policy_enabled(q, pol))
                        continue;

                /* alloc per-policy data and attach it to blkg */
                pd = pol->pd_alloc_fn(gfp_mask, q->node);
                if (!pd)
                        goto err_free;

                blkg->pd[i] = pd;
                pd->blkg = blkg;
                pd->plid = i;
        }

        return blkg;

err_free:
        blkg_free(blkg);
        return NULL;
}
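
/**
 * blkg_lookup_slowpath - look up a blkg after the fast-path hint misses
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update the lookup hint with the result
 *
 * Radix tree lookup used when blkcg->blkg_hint doesn't match; internal
 * detail behind blkg_lookup() and not meant to be called directly.
 */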
struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
                                      struct request_queue *q, bool update_hint)
{
        struct blkcg_gq *blkg;

        /*
         * Hint didn't match.  Look up from the radix tree.  Note that the
         * hint can only be updated under queue_lock as otherwise @blkg
         * could have already been removed from blkg_tree.  The caller is
         * responsible for grabbing queue_lock if @update_hint.
         */
        blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
        if (blkg && blkg->q == q) {
                if (update_hint) {
                        lockdep_assert_held(q->queue_lock);
                        rcu_assign_pointer(blkcg->blkg_hint, blkg);
                }
                return blkg;
        }

        return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
                                    struct request_queue *q,
                                    struct blkcg_gq *new_blkg)
{
        struct blkcg_gq *blkg;
        struct bdi_writeback_congested *wb_congested;
        int i, ret;

        WARN_ON_ONCE(!rcu_read_lock_held());
        lockdep_assert_held(q->queue_lock);

        /* blkg holds a reference to blkcg */
        if (!css_tryget_online(&blkcg->css)) {
                ret = -ENODEV;
                goto err_free_blkg;
        }

        wb_congested = wb_congested_get_create(&q->backing_dev_info,
                                               blkcg->css.id,
                                               GFP_NOWAIT | __GFP_NOWARN);
        if (!wb_congested) {
                ret = -ENOMEM;
                goto err_put_css;
        }

        /* allocate */
        if (!new_blkg) {
                new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
                if (unlikely(!new_blkg)) {
                        ret = -ENOMEM;
                        goto err_put_congested;
                }
        }
        blkg = new_blkg;
        blkg->wb_congested = wb_congested;

        /* link parent */
        if (blkcg_parent(blkcg)) {
                blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
                if (WARN_ON_ONCE(!blkg->parent)) {
                        ret = -ENODEV;
                        goto err_put_congested;
                }
                blkg_get(blkg->parent);
        }

        /* invoke per-policy init */
        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];

                if (blkg->pd[i] && pol->pd_init_fn)
                        pol->pd_init_fn(blkg->pd[i]);
        }

        /* insert */
        spin_lock(&blkcg->lock);
        ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
        if (likely(!ret)) {
                hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
                list_add(&blkg->q_node, &q->blkg_list);

                for (i = 0; i < BLKCG_MAX_POLS; i++) {
                        struct blkcg_policy *pol = blkcg_policy[i];

                        if (blkg->pd[i] && pol->pd_online_fn)
                                pol->pd_online_fn(blkg->pd[i]);
                }
        }
        blkg->online = true;
        spin_unlock(&blkcg->lock);

        if (!ret)
                return blkg;

        /* @blkg failed to be fully initialized, use the usual release path */
        blkg_put(blkg);
        return ERR_PTR(ret);

err_put_congested:
        wb_congested_put(wb_congested);
err_put_css:
        css_put(&blkcg->css);
err_free_blkg:
        blkg_free(new_blkg);
        return ERR_PTR(ret);
}

/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 * create one.  blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg.  This function
 * should be called under RCU read lock and @q->queue_lock.
 *
 * Returns pointer to the looked up or created blkg on success, ERR_PTR()
 * value on error.  If @q is dead, returns ERR_PTR(-ENODEV).  If @q is not
 * dead and bypassing, returns ERR_PTR(-EBUSY).
 */
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
                                    struct request_queue *q)
{
        struct blkcg_gq *blkg;

        WARN_ON_ONCE(!rcu_read_lock_held());
        lockdep_assert_held(q->queue_lock);

        /*
         * This could be the first entry point of blkcg implementation and
         * we shouldn't allow anything to go through for a bypassing queue.
         */
        if (unlikely(blk_queue_bypass(q)))
                return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);

        blkg = __blkg_lookup(blkcg, q, true);
        if (blkg)
                return blkg;

        /*
         * Create blkgs walking down from blkcg_root to @blkcg, so that all
         * non-root blkgs have access to their parents.
         */
        while (true) {
                struct blkcg *pos = blkcg;
                struct blkcg *parent = blkcg_parent(blkcg);

                while (parent && !__blkg_lookup(parent, q, false)) {
                        pos = parent;
                        parent = blkcg_parent(parent);
                }

                blkg = blkg_create(pos, q, NULL);
                if (pos == blkcg || IS_ERR(blkg))
                        return blkg;
        }
}
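
/*
 * Unlink and release @blkg.  Called with both @blkg->q->queue_lock and
 * @blkg->blkcg->lock held; the blkg is removed from the radix tree and
 * both lists, policies get their pd_offline_fn() callback, stats are
 * transferred to the parent, and the creation reference is put.
 */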
static void blkg_destroy(struct blkcg_gq *blkg)
{
        struct blkcg *blkcg = blkg->blkcg;
        struct blkcg_gq *parent = blkg->parent;
        int i;

        lockdep_assert_held(blkg->q->queue_lock);
        lockdep_assert_held(&blkcg->lock);

        /* Something wrong if we are trying to remove same group twice */
        WARN_ON_ONCE(list_empty(&blkg->q_node));
        WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];

                if (blkg->pd[i] && pol->pd_offline_fn)
                        pol->pd_offline_fn(blkg->pd[i]);
        }

        if (parent) {
                blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
                blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
        }

        blkg->online = false;

        radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
        list_del_init(&blkg->q_node);
        hlist_del_init_rcu(&blkg->blkcg_node);

        /*
         * Both setting lookup hint to and clearing it from @blkg are done
         * under queue_lock.  If it's not pointing to @blkg now, it never
         * will.  Hint assignment itself can race safely.
         */
        if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
                rcu_assign_pointer(blkcg->blkg_hint, NULL);

        /*
         * Put the reference taken at the time of creation so that when all
         * queues are gone, group can be destroyed.
         */
        blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
        struct blkcg_gq *blkg, *n;

        lockdep_assert_held(q->queue_lock);

        list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
                struct blkcg *blkcg = blkg->blkcg;

                spin_lock(&blkcg->lock);
                blkg_destroy(blkg);
                spin_unlock(&blkcg->lock);
        }

        q->root_blkg = NULL;
        q->root_rl.blkg = NULL;
}

/*
 * A group is RCU protected, but having an rcu lock does not mean that one
 * can access all the fields of blkg and assume these are valid.  For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Having a reference to blkg under an rcu allows accesses to only values
 * local to groups like group stats and group rate limits.
 */
void __blkg_release_rcu(struct rcu_head *rcu_head)
{
        struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);

        /* release the blkcg and parent blkg refs this blkg has been holding */
        css_put(&blkg->blkcg->css);
        if (blkg->parent)
                blkg_put(blkg->parent);

        wb_congested_put(blkg->wb_congested);

        blkg_free(blkg);
}
EXPORT_SYMBOL_GPL(__blkg_release_rcu);

/*
 * The next-entry function used by blk_queue_for_each_rl().  It's a bit
 * tricky because the root blkg uses @q->root_rl instead of its own rl.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
                                         struct request_queue *q)
{
        struct list_head *ent;
        struct blkcg_gq *blkg;

        /*
         * Determine the current blkg list_head.  The first entry is
         * root_rl which is off @q->blkg_list and mapped to the head.
         */
        if (rl == &q->root_rl) {
                ent = &q->blkg_list;
                /* There are no more block groups, hence no request lists */
                if (list_empty(ent))
                        return NULL;
        } else {
                blkg = container_of(rl, struct blkcg_gq, rl);
                ent = &blkg->q_node;
        }

        /* walk to the next list_head, skip root blkcg */
        ent = ent->next;
        if (ent == &q->root_blkg->q_node)
                ent = ent->next;
        if (ent == &q->blkg_list)
                return NULL;

        blkg = container_of(ent, struct blkcg_gq, q_node);
        return &blkg->rl;
}
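
/*
 * Write callback for the "reset_stats" legacy interface file; clears the
 * common and per-policy statistics of every blkg in @css's blkcg.
 */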
static int blkcg_reset_stats(struct cgroup_subsys_state *css,
                             struct cftype *cftype, u64 val)
{
        struct blkcg *blkcg = css_to_blkcg(css);
        struct blkcg_gq *blkg;
        int i;

        mutex_lock(&blkcg_pol_mutex);
        spin_lock_irq(&blkcg->lock);

        /*
         * Note that stat reset is racy - it doesn't synchronize against
         * stat updates.  This is a debug feature which shouldn't exist
         * anyway.  If you get hit by a race, retry.
         */
        hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
                blkg_rwstat_reset(&blkg->stat_bytes);
                blkg_rwstat_reset(&blkg->stat_ios);

                for (i = 0; i < BLKCG_MAX_POLS; i++) {
                        struct blkcg_policy *pol = blkcg_policy[i];

                        if (blkg->pd[i] && pol->pd_reset_stats_fn)
                                pol->pd_reset_stats_fn(blkg->pd[i]);
                }
        }

        spin_unlock_irq(&blkcg->lock);
        mutex_unlock(&blkcg_pol_mutex);
        return 0;
}
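
/* Return the device name backing @blkg's request_queue, or NULL if none. */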
const char *blkg_dev_name(struct blkcg_gq *blkg)
{
        /* some drivers (floppy) instantiate a queue w/o disk registered */
        if (blkg->q->backing_dev_info.dev)
                return dev_name(blkg->q->backing_dev_info.dev);
        return NULL;
}
EXPORT_SYMBOL_GPL(blkg_dev_name);

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held.  If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
                       u64 (*prfill)(struct seq_file *,
                                     struct blkg_policy_data *, int),
                       const struct blkcg_policy *pol, int data,
                       bool show_total)
{
        struct blkcg_gq *blkg;
        u64 total = 0;

        rcu_read_lock();
        hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
                spin_lock_irq(blkg->q->queue_lock);
                if (blkcg_policy_enabled(blkg->q, pol))
                        total += prfill(sf, blkg->pd[pol->plid], data);
                spin_unlock_irq(blkg->q->queue_lock);
        }
        rcu_read_unlock();

        if (show_total)
                seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
        const char *dname = blkg_dev_name(pd->blkg);

        if (!dname)
                return 0;

        seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
        return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                         const struct blkg_rwstat *rwstat)
{
        static const char *rwstr[] = {
                [BLKG_RWSTAT_READ]      = "Read",
                [BLKG_RWSTAT_WRITE]     = "Write",
                [BLKG_RWSTAT_SYNC]      = "Sync",
                [BLKG_RWSTAT_ASYNC]     = "Async",
        };
        const char *dname = blkg_dev_name(pd->blkg);
        u64 v;
        int i;

        if (!dname)
                return 0;

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
                           (unsigned long long)atomic64_read(&rwstat->aux_cnt[i]));

        v = atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_READ]) +
                atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]);
        seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
        return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);

/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
        return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                       int off)
{
        struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

        return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);
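
/* prfill callback for a blkg_rwstat living at @off inside the blkg itself */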
static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
                                    struct blkg_policy_data *pd, int off)
{
        struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd->blkg + off);

        return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/**
 * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
 * @sf: seq_file to print to
 * @v: unused
 *
 * To be used as cftype->seq_show to print blkg->stat_bytes.
 * cftype->private must be set to the blkcg_policy.
 */
int blkg_print_stat_bytes(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
                          offsetof(struct blkcg_gq, stat_bytes), true);
        return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_bytes);

/**
 * blkg_print_stat_ios - seq_show callback for blkg->stat_ios
 * @sf: seq_file to print to
 * @v: unused
 *
 * To be used as cftype->seq_show to print blkg->stat_ios.  cftype->private
 * must be set to the blkcg_policy.
 */
int blkg_print_stat_ios(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
                          offsetof(struct blkcg_gq, stat_ios), true);
        return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_ios);
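
/* like blkg_prfill_rwstat_field() but sums over @blkg and its descendants */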
static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
                                              struct blkg_policy_data *pd,
                                              int off)
{
        struct blkg_rwstat rwstat = blkg_rwstat_recursive_sum(pd->blkg,
                                                              NULL, off);
        return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/**
 * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes
 * @sf: seq_file to print to
 * @v: unused
 */
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          blkg_prfill_rwstat_field_recursive,
                          (void *)seq_cft(sf)->private,
                          offsetof(struct blkcg_gq, stat_bytes), true);
        return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive);

/**
 * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios
 * @sf: seq_file to print to
 * @v: unused
 */
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v)
{
        blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
                          blkg_prfill_rwstat_field_recursive,
                          (void *)seq_cft(sf)->private,
                          offsetof(struct blkcg_gq, stat_ios), true);
        return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);

/**
 * blkg_stat_recursive_sum - collect hierarchical blkg_stat
 * @blkg: blkg of interest
 * @pol: blkcg_policy which contains the blkg_stat
 * @off: offset to the blkg_stat in blkg_policy_data or @blkg
 *
 * Collect the blkg_stat specified by @blkg, @pol and @off and all its
 * online descendants and their aux counts.  The caller must be holding the
 * queue lock for online tests.
 *
 * If @pol is NULL, blkg_stat is at @off bytes into @blkg; otherwise, it is
 * at @off bytes into @blkg's blkg_policy_data of the policy.
 */
u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
                            struct blkcg_policy *pol, int off)
{
        struct blkcg_gq *pos_blkg;
        struct cgroup_subsys_state *pos_css;
        u64 sum = 0;

        lockdep_assert_held(blkg->q->queue_lock);

        rcu_read_lock();
        blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
                struct blkg_stat *stat;

                if (!pos_blkg->online)
                        continue;

                if (pol)
                        stat = (void *)blkg_to_pd(pos_blkg, pol) + off;
                else
                        stat = (void *)pos_blkg + off;

                sum += blkg_stat_read(stat) + atomic64_read(&stat->aux_cnt);
        }
        rcu_read_unlock();

        return sum;
}
EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);

/**
 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
 * @blkg: blkg of interest
 * @pol: blkcg_policy which contains the blkg_rwstat
 * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
 *
 * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
 * online descendants and their aux counts.  The caller must be holding the
 * queue lock for online tests.
 *
 * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
 * is at @off bytes into @blkg's blkg_policy_data of the policy.
 */
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
                                             struct blkcg_policy *pol, int off)
{
        struct blkcg_gq *pos_blkg;
        struct cgroup_subsys_state *pos_css;
        struct blkg_rwstat sum = { };
        int i;

        lockdep_assert_held(blkg->q->queue_lock);

        rcu_read_lock();
        blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
                struct blkg_rwstat *rwstat;

                if (!pos_blkg->online)
                        continue;

                if (pol)
                        rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
                else
                        rwstat = (void *)pos_blkg + off;

                for (i = 0; i < BLKG_RWSTAT_NR; i++)
                        atomic64_add(atomic64_read(&rwstat->aux_cnt[i]) +
                                percpu_counter_sum_positive(&rwstat->cpu_cnt[i]),
                                &sum.aux_cnt[i]);
        }
        rcu_read_unlock();

        return sum;
}
EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
 * part of @input following MAJ:MIN.  This function returns with RCU read
 * lock and queue lock held and must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                   char *input, struct blkg_conf_ctx *ctx)
        __acquires(rcu) __acquires(disk->queue->queue_lock)
{
        struct gendisk *disk;
        struct blkcg_gq *blkg;
        struct module *owner;
        unsigned int major, minor;
        int key_len, part, ret;
        char *body;

        if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
                return -EINVAL;

        body = input + key_len;
        if (!isspace(*body))
                return -EINVAL;
        body = skip_spaces(body);

        disk = get_gendisk(MKDEV(major, minor), &part);
        if (!disk)
                return -ENODEV;
        if (part) {
                owner = disk->fops->owner;
                put_disk(disk);
                module_put(owner);
                return -ENODEV;
        }

        rcu_read_lock();
        spin_lock_irq(disk->queue->queue_lock);

        if (blkcg_policy_enabled(disk->queue, pol))
                blkg = blkg_lookup_create(blkcg, disk->queue);
        else
                blkg = ERR_PTR(-EOPNOTSUPP);

        if (IS_ERR(blkg)) {
                ret = PTR_ERR(blkg);
                rcu_read_unlock();
                spin_unlock_irq(disk->queue->queue_lock);
                owner = disk->fops->owner;
                put_disk(disk);
                module_put(owner);
                /*
                 * If queue was bypassing, we should retry.  Do so after a
                 * short msleep().  It isn't strictly necessary but queue
                 * can be bypassing for some time and it's always nice to
                 * avoid busy looping.
                 */
                if (ret == -EBUSY) {
                        msleep(10);
                        ret = restart_syscall();
                }
                return ret;
        }

        ctx->disk = disk;
        ctx->blkg = blkg;
        ctx->body = body;
        return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
        __releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
        struct module *owner;

        spin_unlock_irq(ctx->disk->queue->queue_lock);
        rcu_read_unlock();
        owner = ctx->disk->fops->owner;
        put_disk(ctx->disk);
        module_put(owner);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
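
/*
 * A minimal usage sketch for the pair above (illustrative only; the
 * policy-specific handling of ctx.body is hypothetical):
 *
 *      struct blkg_conf_ctx ctx;
 *      int ret = blkg_conf_prep(blkcg, pol, buf, &ctx);
 *
 *      if (!ret) {
 *              ... parse ctx.body and update ctx.blkg's policy data ...
 *              blkg_conf_finish(&ctx);
 *      }
 */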
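
/*
 * Print the recursive byte and io counts of each blkg with a device name;
 * wired up as the seq_show callback for the "stat" file on the default
 * hierarchy (see blkcg_files[] below).
 */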
static int blkcg_print_stat(struct seq_file *sf, void *v)
{
        struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
        struct blkcg_gq *blkg;

        rcu_read_lock();

        hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
                const char *dname;
                struct blkg_rwstat rwstat;
                u64 rbytes, wbytes, rios, wios;

                dname = blkg_dev_name(blkg);
                if (!dname)
                        continue;

                spin_lock_irq(blkg->q->queue_lock);

                rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
                                        offsetof(struct blkcg_gq, stat_bytes));
                rbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
                wbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);

                rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
                                        offsetof(struct blkcg_gq, stat_ios));
                rios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
                wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);

                spin_unlock_irq(blkg->q->queue_lock);

                if (rbytes || wbytes || rios || wios)
                        seq_printf(sf, "%s rbytes=%llu wbytes=%llu rios=%llu wios=%llu\n",
                                   dname, rbytes, wbytes, rios, wios);
        }

        rcu_read_unlock();
        return 0;
}
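
/*
 * Interface files created on every blkcg.  Policies register additional
 * files of their own through ->dfl_cftypes and ->legacy_cftypes.
 */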
static struct cftype blkcg_files[] = {
        {
                .name = "stat",
                .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = blkcg_print_stat,
        },
        { }     /* terminate */
};

static struct cftype blkcg_legacy_files[] = {
        {
                .name = "reset_stats",
                .write_u64 = blkcg_reset_stats,
        },
        { }     /* terminate */
};

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away and responsible
 * for shooting down all blkgs associated with @css.  blkgs should be
 * removed while holding both q and blkcg locks.  As blkcg lock is nested
 * inside q lock, this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
        struct blkcg *blkcg = css_to_blkcg(css);

        spin_lock_irq(&blkcg->lock);

        while (!hlist_empty(&blkcg->blkg_list)) {
                struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
                                                    struct blkcg_gq, blkcg_node);
                struct request_queue *q = blkg->q;

                if (spin_trylock(q->queue_lock)) {
                        blkg_destroy(blkg);
                        spin_unlock(q->queue_lock);
                } else {
                        spin_unlock_irq(&blkcg->lock);
                        cpu_relax();
                        spin_lock_irq(&blkcg->lock);
                }
        }

        spin_unlock_irq(&blkcg->lock);

        wb_blkcg_offline(blkcg);
}

static void blkcg_css_free(struct cgroup_subsys_state *css)
{
        struct blkcg *blkcg = css_to_blkcg(css);
        int i;

        mutex_lock(&blkcg_pol_mutex);

        list_del(&blkcg->all_blkcgs_node);

        for (i = 0; i < BLKCG_MAX_POLS; i++)
                if (blkcg->cpd[i])
                        blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

        mutex_unlock(&blkcg_pol_mutex);

        kfree(blkcg);
}
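
/*
 * css_alloc callback: allocates the blkcg (reusing blkcg_root for the
 * topmost cgroup) along with per-policy cpd's, and links it on all_blkcgs.
 */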
static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
        struct blkcg *blkcg;
        struct cgroup_subsys_state *ret;
        int i;

        mutex_lock(&blkcg_pol_mutex);

        if (!parent_css) {
                blkcg = &blkcg_root;
        } else {
                blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
                if (!blkcg) {
                        ret = ERR_PTR(-ENOMEM);
                        goto free_blkcg;
                }
        }

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
                struct blkcg_policy_data *cpd;

                /*
                 * If the policy hasn't been attached yet, wait for it
                 * to be attached before doing anything else.  Otherwise,
                 * check if the policy requires any specific per-cgroup
                 * data: if it does, allocate and initialize it.
                 */
                if (!pol || !pol->cpd_alloc_fn)
                        continue;

                cpd = pol->cpd_alloc_fn(GFP_KERNEL);
                if (!cpd) {
                        ret = ERR_PTR(-ENOMEM);
                        goto free_pd_blkcg;
                }
                blkcg->cpd[i] = cpd;
                cpd->blkcg = blkcg;
                cpd->plid = i;
                if (pol->cpd_init_fn)
                        pol->cpd_init_fn(cpd);
        }

        spin_lock_init(&blkcg->lock);
        INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT | __GFP_NOWARN);
        INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
        INIT_LIST_HEAD(&blkcg->cgwb_list);
#endif
        list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);

        mutex_unlock(&blkcg_pol_mutex);
        return &blkcg->css;

free_pd_blkcg:
        for (i--; i >= 0; i--)
                if (blkcg->cpd[i])
                        blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
free_blkcg:
        kfree(blkcg);
        mutex_unlock(&blkcg_pol_mutex);
        return ret;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node().  Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
        struct blkcg_gq *new_blkg, *blkg;
        bool preloaded;
        int ret;

        new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
        if (!new_blkg)
                return -ENOMEM;

        preloaded = !radix_tree_preload(GFP_KERNEL);

        /*
         * Make sure the root blkg exists and count the existing blkgs.  As
         * @q is bypassing at this point, blkg_lookup_create() can't be
         * used.  Open code insertion.
         */
        rcu_read_lock();
        spin_lock_irq(q->queue_lock);
        blkg = blkg_create(&blkcg_root, q, new_blkg);
        spin_unlock_irq(q->queue_lock);
        rcu_read_unlock();

        if (preloaded)
                radix_tree_preload_end();

        if (IS_ERR(blkg)) {
                blkg_free(new_blkg);
                return PTR_ERR(blkg);
        }

        q->root_blkg = blkg;
        q->root_rl.blkg = blkg;

        ret = blk_throtl_init(q);
        if (ret) {
                spin_lock_irq(q->queue_lock);
                blkg_destroy_all(q);
                spin_unlock_irq(q->queue_lock);
        }
        return ret;
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
        lockdep_assert_held(q->queue_lock);

        /*
         * @q could be exiting and already have destroyed all blkgs as
         * indicated by NULL root_blkg.  If so, don't confuse policies.
         */
        if (!q->root_blkg)
                return;

        blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
        spin_lock_irq(q->queue_lock);
        blkg_destroy_all(q);
        spin_unlock_irq(q->queue_lock);

        blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup_taskset *tset)
{
        struct task_struct *task;
        struct cgroup_subsys_state *dst_css;
        struct io_context *ioc;
        int ret = 0;

        /* task_lock() is needed to avoid races with exit_io_context() */
        cgroup_taskset_for_each(task, dst_css, tset) {
                task_lock(task);
                ioc = task->io_context;
                if (ioc && atomic_read(&ioc->nr_tasks) > 1)
                        ret = -EINVAL;
                task_unlock(task);
                if (ret)
                        break;
        }
        return ret;
}
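
/*
 * cgroup bind callback: lets every registered policy propagate the event
 * to its per-blkcg data via ->cpd_bind_fn().
 */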
static void blkcg_bind(struct cgroup_subsys_state *root_css)
{
        int i;

        mutex_lock(&blkcg_pol_mutex);

        for (i = 0; i < BLKCG_MAX_POLS; i++) {
                struct blkcg_policy *pol = blkcg_policy[i];
                struct blkcg *blkcg;

                if (!pol || !pol->cpd_bind_fn)
                        continue;

                list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
                        if (blkcg->cpd[pol->plid])
                                pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
        }
        mutex_unlock(&blkcg_pol_mutex);
}

struct cgroup_subsys io_cgrp_subsys = {
        .css_alloc = blkcg_css_alloc,
        .css_offline = blkcg_css_offline,
        .css_free = blkcg_css_free,
        .can_attach = blkcg_can_attach,
        .bind = blkcg_bind,
        .dfl_cftypes = blkcg_files,
        .legacy_cftypes = blkcg_legacy_files,
        .legacy_name = "blkio",
#ifdef CONFIG_MEMCG
        /*
         * This ensures that, if available, memcg is automatically enabled
         * together on the default hierarchy so that the owner cgroup can
         * be retrieved from writeback pages.
         */
        .depends_on = 1 << memory_cgrp_id,
#endif
};
EXPORT_SYMBOL_GPL(io_cgrp_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
                          const struct blkcg_policy *pol)
{
        struct blkg_policy_data *pd_prealloc = NULL;
        struct blkcg_gq *blkg;
        int ret;

        if (blkcg_policy_enabled(q, pol))
                return 0;

        blk_queue_bypass_start(q);
pd_prealloc:
        if (!pd_prealloc) {
                pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
                if (!pd_prealloc) {
                        ret = -ENOMEM;
                        goto out_bypass_end;
                }
        }

        spin_lock_irq(q->queue_lock);

        list_for_each_entry(blkg, &q->blkg_list, q_node) {
                struct blkg_policy_data *pd;

                if (blkg->pd[pol->plid])
                        continue;

                pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q->node);
                if (!pd)
                        swap(pd, pd_prealloc);
                if (!pd) {
                        spin_unlock_irq(q->queue_lock);
                        goto pd_prealloc;
                }

                blkg->pd[pol->plid] = pd;
                pd->blkg = blkg;
                pd->plid = pol->plid;
                if (pol->pd_init_fn)
                        pol->pd_init_fn(pd);
        }

        __set_bit(pol->plid, q->blkcg_pols);
        ret = 0;

        spin_unlock_irq(q->queue_lock);
out_bypass_end:
        blk_queue_bypass_end(q);
        if (pd_prealloc)
                pol->pd_free_fn(pd_prealloc);
        return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);

/**
 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to deactivate
 *
 * Deactivate @pol on @q.  Follows the same synchronization rules as
 * blkcg_activate_policy().
 */
void blkcg_deactivate_policy(struct request_queue *q,
                             const struct blkcg_policy *pol)
{
        struct blkcg_gq *blkg;

        if (!blkcg_policy_enabled(q, pol))
                return;

        blk_queue_bypass_start(q);
        spin_lock_irq(q->queue_lock);

        __clear_bit(pol->plid, q->blkcg_pols);

        list_for_each_entry(blkg, &q->blkg_list, q_node) {
                /* grab blkcg lock too while removing @pd from @blkg */
                spin_lock(&blkg->blkcg->lock);

                if (blkg->pd[pol->plid]) {
                        if (pol->pd_offline_fn)
                                pol->pd_offline_fn(blkg->pd[pol->plid]);
                        pol->pd_free_fn(blkg->pd[pol->plid]);
                        blkg->pd[pol->plid] = NULL;
                }

                spin_unlock(&blkg->blkcg->lock);
        }

        spin_unlock_irq(q->queue_lock);
        blk_queue_bypass_end(q);
}
EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);

/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
        struct blkcg *blkcg;
        int i, ret;

        mutex_lock(&blkcg_pol_register_mutex);
        mutex_lock(&blkcg_pol_mutex);

        /* find an empty slot */
        ret = -ENOSPC;
        for (i = 0; i < BLKCG_MAX_POLS; i++)
                if (!blkcg_policy[i])
                        break;
        if (i >= BLKCG_MAX_POLS)
                goto err_unlock;

        /* register @pol */
        pol->plid = i;
        blkcg_policy[pol->plid] = pol;

        /* allocate and install cpd's */
        if (pol->cpd_alloc_fn) {
                list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
                        struct blkcg_policy_data *cpd;

                        cpd = pol->cpd_alloc_fn(GFP_KERNEL);
                        if (!cpd)
                                goto err_free_cpds;

                        blkcg->cpd[pol->plid] = cpd;
                        cpd->blkcg = blkcg;
                        cpd->plid = pol->plid;
                        pol->cpd_init_fn(cpd);
                }
        }

        mutex_unlock(&blkcg_pol_mutex);

        /* everything is in place, add intf files for the new policy */
        if (pol->dfl_cftypes)
                WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
                                               pol->dfl_cftypes));
        if (pol->legacy_cftypes)
                WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
                                                  pol->legacy_cftypes));
        mutex_unlock(&blkcg_pol_register_mutex);
        return 0;

err_free_cpds:
        if (pol->cpd_alloc_fn) {
                list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
                        if (blkcg->cpd[pol->plid]) {
                                pol->cpd_free_fn(blkcg->cpd[pol->plid]);
                                blkcg->cpd[pol->plid] = NULL;
                        }
                }
        }
        blkcg_policy[pol->plid] = NULL;
err_unlock:
        mutex_unlock(&blkcg_pol_mutex);
        mutex_unlock(&blkcg_pol_register_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);

/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
        struct blkcg *blkcg;

        mutex_lock(&blkcg_pol_register_mutex);

        if (WARN_ON(blkcg_policy[pol->plid] != pol))
                goto out_unlock;

        /* kill the intf files first */
        if (pol->dfl_cftypes)
                cgroup_rm_cftypes(pol->dfl_cftypes);
        if (pol->legacy_cftypes)
                cgroup_rm_cftypes(pol->legacy_cftypes);

        /* remove cpds and unregister */
        mutex_lock(&blkcg_pol_mutex);

        if (pol->cpd_alloc_fn) {
                list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
                        if (blkcg->cpd[pol->plid]) {
                                pol->cpd_free_fn(blkcg->cpd[pol->plid]);
                                blkcg->cpd[pol->plid] = NULL;
                        }
                }
        }
        blkcg_policy[pol->plid] = NULL;

        mutex_unlock(&blkcg_pol_mutex);
out_unlock:
        mutex_unlock(&blkcg_pol_register_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);