/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 *
 * For policy-specific per-blkcg data:
 * Copyright (C) 2015 Paolo Valente <paolo.valente@unimore.it>
 *                    Arianna Avanzini <avanzini.arianna@gmail.com>
 */
#include <linux/ioprio.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include <linux/atomic.h>
#include <linux/ctype.h>
#include <linux/blk-cgroup.h>
#include "blk.h"

#define MAX_KEY_LEN 100

/*
 * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
 * blkcg_pol_register_mutex nests outside of it and synchronizes entire
 * policy [un]register operations including cgroup file additions /
 * removals.  Putting cgroup file registration outside blkcg_pol_mutex
 * allows grabbing it from cgroup callbacks.
 */
static DEFINE_MUTEX(blkcg_pol_register_mutex);
static DEFINE_MUTEX(blkcg_pol_mutex);

struct blkcg blkcg_root;
EXPORT_SYMBOL_GPL(blkcg_root);

struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;

static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];

static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */

static bool blkcg_policy_enabled(struct request_queue *q,
				 const struct blkcg_policy *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

/**
 * blkg_free - free a blkg
 * @blkg: blkg to free
 *
 * Free @blkg which may be partially allocated.
 */
static void blkg_free(struct blkcg_gq *blkg)
{
	int i;

	if (!blkg)
		return;

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkg->pd[i])
			blkcg_policy[i]->pd_free_fn(blkg->pd[i]);

	if (blkg->blkcg != &blkcg_root)
		blk_exit_rl(&blkg->rl);

	blkg_rwstat_exit(&blkg->stat_ios);
	blkg_rwstat_exit(&blkg->stat_bytes);
	kfree(blkg);
}

/**
 * blkg_alloc - allocate a blkg
 * @blkcg: block cgroup the new blkg is associated with
 * @q: request_queue the new blkg is associated with
 * @gfp_mask: allocation mask to use
 *
 * Allocate a new blkg associating @blkcg and @q.
 */
static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
				   gfp_t gfp_mask)
{
	struct blkcg_gq *blkg;
	int i;

	/* alloc and init base part */
	blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
	if (!blkg)
		return NULL;

	if (blkg_rwstat_init(&blkg->stat_bytes, gfp_mask) ||
	    blkg_rwstat_init(&blkg->stat_ios, gfp_mask))
		goto err_free;

	blkg->q = q;
	INIT_LIST_HEAD(&blkg->q_node);
	blkg->blkcg = blkcg;
	atomic_set(&blkg->refcnt, 1);

	/* root blkg uses @q->root_rl, init rl only for !root blkgs */
	if (blkcg != &blkcg_root) {
		if (blk_init_rl(&blkg->rl, q, gfp_mask))
			goto err_free;
		blkg->rl.blkg = blkg;
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkg_policy_data *pd;

		if (!blkcg_policy_enabled(q, pol))
			continue;

		/* alloc per-policy data and attach it to blkg */
		pd = pol->pd_alloc_fn(gfp_mask, q->node);
		if (!pd)
			goto err_free;

		blkg->pd[i] = pd;
		pd->blkg = blkg;
		pd->plid = i;
	}

	return blkg;

err_free:
	blkg_free(blkg);
	return NULL;
}

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint)
{
	struct blkcg_gq *blkg;

	/*
	 * Hint didn't match.  Look up from the radix tree.  Note that the
	 * hint can only be updated under queue_lock as otherwise @blkg
	 * could have already been removed from blkg_tree.  The caller is
	 * responsible for grabbing queue_lock if @update_hint.
	 */
	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
	if (blkg && blkg->q == q) {
		if (update_hint) {
			lockdep_assert_held(q->queue_lock);
			rcu_assign_pointer(blkcg->blkg_hint, blkg);
		}
		return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_lookup_slowpath);

/*
 * If @new_blkg is %NULL, this function tries to allocate a new one as
 * necessary using %GFP_NOWAIT.  @new_blkg is always consumed on return.
 */
static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
				    struct request_queue *q,
				    struct blkcg_gq *new_blkg)
{
	struct blkcg_gq *blkg;
	struct bdi_writeback_congested *wb_congested;
	int i, ret;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/* blkg holds a reference to blkcg */
	if (!css_tryget_online(&blkcg->css)) {
		ret = -ENODEV;
		goto err_free_blkg;
	}

	wb_congested = wb_congested_get_create(&q->backing_dev_info,
					       blkcg->css.id, GFP_NOWAIT);
	if (!wb_congested) {
		ret = -ENOMEM;
		goto err_put_css;
	}

	/* allocate */
	if (!new_blkg) {
		new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT);
		if (unlikely(!new_blkg)) {
			ret = -ENOMEM;
			goto err_put_congested;
		}
	}
	blkg = new_blkg;
	blkg->wb_congested = wb_congested;

	/* link parent */
	if (blkcg_parent(blkcg)) {
		blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
		if (WARN_ON_ONCE(!blkg->parent)) {
			ret = -ENODEV;
			goto err_put_congested;
		}
		blkg_get(blkg->parent);
	}

	/* invoke per-policy init */
	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_init_fn)
			pol->pd_init_fn(blkg->pd[i]);
	}

	/* insert */
	spin_lock(&blkcg->lock);
	ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
	if (likely(!ret)) {
		hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
		list_add(&blkg->q_node, &q->blkg_list);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_online_fn)
				pol->pd_online_fn(blkg->pd[i]);
		}
	}
	blkg->online = true;
	spin_unlock(&blkcg->lock);

	if (!ret)
		return blkg;

	/* @blkg failed to be fully initialized, use the usual release path */
	blkg_put(blkg);
	return ERR_PTR(ret);

err_put_congested:
	wb_congested_put(wb_congested);
err_put_css:
	css_put(&blkcg->css);
err_free_blkg:
	blkg_free(new_blkg);
	return ERR_PTR(ret);
}

/**
 * blkg_lookup_create - lookup blkg, try to create one if not there
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  If it doesn't exist, try to
 * create one.  blkg creation is performed recursively from blkcg_root such
 * that all non-root blkg's have access to the parent blkg.  This function
 * should be called under RCU read lock and @q->queue_lock.
 *
 * Returns pointer to the looked up or created blkg on success, ERR_PTR()
 * value on error.  If @q is dead, returns ERR_PTR(-ENODEV).  If @q is not
 * dead and bypassing, returns ERR_PTR(-EBUSY).
 */
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q)
{
	struct blkcg_gq *blkg;

	WARN_ON_ONCE(!rcu_read_lock_held());
	lockdep_assert_held(q->queue_lock);

	/*
	 * This could be the first entry point of blkcg implementation and
	 * we shouldn't allow anything to go through for a bypassing queue.
	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dying(q) ? -ENODEV : -EBUSY);

	blkg = __blkg_lookup(blkcg, q, true);
	if (blkg)
		return blkg;

	/*
	 * Create blkgs walking down from blkcg_root to @blkcg, so that all
	 * non-root blkgs have access to their parents.
	 */
	while (true) {
		struct blkcg *pos = blkcg;
		struct blkcg *parent = blkcg_parent(blkcg);

		while (parent && !__blkg_lookup(parent, q, false)) {
			pos = parent;
			parent = blkcg_parent(parent);
		}

		blkg = blkg_create(pos, q, NULL);
		if (pos == blkcg || IS_ERR(blkg))
			return blkg;
	}
}

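/*
 * Example (illustrative sketch, not part of the original source): a
 * caller performs the lookup/creation while holding both the RCU read
 * lock and @q->queue_lock, the way blkg_conf_prep() below does:
 *
 *	rcu_read_lock();
 *	spin_lock_irq(q->queue_lock);
 *	blkg = blkg_lookup_create(blkcg, q);
 *	if (!IS_ERR(blkg))
 *		...operate on blkg and its per-policy data...
 *	spin_unlock_irq(q->queue_lock);
 *	rcu_read_unlock();
 */
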
static void blkg_destroy(struct blkcg_gq *blkg)
{
	struct blkcg *blkcg = blkg->blkcg;
	struct blkcg_gq *parent = blkg->parent;
	int i;

	lockdep_assert_held(blkg->q->queue_lock);
	lockdep_assert_held(&blkcg->lock);

	/* Something wrong if we are trying to remove same group twice */
	WARN_ON_ONCE(list_empty(&blkg->q_node));
	WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];

		if (blkg->pd[i] && pol->pd_offline_fn)
			pol->pd_offline_fn(blkg->pd[i]);
	}

	if (parent) {
		blkg_rwstat_add_aux(&parent->stat_bytes, &blkg->stat_bytes);
		blkg_rwstat_add_aux(&parent->stat_ios, &blkg->stat_ios);
	}

	blkg->online = false;

	radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
	list_del_init(&blkg->q_node);
	hlist_del_init_rcu(&blkg->blkcg_node);

	/*
	 * Both setting lookup hint to and clearing it from @blkg are done
	 * under queue_lock.  If it's not pointing to @blkg now, it never
	 * will.  Hint assignment itself can race safely.
	 */
	if (rcu_access_pointer(blkcg->blkg_hint) == blkg)
		rcu_assign_pointer(blkcg->blkg_hint, NULL);

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	blkg_put(blkg);
}

/**
 * blkg_destroy_all - destroy all blkgs associated with a request_queue
 * @q: request_queue of interest
 *
 * Destroy all blkgs associated with @q.
 */
static void blkg_destroy_all(struct request_queue *q)
{
	struct blkcg_gq *blkg, *n;

	lockdep_assert_held(q->queue_lock);

	list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
		struct blkcg *blkcg = blkg->blkcg;

		spin_lock(&blkcg->lock);
		blkg_destroy(blkg);
		spin_unlock(&blkcg->lock);
	}

	q->root_blkg = NULL;
	q->root_rl.blkg = NULL;
}

/*
 * A group is RCU protected, but having an rcu lock does not mean that one
 * can access all the fields of blkg and assume these are valid.  For
 * example, don't try to follow throtl_data and request queue links.
 *
 * Having a reference to blkg under an rcu allows accesses to only values
 * local to groups like group stats and group rate limits.
 */
void __blkg_release_rcu(struct rcu_head *rcu_head)
{
	struct blkcg_gq *blkg = container_of(rcu_head, struct blkcg_gq, rcu_head);

	/* release the blkcg and parent blkg refs this blkg has been holding */
	css_put(&blkg->blkcg->css);
	if (blkg->parent)
		blkg_put(blkg->parent);

	wb_congested_put(blkg->wb_congested);

	blkg_free(blkg);
}
EXPORT_SYMBOL_GPL(__blkg_release_rcu);

/*
 * The next function used by blk_queue_for_each_rl().  It's a bit tricky
 * because the root blkg uses @q->root_rl instead of its own rl.
 */
struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q)
{
	struct list_head *ent;
	struct blkcg_gq *blkg;

	/*
	 * Determine the current blkg list_head.  The first entry is
	 * root_rl which is off @q->blkg_list and mapped to the head.
	 */
	if (rl == &q->root_rl) {
		ent = &q->blkg_list;
		/* There are no more block groups, hence no request lists */
		if (list_empty(ent))
			return NULL;
	} else {
		blkg = container_of(rl, struct blkcg_gq, rl);
		ent = &blkg->q_node;
	}

	/* walk to the next list_head, skip root blkcg */
	ent = ent->next;
	if (ent == &q->root_blkg->q_node)
		ent = ent->next;
	if (ent == &q->blkg_list)
		return NULL;

	blkg = container_of(ent, struct blkcg_gq, q_node);
	return &blkg->rl;
}

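/*
 * Example (illustrative sketch): blk_queue_for_each_rl(), built on top of
 * __blk_queue_next_rl() in blk-cgroup.h, lets core code visit every
 * request_list of a queue, e.g. to wake sleepers on each rl:
 *
 *	struct request_list *rl;
 *
 *	blk_queue_for_each_rl(rl, q) {
 *		wake_up_all(&rl->wait[BLK_RW_SYNC]);
 *		wake_up_all(&rl->wait[BLK_RW_ASYNC]);
 *	}
 */
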
static int blkcg_reset_stats(struct cgroup_subsys_state *css,
			     struct cftype *cftype, u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;
	int i;

	mutex_lock(&blkcg_pol_mutex);
	spin_lock_irq(&blkcg->lock);

	/*
	 * Note that stat reset is racy - it doesn't synchronize against
	 * stat updates.  This is a debug feature which shouldn't exist
	 * anyway.  If you get hit by a race, retry.
	 */
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		blkg_rwstat_reset(&blkg->stat_bytes);
		blkg_rwstat_reset(&blkg->stat_ios);

		for (i = 0; i < BLKCG_MAX_POLS; i++) {
			struct blkcg_policy *pol = blkcg_policy[i];

			if (blkg->pd[i] && pol->pd_reset_stats_fn)
				pol->pd_reset_stats_fn(blkg->pd[i]);
		}
	}

	spin_unlock_irq(&blkcg->lock);
	mutex_unlock(&blkcg_pol_mutex);
	return 0;
}

const char *blkg_dev_name(struct blkcg_gq *blkg)
{
	/* some drivers (floppy) instantiate a queue w/o disk registered */
	if (blkg->q->backing_dev_info.dev)
		return dev_name(blkg->q->backing_dev_info.dev);
	return NULL;
}
EXPORT_SYMBOL_GPL(blkg_dev_name);

/**
 * blkcg_print_blkgs - helper for printing per-blkg data
 * @sf: seq_file to print to
 * @blkcg: blkcg of interest
 * @prfill: fill function to print out a blkg
 * @pol: policy in question
 * @data: data to be passed to @prfill
 * @show_total: to print out sum of prfill return values or not
 *
 * This function invokes @prfill on each blkg of @blkcg if pd for the
 * policy specified by @pol exists.  @prfill is invoked with @sf, the
 * policy data and @data and the matching queue lock held.  If @show_total
 * is %true, the sum of the return values from @prfill is printed with
 * "Total" label at the end.
 *
 * This is to be used to construct print functions for
 * cftype->read_seq_string method.
 */
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total)
{
	struct blkcg_gq *blkg;
	u64 total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		spin_lock_irq(blkg->q->queue_lock);
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
		spin_unlock_irq(blkg->q->queue_lock);
	}
	rcu_read_unlock();

	if (show_total)
		seq_printf(sf, "Total %llu\n", (unsigned long long)total);
}
EXPORT_SYMBOL_GPL(blkcg_print_blkgs);

/**
 * __blkg_prfill_u64 - prfill helper for a single u64 value
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @v: value to print
 *
 * Print @v to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v)
{
	const char *dname = blkg_dev_name(pd->blkg);

	if (!dname)
		return 0;

	seq_printf(sf, "%s %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_u64);

/**
 * __blkg_prfill_rwstat - prfill helper for a blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @rwstat: rwstat to print
 *
 * Print @rwstat to @sf for the device associated with @pd.
 */
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat)
{
	static const char *rwstr[] = {
		[BLKG_RWSTAT_READ]	= "Read",
		[BLKG_RWSTAT_WRITE]	= "Write",
		[BLKG_RWSTAT_SYNC]	= "Sync",
		[BLKG_RWSTAT_ASYNC]	= "Async",
	};
	const char *dname = blkg_dev_name(pd->blkg);
	u64 v;
	int i;

	if (!dname)
		return 0;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		seq_printf(sf, "%s %s %llu\n", dname, rwstr[i],
			   (unsigned long long)atomic64_read(&rwstat->aux_cnt[i]));

	v = atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_READ]) +
		atomic64_read(&rwstat->aux_cnt[BLKG_RWSTAT_WRITE]);
	seq_printf(sf, "%s Total %llu\n", dname, (unsigned long long)v);
	return v;
}
EXPORT_SYMBOL_GPL(__blkg_prfill_rwstat);

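/*
 * Example (illustrative): for a device named "8:0", the seq_printf()
 * calls above emit one line per direction plus a total, i.e.
 *
 *	8:0 Read 4096
 *	8:0 Write 8192
 *	8:0 Sync 12288
 *	8:0 Async 0
 *	8:0 Total 12288
 *
 * where Total is the sum of the Read and Write rows.
 */
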
/**
 * blkg_prfill_stat - prfill callback for blkg_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_stat in @pd
 *
 * prfill callback for printing a blkg_stat.
 */
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off)
{
	return __blkg_prfill_u64(sf, pd, blkg_stat_read((void *)pd + off));
}
EXPORT_SYMBOL_GPL(blkg_prfill_stat);

/**
 * blkg_prfill_rwstat - prfill callback for blkg_rwstat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the blkg_rwstat in @pd
 *
 * prfill callback for printing a blkg_rwstat.
 */
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
EXPORT_SYMBOL_GPL(blkg_prfill_rwstat);

static u64 blkg_prfill_rwstat_field(struct seq_file *sf,
				    struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_read((void *)pd->blkg + off);

	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/**
 * blkg_print_stat_bytes - seq_show callback for blkg->stat_bytes
 * @sf: seq_file to print to
 * @v: unused
 *
 * To be used as cftype->seq_show to print blkg->stat_bytes.
 * cftype->private must be set to the blkcg_policy.
 */
int blkg_print_stat_bytes(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_bytes), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_bytes);

/**
 * blkg_print_stat_ios - seq_show callback for blkg->stat_ios
 * @sf: seq_file to print to
 * @v: unused
 *
 * To be used as cftype->seq_show to print blkg->stat_ios.  cftype->private
 * must be set to the blkcg_policy.
 */
int blkg_print_stat_ios(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field, (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_ios), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_ios);

static u64 blkg_prfill_rwstat_field_recursive(struct seq_file *sf,
					      struct blkg_policy_data *pd,
					      int off)
{
	struct blkg_rwstat rwstat = blkg_rwstat_recursive_sum(pd->blkg,
							      NULL, off);
	return __blkg_prfill_rwstat(sf, pd, &rwstat);
}

/**
 * blkg_print_stat_bytes_recursive - recursive version of blkg_print_stat_bytes
 * @sf: seq_file to print to
 * @v: unused
 */
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field_recursive,
			  (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_bytes), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_bytes_recursive);

/**
 * blkg_print_stat_ios_recursive - recursive version of blkg_print_stat_ios
 * @sf: seq_file to print to
 * @v: unused
 */
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  blkg_prfill_rwstat_field_recursive,
			  (void *)seq_cft(sf)->private,
			  offsetof(struct blkcg_gq, stat_ios), true);
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_print_stat_ios_recursive);

/**
 * blkg_stat_recursive_sum - collect hierarchical blkg_stat
 * @blkg: blkg of interest
 * @pol: blkcg_policy which contains the blkg_stat
 * @off: offset to the blkg_stat in blkg_policy_data or @blkg
 *
 * Collect the blkg_stat specified by @blkg, @pol and @off and all its
 * online descendants and their aux counts.  The caller must be holding the
 * queue lock for online tests.
 *
 * If @pol is NULL, blkg_stat is at @off bytes into @blkg; otherwise, it is
 * at @off bytes into @blkg's blkg_policy_data of the policy.
 */
u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
			    struct blkcg_policy *pol, int off)
{
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	u64 sum = 0;

	lockdep_assert_held(blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct blkg_stat *stat;

		if (!pos_blkg->online)
			continue;

		if (pol)
			stat = (void *)blkg_to_pd(pos_blkg, pol) + off;
		else
			stat = (void *)blkg + off;

		sum += blkg_stat_read(stat) + atomic64_read(&stat->aux_cnt);
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_stat_recursive_sum);

/**
 * blkg_rwstat_recursive_sum - collect hierarchical blkg_rwstat
 * @blkg: blkg of interest
 * @pol: blkcg_policy which contains the blkg_rwstat
 * @off: offset to the blkg_rwstat in blkg_policy_data or @blkg
 *
 * Collect the blkg_rwstat specified by @blkg, @pol and @off and all its
 * online descendants and their aux counts.  The caller must be holding the
 * queue lock for online tests.
 *
 * If @pol is NULL, blkg_rwstat is at @off bytes into @blkg; otherwise, it
 * is at @off bytes into @blkg's blkg_policy_data of the policy.
 */
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
					     struct blkcg_policy *pol, int off)
{
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	struct blkg_rwstat sum = { };
	int i;

	lockdep_assert_held(blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct blkg_rwstat *rwstat;

		if (!pos_blkg->online)
			continue;

		if (pol)
			rwstat = (void *)blkg_to_pd(pos_blkg, pol) + off;
		else
			rwstat = (void *)pos_blkg + off;

		for (i = 0; i < BLKG_RWSTAT_NR; i++)
			atomic64_add(atomic64_read(&rwstat->aux_cnt[i]) +
				percpu_counter_sum_positive(&rwstat->cpu_cnt[i]),
				&sum.aux_cnt[i]);
	}
	rcu_read_unlock();

	return sum;
}
EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);

/**
 * blkg_conf_prep - parse and prepare for per-blkg config update
 * @blkcg: target block cgroup
 * @pol: target policy
 * @input: input string
 * @ctx: blkg_conf_ctx to be filled
 *
 * Parse per-blkg config update from @input and initialize @ctx with the
 * result.  @ctx->blkg points to the blkg to be updated and @ctx->body the
 * part of @input following MAJ:MIN.  This function returns with RCU read
 * lock and queue lock held and must be paired with blkg_conf_finish().
 */
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx)
	__acquires(rcu) __acquires(disk->queue->queue_lock)
{
	struct gendisk *disk;
	struct blkcg_gq *blkg;
	struct module *owner;
	unsigned int major, minor;
	int key_len, part, ret;
	char *body;

	if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
		return -EINVAL;

	body = input + key_len;
	if (!isspace(*body))
		return -EINVAL;
	body = skip_spaces(body);

	disk = get_gendisk(MKDEV(major, minor), &part);
	if (!disk)
		return -ENODEV;
	if (part) {
		owner = disk->fops->owner;
		put_disk(disk);
		module_put(owner);
		return -ENODEV;
	}

	rcu_read_lock();
	spin_lock_irq(disk->queue->queue_lock);

	if (blkcg_policy_enabled(disk->queue, pol))
		blkg = blkg_lookup_create(blkcg, disk->queue);
	else
		blkg = ERR_PTR(-EOPNOTSUPP);

	if (IS_ERR(blkg)) {
		ret = PTR_ERR(blkg);
		rcu_read_unlock();
		spin_unlock_irq(disk->queue->queue_lock);
		owner = disk->fops->owner;
		put_disk(disk);
		module_put(owner);
		/*
		 * If queue was bypassing, we should retry.  Do so after a
		 * short msleep().  It isn't strictly necessary but queue
		 * can be bypassing for some time and it's always nice to
		 * avoid busy looping.
		 */
		if (ret == -EBUSY) {
			msleep(10);
			ret = restart_syscall();
		}
		return ret;
	}

	ctx->disk = disk;
	ctx->blkg = blkg;
	ctx->body = body;
	return 0;
}
EXPORT_SYMBOL_GPL(blkg_conf_prep);

/**
 * blkg_conf_finish - finish up per-blkg config update
 * @ctx: blkg_conf_ctx initialized by blkg_conf_prep()
 *
 * Finish up after per-blkg config update.  This function must be paired
 * with blkg_conf_prep().
 */
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
	__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
	struct module *owner;

	spin_unlock_irq(ctx->disk->queue->queue_lock);
	rcu_read_unlock();
	owner = ctx->disk->fops->owner;
	put_disk(ctx->disk);
	module_put(owner);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);

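/*
 * Example (illustrative sketch): a policy's cftype write handler would
 * typically bracket its config update with the prep/finish pair above.
 * The names foo_set_limit and blkcg_policy_foo below are hypothetical:
 *
 *	static ssize_t foo_set_limit(struct kernfs_open_file *of, char *buf,
 *				     size_t nbytes, loff_t off)
 *	{
 *		struct blkcg *blkcg = css_to_blkcg(of_css(of));
 *		struct blkg_conf_ctx ctx;
 *		u64 v;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *		if (ret)
 *			return ret;
 *		if (sscanf(ctx.body, "%llu", &v) == 1)
 *			...update ctx.blkg's per-policy data with v...
 *		else
 *			ret = -EINVAL;
 *		blkg_conf_finish(&ctx);
 *		return ret ?: nbytes;
 *	}
 */
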
static int blkcg_print_stat(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct blkcg_gq *blkg;

	rcu_read_lock();

	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		const char *dname;
		struct blkg_rwstat rwstat;
		u64 rbytes, wbytes, rios, wios;

		dname = blkg_dev_name(blkg);
		if (!dname)
			continue;

		spin_lock_irq(blkg->q->queue_lock);

		rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
					offsetof(struct blkcg_gq, stat_bytes));
		rbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
		wbytes = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);

		rwstat = blkg_rwstat_recursive_sum(blkg, NULL,
					offsetof(struct blkcg_gq, stat_ios));
		rios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_READ]);
		wios = atomic64_read(&rwstat.aux_cnt[BLKG_RWSTAT_WRITE]);

		spin_unlock_irq(blkg->q->queue_lock);

		if (rbytes || wbytes || rios || wios)
			seq_printf(sf, "%s rbytes=%llu wbytes=%llu rios=%llu wios=%llu\n",
				   dname, rbytes, wbytes, rios, wios);
	}

	rcu_read_unlock();
	return 0;
}

static struct cftype blkcg_files[] = {
	{
		.name = "stat",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = blkcg_print_stat,
	},
	{ }	/* terminate */
};

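/*
 * Example (illustrative): on the default hierarchy the seq_show above
 * emits one line per device into a cgroup's io.stat file, e.g.
 *
 *	8:0 rbytes=12288 wbytes=4096 rios=3 wios=1
 */
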
static struct cftype blkcg_legacy_files[] = {
	{
		.name = "reset_stats",
		.write_u64 = blkcg_reset_stats,
	},
	{ }	/* terminate */
};

/**
 * blkcg_css_offline - cgroup css_offline callback
 * @css: css of interest
 *
 * This function is called when @css is about to go away and responsible
 * for shooting down all blkgs associated with @css.  blkgs should be
 * removed while holding both q and blkcg locks.  As blkcg lock is nested
 * inside q lock, this function performs reverse double lock dancing.
 *
 * This is the blkcg counterpart of ioc_release_fn().
 */
static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);

	spin_lock_irq(&blkcg->lock);

	while (!hlist_empty(&blkcg->blkg_list)) {
		struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
						    struct blkcg_gq, blkcg_node);
		struct request_queue *q = blkg->q;

		if (spin_trylock(q->queue_lock)) {
			blkg_destroy(blkg);
			spin_unlock(q->queue_lock);
		} else {
			spin_unlock_irq(&blkcg->lock);
			cpu_relax();
			spin_lock_irq(&blkcg->lock);
		}
	}

	spin_unlock_irq(&blkcg->lock);

	wb_blkcg_offline(blkcg);
}

static void blkcg_css_free(struct cgroup_subsys_state *css)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	int i;

	mutex_lock(&blkcg_pol_mutex);

	list_del(&blkcg->all_blkcgs_node);

	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);

	mutex_unlock(&blkcg_pol_mutex);

	kfree(blkcg);
}

static struct cgroup_subsys_state *
blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct blkcg *blkcg;
	struct cgroup_subsys_state *ret;
	int i;

	mutex_lock(&blkcg_pol_mutex);

	if (!parent_css) {
		blkcg = &blkcg_root;
	} else {
		blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
		if (!blkcg) {
			ret = ERR_PTR(-ENOMEM);
			goto free_blkcg;
		}
	}

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg_policy_data *cpd;

		/*
		 * If the policy hasn't been attached yet, wait for it
		 * to be attached before doing anything else. Otherwise,
		 * check if the policy requires any specific per-cgroup
		 * data: if it does, allocate and initialize it.
		 */
		if (!pol || !pol->cpd_alloc_fn)
			continue;

		cpd = pol->cpd_alloc_fn(GFP_KERNEL);
		if (!cpd) {
			ret = ERR_PTR(-ENOMEM);
			goto free_pd_blkcg;
		}
		blkcg->cpd[i] = cpd;
		cpd->blkcg = blkcg;
		cpd->plid = i;
		if (pol->cpd_init_fn)
			pol->cpd_init_fn(cpd);
	}

	spin_lock_init(&blkcg->lock);
	INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_NOWAIT);
	INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
	INIT_LIST_HEAD(&blkcg->cgwb_list);
#endif
	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);

	mutex_unlock(&blkcg_pol_mutex);
	return &blkcg->css;

free_pd_blkcg:
	for (i--; i >= 0; i--)
		if (blkcg->cpd[i])
			blkcg_policy[i]->cpd_free_fn(blkcg->cpd[i]);
free_blkcg:
	if (blkcg != &blkcg_root)
		kfree(blkcg);
	mutex_unlock(&blkcg_pol_mutex);
	return ret;
}

/**
 * blkcg_init_queue - initialize blkcg part of request queue
 * @q: request_queue to initialize
 *
 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
 * part of new request_queue @q.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int blkcg_init_queue(struct request_queue *q)
{
	struct blkcg_gq *new_blkg, *blkg;
	bool preloaded;
	int ret;

	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
	if (!new_blkg)
		return -ENOMEM;

	preloaded = !radix_tree_preload(GFP_KERNEL);

	/*
	 * Make sure the root blkg exists and count the existing blkgs.  As
	 * @q is bypassing at this point, blkg_lookup_create() can't be
	 * used.  Open code insertion.
	 */
	rcu_read_lock();
	spin_lock_irq(q->queue_lock);
	blkg = blkg_create(&blkcg_root, q, new_blkg);
	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();

	if (preloaded)
		radix_tree_preload_end();

	/* @new_blkg is always consumed by blkg_create(), don't free it here */
	if (IS_ERR(blkg))
		return PTR_ERR(blkg);

	q->root_blkg = blkg;
	q->root_rl.blkg = blkg;

	ret = blk_throtl_init(q);
	if (ret) {
		spin_lock_irq(q->queue_lock);
		blkg_destroy_all(q);
		spin_unlock_irq(q->queue_lock);
	}
	return ret;
}

/**
 * blkcg_drain_queue - drain blkcg part of request_queue
 * @q: request_queue to drain
 *
 * Called from blk_drain_queue().  Responsible for draining blkcg part.
 */
void blkcg_drain_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	/*
	 * @q could be exiting and already have destroyed all blkgs as
	 * indicated by NULL root_blkg.  If so, don't confuse policies.
	 */
	if (!q->root_blkg)
		return;

	blk_throtl_drain(q);
}

/**
 * blkcg_exit_queue - exit and release blkcg part of request_queue
 * @q: request_queue being released
 *
 * Called from blk_release_queue().  Responsible for exiting blkcg part.
 */
void blkcg_exit_queue(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	blkg_destroy_all(q);
	spin_unlock_irq(q->queue_lock);

	blk_throtl_exit(q);
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkcg_can_attach(struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct cgroup_subsys_state *dst_css;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, dst_css, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

static void blkcg_bind(struct cgroup_subsys_state *root_css)
{
	int i;

	mutex_lock(&blkcg_pol_mutex);

	for (i = 0; i < BLKCG_MAX_POLS; i++) {
		struct blkcg_policy *pol = blkcg_policy[i];
		struct blkcg *blkcg;

		if (!pol || !pol->cpd_bind_fn)
			continue;

		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node)
			if (blkcg->cpd[pol->plid])
				pol->cpd_bind_fn(blkcg->cpd[pol->plid]);
	}
	mutex_unlock(&blkcg_pol_mutex);
}

struct cgroup_subsys io_cgrp_subsys = {
	.css_alloc = blkcg_css_alloc,
	.css_offline = blkcg_css_offline,
	.css_free = blkcg_css_free,
	.can_attach = blkcg_can_attach,
	.bind = blkcg_bind,
	.dfl_cftypes = blkcg_files,
	.legacy_cftypes = blkcg_legacy_files,
	.legacy_name = "blkio",
#ifdef CONFIG_MEMCG
	/*
	 * This ensures that, if available, memcg is automatically enabled
	 * together on the default hierarchy so that the owner cgroup can
	 * be retrieved from writeback pages.
	 */
	.depends_on = 1 << memory_cgrp_id,
#endif
};
EXPORT_SYMBOL_GPL(io_cgrp_subsys);

/**
 * blkcg_activate_policy - activate a blkcg policy on a request_queue
 * @q: request_queue of interest
 * @pol: blkcg policy to activate
 *
 * Activate @pol on @q.  Requires %GFP_KERNEL context.  @q goes through
 * bypass mode to populate its blkgs with policy_data for @pol.
 *
 * Activation happens with @q bypassed, so nobody would be accessing blkgs
 * from IO path.  Update of each blkg is protected by both queue and blkcg
 * locks so that holding either lock and testing blkcg_policy_enabled() is
 * always enough for dereferencing policy data.
 *
 * The caller is responsible for synchronizing [de]activations and policy
 * [un]registrations.  Returns 0 on success, -errno on failure.
 */
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol)
{
	struct blkg_policy_data *pd_prealloc = NULL;
	struct blkcg_gq *blkg;
	int ret;

	if (blkcg_policy_enabled(q, pol))
		return 0;

	blk_queue_bypass_start(q);
pd_prealloc:
	if (!pd_prealloc) {
		pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q->node);
		if (!pd_prealloc) {
			ret = -ENOMEM;
			goto out_bypass_end;
		}
	}

	spin_lock_irq(q->queue_lock);

	list_for_each_entry(blkg, &q->blkg_list, q_node) {
		struct blkg_policy_data *pd;

		if (blkg->pd[pol->plid])
			continue;

		pd = pol->pd_alloc_fn(GFP_NOWAIT, q->node);
		if (!pd)
			swap(pd, pd_prealloc);
		if (!pd) {
			spin_unlock_irq(q->queue_lock);
			goto pd_prealloc;
		}

		blkg->pd[pol->plid] = pd;
		pd->blkg = blkg;
		pd->plid = pol->plid;
		if (pol->pd_init_fn)
			pol->pd_init_fn(pd);
	}

	__set_bit(pol->plid, q->blkcg_pols);
	ret = 0;

	spin_unlock_irq(q->queue_lock);
out_bypass_end:
	blk_queue_bypass_end(q);
	if (pd_prealloc)
		pol->pd_free_fn(pd_prealloc);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_activate_policy);

1269 | ||
1270 | /** | |
1271 | * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue | |
1272 | * @q: request_queue of interest | |
1273 | * @pol: blkcg policy to deactivate | |
1274 | * | |
1275 | * Deactivate @pol on @q. Follows the same synchronization rules as | |
1276 | * blkcg_activate_policy(). | |
1277 | */ | |
1278 | void blkcg_deactivate_policy(struct request_queue *q, | |
3c798398 | 1279 | const struct blkcg_policy *pol) |
a2b1693b | 1280 | { |
3c798398 | 1281 | struct blkcg_gq *blkg; |
a2b1693b TH |
1282 | |
1283 | if (!blkcg_policy_enabled(q, pol)) | |
1284 | return; | |
1285 | ||
1286 | blk_queue_bypass_start(q); | |
1287 | spin_lock_irq(q->queue_lock); | |
1288 | ||
1289 | __clear_bit(pol->plid, q->blkcg_pols); | |
1290 | ||
1291 | list_for_each_entry(blkg, &q->blkg_list, q_node) { | |
1292 | /* grab blkcg lock too while removing @pd from @blkg */ | |
1293 | spin_lock(&blkg->blkcg->lock); | |
1294 | ||
001bea73 | 1295 | if (blkg->pd[pol->plid]) { |
a9520cd6 TH |
1296 | if (pol->pd_offline_fn) |
1297 | pol->pd_offline_fn(blkg->pd[pol->plid]); | |
001bea73 TH |
1298 | pol->pd_free_fn(blkg->pd[pol->plid]); |
1299 | blkg->pd[pol->plid] = NULL; | |
1300 | } | |
a2b1693b TH |
1301 | |
1302 | spin_unlock(&blkg->blkcg->lock); | |
1303 | } | |
1304 | ||
1305 | spin_unlock_irq(q->queue_lock); | |
1306 | blk_queue_bypass_end(q); | |
1307 | } | |
1308 | EXPORT_SYMBOL_GPL(blkcg_deactivate_policy); | |
1309 | ||
/**
 * blkcg_policy_register - register a blkcg policy
 * @pol: blkcg policy to register
 *
 * Register @pol with blkcg core.  Might sleep and @pol may be modified on
 * successful registration.  Returns 0 on success and -errno on failure.
 */
int blkcg_policy_register(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;
	int i, ret;

	mutex_lock(&blkcg_pol_register_mutex);
	mutex_lock(&blkcg_pol_mutex);

	/* find an empty slot */
	ret = -ENOSPC;
	for (i = 0; i < BLKCG_MAX_POLS; i++)
		if (!blkcg_policy[i])
			break;
	if (i >= BLKCG_MAX_POLS)
		goto err_unlock;

	/* register @pol */
	pol->plid = i;
	blkcg_policy[pol->plid] = pol;

	/* allocate and install cpd's */
	if (pol->cpd_alloc_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			struct blkcg_policy_data *cpd;

			cpd = pol->cpd_alloc_fn(GFP_KERNEL);
			if (!cpd)
				goto err_free_cpds;

			blkcg->cpd[pol->plid] = cpd;
			cpd->blkcg = blkcg;
			cpd->plid = pol->plid;
			pol->cpd_init_fn(cpd);
		}
	}

	mutex_unlock(&blkcg_pol_mutex);

	/* everything is in place, add intf files for the new policy */
	if (pol->dfl_cftypes)
		WARN_ON(cgroup_add_dfl_cftypes(&io_cgrp_subsys,
					       pol->dfl_cftypes));
	if (pol->legacy_cftypes)
		WARN_ON(cgroup_add_legacy_cftypes(&io_cgrp_subsys,
						  pol->legacy_cftypes));
	mutex_unlock(&blkcg_pol_register_mutex);
	return 0;

err_free_cpds:
	if (pol->cpd_alloc_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			if (blkcg->cpd[pol->plid]) {
				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
				blkcg->cpd[pol->plid] = NULL;
			}
		}
	}
	blkcg_policy[pol->plid] = NULL;
err_unlock:
	mutex_unlock(&blkcg_pol_mutex);
	mutex_unlock(&blkcg_pol_register_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(blkcg_policy_register);

/**
 * blkcg_policy_unregister - unregister a blkcg policy
 * @pol: blkcg policy to unregister
 *
 * Undo blkcg_policy_register(@pol).  Might sleep.
 */
void blkcg_policy_unregister(struct blkcg_policy *pol)
{
	struct blkcg *blkcg;

	mutex_lock(&blkcg_pol_register_mutex);

	if (WARN_ON(blkcg_policy[pol->plid] != pol))
		goto out_unlock;

	/* kill the intf files first */
	if (pol->dfl_cftypes)
		cgroup_rm_cftypes(pol->dfl_cftypes);
	if (pol->legacy_cftypes)
		cgroup_rm_cftypes(pol->legacy_cftypes);

	/* remove cpds and unregister */
	mutex_lock(&blkcg_pol_mutex);

	if (pol->cpd_alloc_fn) {
		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
			if (blkcg->cpd[pol->plid]) {
				pol->cpd_free_fn(blkcg->cpd[pol->plid]);
				blkcg->cpd[pol->plid] = NULL;
			}
		}
	}
	blkcg_policy[pol->plid] = NULL;

	mutex_unlock(&blkcg_pol_mutex);
out_unlock:
	mutex_unlock(&blkcg_pol_register_mutex);
}
EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
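
/*
 * Example (illustrative sketch): registration is typically done once at
 * module init and paired with unregistration at exit; the foo names are
 * hypothetical:
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.dfl_cftypes	= foo_dfl_files,
 *		.legacy_cftypes	= foo_legacy_files,
 *		.pd_alloc_fn	= foo_pd_alloc,
 *		.pd_init_fn	= foo_pd_init,
 *		.pd_free_fn	= foo_pd_free,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return blkcg_policy_register(&blkcg_policy_foo);
 *	}
 *
 *	static void __exit foo_exit(void)
 *	{
 *		blkcg_policy_unregister(&blkcg_policy_foo);
 *	}
 *
 *	module_init(foo_init);
 *	module_exit(foo_exit);
 */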