/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <[email protected]>
 *
 * Copyright (C) 2008 Fabio Checconi <[email protected]>
 *		      Paolo Valente <[email protected]>
 *
 * Copyright (C) 2009 Vivek Goyal <[email protected]>
 *		      Nauman Rafique <[email protected]>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include "blk-cgroup.h"

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

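/* Root blkio cgroup; set up with twice the default weight. */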
struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
						  struct cgroup *);
static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
			      struct task_struct *, bool);
static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
			   struct cgroup *, struct task_struct *, bool);
static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
	.populate = blkiocg_populate,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.use_id = 1,
	.module = THIS_MODULE,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

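/* Account time serviced and sectors transferred to @blkg. */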
void blkiocg_update_blkio_group_stats(struct blkio_group *blkg,
				      unsigned long time, unsigned long sectors)
{
	blkg->time += time;
	blkg->sectors += sectors;
}
EXPORT_SYMBOL_GPL(blkiocg_update_blkio_group_stats);

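/*
 * Link @blkg into @blkcg under blkcg->lock. @key is the opaque per-queue
 * cookie later matched by blkiocg_lookup_group(); it is published with
 * rcu_assign_pointer() so lockless lookups see it fully initialized.
 */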
void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
			     struct blkio_group *blkg, void *key, dev_t dev)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	spin_unlock_irqrestore(&blkcg->lock, flags);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Need to take css reference? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
#endif
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

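/*
 * Must be called with blkcg->lock held. hlist_del_init_rcu() leaves the
 * node "unhashed", which blkiocg_del_blkio_group() checks to tell whether
 * the group was already removed.
 */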
static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * Returns 0 if blkio_group was still on the cgroup list. Otherwise returns 1
 * indicating that the blkio_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (!css)
		goto out;

	blkcg = container_of(css, struct blkio_cgroup, css);
	spin_lock_irqsave(&blkcg->lock, flags);
	if (!hlist_unhashed(&blkg->blkcg_node)) {
		__blkiocg_del_blkio_group(blkg);
		ret = 0;
	}
	spin_unlock_irqrestore(&blkcg->lock, flags);
out:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);

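/*
 * Generate a cftype read_u64 handler, blkiocg_<var>_read(), that returns
 * the named blkio_cgroup field; instantiated below for "weight".
 */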
#define SHOW_FUNCTION(__VAR)						\
static u64 blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
				  struct cftype *cftype)		\
{									\
	struct blkio_cgroup *blkcg;					\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	return (u64)blkcg->__VAR;					\
}

SHOW_FUNCTION(weight);
#undef SHOW_FUNCTION

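/*
 * blkio_list_lock is taken before blkcg->lock so the set of registered
 * policies stays stable while the new weight is pushed down to every
 * group.
 */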
static int
blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_type *blkiop;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		list_for_each_entry(blkiop, &blkio_list, list)
			blkiop->ops.blkio_update_group_weight_fn(blkg,
								 blkcg->weight);
	}
	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

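/*
 * Generate a seq_file read handler that emits one "major:minor <value>"
 * line per blkio_group in the cgroup; groups with no device are skipped.
 */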
#define SHOW_FUNCTION_PER_GROUP(__VAR)					\
static int blkiocg_##__VAR##_read(struct cgroup *cgroup,		\
				  struct cftype *cftype, struct seq_file *m) \
{									\
	struct blkio_cgroup *blkcg;					\
	struct blkio_group *blkg;					\
	struct hlist_node *n;						\
									\
	if (!cgroup_lock_live_group(cgroup))				\
		return -ENODEV;						\
									\
	blkcg = cgroup_to_blkio_cgroup(cgroup);				\
	rcu_read_lock();						\
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {\
		if (blkg->dev)						\
			seq_printf(m, "%u:%u %lu\n", MAJOR(blkg->dev),	\
				   MINOR(blkg->dev), blkg->__VAR);	\
	}								\
	rcu_read_unlock();						\
	cgroup_unlock();						\
	return 0;							\
}

SHOW_FUNCTION_PER_GROUP(time);
SHOW_FUNCTION_PER_GROUP(sectors);
#ifdef CONFIG_DEBUG_BLK_CGROUP
SHOW_FUNCTION_PER_GROUP(dequeue);
#endif
#undef SHOW_FUNCTION_PER_GROUP

#ifdef CONFIG_DEBUG_BLK_CGROUP
void blkiocg_update_blkio_group_dequeue_stats(struct blkio_group *blkg,
					      unsigned long dequeue)
{
	blkg->dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_blkio_group_dequeue_stats);
#endif

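/* Control files added to each blkio cgroup directory by blkiocg_populate(). */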
struct cftype blkio_files[] = {
	{
		.name = "weight",
		.read_u64 = blkiocg_weight_read,
		.write_u64 = blkiocg_weight_write,
	},
	{
		.name = "time",
		.read_seq_string = blkiocg_time_read,
	},
	{
		.name = "sectors",
		.read_seq_string = blkiocg_sectors_read,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "dequeue",
		.read_seq_string = blkiocg_dequeue_read,
	},
#endif
};

static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, subsys, blkio_files,
				ARRAY_SIZE(blkio_files));
}

static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;

	rcu_read_lock();
remove_entry:
	spin_lock_irqsave(&blkcg->lock, flags);

	if (hlist_empty(&blkcg->blkg_list)) {
		spin_unlock_irqrestore(&blkcg->lock, flags);
		goto done;
	}

	blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
			   blkcg_node);
	key = rcu_dereference(blkg->key);
	__blkiocg_del_blkio_group(blkg);

	spin_unlock_irqrestore(&blkcg->lock, flags);

	/*
	 * This blkio_group is being unlinked as the associated cgroup is
	 * going away. Let all the IO controlling policies know about this
	 * event; every policy registered on blkio_list gets its unlink
	 * callback invoked.
	 */
	spin_lock(&blkio_list_lock);
	list_for_each_entry(blkiop, &blkio_list, list)
		blkiop->ops.blkio_unlink_group_fn(key, blkg);
	spin_unlock(&blkio_list_lock);
	goto remove_entry;
done:
	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}

static struct cgroup_subsys_state *
blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg, *parent_blkcg;

	if (!cgroup->parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	/* Currently we do not support hierarchies deeper than two levels (0,1) */
	parent_blkcg = cgroup_to_blkio_cgroup(cgroup->parent);
	if (css_depth(&parent_blkcg->css) > 0)
		return ERR_PTR(-EINVAL);

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures. For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup_subsys *subsys,
			      struct cgroup *cgroup, struct task_struct *tsk,
			      bool threadgroup)
{
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc && atomic_read(&ioc->nr_tasks) > 1)
		ret = -EINVAL;
	task_unlock(tsk);

	return ret;
}

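/*
 * Flag the io_context so the IO scheduler re-evaluates the task's cgroup
 * association on its next IO.
 */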
static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup,
			   struct cgroup *prev, struct task_struct *tsk,
			   bool threadgroup)
{
	struct io_context *ioc;

	task_lock(tsk);
	ioc = tsk->io_context;
	if (ioc)
		ioc->cgroup_changed = 1;
	task_unlock(tsk);
}

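/*
 * IO controlling policies register themselves here at init time and
 * unregister on exit; blkiocg_weight_write() and blkiocg_destroy() walk
 * the resulting list. A minimal sketch, assuming a hypothetical policy
 * with foo_unlink_group() and foo_update_weight() callbacks:
 *
 *	static struct blkio_policy_type blkio_policy_foo = {
 *		.ops = {
 *			.blkio_unlink_group_fn		= foo_unlink_group,
 *			.blkio_update_group_weight_fn	= foo_update_weight,
 *		},
 *	};
 *
 *	blkio_policy_register(&blkio_policy_foo);	(at module init)
 *	blkio_policy_unregister(&blkio_policy_foo);	(at module exit)
 */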
void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);

static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");