2 * Common Block IO controller cgroup interface
4 * Based on ideas and code from CFQ, CFS and BFQ:
13 #include <linux/ioprio.h>
14 #include <linux/seq_file.h>
15 #include <linux/kdev_t.h>
16 #include <linux/module.h>
17 #include <linux/err.h>
18 #include <linux/blkdev.h>
19 #include <linux/slab.h>
20 #include <linux/genhd.h>
21 #include <linux/delay.h>
22 #include "blk-cgroup.h"
25 #define MAX_KEY_LEN 100
27 static DEFINE_SPINLOCK(blkio_list_lock);
28 static LIST_HEAD(blkio_list);
30 static DEFINE_MUTEX(all_q_mutex);
31 static LIST_HEAD(all_q_list);
33 struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
34 EXPORT_SYMBOL_GPL(blkio_root_cgroup);
36 static struct blkio_policy_type *blkio_policy[BLKIO_NR_POLICIES];
38 static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *,
40 static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *,
41 struct cgroup_taskset *);
42 static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *,
43 struct cgroup_taskset *);
44 static int blkiocg_pre_destroy(struct cgroup_subsys *, struct cgroup *);
45 static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *);
46 static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *);
48 /* for encoding cft->private value on file */
49 #define BLKIOFILE_PRIVATE(x, val) (((x) << 16) | (val))
50 /* Which policy owns the file: proportional or throttle */
51 #define BLKIOFILE_POLICY(val) (((val) >> 16) & 0xffff)
52 #define BLKIOFILE_ATTR(val) ((val) & 0xffff)
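/*
 * Illustrative sketch (not part of the original file): cft->private packs
 * the owning policy in the upper 16 bits and the per-policy file attribute
 * in the lower 16 bits, e.g.
 *
 *	priv = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
 *				 BLKIO_THROTL_read_bps_device);
 *	BLKIOFILE_POLICY(priv)	yields BLKIO_POLICY_THROTL
 *	BLKIOFILE_ATTR(priv)	yields BLKIO_THROTL_read_bps_device
 */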
54 struct cgroup_subsys blkio_subsys = {
56 .create = blkiocg_create,
57 .can_attach = blkiocg_can_attach,
58 .attach = blkiocg_attach,
59 .pre_destroy = blkiocg_pre_destroy,
60 .destroy = blkiocg_destroy,
61 .populate = blkiocg_populate,
62 .subsys_id = blkio_subsys_id,
63 .module = THIS_MODULE,
65 EXPORT_SYMBOL_GPL(blkio_subsys);
67 struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
69 return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
70 struct blkio_cgroup, css);
72 EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);
74 struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
76 return container_of(task_subsys_state(tsk, blkio_subsys_id),
77 struct blkio_cgroup, css);
79 EXPORT_SYMBOL_GPL(task_blkio_cgroup);
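/*
 * Usage sketch (an assumption, mirroring how the css lookups above are
 * typically consumed): a task's group is resolved under RCU, e.g.
 *
 *	rcu_read_lock();
 *	blkcg = task_blkio_cgroup(current);
 *	... use blkcg while the RCU read lock is held ...
 *	rcu_read_unlock();
 */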
81 static inline void blkio_update_group_weight(struct blkio_group *blkg,
82 int plid, unsigned int weight)
84 struct blkio_policy_type *blkiop;
86 list_for_each_entry(blkiop, &blkio_list, list) {
87 /* If this policy does not own the blkg, do not send updates */
88 if (blkiop->plid != plid)
90 if (blkiop->ops.blkio_update_group_weight_fn)
91 blkiop->ops.blkio_update_group_weight_fn(blkg->q,
96 static inline void blkio_update_group_bps(struct blkio_group *blkg, int plid,
99 struct blkio_policy_type *blkiop;
101 list_for_each_entry(blkiop, &blkio_list, list) {
103 /* If this policy does not own the blkg, do not send updates */
104 if (blkiop->plid != plid)
107 if (fileid == BLKIO_THROTL_read_bps_device
108 && blkiop->ops.blkio_update_group_read_bps_fn)
109 blkiop->ops.blkio_update_group_read_bps_fn(blkg->q,
112 if (fileid == BLKIO_THROTL_write_bps_device
113 && blkiop->ops.blkio_update_group_write_bps_fn)
114 blkiop->ops.blkio_update_group_write_bps_fn(blkg->q,
119 static inline void blkio_update_group_iops(struct blkio_group *blkg,
120 int plid, unsigned int iops,
123 struct blkio_policy_type *blkiop;
125 list_for_each_entry(blkiop, &blkio_list, list) {
127 /* If this policy does not own the blkg, do not send updates */
128 if (blkiop->plid != plid)
131 if (fileid == BLKIO_THROTL_read_iops_device
132 && blkiop->ops.blkio_update_group_read_iops_fn)
133 blkiop->ops.blkio_update_group_read_iops_fn(blkg->q,
136 if (fileid == BLKIO_THROTL_write_iops_device
137 && blkiop->ops.blkio_update_group_write_iops_fn)
138 blkiop->ops.blkio_update_group_write_iops_fn(blkg->q,
144 * Add to the appropriate stat variable depending on the request type.
145 * This should be called with the blkg->stats_lock held.
147 static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
151 stat[BLKIO_STAT_WRITE] += add;
153 stat[BLKIO_STAT_READ] += add;
155 stat[BLKIO_STAT_SYNC] += add;
157 stat[BLKIO_STAT_ASYNC] += add;
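/*
 * Example (a sketch; assumes direction == true means a write, as in the
 * callers that pass the request's data direction): accounting one queued
 * synchronous write,
 *
 *	blkio_add_stat(stats->stat_arr[BLKIO_STAT_QUEUED], 1, true, true);
 *
 * increments both the BLKIO_STAT_WRITE and BLKIO_STAT_SYNC buckets.
 */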
161 * Decrements the appropriate stat variable if non-zero depending on the
162 * request type. Panics on value being zero.
163 * This should be called with the blkg->stats_lock held.
165 static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
168 BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
169 stat[BLKIO_STAT_WRITE]--;
171 BUG_ON(stat[BLKIO_STAT_READ] == 0);
172 stat[BLKIO_STAT_READ]--;
175 BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
176 stat[BLKIO_STAT_SYNC]--;
178 BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
179 stat[BLKIO_STAT_ASYNC]--;
183 #ifdef CONFIG_DEBUG_BLK_CGROUP
184 /* This should be called with the blkg->stats_lock held. */
185 static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
186 struct blkio_policy_type *pol,
187 struct blkio_group *curr_blkg)
189 struct blkg_policy_data *pd = blkg->pd[pol->plid];
191 if (blkio_blkg_waiting(&pd->stats))
193 if (blkg == curr_blkg)
195 pd->stats.start_group_wait_time = sched_clock();
196 blkio_mark_blkg_waiting(&pd->stats);
199 /* This should be called with the blkg->stats_lock held. */
200 static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
202 unsigned long long now;
204 if (!blkio_blkg_waiting(stats))
208 if (time_after64(now, stats->start_group_wait_time))
209 stats->group_wait_time += now - stats->start_group_wait_time;
210 blkio_clear_blkg_waiting(stats);
213 /* This should be called with the blkg->stats_lock held. */
214 static void blkio_end_empty_time(struct blkio_group_stats *stats)
216 unsigned long long now;
218 if (!blkio_blkg_empty(stats))
222 if (time_after64(now, stats->start_empty_time))
223 stats->empty_time += now - stats->start_empty_time;
224 blkio_clear_blkg_empty(stats);
227 void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg,
228 struct blkio_policy_type *pol)
230 struct blkg_policy_data *pd = blkg->pd[pol->plid];
233 spin_lock_irqsave(&blkg->stats_lock, flags);
234 BUG_ON(blkio_blkg_idling(&pd->stats));
235 pd->stats.start_idle_time = sched_clock();
236 blkio_mark_blkg_idling(&pd->stats);
237 spin_unlock_irqrestore(&blkg->stats_lock, flags);
239 EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);
241 void blkiocg_update_idle_time_stats(struct blkio_group *blkg,
242 struct blkio_policy_type *pol)
244 struct blkg_policy_data *pd = blkg->pd[pol->plid];
246 unsigned long long now;
247 struct blkio_group_stats *stats;
249 spin_lock_irqsave(&blkg->stats_lock, flags);
251 if (blkio_blkg_idling(stats)) {
253 if (time_after64(now, stats->start_idle_time))
254 stats->idle_time += now - stats->start_idle_time;
255 blkio_clear_blkg_idling(stats);
257 spin_unlock_irqrestore(&blkg->stats_lock, flags);
259 EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);
261 void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg,
262 struct blkio_policy_type *pol)
264 struct blkg_policy_data *pd = blkg->pd[pol->plid];
266 struct blkio_group_stats *stats;
268 spin_lock_irqsave(&blkg->stats_lock, flags);
270 stats->avg_queue_size_sum +=
271 stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
272 stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
273 stats->avg_queue_size_samples++;
274 blkio_update_group_wait_time(stats);
275 spin_unlock_irqrestore(&blkg->stats_lock, flags);
277 EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);
279 void blkiocg_set_start_empty_time(struct blkio_group *blkg,
280 struct blkio_policy_type *pol)
282 struct blkg_policy_data *pd = blkg->pd[pol->plid];
284 struct blkio_group_stats *stats;
286 spin_lock_irqsave(&blkg->stats_lock, flags);
289 if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
290 stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
291 spin_unlock_irqrestore(&blkg->stats_lock, flags);
296 * group is already marked empty. This can happen if cfqq got a new
297 * request in the parent group and moved to this group while being added
298 * to the service tree. Just ignore the event and move on.
300 if (blkio_blkg_empty(stats)) {
301 spin_unlock_irqrestore(&blkg->stats_lock, flags);
305 stats->start_empty_time = sched_clock();
306 blkio_mark_blkg_empty(stats);
307 spin_unlock_irqrestore(&blkg->stats_lock, flags);
309 EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);
311 void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
312 struct blkio_policy_type *pol,
313 unsigned long dequeue)
315 struct blkg_policy_data *pd = blkg->pd[pol->plid];
317 pd->stats.dequeue += dequeue;
319 EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
321 static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
322 struct blkio_policy_type *pol,
323 struct blkio_group *curr_blkg) { }
324 static inline void blkio_end_empty_time(struct blkio_group_stats *stats) { }
327 void blkiocg_update_io_add_stats(struct blkio_group *blkg,
328 struct blkio_policy_type *pol,
329 struct blkio_group *curr_blkg, bool direction,
332 struct blkg_policy_data *pd = blkg->pd[pol->plid];
335 spin_lock_irqsave(&blkg->stats_lock, flags);
336 blkio_add_stat(pd->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
338 blkio_end_empty_time(&pd->stats);
339 blkio_set_start_group_wait_time(blkg, pol, curr_blkg);
340 spin_unlock_irqrestore(&blkg->stats_lock, flags);
342 EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);
344 void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
345 struct blkio_policy_type *pol,
346 bool direction, bool sync)
348 struct blkg_policy_data *pd = blkg->pd[pol->plid];
351 spin_lock_irqsave(&blkg->stats_lock, flags);
352 blkio_check_and_dec_stat(pd->stats.stat_arr[BLKIO_STAT_QUEUED],
354 spin_unlock_irqrestore(&blkg->stats_lock, flags);
356 EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);
358 void blkiocg_update_timeslice_used(struct blkio_group *blkg,
359 struct blkio_policy_type *pol,
361 unsigned long unaccounted_time)
363 struct blkg_policy_data *pd = blkg->pd[pol->plid];
366 spin_lock_irqsave(&blkg->stats_lock, flags);
367 pd->stats.time += time;
368 #ifdef CONFIG_DEBUG_BLK_CGROUP
369 pd->stats.unaccounted_time += unaccounted_time;
371 spin_unlock_irqrestore(&blkg->stats_lock, flags);
373 EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);
376 * should be called under rcu read lock or queue lock to make sure blkg pointer is valid.
379 void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
380 struct blkio_policy_type *pol,
381 uint64_t bytes, bool direction, bool sync)
383 struct blkg_policy_data *pd = blkg->pd[pol->plid];
384 struct blkio_group_stats_cpu *stats_cpu;
388 * Disabling interrupts to provide mutual exclusion between two
389 * writes on the same cpu. It probably is not needed for 64bit. Not
390 * optimizing that case yet.
392 local_irq_save(flags);
394 stats_cpu = this_cpu_ptr(pd->stats_cpu);
396 u64_stats_update_begin(&stats_cpu->syncp);
397 stats_cpu->sectors += bytes >> 9;
398 blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
400 blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
401 bytes, direction, sync);
402 u64_stats_update_end(&stats_cpu->syncp);
403 local_irq_restore(flags);
405 EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
407 void blkiocg_update_completion_stats(struct blkio_group *blkg,
408 struct blkio_policy_type *pol,
410 uint64_t io_start_time, bool direction,
413 struct blkg_policy_data *pd = blkg->pd[pol->plid];
414 struct blkio_group_stats *stats;
416 unsigned long long now = sched_clock();
418 spin_lock_irqsave(&blkg->stats_lock, flags);
420 if (time_after64(now, io_start_time))
421 blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
422 now - io_start_time, direction, sync);
423 if (time_after64(io_start_time, start_time))
424 blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
425 io_start_time - start_time, direction, sync);
426 spin_unlock_irqrestore(&blkg->stats_lock, flags);
428 EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);
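/*
 * Worked example (sketch): for a request queued at start_time, dispatched
 * at io_start_time and completing at now,
 *
 *	wait_time    += io_start_time - start_time;
 *	service_time += now - io_start_time;
 *
 * with each delta further split by direction/sync via blkio_add_stat().
 */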
430 /* Merged stats are per cpu. */
431 void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
432 struct blkio_policy_type *pol,
433 bool direction, bool sync)
435 struct blkg_policy_data *pd = blkg->pd[pol->plid];
436 struct blkio_group_stats_cpu *stats_cpu;
440 * Disabling interrupts to provide mutual exclusion between two
441 * writes on the same cpu. It probably is not needed for 64bit. Not
442 * optimizing that case yet.
444 local_irq_save(flags);
446 stats_cpu = this_cpu_ptr(pd->stats_cpu);
448 u64_stats_update_begin(&stats_cpu->syncp);
449 blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
451 u64_stats_update_end(&stats_cpu->syncp);
452 local_irq_restore(flags);
454 EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);
457 * blkg_free - free a blkg
458 * @blkg: blkg to free
460 * Free @blkg which may be partially allocated.
462 static void blkg_free(struct blkio_group *blkg)
464 struct blkg_policy_data *pd;
469 pd = blkg->pd[blkg->plid];
471 free_percpu(pd->stats_cpu);
478 * blkg_alloc - allocate a blkg
479 * @blkcg: block cgroup the new blkg is associated with
480 * @q: request_queue the new blkg is associated with
481 * @pol: policy the new blkg is associated with
483 * Allocate a new blkg associating @blkcg and @q for @pol.
485 * FIXME: Should be called with queue locked but currently isn't due to
486 * percpu stat breakage.
488 static struct blkio_group *blkg_alloc(struct blkio_cgroup *blkcg,
489 struct request_queue *q,
490 struct blkio_policy_type *pol)
492 struct blkio_group *blkg;
493 struct blkg_policy_data *pd;
495 /* alloc and init base part */
496 blkg = kzalloc_node(sizeof(*blkg), GFP_ATOMIC, q->node);
500 spin_lock_init(&blkg->stats_lock);
501 rcu_assign_pointer(blkg->q, q);
503 blkg->plid = pol->plid;
505 cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
507 /* alloc per-policy data and attach it to blkg */
508 pd = kzalloc_node(sizeof(*pd) + pol->pdata_size, GFP_ATOMIC,
515 blkg->pd[pol->plid] = pd;
518 /* broken, read comment in the callsite */
520 pd->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
521 if (!pd->stats_cpu) {
526 /* invoke per-policy init */
527 pol->ops.blkio_init_group_fn(blkg);
531 struct blkio_group *blkg_lookup_create(struct blkio_cgroup *blkcg,
532 struct request_queue *q,
533 enum blkio_policy_id plid,
535 __releases(q->queue_lock) __acquires(q->queue_lock)
537 struct blkio_policy_type *pol = blkio_policy[plid];
538 struct blkio_group *blkg, *new_blkg;
540 WARN_ON_ONCE(!rcu_read_lock_held());
541 lockdep_assert_held(q->queue_lock);
544 * This could be the first entry point of blkcg implementation and
545 * we shouldn't allow anything to go through for a bypassing queue.
546 * The following can be removed if blkg lookup is guaranteed to
547 * fail on a bypassing queue.
549 if (unlikely(blk_queue_bypass(q)) && !for_root)
550 return ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
552 blkg = blkg_lookup(blkcg, q, plid);
556 /* blkg holds a reference to blkcg */
557 if (!css_tryget(&blkcg->css))
558 return ERR_PTR(-EINVAL);
561 * Allocate and initialize.
563 * FIXME: The following is broken. Percpu memory allocation
564 * requires %GFP_KERNEL context and can't be performed from IO
565 * path. Allocation here should inherently be atomic and the
566 * following lock dancing can be removed once the broken percpu
567 * allocation is fixed.
569 spin_unlock_irq(q->queue_lock);
572 new_blkg = blkg_alloc(blkcg, q, pol);
575 spin_lock_irq(q->queue_lock);
577 /* did bypass get turned on in between? */
578 if (unlikely(blk_queue_bypass(q)) && !for_root) {
579 blkg = ERR_PTR(blk_queue_dead(q) ? -EINVAL : -EBUSY);
583 /* did someone beat us to it? */
584 blkg = blkg_lookup(blkcg, q, plid);
588 /* did alloc fail? */
589 if (unlikely(!new_blkg)) {
590 blkg = ERR_PTR(-ENOMEM);
595 spin_lock(&blkcg->lock);
596 swap(blkg, new_blkg);
597 hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
598 pol->ops.blkio_link_group_fn(q, blkg);
599 spin_unlock(&blkcg->lock);
604 EXPORT_SYMBOL_GPL(blkg_lookup_create);
606 static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
608 hlist_del_init_rcu(&blkg->blkcg_node);
612 * Returns 0 if blkio_group was still on the cgroup list. Otherwise returns 1,
613 * indicating that the blkio_group was unhashed by the time we got to it.
615 int blkiocg_del_blkio_group(struct blkio_group *blkg)
617 struct blkio_cgroup *blkcg = blkg->blkcg;
621 spin_lock_irqsave(&blkcg->lock, flags);
622 if (!hlist_unhashed(&blkg->blkcg_node)) {
623 __blkiocg_del_blkio_group(blkg);
626 spin_unlock_irqrestore(&blkcg->lock, flags);
630 EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);
632 /* called under rcu_read_lock(). */
633 struct blkio_group *blkg_lookup(struct blkio_cgroup *blkcg,
634 struct request_queue *q,
635 enum blkio_policy_id plid)
637 struct blkio_group *blkg;
638 struct hlist_node *n;
640 hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node)
641 if (blkg->q == q && blkg->plid == plid)
645 EXPORT_SYMBOL_GPL(blkg_lookup);
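/*
 * Usage sketch (an assumption based on the comment above): lookups happen
 * under rcu_read_lock(), e.g.
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q, BLKIO_POLICY_PROP);
 *	if (blkg)
 *		... only RCU-safe fields of blkg may be read here ...
 *	rcu_read_unlock();
 */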
647 void blkg_destroy_all(struct request_queue *q)
649 struct blkio_policy_type *pol;
654 spin_lock(&blkio_list_lock);
655 spin_lock_irq(q->queue_lock);
658 * clear_queue_fn() might return with non-empty group list
659 * if it raced cgroup removal and lost. cgroup removal is
660 * guaranteed to make forward progress and retrying after a
661 * while is enough. This ugliness is scheduled to be
662 * removed after locking update.
664 list_for_each_entry(pol, &blkio_list, list)
665 if (!pol->ops.blkio_clear_queue_fn(q))
668 spin_unlock_irq(q->queue_lock);
669 spin_unlock(&blkio_list_lock);
674 msleep(10); /* just some random duration I like */
678 static void blkg_rcu_free(struct rcu_head *rcu_head)
680 blkg_free(container_of(rcu_head, struct blkio_group, rcu_head));
683 void __blkg_release(struct blkio_group *blkg)
685 /* release the extra blkcg reference this blkg has been holding */
686 css_put(&blkg->blkcg->css);
689 * A group is freed in rcu manner. But having an rcu lock does not
690 * mean that one can access all the fields of blkg and assume these
691 * are valid. For example, don't try to follow throtl_data and
692 * request queue links.
694 * Having a reference to blkg under an rcu allows access only to
695 * values local to groups like group stats and group rate limits.
697 call_rcu(&blkg->rcu_head, blkg_rcu_free);
699 EXPORT_SYMBOL_GPL(__blkg_release);
701 static void blkio_reset_stats_cpu(struct blkio_group *blkg, int plid)
703 struct blkg_policy_data *pd = blkg->pd[plid];
704 struct blkio_group_stats_cpu *stats_cpu;
707 * Note: On 64 bit arch this should not be an issue. This has the
708 * possibility of returning some inconsistent value on 32bit arch
709 * as 64bit update on 32bit is non atomic. Taking care of this
710 * corner case makes code very complicated, like sending IPIs to
711 * cpus, taking care of stats of offline cpus etc.
713 * Reset stats is anyway more of a debug feature and this sounds like a
714 * corner case. So I am not complicating the code yet until and
715 * unless this becomes a real issue.
717 for_each_possible_cpu(i) {
718 stats_cpu = per_cpu_ptr(pd->stats_cpu, i);
719 stats_cpu->sectors = 0;
720 for (j = 0; j < BLKIO_STAT_CPU_NR; j++)
721 for (k = 0; k < BLKIO_STAT_TOTAL; k++)
722 stats_cpu->stat_arr_cpu[j][k] = 0;
727 blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
729 struct blkio_cgroup *blkcg;
730 struct blkio_group *blkg;
731 struct blkio_group_stats *stats;
732 struct hlist_node *n;
733 uint64_t queued[BLKIO_STAT_TOTAL];
735 #ifdef CONFIG_DEBUG_BLK_CGROUP
736 bool idling, waiting, empty;
737 unsigned long long now = sched_clock();
740 blkcg = cgroup_to_blkio_cgroup(cgroup);
741 spin_lock_irq(&blkcg->lock);
742 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
743 struct blkg_policy_data *pd = blkg->pd[blkg->plid];
745 spin_lock(&blkg->stats_lock);
747 #ifdef CONFIG_DEBUG_BLK_CGROUP
748 idling = blkio_blkg_idling(stats);
749 waiting = blkio_blkg_waiting(stats);
750 empty = blkio_blkg_empty(stats);
752 for (i = 0; i < BLKIO_STAT_TOTAL; i++)
753 queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
754 memset(stats, 0, sizeof(struct blkio_group_stats));
755 for (i = 0; i < BLKIO_STAT_TOTAL; i++)
756 stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
757 #ifdef CONFIG_DEBUG_BLK_CGROUP
759 blkio_mark_blkg_idling(stats);
760 stats->start_idle_time = now;
763 blkio_mark_blkg_waiting(stats);
764 stats->start_group_wait_time = now;
767 blkio_mark_blkg_empty(stats);
768 stats->start_empty_time = now;
771 spin_unlock(&blkg->stats_lock);
773 /* Reset per-cpu stats which don't take blkg->stats_lock */
774 blkio_reset_stats_cpu(blkg, blkg->plid);
777 spin_unlock_irq(&blkcg->lock);
781 static void blkio_get_key_name(enum stat_sub_type type, const char *dname,
782 char *str, int chars_left, bool diskname_only)
784 snprintf(str, chars_left, "%s", dname);
785 chars_left -= strlen(str);
786 if (chars_left <= 0) {
788 "Possibly incorrect cgroup stat display format");
794 case BLKIO_STAT_READ:
795 strlcat(str, " Read", chars_left);
797 case BLKIO_STAT_WRITE:
798 strlcat(str, " Write", chars_left);
800 case BLKIO_STAT_SYNC:
801 strlcat(str, " Sync", chars_left);
803 case BLKIO_STAT_ASYNC:
804 strlcat(str, " Async", chars_left);
806 case BLKIO_STAT_TOTAL:
807 strlcat(str, " Total", chars_left);
810 strlcat(str, " Invalid", chars_left);
814 static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
815 struct cgroup_map_cb *cb, const char *dname)
817 blkio_get_key_name(0, dname, str, chars_left, true);
818 cb->fill(cb, str, val);
823 static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg, int plid,
824 enum stat_type_cpu type, enum stat_sub_type sub_type)
826 struct blkg_policy_data *pd = blkg->pd[plid];
828 struct blkio_group_stats_cpu *stats_cpu;
831 for_each_possible_cpu(cpu) {
833 stats_cpu = per_cpu_ptr(pd->stats_cpu, cpu);
836 start = u64_stats_fetch_begin(&stats_cpu->syncp);
837 if (type == BLKIO_STAT_CPU_SECTORS)
838 tval = stats_cpu->sectors;
840 tval = stats_cpu->stat_arr_cpu[type][sub_type];
841 } while (u64_stats_fetch_retry(&stats_cpu->syncp, start));
849 static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg, int plid,
850 struct cgroup_map_cb *cb, const char *dname,
851 enum stat_type_cpu type)
853 uint64_t disk_total, val;
854 char key_str[MAX_KEY_LEN];
855 enum stat_sub_type sub_type;
857 if (type == BLKIO_STAT_CPU_SECTORS) {
858 val = blkio_read_stat_cpu(blkg, plid, type, 0);
859 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb,
863 for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
865 blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
867 val = blkio_read_stat_cpu(blkg, plid, type, sub_type);
868 cb->fill(cb, key_str, val);
871 disk_total = blkio_read_stat_cpu(blkg, plid, type, BLKIO_STAT_READ) +
872 blkio_read_stat_cpu(blkg, plid, type, BLKIO_STAT_WRITE);
874 blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
876 cb->fill(cb, key_str, disk_total);
880 /* This should be called with blkg->stats_lock held */
881 static uint64_t blkio_get_stat(struct blkio_group *blkg, int plid,
882 struct cgroup_map_cb *cb, const char *dname,
885 struct blkg_policy_data *pd = blkg->pd[plid];
887 char key_str[MAX_KEY_LEN];
888 enum stat_sub_type sub_type;
890 if (type == BLKIO_STAT_TIME)
891 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
892 pd->stats.time, cb, dname);
893 #ifdef CONFIG_DEBUG_BLK_CGROUP
894 if (type == BLKIO_STAT_UNACCOUNTED_TIME)
895 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
896 pd->stats.unaccounted_time, cb, dname);
897 if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
898 uint64_t sum = pd->stats.avg_queue_size_sum;
899 uint64_t samples = pd->stats.avg_queue_size_samples;
901 do_div(sum, samples);
904 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
907 if (type == BLKIO_STAT_GROUP_WAIT_TIME)
908 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
909 pd->stats.group_wait_time, cb, dname);
910 if (type == BLKIO_STAT_IDLE_TIME)
911 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
912 pd->stats.idle_time, cb, dname);
913 if (type == BLKIO_STAT_EMPTY_TIME)
914 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
915 pd->stats.empty_time, cb, dname);
916 if (type == BLKIO_STAT_DEQUEUE)
917 return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
918 pd->stats.dequeue, cb, dname);
921 for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
923 blkio_get_key_name(sub_type, dname, key_str, MAX_KEY_LEN,
925 cb->fill(cb, key_str, pd->stats.stat_arr[type][sub_type]);
927 disk_total = pd->stats.stat_arr[type][BLKIO_STAT_READ] +
928 pd->stats.stat_arr[type][BLKIO_STAT_WRITE];
929 blkio_get_key_name(BLKIO_STAT_TOTAL, dname, key_str, MAX_KEY_LEN,
931 cb->fill(cb, key_str, disk_total);
935 static int blkio_policy_parse_and_set(char *buf, enum blkio_policy_id plid,
936 int fileid, struct blkio_cgroup *blkcg)
938 struct gendisk *disk = NULL;
939 struct blkio_group *blkg = NULL;
940 struct blkg_policy_data *pd;
941 char *s[4], *p, *major_s = NULL, *minor_s = NULL;
942 unsigned long major, minor;
943 int i = 0, ret = -EINVAL;
948 memset(s, 0, sizeof(s));
950 while ((p = strsep(&buf, " ")) != NULL) {
956 /* Prevent inputting too many things */
964 p = strsep(&s[0], ":");
974 if (strict_strtoul(major_s, 10, &major))
977 if (strict_strtoul(minor_s, 10, &minor))
980 dev = MKDEV(major, minor);
982 if (strict_strtoull(s[1], 10, &temp))
985 disk = get_gendisk(dev, &part);
991 spin_lock_irq(disk->queue->queue_lock);
992 blkg = blkg_lookup_create(blkcg, disk->queue, plid, false);
993 spin_unlock_irq(disk->queue->queue_lock);
1000 pd = blkg->pd[plid];
1003 case BLKIO_POLICY_PROP:
1004 if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
1005 temp > BLKIO_WEIGHT_MAX)
1008 pd->conf.weight = temp;
1009 blkio_update_group_weight(blkg, plid, temp ?: blkcg->weight);
1011 case BLKIO_POLICY_THROTL:
1013 case BLKIO_THROTL_read_bps_device:
1014 pd->conf.bps[READ] = temp;
1015 blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
1017 case BLKIO_THROTL_write_bps_device:
1018 pd->conf.bps[WRITE] = temp;
1019 blkio_update_group_bps(blkg, plid, temp ?: -1, fileid);
1021 case BLKIO_THROTL_read_iops_device:
1022 if (temp > THROTL_IOPS_MAX)
1024 pd->conf.iops[READ] = temp;
1025 blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
1027 case BLKIO_THROTL_write_iops_device:
1028 if (temp > THROTL_IOPS_MAX)
1030 pd->conf.iops[WRITE] = temp;
1031 blkio_update_group_iops(blkg, plid, temp ?: -1, fileid);
1045 * If queue was bypassing, we should retry. Do so after a short
1046 * msleep(). It isn't strictly necessary but queue can be
1047 * bypassing for some time and it's always nice to avoid busy looping.
1050 if (ret == -EBUSY) {
1052 return restart_syscall();
1057 static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
1062 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
1063 enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
1064 int fileid = BLKIOFILE_ATTR(cft->private);
1066 buf = kstrdup(buffer, GFP_KERNEL);
1070 ret = blkio_policy_parse_and_set(buf, plid, fileid, blkcg);
1075 static const char *blkg_dev_name(struct blkio_group *blkg)
1077 /* some drivers (floppy) instantiate a queue w/o disk registered */
1078 if (blkg->q->backing_dev_info.dev)
1079 return dev_name(blkg->q->backing_dev_info.dev);
1083 static void blkio_print_group_conf(struct cftype *cft, struct blkio_group *blkg,
1086 int plid = BLKIOFILE_POLICY(cft->private);
1087 int fileid = BLKIOFILE_ATTR(cft->private);
1088 struct blkg_policy_data *pd = blkg->pd[plid];
1089 const char *dname = blkg_dev_name(blkg);
1096 case BLKIO_POLICY_PROP:
1097 if (pd->conf.weight)
1098 seq_printf(m, "%s\t%u\n",
1099 dname, pd->conf.weight);
1101 case BLKIO_POLICY_THROTL:
1103 case BLKIO_THROTL_read_bps_device:
1105 case BLKIO_THROTL_write_bps_device:
1106 if (pd->conf.bps[rw])
1107 seq_printf(m, "%s\t%llu\n",
1108 dname, pd->conf.bps[rw]);
1110 case BLKIO_THROTL_read_iops_device:
1112 case BLKIO_THROTL_write_iops_device:
1113 if (pd->conf.iops[rw])
1114 seq_printf(m, "%s\t%u\n",
1115 dname, pd->conf.iops[rw]);
1124 /* cgroup files which read their data from policy nodes end up here */
1125 static void blkio_read_conf(struct cftype *cft, struct blkio_cgroup *blkcg,
1128 struct blkio_group *blkg;
1129 struct hlist_node *n;
1131 spin_lock_irq(&blkcg->lock);
1132 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node)
1133 if (BLKIOFILE_POLICY(cft->private) == blkg->plid)
1134 blkio_print_group_conf(cft, blkg, m);
1135 spin_unlock_irq(&blkcg->lock);
1138 static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
1141 struct blkio_cgroup *blkcg;
1142 enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
1143 int name = BLKIOFILE_ATTR(cft->private);
1145 blkcg = cgroup_to_blkio_cgroup(cgrp);
1148 case BLKIO_POLICY_PROP:
1150 case BLKIO_PROP_weight_device:
1151 blkio_read_conf(cft, blkcg, m);
1157 case BLKIO_POLICY_THROTL:
1159 case BLKIO_THROTL_read_bps_device:
1160 case BLKIO_THROTL_write_bps_device:
1161 case BLKIO_THROTL_read_iops_device:
1162 case BLKIO_THROTL_write_iops_device:
1163 blkio_read_conf(cft, blkcg, m);
1176 static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
1177 struct cftype *cft, struct cgroup_map_cb *cb,
1178 enum stat_type type, bool show_total, bool pcpu)
1180 struct blkio_group *blkg;
1181 struct hlist_node *n;
1182 uint64_t cgroup_total = 0;
1185 hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
1186 const char *dname = blkg_dev_name(blkg);
1187 int plid = BLKIOFILE_POLICY(cft->private);
1189 if (!dname || plid != blkg->plid)
1192 cgroup_total += blkio_get_stat_cpu(blkg, plid,
1195 spin_lock_irq(&blkg->stats_lock);
1196 cgroup_total += blkio_get_stat(blkg, plid,
1198 spin_unlock_irq(&blkg->stats_lock);
1202 cb->fill(cb, "Total", cgroup_total);
1207 /* All map-type cgroup files are serviced by this function */
1208 static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
1209 struct cgroup_map_cb *cb)
1211 struct blkio_cgroup *blkcg;
1212 enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
1213 int name = BLKIOFILE_ATTR(cft->private);
1215 blkcg = cgroup_to_blkio_cgroup(cgrp);
1218 case BLKIO_POLICY_PROP:
1220 case BLKIO_PROP_time:
1221 return blkio_read_blkg_stats(blkcg, cft, cb,
1222 BLKIO_STAT_TIME, 0, 0);
1223 case BLKIO_PROP_sectors:
1224 return blkio_read_blkg_stats(blkcg, cft, cb,
1225 BLKIO_STAT_CPU_SECTORS, 0, 1);
1226 case BLKIO_PROP_io_service_bytes:
1227 return blkio_read_blkg_stats(blkcg, cft, cb,
1228 BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
1229 case BLKIO_PROP_io_serviced:
1230 return blkio_read_blkg_stats(blkcg, cft, cb,
1231 BLKIO_STAT_CPU_SERVICED, 1, 1);
1232 case BLKIO_PROP_io_service_time:
1233 return blkio_read_blkg_stats(blkcg, cft, cb,
1234 BLKIO_STAT_SERVICE_TIME, 1, 0);
1235 case BLKIO_PROP_io_wait_time:
1236 return blkio_read_blkg_stats(blkcg, cft, cb,
1237 BLKIO_STAT_WAIT_TIME, 1, 0);
1238 case BLKIO_PROP_io_merged:
1239 return blkio_read_blkg_stats(blkcg, cft, cb,
1240 BLKIO_STAT_CPU_MERGED, 1, 1);
1241 case BLKIO_PROP_io_queued:
1242 return blkio_read_blkg_stats(blkcg, cft, cb,
1243 BLKIO_STAT_QUEUED, 1, 0);
1244 #ifdef CONFIG_DEBUG_BLK_CGROUP
1245 case BLKIO_PROP_unaccounted_time:
1246 return blkio_read_blkg_stats(blkcg, cft, cb,
1247 BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
1248 case BLKIO_PROP_dequeue:
1249 return blkio_read_blkg_stats(blkcg, cft, cb,
1250 BLKIO_STAT_DEQUEUE, 0, 0);
1251 case BLKIO_PROP_avg_queue_size:
1252 return blkio_read_blkg_stats(blkcg, cft, cb,
1253 BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
1254 case BLKIO_PROP_group_wait_time:
1255 return blkio_read_blkg_stats(blkcg, cft, cb,
1256 BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
1257 case BLKIO_PROP_idle_time:
1258 return blkio_read_blkg_stats(blkcg, cft, cb,
1259 BLKIO_STAT_IDLE_TIME, 0, 0);
1260 case BLKIO_PROP_empty_time:
1261 return blkio_read_blkg_stats(blkcg, cft, cb,
1262 BLKIO_STAT_EMPTY_TIME, 0, 0);
1268 case BLKIO_POLICY_THROTL:
1270 case BLKIO_THROTL_io_service_bytes:
1271 return blkio_read_blkg_stats(blkcg, cft, cb,
1272 BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
1273 case BLKIO_THROTL_io_serviced:
1274 return blkio_read_blkg_stats(blkcg, cft, cb,
1275 BLKIO_STAT_CPU_SERVICED, 1, 1);
1287 static int blkio_weight_write(struct blkio_cgroup *blkcg, int plid, u64 val)
1289 struct blkio_group *blkg;
1290 struct hlist_node *n;
1292 if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
1295 spin_lock(&blkio_list_lock);
1296 spin_lock_irq(&blkcg->lock);
1297 blkcg->weight = (unsigned int)val;
1299 hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
1300 struct blkg_policy_data *pd = blkg->pd[blkg->plid];
1302 if (blkg->plid == plid && !pd->conf.weight)
1303 blkio_update_group_weight(blkg, plid, blkcg->weight);
1306 spin_unlock_irq(&blkcg->lock);
1307 spin_unlock(&blkio_list_lock);
1311 static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft) {
1312 struct blkio_cgroup *blkcg;
1313 enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
1314 int name = BLKIOFILE_ATTR(cft->private);
1316 blkcg = cgroup_to_blkio_cgroup(cgrp);
1319 case BLKIO_POLICY_PROP:
1321 case BLKIO_PROP_weight:
1322 return (u64)blkcg->weight;
1332 blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
1334 struct blkio_cgroup *blkcg;
1335 enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
1336 int name = BLKIOFILE_ATTR(cft->private);
1338 blkcg = cgroup_to_blkio_cgroup(cgrp);
1341 case BLKIO_POLICY_PROP:
1343 case BLKIO_PROP_weight:
1344 return blkio_weight_write(blkcg, plid, val);
1354 struct cftype blkio_files[] = {
1356 .name = "weight_device",
1357 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1358 BLKIO_PROP_weight_device),
1359 .read_seq_string = blkiocg_file_read,
1360 .write_string = blkiocg_file_write,
1361 .max_write_len = 256,
1365 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1367 .read_u64 = blkiocg_file_read_u64,
1368 .write_u64 = blkiocg_file_write_u64,
1372 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1374 .read_map = blkiocg_file_read_map,
1378 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1379 BLKIO_PROP_sectors),
1380 .read_map = blkiocg_file_read_map,
1383 .name = "io_service_bytes",
1384 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1385 BLKIO_PROP_io_service_bytes),
1386 .read_map = blkiocg_file_read_map,
1389 .name = "io_serviced",
1390 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1391 BLKIO_PROP_io_serviced),
1392 .read_map = blkiocg_file_read_map,
1395 .name = "io_service_time",
1396 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1397 BLKIO_PROP_io_service_time),
1398 .read_map = blkiocg_file_read_map,
1401 .name = "io_wait_time",
1402 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1403 BLKIO_PROP_io_wait_time),
1404 .read_map = blkiocg_file_read_map,
1407 .name = "io_merged",
1408 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1409 BLKIO_PROP_io_merged),
1410 .read_map = blkiocg_file_read_map,
1413 .name = "io_queued",
1414 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1415 BLKIO_PROP_io_queued),
1416 .read_map = blkiocg_file_read_map,
1419 .name = "reset_stats",
1420 .write_u64 = blkiocg_reset_stats,
1422 #ifdef CONFIG_BLK_DEV_THROTTLING
1424 .name = "throttle.read_bps_device",
1425 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1426 BLKIO_THROTL_read_bps_device),
1427 .read_seq_string = blkiocg_file_read,
1428 .write_string = blkiocg_file_write,
1429 .max_write_len = 256,
1433 .name = "throttle.write_bps_device",
1434 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1435 BLKIO_THROTL_write_bps_device),
1436 .read_seq_string = blkiocg_file_read,
1437 .write_string = blkiocg_file_write,
1438 .max_write_len = 256,
1442 .name = "throttle.read_iops_device",
1443 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1444 BLKIO_THROTL_read_iops_device),
1445 .read_seq_string = blkiocg_file_read,
1446 .write_string = blkiocg_file_write,
1447 .max_write_len = 256,
1451 .name = "throttle.write_iops_device",
1452 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1453 BLKIO_THROTL_write_iops_device),
1454 .read_seq_string = blkiocg_file_read,
1455 .write_string = blkiocg_file_write,
1456 .max_write_len = 256,
1459 .name = "throttle.io_service_bytes",
1460 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1461 BLKIO_THROTL_io_service_bytes),
1462 .read_map = blkiocg_file_read_map,
1465 .name = "throttle.io_serviced",
1466 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
1467 BLKIO_THROTL_io_serviced),
1468 .read_map = blkiocg_file_read_map,
1470 #endif /* CONFIG_BLK_DEV_THROTTLING */
1472 #ifdef CONFIG_DEBUG_BLK_CGROUP
1474 .name = "avg_queue_size",
1475 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1476 BLKIO_PROP_avg_queue_size),
1477 .read_map = blkiocg_file_read_map,
1480 .name = "group_wait_time",
1481 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1482 BLKIO_PROP_group_wait_time),
1483 .read_map = blkiocg_file_read_map,
1486 .name = "idle_time",
1487 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1488 BLKIO_PROP_idle_time),
1489 .read_map = blkiocg_file_read_map,
1492 .name = "empty_time",
1493 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1494 BLKIO_PROP_empty_time),
1495 .read_map = blkiocg_file_read_map,
1499 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1500 BLKIO_PROP_dequeue),
1501 .read_map = blkiocg_file_read_map,
1504 .name = "unaccounted_time",
1505 .private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
1506 BLKIO_PROP_unaccounted_time),
1507 .read_map = blkiocg_file_read_map,
1512 static int blkiocg_populate(struct cgroup_subsys *subsys, struct cgroup *cgroup)
1514 return cgroup_add_files(cgroup, subsys, blkio_files,
1515 ARRAY_SIZE(blkio_files));
1518 static int blkiocg_pre_destroy(struct cgroup_subsys *subsys,
1519 struct cgroup *cgroup)
1521 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
1522 unsigned long flags;
1523 struct blkio_group *blkg;
1524 struct request_queue *q;
1525 struct blkio_policy_type *blkiop;
1530 spin_lock_irqsave(&blkcg->lock, flags);
1532 if (hlist_empty(&blkcg->blkg_list)) {
1533 spin_unlock_irqrestore(&blkcg->lock, flags);
1537 blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
1539 q = rcu_dereference(blkg->q);
1540 __blkiocg_del_blkio_group(blkg);
1542 spin_unlock_irqrestore(&blkcg->lock, flags);
1545 * This blkio_group is being unlinked as associated cgroup is
1546 * going away. Let all the IO controlling policies know about
1549 spin_lock(&blkio_list_lock);
1550 list_for_each_entry(blkiop, &blkio_list, list) {
1551 if (blkiop->plid != blkg->plid)
1553 blkiop->ops.blkio_unlink_group_fn(q, blkg);
1555 spin_unlock(&blkio_list_lock);
1563 static void blkiocg_destroy(struct cgroup_subsys *subsys, struct cgroup *cgroup)
1565 struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
1567 if (blkcg != &blkio_root_cgroup)
1571 static struct cgroup_subsys_state *
1572 blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
1574 struct blkio_cgroup *blkcg;
1575 struct cgroup *parent = cgroup->parent;
1578 blkcg = &blkio_root_cgroup;
1582 blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
1584 return ERR_PTR(-ENOMEM);
1586 blkcg->weight = BLKIO_WEIGHT_DEFAULT;
1588 spin_lock_init(&blkcg->lock);
1589 INIT_HLIST_HEAD(&blkcg->blkg_list);
1595 * blkcg_init_queue - initialize blkcg part of request queue
1596 * @q: request_queue to initialize
1598 * Called from blk_alloc_queue_node(). Responsible for initializing blkcg
1599 * part of new request_queue @q.
1602 * 0 on success, -errno on failure.
1604 int blkcg_init_queue(struct request_queue *q)
1610 ret = blk_throtl_init(q);
1614 mutex_lock(&all_q_mutex);
1615 INIT_LIST_HEAD(&q->all_q_node);
1616 list_add_tail(&q->all_q_node, &all_q_list);
1617 mutex_unlock(&all_q_mutex);
1623 * blkcg_drain_queue - drain blkcg part of request_queue
1624 * @q: request_queue to drain
1626 * Called from blk_drain_queue(). Responsible for draining blkcg part.
1628 void blkcg_drain_queue(struct request_queue *q)
1630 lockdep_assert_held(q->queue_lock);
1632 blk_throtl_drain(q);
1636 * blkcg_exit_queue - exit and release blkcg part of request_queue
1637 * @q: request_queue being released
1639 * Called from blk_release_queue(). Responsible for exiting blkcg part.
1641 void blkcg_exit_queue(struct request_queue *q)
1643 mutex_lock(&all_q_mutex);
1644 list_del_init(&q->all_q_node);
1645 mutex_unlock(&all_q_mutex);
1651 * We cannot support shared io contexts, as we have no means to support
1652 * two tasks with the same ioc in two different groups without major rework
1653 * of the main cic data structures. For now we allow a task to change
1654 * its cgroup only if it's the only owner of its ioc.
1656 static int blkiocg_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
1657 struct cgroup_taskset *tset)
1659 struct task_struct *task;
1660 struct io_context *ioc;
1663 /* task_lock() is needed to avoid races with exit_io_context() */
1664 cgroup_taskset_for_each(task, cgrp, tset) {
1666 ioc = task->io_context;
1667 if (ioc && atomic_read(&ioc->nr_tasks) > 1)
1676 static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
1677 struct cgroup_taskset *tset)
1679 struct task_struct *task;
1680 struct io_context *ioc;
1682 cgroup_taskset_for_each(task, cgrp, tset) {
1683 /* we don't lose anything even if ioc allocation fails */
1684 ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
1686 ioc_cgroup_changed(ioc);
1687 put_io_context(ioc);
1692 static void blkcg_bypass_start(void)
1693 __acquires(&all_q_mutex)
1695 struct request_queue *q;
1697 mutex_lock(&all_q_mutex);
1699 list_for_each_entry(q, &all_q_list, all_q_node) {
1700 blk_queue_bypass_start(q);
1701 blkg_destroy_all(q);
1705 static void blkcg_bypass_end(void)
1706 __releases(&all_q_mutex)
1708 struct request_queue *q;
1710 list_for_each_entry(q, &all_q_list, all_q_node)
1711 blk_queue_bypass_end(q);
1713 mutex_unlock(&all_q_mutex);
1716 void blkio_policy_register(struct blkio_policy_type *blkiop)
1718 blkcg_bypass_start();
1719 spin_lock(&blkio_list_lock);
1721 BUG_ON(blkio_policy[blkiop->plid]);
1722 blkio_policy[blkiop->plid] = blkiop;
1723 list_add_tail(&blkiop->list, &blkio_list);
1725 spin_unlock(&blkio_list_lock);
1728 EXPORT_SYMBOL_GPL(blkio_policy_register);
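/*
 * Usage sketch (hypothetical policy, modeled on the proportional-weight and
 * throttling users of this interface): a policy fills in a
 * struct blkio_policy_type and registers it at init time,
 *
 *	static struct blkio_policy_type blkio_policy_example = {
 *		.ops		= { .blkio_init_group_fn = example_init_group },
 *		.plid		= BLKIO_POLICY_PROP,
 *		.pdata_size	= sizeof(struct example_group_data),
 *	};
 *
 *	blkio_policy_register(&blkio_policy_example);
 *
 * and tears it down with blkio_policy_unregister() on exit.
 */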
1730 void blkio_policy_unregister(struct blkio_policy_type *blkiop)
1732 blkcg_bypass_start();
1733 spin_lock(&blkio_list_lock);
1735 BUG_ON(blkio_policy[blkiop->plid] != blkiop);
1736 blkio_policy[blkiop->plid] = NULL;
1737 list_del_init(&blkiop->list);
1739 spin_unlock(&blkio_list_lock);
1742 EXPORT_SYMBOL_GPL(blkio_policy_unregister);