/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 */

#include <linux/cgroup.h>
#include <linux/percpu_counter.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
#include <linux/kthread.h>

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg {
	struct cgroup_subsys_state	css;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq	__rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

/*
 * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
 * recursive.  Used to carry stats of dead children, and, for blkg_rwstat,
 * to carry result values from read and sum operations.
 */
struct blkg_stat {
	struct percpu_counter		cpu_cnt;
	atomic64_t			aux_cnt;
};

struct blkg_rwstat {
	struct percpu_counter		cpu_cnt[BLKG_RWSTAT_NR];
	atomic64_t			aux_cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private data
 * area by allocating a larger data structure which embeds blkg_policy_data
 * at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};
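
/*
 * Example (illustrative sketch, not part of the kernel API): a policy
 * embeds blkg_policy_data at the start of its own per-blkg struct and
 * converts back with container_of().  The "foo" names are hypothetical.
 *
 *	struct foo_grp {
 *		struct blkg_policy_data	pd;	// must come first
 *		struct blkg_stat	stat;
 *	};
 *
 *	static inline struct foo_grp *pd_to_foo(struct blkg_policy_data *pd)
 *	{
 *		return pd ? container_of(pd, struct foo_grp, pd) : NULL;
 *	}
 */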

/*
 * Policies that need to keep per-blkcg data which is independent of any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a larger
 * data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/*
	 * Each blkg gets congested separately and the congestion state is
	 * propagated to the matching bdi_writeback_congested.
	 */
	struct bdi_writeback_congested	*wb_congested;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	atomic_t			refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_rwstat		stat_bytes;
	struct blkg_rwstat		stat_ios;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);

struct blkcg_policy {
	int				plid;

	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;
	blkcg_pol_bind_cpd_fn		*cpd_bind_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);
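
/*
 * Example (sketch of the registration flow; the "foo" policy and its
 * helpers are hypothetical, error handling trimmed):
 *
 *	static struct blkg_policy_data *foo_pd_alloc(gfp_t gfp, int node)
 *	{
 *		struct foo_grp *fg = kzalloc_node(sizeof(*fg), gfp, node);
 *
 *		return fg ? &fg->pd : NULL;
 *	}
 *
 *	static void foo_pd_free(struct blkg_policy_data *pd)
 *	{
 *		kfree(pd_to_foo(pd));
 *	}
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.pd_alloc_fn	= foo_pd_alloc,
 *		.pd_free_fn	= foo_pd_free,
 *	};
 *
 *	// typically from module/subsystem init
 *	ret = blkcg_policy_register(&blkcg_policy_foo);
 */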

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);
int blkg_print_stat_bytes(struct seq_file *sf, void *v);
int blkg_print_stat_ios(struct seq_file *sf, void *v);
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);

u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
			    struct blkcg_policy *pol, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
					     struct blkcg_policy *pol, int off);
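
/*
 * Example (sketch): a policy's seq_file show method usually drives the
 * prfill helpers above.  blkcg_policy_foo and struct foo_grp are
 * hypothetical; @data is the stat's byte offset from the policy data:
 *
 *	static int foo_print_stat(struct seq_file *sf, void *v)
 *	{
 *		blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
 *				  blkg_prfill_stat, &blkcg_policy_foo,
 *				  offsetof(struct foo_grp, stat), true);
 *		return 0;
 *	}
 */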

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	char				*body;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
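
/*
 * Example (sketch): typical use from a policy's cgroup file write handler.
 * blkcg_policy_foo is a hypothetical registered policy:
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *	if (ret)
 *		return ret;
 *	// ctx.blkg is valid here; parse ctx.body and apply the config
 *	blkg_conf_finish(&ctx);
 */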

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	struct cgroup_subsys_state *css;

	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	css = kthread_blkcg();
	if (css)
		return css_to_blkcg(css);
	return css_to_blkcg(task_css(current, io_cgrp_id));
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and the lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q, false);
}
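
/*
 * Example (sketch): the lookup must be wrapped in an RCU read-side
 * critical section, e.g. to sample a counter:
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg)
 *		ios = blkg_rwstat_total(&blkg->stat_ios);
 *	rcu_read_unlock();
 */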

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	atomic_inc(&blkg->refcnt);
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	if (atomic_dec_and_test(&blkg->refcnt))
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}
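
/*
 * Example (sketch): a reference taken while the blkg is known to be alive
 * (e.g. under @q->queue_lock) is dropped with blkg_put(); the final put
 * frees the blkg via RCU:
 *
 *	blkg_get(blkg);
 *	// ... use blkg outside the lock ...
 *	blkg_put(blkg);
 */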

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
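
/*
 * Example (sketch): summing a counter over a subtree with the pre-order
 * walk above, RCU read lock held by the caller:
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *d_blkg;
 *	u64 total = 0;
 *
 *	blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)
 *		total += blkg_rwstat_total(&d_blkg->stat_ios);
 */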

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return a non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	if (rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}

struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);

/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
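
/*
 * Example (sketch): waking sleepers on every request_list of @q while
 * holding queue_lock, similar to what queue draining does:
 *
 *	struct request_list *rl;
 *
 *	blk_queue_for_each_rl(rl, q)
 *		wake_up_all(&rl->wait[BLK_RW_SYNC]);
 */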

static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
{
	int ret;

	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
	if (ret)
		return ret;

	atomic64_set(&stat->aux_cnt, 0);
	return 0;
}

static inline void blkg_stat_exit(struct blkg_stat *stat)
{
	percpu_counter_destroy(&stat->cpu_cnt);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller must ensure that IRQs on the same CPU
 * don't re-enter this function for the same counter.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	return percpu_counter_sum_positive(&stat->cpu_cnt);
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	percpu_counter_set(&stat->cpu_cnt, 0);
	atomic64_set(&stat->aux_cnt, 0);
}

/**
 * blkg_stat_add_aux - add a blkg_stat into another's aux count
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_stat_add_aux(struct blkg_stat *to,
				     struct blkg_stat *from)
{
	atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
		     &to->aux_cnt);
}
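
/*
 * Example (sketch): typical blkg_stat lifecycle inside a policy's
 * pd_alloc/pd_free path:
 *
 *	struct blkg_stat st;
 *
 *	if (blkg_stat_init(&st, GFP_KERNEL))
 *		return NULL;		// propagate allocation failure
 *	blkg_stat_add(&st, 1);
 *	total = blkg_stat_read(&st);
 *	blkg_stat_exit(&st);
 */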

static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
{
	int i, ret;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
		if (ret) {
			while (--i >= 0)
				percpu_counter_destroy(&rwstat->cpu_cnt[i]);
			return ret;
		}
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
	return 0;
}

static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		percpu_counter_destroy(&rwstat->cpu_cnt[i]);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @op: REQ_OP and flags
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @op.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   unsigned int op, uint64_t val)
{
	struct percpu_counter *cnt;

	if (op_is_write(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];

	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);

	if (op_is_sync(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];

	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
}
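
/*
 * Example: this is how blkcg_bio_issue_check() below accounts a bio,
 * bumping both the byte and IO counters for the op in bio->bi_opf:
 *
 *	blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf, bio->bi_iter.bi_size);
 *	blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
 */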

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it in the aux counts.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat result;
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_set(&result.aux_cnt[i],
			     percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
	return result;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
		atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		percpu_counter_set(&rwstat->cpu_cnt[i], 0);
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
}

/**
 * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
				       struct blkg_rwstat *from)
{
	u64 sum[BLKG_RWSTAT_NR];
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]);

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]),
			     &to->aux_cnt[i]);
}

#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
			   struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
				  struct bio *bio) { return false; }
#endif

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	bool throtl = false;

	rcu_read_lock();
	blkcg = bio_blkcg(bio);

	/* associate blkcg if bio hasn't attached one */
	bio_associate_blkcg(bio, &blkcg->css);

	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg)) {
		spin_lock_irq(q->queue_lock);
		blkg = blkg_lookup_create(blkcg, q);
		if (IS_ERR(blkg))
			blkg = NULL;
		spin_unlock_irq(q->queue_lock);
	}

	throtl = blk_throtl_bio(q, blkg, bio);

	if (!throtl) {
		blkg = blkg ?: q->root_blkg;
		blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
				bio->bi_iter.bi_size);
		blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
	}

	rcu_read_unlock();
	return !throtl;
}

#else	/* CONFIG_BLK_CGROUP */

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

#ifdef CONFIG_BLOCK

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q) \
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */