/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 */
#include <linux/cgroup.h>
#include <linux/percpu_counter.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP
enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,
	BLKG_RWSTAT_DISCARD,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;
struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq	__rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
	refcount_t			cgwb_refcnt;
#endif
};

/*
 * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
 * recursive.  Used to carry stats of dead children.
 */
struct blkg_rwstat {
	struct percpu_counter		cpu_cnt[BLKG_RWSTAT_NR];
	atomic64_t			aux_cnt[BLKG_RWSTAT_NR];
};
struct blkg_rwstat_sample {
	u64				cnt[BLKG_RWSTAT_NR];
};
/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private data
 * area by allocating a larger data structure which embeds blkg_policy_data
 * at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};
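
/*
 * Example (illustrative sketch, not part of this interface): a policy embeds
 * blkg_policy_data at the start of its own per-blkg structure and converts
 * between the two with container_of().  "struct my_blkg_data", pd_to_md()
 * and my_pd_alloc_fn() are hypothetical names.
 *
 *	struct my_blkg_data {
 *		struct blkg_policy_data	pd;	(must stay at the beginning)
 *		u64			bytes_dispatched;
 *	};
 *
 *	static struct blkg_policy_data *my_pd_alloc_fn(gfp_t gfp, int node)
 *	{
 *		struct my_blkg_data *md = kzalloc_node(sizeof(*md), gfp, node);
 *
 *		return md ? &md->pd : NULL;
 *	}
 *
 *	static struct my_blkg_data *pd_to_md(struct blkg_policy_data *pd)
 *	{
 *		return container_of(pd, struct my_blkg_data, pd);
 *	}
 */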
/*
 * Policies that need to keep per-blkcg data which is independent of any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a larger
 * data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};
/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/*
	 * Each blkg gets congested separately and the congestion state is
	 * propagated to the matching bdi_writeback_congested.
	 */
	struct bdi_writeback_congested	*wb_congested;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* reference count */
	struct percpu_ref		refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_rwstat		stat_bytes;
	struct blkg_rwstat		stat_ios;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;

	atomic_t			use_delay;
	atomic64_t			delay_nsec;
	atomic64_t			delay_start;
	u64				last_delay;
	int				last_use;
};
typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
				      size_t size);
struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;
	blkcg_pol_bind_cpd_fn		*cpd_bind_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn		*pd_stat_fn;
};
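
/*
 * Example (illustrative sketch): a minimal policy fills in the ops table and
 * registers it at init time.  "my_blkcg_policy", my_pd_alloc_fn() and
 * my_pd_free_fn() are hypothetical; plid is assigned by
 * blkcg_policy_register().
 *
 *	static void my_pd_free_fn(struct blkg_policy_data *pd)
 *	{
 *		kfree(container_of(pd, struct my_blkg_data, pd));
 *	}
 *
 *	static struct blkcg_policy my_blkcg_policy = {
 *		.pd_alloc_fn	= my_pd_alloc_fn,
 *		.pd_free_fn	= my_pd_free_fn,
 *	};
 *
 *	static int __init my_policy_init(void)
 *	{
 *		return blkcg_policy_register(&my_blkcg_policy);
 *	}
 */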
extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;
struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
				      struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);
static inline u64 blkg_rwstat_read_counter(struct blkg_rwstat *rwstat,
		unsigned int idx)
{
	return atomic64_read(&rwstat->aux_cnt[idx]) +
		percpu_counter_sum_positive(&rwstat->cpu_cnt[idx]);
}
const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat_sample *rwstat);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);
int blkg_print_stat_bytes(struct seq_file *sf, void *v);
int blkg_print_stat_ios(struct seq_file *sf, void *v);
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);

void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
			       int off, struct blkg_rwstat_sample *sum);
struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	char				*body;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
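
/*
 * Example (illustrative sketch): blkg_conf_prep() parses a "MAJ:MIN ..."
 * config string, looks up (creating if necessary) the matching blkg and
 * returns with it pinned; blkg_conf_finish() undoes all of that.  A policy's
 * cftype write handler typically looks like the following; my_set_limit()
 * and the limit parsing are hypothetical.
 *
 *	static ssize_t my_set_limit(struct kernfs_open_file *of, char *buf,
 *				    size_t nbytes, loff_t off)
 *	{
 *		struct blkcg *blkcg = css_to_blkcg(of_css(of));
 *		struct blkg_conf_ctx ctx;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &my_blkcg_policy, buf, &ctx);
 *		if (ret)
 *			return ret;
 *		... parse ctx.body and update blkg_to_pd(ctx.blkg, ...) ...
 *		blkg_conf_finish(&ctx);
 *		return ret ?: nbytes;
 *	}
 */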
/**
 * blkcg_css - find the current css
 *
 * Find the css associated with either the kthread or the current task.
 * This may return a dying css, so it is up to the caller to use tryget logic
 * to confirm it is alive and well.
 */
static inline struct cgroup_subsys_state *blkcg_css(void)
{
	struct cgroup_subsys_state *css;

	css = kthread_blkcg();
	if (css)
		return css;
	return task_css(current, io_cgrp_id);
}
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}
/**
 * __bio_blkcg - internal, inconsistent version to get blkcg
 *
 * This function is inconsistent and consequently is dangerous to use.  The
 * first part of the function returns a blkcg where a reference is owned by
 * the bio.  This means it does not need to be rcu protected as it cannot go
 * away with the bio owning a reference to it.  However, the latter
 * potentially gets it from task_css().  This can race against task migration
 * and the cgroup dying.  It is also semantically different as it must be
 * called rcu protected and is susceptible to failure when trying to get a
 * reference to it.  Therefore, it is not ok to assume that *_get() will
 * always succeed on the blkcg returned here.
 */
static inline struct blkcg *__bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_blkg)
		return bio->bi_blkg->blkcg;
	return css_to_blkcg(blkcg_css());
}
/**
 * bio_blkcg - grab the blkcg associated with a bio
 * @bio: target bio
 *
 * This returns the blkcg associated with a bio, %NULL if not associated.
 * Callers are expected to either handle %NULL or know association has been
 * done prior to calling this.
 */
static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_blkg)
		return bio->bi_blkg->blkcg;
	return NULL;
}
static inline bool blk_cgroup_congested(void)
{
	struct cgroup_subsys_state *css;
	bool ret = false;

	rcu_read_lock();
	css = kthread_blkcg();
	if (!css)
		css = task_css(current, io_cgrp_id);
	while (css) {
		if (atomic_read(&css->cgroup->congestion_count)) {
			ret = true;
			break;
		}
		css = css->parent;
	}
	rcu_read_unlock();
	return ret;
}
/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @return: true if this bio needs to be submitted with the root blkg context.
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as
 * if it were attached to the root blkg, and then backcharge to the actual
 * owning blkg.  The idea is we do bio_blkcg() to look up the actual context
 * for the bio and attach the appropriate blkg to the bio.  Then we call this
 * helper and if it is true run with the root blkg for that queue and then do
 * any backcharging to the originating cgroup once the io is complete.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}
/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}
/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and the lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}
/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return __blkg_lookup(blkcg, q, false);
}
/**
 * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
 * @q: request_queue of interest
 *
 * Lookup blkg for @q at the root level.  See also blkg_lookup().
 */
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{
	return q->root_blkg;
}
/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}
static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}
/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}
static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}
extern void blkcg_destroy_blkgs(struct blkcg *blkcg);

#ifdef CONFIG_CGROUP_WRITEBACK
/**
 * blkcg_cgwb_get - get a reference for blkcg->cgwb_list
 * @blkcg: blkcg of interest
 *
 * This is used to track the number of active wb's related to a blkcg.
 */
static inline void blkcg_cgwb_get(struct blkcg *blkcg)
{
	refcount_inc(&blkcg->cgwb_refcnt);
}
/**
 * blkcg_cgwb_put - put a reference for @blkcg->cgwb_list
 * @blkcg: blkcg of interest
 *
 * This is used to track the number of active wb's related to a blkcg.
 * When this count goes to zero, all active wbs have finished so the
 * blkcg can continue destruction by calling blkcg_destroy_blkgs().
 * This work may occur in cgwb_release_workfn() on the cgwb_release
 * workqueue.
 */
static inline void blkcg_cgwb_put(struct blkcg *blkcg)
{
	if (refcount_dec_and_test(&blkcg->cgwb_refcnt))
		blkcg_destroy_blkgs(blkcg);
}
#else

static inline void blkcg_cgwb_get(struct blkcg *blkcg) { }

static inline void blkcg_cgwb_put(struct blkcg *blkcg)
{
	/* wb isn't being accounted, so trigger destruction right away */
	blkcg_destroy_blkgs(blkcg);
}

#endif
/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}
/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	percpu_ref_get(&blkg->refcnt);
}
/**
 * blkg_tryget - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg.  We may be in the
 * midst of freeing this blkg, so we can only use it if the refcnt is not
 * zero.
 */
static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
	return blkg && percpu_ref_tryget(&blkg->refcnt);
}
/**
 * blkg_tryget_closest - try and get a blkg ref on the closest blkg
 * @blkg: blkg to get
 *
 * This needs to be called rcu protected.  As the failure mode here is to
 * walk up the blkg tree, this ensures that the blkg->parent pointers are
 * always valid.  This returns the blkg that it ended up taking a reference
 * on or %NULL if no reference was taken.
 */
static inline struct blkcg_gq *blkg_tryget_closest(struct blkcg_gq *blkg)
{
	struct blkcg_gq *ret_blkg = NULL;

	WARN_ON_ONCE(!rcu_read_lock_held());

	while (blkg) {
		if (blkg_tryget(blkg)) {
			ret_blkg = blkg;
			break;
		}
		blkg = blkg->parent;
	}

	return ret_blkg;
}
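
/*
 * Example (illustrative sketch): pairing an RCU lookup with
 * blkg_tryget_closest().  The reference obtained pins either the looked-up
 * blkg or its nearest alive ancestor, and is dropped with blkg_put().
 * blkg_tryget_closest() tolerates a %NULL argument, so a failed lookup
 * simply falls through.
 *
 *	rcu_read_lock();
 *	blkg = blkg_tryget_closest(blkg_lookup(blkcg, q));
 *	rcu_read_unlock();
 *	if (blkg) {
 *		... use blkg ...
 *		blkg_put(blkg);
 *	}
 */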
/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	percpu_ref_put(&blkg->refcnt);
}
/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
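
/*
 * Example (illustrative sketch): walking all online descendants of a queue's
 * root blkg under RCU, e.g. to accumulate a per-blkg counter.
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(blkg, pos_css, q->root_blkg) {
 *		... accumulate from blkg ...
 *	}
 *	rcu_read_unlock();
 */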
/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
{
	int i, ret;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
		if (ret) {
			while (--i >= 0)
				percpu_counter_destroy(&rwstat->cpu_cnt[i]);
			return ret;
		}
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
	return 0;
}
static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		percpu_counter_destroy(&rwstat->cpu_cnt[i]);
}
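
/*
 * Example (illustrative sketch): blkg_rwstat_init() can fail because the
 * per-cpu counters allocate memory, so its return value must be checked and
 * every successful init paired with blkg_rwstat_exit() on teardown.
 * "md->rwstat" is a hypothetical policy-private counter.
 *
 *	if (blkg_rwstat_init(&md->rwstat, gfp))
 *		goto err_free;
 *	...
 *	blkg_rwstat_exit(&md->rwstat);
 */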
/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @op: REQ_OP and flags
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @op.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   unsigned int op, uint64_t val)
{
	struct percpu_counter *cnt;

	if (op_is_discard(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_DISCARD];
	else if (op_is_write(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];

	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);

	if (op_is_sync(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];

	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
}
/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 * @result: where to store the current per-direction counts
 *
 * Read the current snapshot of @rwstat and return it in @result.
 */
static inline void blkg_rwstat_read(struct blkg_rwstat *rwstat,
		struct blkg_rwstat_sample *result)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		result->cnt[i] =
			percpu_counter_sum_positive(&rwstat->cpu_cnt[i]);
}
/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat_sample tmp = { };

	blkg_rwstat_read(rwstat, &tmp);
	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}
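
/*
 * Example (illustrative sketch): accounting a 4KiB read and sampling the
 * counters.  @op selects the READ/WRITE/DISCARD bucket plus SYNC/ASYNC;
 * blkg_rwstat_total() folds the read and write directions back together.
 *
 *	blkg_rwstat_add(&blkg->stat_bytes, REQ_OP_READ, 4096);
 *	blkg_rwstat_add(&blkg->stat_ios, REQ_OP_READ, 1);
 *
 *	struct blkg_rwstat_sample sample;
 *
 *	blkg_rwstat_read(&blkg->stat_bytes, &sample);
 *	total_bytes = blkg_rwstat_total(&blkg->stat_bytes);
 */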
/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		percpu_counter_set(&rwstat->cpu_cnt[i], 0);
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
}
/**
 * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
				       struct blkg_rwstat *from)
{
	u64 sum[BLKG_RWSTAT_NR];
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]);

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]),
			     &to->aux_cnt[i]);
}
#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
			   struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
				  struct bio *bio) { return false; }
#endif
static inline void blkcg_bio_issue_init(struct bio *bio)
{
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}
static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio)
{
	struct blkcg_gq *blkg;
	bool throtl = false;

	rcu_read_lock();

	if (!bio->bi_blkg) {
		char b[BDEVNAME_SIZE];

		WARN_ONCE(1,
			  "no blkg associated for bio on block-device: %s\n",
			  bio_devname(bio, b));
		bio_associate_blkg(bio);
	}

	blkg = bio->bi_blkg;

	throtl = blk_throtl_bio(q, blkg, bio);

	if (!throtl) {
		/*
		 * If the bio is flagged with BIO_QUEUE_ENTERED it means this
		 * is a split bio and we would have already accounted for the
		 * size of the bio.
		 */
		if (!bio_flagged(bio, BIO_QUEUE_ENTERED))
			blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
					bio->bi_iter.bi_size);
		blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
	}

	blkcg_bio_issue_init(bio);

	rcu_read_unlock();
	return !throtl;
}
static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}
static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay.  If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble.  We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
		if (cur == old)
			break;
		old = cur;
	}

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
	return 1;
}
static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (!old)
		return;
	/* We only want 1 person clearing the congestion count for this blkg. */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, 0);
		if (cur == old) {
			atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
			break;
		}
		old = cur;
	}
}
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
void blkcg_maybe_throttle_current(void);
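
/*
 * Example (illustrative sketch): how the delay machinery fits together.  A
 * policy that decides a cgroup is over its limit marks the blkg as delayed,
 * charges delay time against it, and arms throttling; the charged time is
 * later applied to the offending task on its return to user space via
 * blkcg_maybe_throttle_current().  delta_ns is a hypothetical charge.
 *
 *	blkcg_use_delay(blkg);
 *	blkcg_add_delay(blkg, ktime_to_ns(ktime_get()), delta_ns);
 *	blkcg_schedule_throttle(blkg->q, false);
 */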
#else	/* CONFIG_BLK_CGROUP */
struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};
#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline void blkcg_maybe_throttle_current(void) { }
static inline bool blk_cgroup_congested(void) { return false; }
#ifdef CONFIG_BLOCK

static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{ return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio) { return true; }
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */