/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ.
 */
#include <linux/cgroup.h>
#include <linux/percpu.h>
#include <linux/percpu_counter.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
#include <linux/kthread.h>

#define FC_APPID_LEN	129

#ifdef CONFIG_BLK_CGROUP
enum blkg_iostat_type {
	BLKG_IOSTAT_READ,
	BLKG_IOSTAT_WRITE,
	BLKG_IOSTAT_DISCARD,

	BLKG_IOSTAT_NR,
};

struct blkg_policy_data;
struct blkcg {
	struct cgroup_subsys_state	css;
	refcount_t			online_pin;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq	__rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_BLK_CGROUP_FC_APPID
	char				fc_app_id[FC_APPID_LEN];
#endif
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

struct blkg_iostat {
	u64				bytes[BLKG_IOSTAT_NR];
	u64				ios[BLKG_IOSTAT_NR];
};

struct blkg_iostat_set {
	struct u64_stats_sync		sync;
	struct blkg_iostat		cur;
	struct blkg_iostat		last;
};
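
/*
 * Illustrative sketch, not part of the upstream header: one way a reader
 * could take a consistent snapshot of a blkg_iostat_set using the
 * u64_stats_sync seqcount. The helper name is hypothetical; it is kept
 * out of the build on purpose.
 */
#if 0
static void example_read_iostat(struct blkg_iostat_set *bis,
				struct blkg_iostat *snap)
{
	unsigned int seq;

	do {
		seq = u64_stats_fetch_begin(&bis->sync);
		*snap = bis->cur;	/* copy all byte and io counters */
	} while (u64_stats_fetch_retry(&bis->sync, seq));
}
#endif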

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* reference count */
	struct percpu_ref		refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_iostat_set __percpu	*iostat_cpu;
	struct blkg_iostat_set		iostat;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	spinlock_t			async_bio_lock;
	struct bio_list			async_bios;
	union {
		struct work_struct	async_bio_work;
		struct work_struct	free_work;
	};

	atomic64_t			delay_nsec;
	atomic64_t			delay_start;

	struct rcu_head			rcu_head;
};

extern struct cgroup_subsys_state * const blkcg_root_css;

void blkcg_destroy_blkgs(struct blkcg *blkcg);
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
void blkcg_maybe_throttle_current(void);

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}
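
/*
 * Illustrative sketch, not part of the upstream header: resolving the
 * io-controller blkcg of the current task via task_css(). The helper name
 * is hypothetical; it is kept out of the build on purpose.
 */
#if 0
static struct blkcg *example_current_blkcg(void)
{
	/* task_css() requires rcu_read_lock() to be held by the caller */
	return css_to_blkcg(task_css(current, io_cgrp_id));
}
#endif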

/**
 * bio_blkcg - grab the blkcg associated with a bio
 * @bio: target bio
 *
 * This returns the blkcg associated with a bio, %NULL if not associated.
 * Callers are expected to either handle %NULL or know association has been
 * done prior to calling this.
 */
static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_blkg)
		return bio->bi_blkg->blkcg;
	return NULL;
}

static inline bool blk_cgroup_congested(void)
{
	struct cgroup_subsys_state *css;
	bool ret = false;

	rcu_read_lock();
	css = kthread_blkcg();
	if (!css)
		css = task_css(current, io_cgrp_id);
	while (css) {
		if (atomic_read(&css->cgroup->congestion_count)) {
			ret = true;
			break;
		}
		css = css->parent;
	}
	rcu_read_unlock();
	return ret;
}
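
/*
 * Illustrative sketch, not part of the upstream header: a caller backing
 * off briefly when any ancestor io cgroup reports congestion. The helper
 * name is hypothetical; it is kept out of the build on purpose.
 */
#if 0
static void example_backoff_if_congested(void)
{
	if (blk_cgroup_congested()) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		io_schedule_timeout(HZ / 10);
	}
}
#endif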

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg. Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}
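
/*
 * Illustrative sketch, not part of the upstream header: combining
 * bio_blkcg() and blkcg_parent() to walk from a bio's blkcg up to the
 * root. The helper name is hypothetical; it is kept out of the build on
 * purpose.
 */
#if 0
static unsigned int example_blkcg_depth(struct bio *bio)
{
	struct blkcg *blkcg;
	unsigned int depth = 0;

	/* bio_blkcg() returns NULL for bios that were never associated */
	for (blkcg = bio_blkcg(bio); blkcg; blkcg = blkcg_parent(blkcg))
		depth++;
	return depth;
}
#endif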

/**
 * blkcg_pin_online - pin online state
 * @blkcg: blkcg of interest
 *
 * While pinned, a blkcg is kept online. This is primarily used to
 * impedance-match blkg and cgwb lifetimes so that blkg doesn't go offline
 * while an associated cgwb is still active.
 */
static inline void blkcg_pin_online(struct blkcg *blkcg)
{
	refcount_inc(&blkcg->online_pin);
}

/**
 * blkcg_unpin_online - unpin online state
 * @blkcg: blkcg of interest
 *
 * This is primarily used to impedance-match blkg and cgwb lifetimes so
 * that blkg doesn't go offline while an associated cgwb is still active.
 * When this count goes to zero, all active cgwbs have finished so the
 * blkcg can continue destruction by calling blkcg_destroy_blkgs().
 */
static inline void blkcg_unpin_online(struct blkcg *blkcg)
{
	do {
		if (!refcount_dec_and_test(&blkcg->online_pin))
			break;
		blkcg_destroy_blkgs(blkcg);
		blkcg = blkcg_parent(blkcg);
	} while (blkcg);
}
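
/*
 * Illustrative sketch, not part of the upstream header: how a cgwb-style
 * user would pair blkcg_pin_online() and blkcg_unpin_online() around the
 * lifetime of an object that caches pointers into the blkcg. Both helper
 * names are hypothetical; they are kept out of the build on purpose.
 */
#if 0
static void example_attach(struct blkcg *blkcg)
{
	blkcg_pin_online(blkcg);	/* blkcg stays online from here on */
	/* ... set up the structure that references @blkcg ... */
}

static void example_detach(struct blkcg *blkcg)
{
	/* ... tear the structure down ... */
	blkcg_unpin_online(blkcg);	/* may trigger blkcg_destroy_blkgs() */
}
#endif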

#else	/* CONFIG_BLK_CGROUP */

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline void blkcg_maybe_throttle_current(void) { }
static inline bool blk_cgroup_congested(void) { return false; }

#ifdef CONFIG_BLOCK
static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
#endif /* CONFIG_BLOCK */

#endif	/* CONFIG_BLK_CGROUP */

#ifdef CONFIG_BLK_CGROUP_FC_APPID
/*
 * Sets the fc_app_id field associated with the blkcg
 * @app_id: application identifier
 * @cgrp_id: cgroup id
 * @app_id_len: size of application identifier
 */
static inline int blkcg_set_fc_appid(char *app_id, u64 cgrp_id, size_t app_id_len)
{
	struct cgroup *cgrp;
	struct cgroup_subsys_state *css;
	struct blkcg *blkcg;
	int ret = 0;

	if (app_id_len > FC_APPID_LEN)
		return -EINVAL;

	cgrp = cgroup_get_from_id(cgrp_id);
	if (!cgrp)
		return -ENOENT;
	css = cgroup_get_e_css(cgrp, &io_cgrp_subsys);
	if (!css) {
		ret = -ENOENT;
		goto out_cgrp_put;
	}
	blkcg = css_to_blkcg(css);
	/*
	 * There is a slight race condition on setting the appid.
	 * Worst case an I/O may not find the right id.
	 * This is no different from the I/O we let pass while obtaining
	 * the vmid from the fabric.
	 * Adding the overhead of a lock is not necessary.
	 */
	strlcpy(blkcg->fc_app_id, app_id, app_id_len);
	css_put(css);
out_cgrp_put:
	cgroup_put(cgrp);
	return ret;
}
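
/*
 * Illustrative sketch, not part of the upstream header: a sysfs-store-like
 * caller that associates an FC application identifier with a cgroup by id.
 * The helper name is hypothetical; it is kept out of the build on purpose.
 */
#if 0
static int example_store_appid(u64 cgrp_id, const char *buf)
{
	char app_id[FC_APPID_LEN];

	/* reject identifiers that do not fit the fixed-size field */
	if (strscpy(app_id, buf, sizeof(app_id)) < 0)
		return -EINVAL;
	return blkcg_set_fc_appid(app_id, cgrp_id, strlen(app_id) + 1);
}
#endif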

/**
 * blkcg_get_fc_appid - get the fc app identifier associated with a bio
 * @bio: target bio
 *
 * On success return the fc_app_id, on failure return NULL
 */
static inline char *blkcg_get_fc_appid(struct bio *bio)
{
	if (bio && bio->bi_blkg &&
	    (bio->bi_blkg->blkcg->fc_app_id[0] != '\0'))
		return bio->bi_blkg->blkcg->fc_app_id;
	return NULL;
}
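
/*
 * Illustrative sketch, not part of the upstream header: an FC LLDD transmit
 * path tagging an outgoing frame with the app id of the bio's blkcg. The
 * helper name is hypothetical; it is kept out of the build on purpose.
 */
#if 0
static void example_tag_frame(struct bio *bio, char *tag, size_t tag_len)
{
	const char *app_id = blkcg_get_fc_appid(bio);

	/* NULL means no app id has been set for this bio's blkcg */
	if (app_id)
		strscpy(tag, app_id, tag_len);
}
#endif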
#else
static inline int blkcg_set_fc_appid(char *buf, u64 id, size_t len) { return -EINVAL; }
static inline char *blkcg_get_fc_appid(struct bio *bio) { return NULL; }
#endif /* CONFIG_BLK_CGROUP_FC_APPID */
#endif /* _BLK_CGROUP_H */