#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <[email protected]>
 *
 * Copyright (C) 2008 Fabio Checconi <[email protected]>
 *		      Paolo Valente <[email protected]>
 *
 * Copyright (C) 2009 Vivek Goyal <[email protected]>
 *		      Nauman Rafique <[email protected]>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

/* CFQ specific, out here for blkcg->cfq_weight */
#define CFQ_WEIGHT_MIN		10
#define CFQ_WEIGHT_MAX		1000
#define CFQ_WEIGHT_DEFAULT	500

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	/* for policies to test whether associated blkcg has changed */
	uint64_t			id;

	/* TODO: per-policy storage in blkcg */
	unsigned int			cfq_weight;	/* belongs to cfq */
};

struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each has its private
 * data on each blkg, the size of which is determined by
 * blkcg_policy->pd_size.  blkcg core allocates and frees such areas
 * together with blkg and invokes pd_init/exit_fn() methods.
 *
 * Such private data must embed struct blkg_policy_data (pd) at the
 * beginning and pd_size can't be smaller than pd.
 */
struct blkg_policy_data {
	/* the blkg this per-policy data belongs to */
	struct blkcg_gq			*blkg;

	/* used during policy activation */
	struct list_head		alloc_node;
};
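
/*
 * For illustration only (the "foo" names here are hypothetical, not part of
 * this header): a policy's private data embeds struct blkg_policy_data as
 * its first member and sets pd_size accordingly, e.g.
 *
 *	struct foo_group {
 *		struct blkg_policy_data	pd;
 *		uint64_t		serviced_bytes;
 *	};
 *
 * with .pd_size = sizeof(struct foo_group).  Because pd sits at offset 0,
 * the core can allocate and reclaim the area uniformly while the policy
 * converts back and forth with container_of().
 */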

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;
	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;
	/* reference count */
	int				refcnt;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);

struct blkcg_policy {
	int				plid;
	/* policy specific private data size */
	size_t				pd_size;
	/* cgroup files for the policy */
	struct cftype			*cftypes;

	/* operations */
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_exit_pd_fn		*pd_exit_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};

extern struct blkcg blkcg_root;

struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);
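
/*
 * A sketch of how a policy ties these together, continuing the hypothetical
 * "foo" policy from above (foo_files, foo_pd_init() and foo_pd_exit() are
 * assumed to exist elsewhere):
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.pd_size	= sizeof(struct foo_group),
 *		.cftypes	= foo_files,
 *		.pd_init_fn	= foo_pd_init,
 *		.pd_exit_fn	= foo_pd_exit,
 *	};
 *
 * blkcg_policy_register() is called once (typically from module init), and
 * blkcg_activate_policy() per request_queue the policy should manage;
 * deactivation and unregistration mirror this on teardown.
 */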

void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	u64				v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);

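/*
 * blkg_conf_prep() parses a per-blkg config update ("MAJ:MIN VAL"), looks
 * up the target blkg and returns with the queue locked; blkg_conf_finish()
 * releases everything.  A hedged sketch of a cftype write handler built on
 * the pair (the foo names remain hypothetical):
 *
 *	static int foo_set_weight(struct cgroup *cgrp, struct cftype *cft,
 *				  const char *buf)
 *	{
 *		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 *		struct blkg_conf_ctx ctx;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *		if (ret)
 *			return ret;
 *		ret = foo_apply_weight(ctx.blkg, ctx.v);
 *		blkg_conf_finish(&ctx);
 *		return ret;
 *	}
 *
 * where foo_apply_weight() would update ctx.blkg's foo private data while
 * the lock taken by blkg_conf_prep() is still held.
 */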

static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkcg, css);
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkcg, css);
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return container_of(bio->bi_css, struct blkcg, css);
	return task_blkcg(current);
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

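/*
 * Policies normally wrap the generic accessors in typed helpers.  Sketch,
 * again assuming the hypothetical struct foo_group whose first member is
 * its struct blkg_policy_data pd:
 *
 *	static inline struct foo_group *blkg_to_foo(struct blkcg_gq *blkg)
 *	{
 *		struct blkg_policy_data *pd = blkg_to_pd(blkg,
 *							 &blkcg_policy_foo);
 *		return pd ? container_of(pd, struct foo_group, pd) : NULL;
 *	}
 */
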
/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	int ret;

	rcu_read_lock();
	ret = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	rcu_read_unlock();
	if (ret)
		strncpy(buf, "<unavailable>", buflen);
	return ret;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding queue_lock and an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(!blkg->refcnt);
	blkg->refcnt++;
}

void __blkg_release(struct blkcg_gq *blkg);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 *
 * The caller should be holding queue_lock.
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(blkg->refcnt <= 0);
	if (!--blkg->refcnt)
		__blkg_release(blkg);
}

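/*
 * Both helpers expect queue_lock to be held, and blkg_get() additionally
 * requires an existing reference (a zero refcnt means the blkg may already
 * be on its way out).  Illustrative pairing:
 *
 *	spin_lock_irq(q->queue_lock);
 *	blkg_get(blkg);
 *	... use blkg ...
 *	blkg_put(blkg);
 *	spin_unlock_irq(q->queue_lock);
 */
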
/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup_create(blkcg, q);
	if (unlikely(IS_ERR(blkg)))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	/* root_rl may not have blkg set */
	if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}

struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

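/*
 * The helpers above form the request_list lifecycle.  A hedged sketch of
 * the allocation/free path (locking and the actual request allocation
 * elided): blk_get_rl() picks blkg->rl or q->root_rl, blk_rq_set_rl()
 * records the choice on the request, and the free path drops the reference:
 *
 *	rl = blk_get_rl(q, bio);
 *	blk_rq_set_rl(rq, rl);
 *	...
 *	blk_put_rl(blk_rq_rl(rq));
 */
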
/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry(&stat->syncp, start));

	return v;
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}

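/*
 * u64_stats_sync makes updates cheap on 64bit and seqcount-protected on
 * 32bit, but writers still need external serialization (e.g. queue_lock).
 * Illustrative use with a hypothetical per-group counter fg->time:
 *
 *	blkg_stat_add(&fg->time, jiffies - start);	(writer, serialized)
 *	v = blkg_stat_read(&fg->time);			(reader, lockless)
 */
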
/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_sum - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_sum(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

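/*
 * A blkg_rwstat splits one logical counter four ways based on the request
 * flags.  For example, byte accounting at dispatch might look like this
 * (fg->serviced_bytes is hypothetical; bi_rw carries REQ_WRITE/REQ_SYNC):
 *
 *	blkg_rwstat_add(&fg->serviced_bytes, bio->bi_rw, bio->bi_size);
 *
 * blkg_rwstat_sum() then reports READ + WRITE regardless of sync-ness.
 */
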
#else	/* CONFIG_BLK_CGROUP */

struct cgroup;
struct blkcg;

struct blkg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen) { return -EINVAL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */