#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <[email protected]>
 *
 * Copyright (C) 2008 Fabio Checconi <[email protected]>
 *                    Paolo Valente <[email protected]>
 *
 * Copyright (C) 2009 Vivek Goyal <[email protected]>
 *                    Nauman Rafique <[email protected]>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

/* CFQ specific, out here for blkcg->cfq_weight */
#define CFQ_WEIGHT_MIN		10
#define CFQ_WEIGHT_MAX		1000
#define CFQ_WEIGHT_DEFAULT	500

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	/* for policies to test whether associated blkcg has changed */
	uint64_t			id;

	/* TODO: per-policy storage in blkcg */
	unsigned int			cfq_weight;	/* belongs to cfq */
	unsigned int			cfq_leaf_weight;
};

struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each has its private
 * data on each blkg, the size of which is determined by
 * blkcg_policy->pd_size.  blkcg core allocates and frees such areas
 * together with blkg and invokes pd_init/exit_fn() methods.
 *
 * Such private data must embed struct blkg_policy_data (pd) at the
 * beginning and pd_size can't be smaller than pd.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;

	/* used during policy activation */
	struct list_head		alloc_node;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	int				refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);

struct blkcg_policy {
	int				plid;
	/* policy specific private data size */
	size_t				pd_size;
	/* cgroup files for the policy */
	struct cftype			*cftypes;

	/* operations */
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_exit_pd_fn		*pd_exit_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};

extern struct blkcg blkcg_root;

struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

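/*
 * Example (illustrative sketch): registering a policy from module init.
 * "example_policy", "example_grp" and the callbacks are hypothetical; a
 * real policy also fills in .cftypes and any other pd_*_fn hooks it needs.
 *
 *	static struct blkcg_policy example_policy = {
 *		.pd_size	= sizeof(struct example_grp),
 *		.pd_init_fn	= example_pd_init,
 *		.pd_exit_fn	= example_pd_exit,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return blkcg_policy_register(&example_policy);
 *	}
 *
 * blkcg_policy_register() assigns ->plid; per-queue activation is done
 * separately with blkcg_activate_policy().
 */
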
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);

u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
					     int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	u64				v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return task_blkcg(current);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(css_parent(&blkcg->css));
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	char *p;

	p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	if (!p) {
		strncpy(buf, "<unavailable>", buflen);
		return -ENAMETOOLONG;
	}

	memmove(buf, p, buf + buflen - p);
	return 0;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding queue_lock and an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(!blkg->refcnt);
	blkg->refcnt++;
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 *
 * The caller should be holding queue_lock.
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(blkg->refcnt <= 0);
	if (!--blkg->refcnt)
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
			       bool update_hint);

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup_create(blkcg, q);
	if (unlikely(IS_ERR(blkg)))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	/* root_rl may not have blkg set */
	if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}

struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

static inline void blkg_stat_init(struct blkg_stat *stat)
{
	u64_stats_init(&stat->syncp);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin_irq(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry_irq(&stat->syncp, start));

	return v;
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}

/**
 * blkg_stat_merge - merge a blkg_stat into another
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count to @to.
 */
static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
{
	blkg_stat_add(to, blkg_stat_read(from));
}

static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
{
	u64_stats_init(&rwstat->syncp);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin_irq(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

/**
 * blkg_rwstat_merge - merge a blkg_rwstat into another
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's counts to @to.
 */
static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
				     struct blkg_rwstat *from)
{
	struct blkg_rwstat v = blkg_rwstat_read(from);
	int i;

	u64_stats_update_begin(&to->syncp);
	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		to->cnt[i] += v.cnt[i];
	u64_stats_update_end(&to->syncp);
}

#else	/* CONFIG_BLK_CGROUP */

struct cgroup;
struct blkcg;

struct blkg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen) { return -ENAMETOOLONG; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */