// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * cgroups support for the BFQ I/O scheduler.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "elevator.h"
#include "bfq-iosched.h"

#ifdef CONFIG_BFQ_CGROUP_DEBUG
static int bfq_stat_init(struct bfq_stat *stat, gfp_t gfp)
{
	int ret;

	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
	if (ret)
		return ret;

	atomic64_set(&stat->aux_cnt, 0);
	return 0;
}

static void bfq_stat_exit(struct bfq_stat *stat)
{
	percpu_counter_destroy(&stat->cpu_cnt);
}

/**
 * bfq_stat_add - add a value to a bfq_stat
 * @stat: target bfq_stat
 * @val: value to add
 *
 * Add @val to @stat. The caller must ensure that IRQs on the same CPU
 * don't re-enter this function for the same counter.
 */
static inline void bfq_stat_add(struct bfq_stat *stat, uint64_t val)
{
	percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * bfq_stat_read - read the current value of a bfq_stat
 * @stat: bfq_stat to read
 */
static inline uint64_t bfq_stat_read(struct bfq_stat *stat)
{
	return percpu_counter_sum_positive(&stat->cpu_cnt);
}

/**
 * bfq_stat_reset - reset a bfq_stat
 * @stat: bfq_stat to reset
 */
static inline void bfq_stat_reset(struct bfq_stat *stat)
{
	percpu_counter_set(&stat->cpu_cnt, 0);
	atomic64_set(&stat->aux_cnt, 0);
}

/**
 * bfq_stat_add_aux - add a bfq_stat into another's aux count
 * @to: the destination bfq_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void bfq_stat_add_aux(struct bfq_stat *to,
				    struct bfq_stat *from)
{
	atomic64_add(bfq_stat_read(from) + atomic64_read(&from->aux_cnt),
		     &to->aux_cnt);
}
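
/*
 * Note: the live value of a bfq_stat is the percpu sum alone. aux_cnt
 * only accumulates counts transferred from dead descendants (see
 * bfqg_stats_xfer_dead()); the recursive readers below add it back in.
 */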

/**
 * blkg_prfill_stat - prfill callback for bfq_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the bfq_stat in @pd
 *
 * prfill callback for printing a bfq_stat.
 */
static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
			    int off)
{
	return __blkg_prfill_u64(sf, pd, bfq_stat_read((void *)pd + off));
}

/* bfqg stats flags */
enum bfqg_stats_flags {
	BFQG_stats_waiting = 0,
	BFQG_stats_idling,
	BFQG_stats_empty,
};
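
/*
 * Each flag pairs a mark/set helper with an update helper below:
 * "waiting" feeds group_wait_time, "idling" feeds idle_time, and
 * "empty" feeds empty_time.
 */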

#define BFQG_FLAG_FNS(name)						\
static void bfqg_stats_mark_##name(struct bfqg_stats *stats)		\
{									\
	stats->flags |= (1 << BFQG_stats_##name);			\
}									\
static void bfqg_stats_clear_##name(struct bfqg_stats *stats)		\
{									\
	stats->flags &= ~(1 << BFQG_stats_##name);			\
}									\
static int bfqg_stats_##name(struct bfqg_stats *stats)			\
{									\
	return (stats->flags & (1 << BFQG_stats_##name)) != 0;		\
}									\

BFQG_FLAG_FNS(waiting)
BFQG_FLAG_FNS(idling)
BFQG_FLAG_FNS(empty)
#undef BFQG_FLAG_FNS

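/*
 * For instance, BFQG_FLAG_FNS(waiting) expands to
 * bfqg_stats_mark_waiting(), bfqg_stats_clear_waiting() and
 * bfqg_stats_waiting(), which respectively set, clear and test the
 * BFQG_stats_waiting bit in stats->flags.
 */
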
/* This should be called with the scheduler lock held. */
static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
{
	u64 now;

	if (!bfqg_stats_waiting(stats))
		return;

	now = ktime_get_ns();
	if (now > stats->start_group_wait_time)
		bfq_stat_add(&stats->group_wait_time,
			     now - stats->start_group_wait_time);
	bfqg_stats_clear_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
						 struct bfq_group *curr_bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_waiting(stats))
		return;
	if (bfqg == curr_bfqg)
		return;
	stats->start_group_wait_time = ktime_get_ns();
	bfqg_stats_mark_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
{
	u64 now;

	if (!bfqg_stats_empty(stats))
		return;

	now = ktime_get_ns();
	if (now > stats->start_empty_time)
		bfq_stat_add(&stats->empty_time,
			     now - stats->start_empty_time);
	bfqg_stats_clear_empty(stats);
}

void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
{
	bfq_stat_add(&bfqg->stats.dequeue, 1);
}

void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (blkg_rwstat_total(&stats->queued))
		return;

	/*
	 * The group is already marked empty. This can happen if bfqq
	 * got a new request in its parent group and moved to this group
	 * while being added to the service tree. Just ignore the event
	 * and move on.
	 */
	if (bfqg_stats_empty(stats))
		return;

	stats->start_empty_time = ktime_get_ns();
	bfqg_stats_mark_empty(stats);
}
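
/*
 * empty_time thus accumulates, per group, the intervals from the
 * moment the group's queued counter drops to zero (the flag is armed
 * above only when blkg_rwstat_total(&stats->queued) == 0) until the
 * next request arrival, when bfqg_stats_end_empty_time() is called
 * from bfqg_stats_update_io_add().
 */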

void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_idling(stats)) {
		u64 now = ktime_get_ns();

		if (now > stats->start_idle_time)
			bfq_stat_add(&stats->idle_time,
				     now - stats->start_idle_time);
		bfqg_stats_clear_idling(stats);
	}
}

void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	stats->start_idle_time = ktime_get_ns();
	bfqg_stats_mark_idling(stats);
}

void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	bfq_stat_add(&stats->avg_queue_size_sum,
		     blkg_rwstat_total(&stats->queued));
	bfq_stat_add(&stats->avg_queue_size_samples, 1);
	bfqg_stats_update_group_wait_time(stats);
}

void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      blk_opf_t opf)
{
	blkg_rwstat_add(&bfqg->stats.queued, opf, 1);
	bfqg_stats_end_empty_time(&bfqg->stats);
	if (bfqq != bfqg->bfqd->in_service_queue)
		bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
}
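
/*
 * group_wait_time is therefore only accumulated while the "waiting"
 * flag is set: it is armed above for a group other than the one in
 * service, and closed out by bfqg_stats_update_group_wait_time(),
 * called from bfqg_stats_update_avg_queue_size().
 */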

void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf)
{
	blkg_rwstat_add(&bfqg->stats.queued, opf, -1);
}

void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf)
{
	blkg_rwstat_add(&bfqg->stats.merged, opf, 1);
}

void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, blk_opf_t opf)
{
	struct bfqg_stats *stats = &bfqg->stats;
	u64 now = ktime_get_ns();

	if (now > io_start_time_ns)
		blkg_rwstat_add(&stats->service_time, opf,
				now - io_start_time_ns);
	if (io_start_time_ns > start_time_ns)
		blkg_rwstat_add(&stats->wait_time, opf,
				io_start_time_ns - start_time_ns);
}

#else /* CONFIG_BFQ_CGROUP_DEBUG */

void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf) { }
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf) { }
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, blk_opf_t opf) { }
void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }

#endif /* CONFIG_BFQ_CGROUP_DEBUG */

#ifdef CONFIG_BFQ_GROUP_IOSCHED

/*
 * blk-cgroup policy-related handlers
 * The following functions help in converting between blk-cgroup
 * internal structures and BFQ-specific structures.
 */

static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct bfq_group, pd) : NULL;
}

struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
{
	return pd_to_blkg(&bfqg->pd);
}

static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
{
	return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
}

/*
 * bfq_group handlers
 * The following functions help in navigating the bfq_group hierarchy
 * by allowing one to find the parent of a bfq_group or the bfq_group
 * associated with a bfq_queue.
 */

static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
{
	struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;

	return pblkg ? blkg_to_bfqg(pblkg) : NULL;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	struct bfq_entity *group_entity = bfqq->entity.parent;

	return group_entity ? container_of(group_entity, struct bfq_group,
					   entity) :
			      bfqq->bfqd->root_group;
}

/*
 * The following two functions handle get and put of a bfq_group by
 * wrapping the related blk-cgroup hooks.
 */

static void bfqg_get(struct bfq_group *bfqg)
{
	bfqg->ref++;
}

static void bfqg_put(struct bfq_group *bfqg)
{
	bfqg->ref--;

	if (bfqg->ref == 0)
		kfree(bfqg);
}

static void bfqg_and_blkg_get(struct bfq_group *bfqg)
{
	/* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
	bfqg_get(bfqg);

	blkg_get(bfqg_to_blkg(bfqg));
}

void bfqg_and_blkg_put(struct bfq_group *bfqg)
{
	blkg_put(bfqg_to_blkg(bfqg));

	bfqg_put(bfqg);
}

void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq)
{
	struct bfq_group *bfqg = blkg_to_bfqg(rq->bio->bi_blkg);

	if (!bfqg)
		return;

	blkg_rwstat_add(&bfqg->stats.bytes, rq->cmd_flags, blk_rq_bytes(rq));
	blkg_rwstat_add(&bfqg->stats.ios, rq->cmd_flags, 1);
}

/* @stats = 0 */
static void bfqg_stats_reset(struct bfqg_stats *stats)
{
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	/* queued stats shouldn't be cleared */
	blkg_rwstat_reset(&stats->merged);
	blkg_rwstat_reset(&stats->service_time);
	blkg_rwstat_reset(&stats->wait_time);
	bfq_stat_reset(&stats->time);
	bfq_stat_reset(&stats->avg_queue_size_sum);
	bfq_stat_reset(&stats->avg_queue_size_samples);
	bfq_stat_reset(&stats->dequeue);
	bfq_stat_reset(&stats->group_wait_time);
	bfq_stat_reset(&stats->idle_time);
	bfq_stat_reset(&stats->empty_time);
#endif
}

/* @to += @from */
static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
{
	if (!to || !from)
		return;

#ifdef CONFIG_BFQ_CGROUP_DEBUG
	/* queued stats shouldn't be cleared */
	blkg_rwstat_add_aux(&to->merged, &from->merged);
	blkg_rwstat_add_aux(&to->service_time, &from->service_time);
	blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
	bfq_stat_add_aux(&to->time, &from->time);
	bfq_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
	bfq_stat_add_aux(&to->avg_queue_size_samples,
			 &from->avg_queue_size_samples);
	bfq_stat_add_aux(&to->dequeue, &from->dequeue);
	bfq_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
	bfq_stat_add_aux(&to->idle_time, &from->idle_time);
	bfq_stat_add_aux(&to->empty_time, &from->empty_time);
#endif
}

/*
 * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
 * recursive stats can still account for the amount used by this bfqg after
 * it's gone.
 */
static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
{
	struct bfq_group *parent;

	if (!bfqg) /* root_group */
		return;

	parent = bfqg_parent(bfqg);

	lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);

	if (unlikely(!parent))
		return;

	bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
	bfqg_stats_reset(&bfqg->stats);
}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
		/*
		 * Make sure that bfqg and its associated blkg do not
		 * disappear before entity.
		 */
		bfqg_and_blkg_get(bfqg);
	}
	entity->parent = bfqg->my_entity; /* NULL for root group */
	entity->sched_data = &bfqg->sched_data;
}

static void bfqg_stats_exit(struct bfqg_stats *stats)
{
	blkg_rwstat_exit(&stats->bytes);
	blkg_rwstat_exit(&stats->ios);
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	blkg_rwstat_exit(&stats->merged);
	blkg_rwstat_exit(&stats->service_time);
	blkg_rwstat_exit(&stats->wait_time);
	blkg_rwstat_exit(&stats->queued);
	bfq_stat_exit(&stats->time);
	bfq_stat_exit(&stats->avg_queue_size_sum);
	bfq_stat_exit(&stats->avg_queue_size_samples);
	bfq_stat_exit(&stats->dequeue);
	bfq_stat_exit(&stats->group_wait_time);
	bfq_stat_exit(&stats->idle_time);
	bfq_stat_exit(&stats->empty_time);
#endif
}

static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
{
	if (blkg_rwstat_init(&stats->bytes, gfp) ||
	    blkg_rwstat_init(&stats->ios, gfp))
		goto error;

#ifdef CONFIG_BFQ_CGROUP_DEBUG
	if (blkg_rwstat_init(&stats->merged, gfp) ||
	    blkg_rwstat_init(&stats->service_time, gfp) ||
	    blkg_rwstat_init(&stats->wait_time, gfp) ||
	    blkg_rwstat_init(&stats->queued, gfp) ||
	    bfq_stat_init(&stats->time, gfp) ||
	    bfq_stat_init(&stats->avg_queue_size_sum, gfp) ||
	    bfq_stat_init(&stats->avg_queue_size_samples, gfp) ||
	    bfq_stat_init(&stats->dequeue, gfp) ||
	    bfq_stat_init(&stats->group_wait_time, gfp) ||
	    bfq_stat_init(&stats->idle_time, gfp) ||
	    bfq_stat_init(&stats->empty_time, gfp))
		goto error;
#endif

	return 0;

error:
	bfqg_stats_exit(stats);
	return -ENOMEM;
}
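
/*
 * The shared error path above unwinds partially-initialized stats with
 * bfqg_stats_exit(). This relies on the stats living inside a zeroed
 * bfq_group (kzalloc_node() in bfq_pd_alloc() below) and on the
 * assumption, as in mainline, that destroying a never-initialized
 * percpu counter or rwstat is a no-op.
 */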

static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
{
	return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
}

static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
{
	return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
}

static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
{
	struct bfq_group_data *bgd;

	bgd = kzalloc(sizeof(*bgd), gfp);
	if (!bgd)
		return NULL;
	return &bgd->pd;
}

static void bfq_cpd_init(struct blkcg_policy_data *cpd)
{
	struct bfq_group_data *d = cpd_to_bfqgd(cpd);

	d->weight = cgroup_subsys_on_dfl(io_cgrp_subsys) ?
		CGROUP_WEIGHT_DFL : BFQ_WEIGHT_LEGACY_DFL;
}

static void bfq_cpd_free(struct blkcg_policy_data *cpd)
{
	kfree(cpd_to_bfqgd(cpd));
}

static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, struct request_queue *q,
					     struct blkcg *blkcg)
{
	struct bfq_group *bfqg;

	bfqg = kzalloc_node(sizeof(*bfqg), gfp, q->node);
	if (!bfqg)
		return NULL;

	if (bfqg_stats_init(&bfqg->stats, gfp)) {
		kfree(bfqg);
		return NULL;
	}

	/* see comments in bfq_bic_update_cgroup for why refcounting */
	bfqg_get(bfqg);
	return &bfqg->pd;
}

static void bfq_pd_init(struct blkg_policy_data *pd)
{
	struct blkcg_gq *blkg = pd_to_blkg(pd);
	struct bfq_group *bfqg = blkg_to_bfqg(blkg);
	struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
	struct bfq_entity *entity = &bfqg->entity;
	struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);

	entity->orig_weight = entity->weight = entity->new_weight = d->weight;
	entity->my_sched_data = &bfqg->sched_data;
	entity->last_bfqq_created = NULL;

	bfqg->my_entity = entity; /*
				   * the root_group's will be set to NULL
				   * in bfq_init_queue()
				   */
	bfqg->bfqd = bfqd;
	bfqg->active_entities = 0;
	bfqg->num_queues_with_pending_reqs = 0;
	bfqg->online = true;
	bfqg->rq_pos_tree = RB_ROOT;
}

static void bfq_pd_free(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_exit(&bfqg->stats);
	bfqg_put(bfqg);
}

static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_reset(&bfqg->stats);
}

static void bfq_group_set_parent(struct bfq_group *bfqg,
				 struct bfq_group *parent)
{
	struct bfq_entity *entity;

	entity = &bfqg->entity;
	entity->parent = parent->my_entity;
	entity->sched_data = &parent->sched_data;
}

static void bfq_link_bfqg(struct bfq_data *bfqd, struct bfq_group *bfqg)
{
	struct bfq_group *parent;
	struct bfq_entity *entity;

	/*
	 * Update chain of bfq_groups as we might be handling a leaf group
	 * which, along with some of its relatives, has not been hooked yet
	 * to the private hierarchy of BFQ.
	 */
	entity = &bfqg->entity;
	for_each_entity(entity) {
		struct bfq_group *curr_bfqg = container_of(entity,
						struct bfq_group, entity);
		if (curr_bfqg != bfqd->root_group) {
			parent = bfqg_parent(curr_bfqg);
			if (!parent)
				parent = bfqd->root_group;
			bfq_group_set_parent(curr_bfqg, parent);
		}
	}
}

struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
{
	struct blkcg_gq *blkg = bio->bi_blkg;
	struct bfq_group *bfqg;

	while (blkg) {
		if (!blkg->online) {
			blkg = blkg->parent;
			continue;
		}
		bfqg = blkg_to_bfqg(blkg);
		if (bfqg->online) {
			bio_associate_blkg_from_css(bio, &blkg->blkcg->css);
			return bfqg;
		}
		blkg = blkg->parent;
	}
	bio_associate_blkg_from_css(bio,
				&bfqg_to_blkg(bfqd->root_group)->blkcg->css);
	return bfqd->root_group;
}

/**
 * bfq_bfqq_move - migrate @bfqq to @bfqg.
 * @bfqd: queue descriptor.
 * @bfqq: the queue to move.
 * @bfqg: the group to move to.
 *
 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
 * it on the new one. Avoid putting the entity on the old group idle tree.
 *
 * Must be called under the scheduler lock, to make sure that the blkg
 * owning @bfqg does not disappear (see comments in
 * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
 * objects).
 */
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg)
{
	struct bfq_entity *entity = &bfqq->entity;
	struct bfq_group *old_parent = bfqq_group(bfqq);
	bool has_pending_reqs = false;

	/*
	 * No point in moving bfqq to the same group, which can happen when
	 * the root group is offlined
	 */
	if (old_parent == bfqg)
		return;

	/*
	 * oom_bfqq is not allowed to move; it holds a ref to root_group
	 * until elevator exit.
	 */
	if (bfqq == &bfqd->oom_bfqq)
		return;
	/*
	 * Get an extra reference to prevent bfqq from being freed in
	 * the next possible expire or deactivate.
	 */
	bfqq->ref++;

	if (entity->in_groups_with_pending_reqs) {
		has_pending_reqs = true;
		bfq_del_bfqq_in_groups_with_pending_reqs(bfqq);
	}

	/* If bfqq is empty, then bfq_bfqq_expire also invokes
	 * bfq_del_bfqq_busy, thereby removing bfqq and its entity
	 * from data structures related to the current group. Otherwise we
	 * need to remove bfqq explicitly with bfq_deactivate_bfqq, as
	 * we do below.
	 */
	if (bfqq == bfqd->in_service_queue)
		bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
				false, BFQQE_PREEMPTED);

	if (bfq_bfqq_busy(bfqq))
		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
	else if (entity->on_st_or_in_serv)
		bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
	bfqg_and_blkg_put(old_parent);

	if (entity->parent &&
	    entity->parent->last_bfqq_created == bfqq)
		entity->parent->last_bfqq_created = NULL;
	else if (bfqd->last_bfqq_created == bfqq)
		bfqd->last_bfqq_created = NULL;

	entity->parent = bfqg->my_entity;
	entity->sched_data = &bfqg->sched_data;
	/* pin down bfqg and its associated blkg */
	bfqg_and_blkg_get(bfqg);

	if (has_pending_reqs)
		bfq_add_bfqq_in_groups_with_pending_reqs(bfqq);

	if (bfq_bfqq_busy(bfqq)) {
		if (unlikely(!bfqd->nonrot_with_queueing))
			bfq_pos_tree_add_move(bfqd, bfqq);
		bfq_activate_bfqq(bfqd, bfqq);
	}

	if (!bfqd->in_service_queue && !bfqd->rq_in_driver)
		bfq_schedule_dispatch(bfqd);
	/* release extra ref taken above, bfqq may happen to be freed now */
	bfq_put_queue(bfqq);
}

/**
 * __bfq_bic_change_cgroup - move @bic to @bfqg.
 * @bfqd: the queue descriptor.
 * @bic: the bic to move.
 * @bfqg: the group to move to.
 *
 * Move bic to blkcg, assuming that bfqd->lock is held; this makes sure
 * that the reference to the cgroup is valid across the call (see
 * comments in bfq_bic_update_cgroup on this issue)
 */
static void __bfq_bic_change_cgroup(struct bfq_data *bfqd,
				    struct bfq_io_cq *bic,
				    struct bfq_group *bfqg)
{
	struct bfq_queue *async_bfqq = bic_to_bfqq(bic, false);
	struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, true);
	struct bfq_entity *entity;

	if (async_bfqq) {
		entity = &async_bfqq->entity;

		if (entity->sched_data != &bfqg->sched_data) {
			bic_set_bfqq(bic, NULL, false);
			bfq_release_process_ref(bfqd, async_bfqq);
		}
	}

	if (sync_bfqq) {
		if (!sync_bfqq->new_bfqq && !bfq_bfqq_coop(sync_bfqq)) {
			/* We are the only user of this bfqq, just move it */
			if (sync_bfqq->entity.sched_data != &bfqg->sched_data)
				bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
		} else {
			struct bfq_queue *bfqq;

			/*
			 * The queue was merged to a different queue. Check
			 * that the merge chain still belongs to the same
			 * cgroup.
			 */
			for (bfqq = sync_bfqq; bfqq; bfqq = bfqq->new_bfqq)
				if (bfqq->entity.sched_data !=
				    &bfqg->sched_data)
					break;
			if (bfqq) {
				/*
				 * Some queue changed cgroup so the merge is
				 * not valid anymore. We cannot easily just
				 * cancel the merge (by clearing new_bfqq) as
				 * there may be other processes using this
				 * queue and holding refs to all queues below
				 * sync_bfqq->new_bfqq. Similarly if the merge
				 * already happened, we need to detach from
				 * bfqq now so that we cannot merge bio to a
				 * request from the old cgroup.
				 */
				bfq_put_cooperator(sync_bfqq);
				bfq_release_process_ref(bfqd, sync_bfqq);
				bic_set_bfqq(bic, NULL, true);
			}
		}
	}
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
{
	struct bfq_data *bfqd = bic_to_bfqd(bic);
	struct bfq_group *bfqg = bfq_bio_bfqg(bfqd, bio);
	uint64_t serial_nr;

	serial_nr = bfqg_to_blkg(bfqg)->blkcg->css.serial_nr;

	/*
	 * Check whether blkcg has changed. The condition may trigger
	 * spuriously on a newly created cic but there's no harm.
	 */
	if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
		return;

	/*
	 * New cgroup for this process. Make sure it is linked to the bfq
	 * internal cgroup hierarchy.
	 */
	bfq_link_bfqg(bfqd, bfqg);
	__bfq_bic_change_cgroup(bfqd, bic, bfqg);
	/*
	 * Update blkg_path for bfq_log_* functions. We cache this
	 * path, and update it here, for the following
	 * reasons. Operations on blkg objects in blk-cgroup are
	 * protected with the request_queue lock, and not with the
	 * lock that protects the instances of this scheduler
	 * (bfqd->lock). This exposes BFQ to the following sort of
	 * race.
	 *
	 * The blkg_lookup performed in bfq_get_queue, protected
	 * through rcu, may happen to return the address of a copy of
	 * the original blkg. If this is the case, then the
	 * bfqg_and_blkg_get performed in bfq_get_queue, to pin down
	 * the blkg, is useless: it does not prevent blk-cgroup code
	 * from destroying both the original blkg and all objects
	 * directly or indirectly referred by the copy of the
	 * blkg.
	 *
	 * On the bright side, destroy operations on a blkg invoke, as
	 * a first step, hooks of the scheduler associated with the
	 * blkg. And these hooks are executed with bfqd->lock held for
	 * BFQ. As a consequence, for any blkg associated with the
	 * request queue this instance of the scheduler is attached
	 * to, we are guaranteed that such a blkg is not destroyed, and
	 * that all the pointers it contains are consistent, while we
	 * are holding bfqd->lock. A blkg_lookup performed with
	 * bfqd->lock held then returns a fully consistent blkg, which
	 * remains consistent as long as this lock is held.
	 *
	 * Thanks to the last fact, and to the fact that: (1) bfqg has
	 * been obtained through a blkg_lookup in the above
	 * assignment, and (2) bfqd->lock is being held, here we can
	 * safely use the policy data for the involved blkg (i.e., the
	 * field bfqg->pd) to get to the blkg associated with bfqg,
	 * and then we can safely use any field of blkg. After we
	 * release bfqd->lock, even just getting blkg through this
	 * bfqg may cause dangling references to be traversed, as
	 * bfqg->pd may not exist any more.
	 *
	 * In view of the above facts, here we cache, in the bfqg, any
	 * blkg data we may need for this bic, and for its associated
	 * bfq_queue. As of now, we need to cache only the path of the
	 * blkg, which is used in the bfq_log_* functions.
	 *
	 * Finally, note that bfqg itself needs to be protected from
	 * destruction on the blkg_free of the original blkg (which
	 * invokes bfq_pd_free). We use an additional private
	 * refcounter for bfqg, to let it disappear only after no
	 * bfq_queue refers to it any longer.
	 */
	blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
	bic->blkcg_serial_nr = serial_nr;
}

/**
 * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
 * @st: the service tree being flushed.
 */
static void bfq_flush_idle_tree(struct bfq_service_tree *st)
{
	struct bfq_entity *entity = st->first_idle;

	for (; entity ; entity = st->first_idle)
		__bfq_deactivate_entity(entity, false);
}

/**
 * bfq_reparent_leaf_entity - move leaf entity to the root_group.
 * @bfqd: the device data structure with the root group.
 * @entity: the entity to move, if entity is a leaf; or the parent entity
 *	    of an active leaf entity to move, if entity is not a leaf.
 * @ioprio_class: I/O priority class to reparent.
 */
static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
				     struct bfq_entity *entity,
				     int ioprio_class)
{
	struct bfq_queue *bfqq;
	struct bfq_entity *child_entity = entity;

	while (child_entity->my_sched_data) { /* leaf not reached yet */
		struct bfq_sched_data *child_sd = child_entity->my_sched_data;
		struct bfq_service_tree *child_st = child_sd->service_tree +
			ioprio_class;
		struct rb_root *child_active = &child_st->active;

		child_entity = bfq_entity_of(rb_first(child_active));

		if (!child_entity)
			child_entity = child_sd->in_service_entity;
	}

	bfqq = bfq_entity_to_bfqq(child_entity);
	bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
}

/**
 * bfq_reparent_active_queues - move to the root group all active queues.
 * @bfqd: the device data structure with the root group.
 * @bfqg: the group to move from.
 * @st: the service tree to start the search from.
 * @ioprio_class: I/O priority class to reparent.
 */
static void bfq_reparent_active_queues(struct bfq_data *bfqd,
				       struct bfq_group *bfqg,
				       struct bfq_service_tree *st,
				       int ioprio_class)
{
	struct rb_root *active = &st->active;
	struct bfq_entity *entity;

	while ((entity = bfq_entity_of(rb_first(active))))
		bfq_reparent_leaf_entity(bfqd, entity, ioprio_class);

	if (bfqg->sched_data.in_service_entity)
		bfq_reparent_leaf_entity(bfqd,
					 bfqg->sched_data.in_service_entity,
					 ioprio_class);
}

/**
 * bfq_pd_offline - deactivate the entity associated with @pd,
 *		    and reparent its child entities.
 * @pd: descriptor of the policy going offline.
 *
 * blkio already grabs the queue_lock for us, so no need to use
 * RCU-based magic
 */
static void bfq_pd_offline(struct blkg_policy_data *pd)
{
	struct bfq_service_tree *st;
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	struct bfq_data *bfqd = bfqg->bfqd;
	struct bfq_entity *entity = bfqg->my_entity;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&bfqd->lock, flags);

	if (!entity) /* root group */
		goto put_async_queues;

	/*
	 * Empty all service_trees belonging to this group before
	 * deactivating the group itself.
	 */
	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
		st = bfqg->sched_data.service_tree + i;

		/*
		 * It may happen that some queues are still active
		 * (busy) upon group destruction (if the corresponding
		 * processes have been forced to terminate). We move
		 * all the leaf entities corresponding to these queues
		 * to the root_group.
		 * Also, it may happen that the group has an entity
		 * in service, which is disconnected from the active
		 * tree: it must be moved, too.
		 * There is no need to put the sync queues, as the
		 * scheduler has taken no reference.
		 */
		bfq_reparent_active_queues(bfqd, bfqg, st, i);

		/*
		 * The idle tree may still contain bfq_queues
		 * belonging to exited tasks because they never
		 * migrated to a different cgroup from the one being
		 * destroyed now. In addition, even
		 * bfq_reparent_active_queues() may happen to add some
		 * entities to the idle tree. It happens if, in some
		 * of the calls to bfq_bfqq_move() performed by
		 * bfq_reparent_active_queues(), the queue to move is
		 * empty and gets expired.
		 */
		bfq_flush_idle_tree(st);
	}

	__bfq_deactivate_entity(entity, false);

put_async_queues:
	bfq_put_async_queues(bfqd, bfqg);
	bfqg->online = false;

	spin_unlock_irqrestore(&bfqd->lock, flags);
	/*
	 * @blkg is going offline and will be ignored by
	 * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
	 * that they don't get lost. If IOs complete after this point, the
	 * stats for them will be lost. Oh well...
	 */
	bfqg_stats_xfer_dead(bfqg);
}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
	struct blkcg_gq *blkg;

	list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		bfq_end_wr_async_queues(bfqd, bfqg);
	}
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

static int bfq_io_show_weight_legacy(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	unsigned int val = 0;

	if (bfqgd)
		val = bfqgd->weight;

	seq_printf(sf, "%u\n", val);

	return 0;
}

static u64 bfqg_prfill_weight_device(struct seq_file *sf,
				     struct blkg_policy_data *pd, int off)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	if (!bfqg->entity.dev_weight)
		return 0;
	return __blkg_prfill_u64(sf, pd, bfqg->entity.dev_weight);
}

static int bfq_io_show_weight(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);

	seq_printf(sf, "default %u\n", bfqgd->weight);
	blkcg_print_blkgs(sf, blkcg, bfqg_prfill_weight_device,
			  &blkcg_policy_bfq, 0, false);
	return 0;
}

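/*
 * A zero dev_weight means "no per-device weight": bfq_io_show_weight()
 * above then omits the device line, and bfq_group_set_weight() below
 * falls back to the blkcg-wide @weight.
 */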
static void bfq_group_set_weight(struct bfq_group *bfqg, u64 weight, u64 dev_weight)
{
	weight = dev_weight ?: weight;

	bfqg->entity.dev_weight = dev_weight;
	/*
	 * Setting the prio_changed flag of the entity
	 * to 1 with new_weight == weight would re-set
	 * the value of the weight to its ioprio mapping.
	 * Set the flag only if necessary.
	 */
	if ((unsigned short)weight != bfqg->entity.new_weight) {
		bfqg->entity.new_weight = (unsigned short)weight;
		/*
		 * Make sure that the above new value has been
		 * stored in bfqg->entity.new_weight before
		 * setting the prio_changed flag. In fact,
		 * this flag may be read asynchronously (in
		 * critical sections protected by a different
		 * lock than that held here), and finding this
		 * flag set may cause the execution of the code
		 * for updating parameters whose value may
		 * depend also on bfqg->entity.new_weight (in
		 * __bfq_entity_update_weight_prio).
		 * This barrier makes sure that the new value
		 * of bfqg->entity.new_weight is correctly
		 * seen in that code.
		 */
		smp_wmb();
		bfqg->entity.prio_changed = 1;
	}
}

static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
				    struct cftype *cftype,
				    u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	struct blkcg_gq *blkg;
	int ret = -ERANGE;

	if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
		return ret;

	ret = 0;
	spin_lock_irq(&blkcg->lock);
	bfqgd->weight = (unsigned short)val;
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		if (bfqg)
			bfq_group_set_weight(bfqg, val, 0);
	}
	spin_unlock_irq(&blkcg->lock);

	return ret;
}

static ssize_t bfq_io_set_device_weight(struct kernfs_open_file *of,
					char *buf, size_t nbytes,
					loff_t off)
{
	int ret;
	struct blkg_conf_ctx ctx;
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct bfq_group *bfqg;
	u64 v;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_bfq, buf, &ctx);
	if (ret)
		return ret;

	if (sscanf(ctx.body, "%llu", &v) == 1) {
		/* require "default" on dfl */
		ret = -ERANGE;
		if (!v)
			goto out;
	} else if (!strcmp(strim(ctx.body), "default")) {
		v = 0;
	} else {
		ret = -EINVAL;
		goto out;
	}

	bfqg = blkg_to_bfqg(ctx.blkg);

	ret = -ERANGE;
	if (!v || (v >= BFQ_MIN_WEIGHT && v <= BFQ_MAX_WEIGHT)) {
		bfq_group_set_weight(bfqg, bfqg->entity.weight, v);
		ret = 0;
	}
out:
	blkg_conf_finish(&ctx);
	return ret ?: nbytes;
}

static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
				 char *buf, size_t nbytes,
				 loff_t off)
{
	char *endp;
	int ret;
	u64 v;

	buf = strim(buf);

	/* "WEIGHT" or "default WEIGHT" sets the default weight */
	v = simple_strtoull(buf, &endp, 0);
	if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) {
		ret = bfq_io_set_weight_legacy(of_css(of), NULL, v);
		return ret ?: nbytes;
	}

	return bfq_io_set_device_weight(of, buf, nbytes, off);
}
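
/*
 * So writing "100" or "default 100" updates the blkcg-wide default
 * weight, while anything else (e.g. a "MAJ:MIN WEIGHT" string) is
 * handed to bfq_io_set_device_weight(), where blkg_conf_prep()
 * resolves the device part and the remainder sets ("WEIGHT") or
 * clears ("default") the per-device weight.
 */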

static int bfqg_print_rwstat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, true);
	return 0;
}

static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
					struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat_sample sum;

	blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_bfq, off, &sum);
	return __blkg_prfill_rwstat(sf, pd, &sum);
}

static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, true);
	return 0;
}

#ifdef CONFIG_BFQ_CGROUP_DEBUG
static int bfqg_print_stat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, false);
	return 0;
}

static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct blkcg_gq *blkg = pd_to_blkg(pd);
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	u64 sum = 0;

	lockdep_assert_held(&blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct bfq_stat *stat;

		if (!pos_blkg->online)
			continue;

		stat = (void *)blkg_to_pd(pos_blkg, &blkcg_policy_bfq) + off;
		sum += bfq_stat_read(stat) + atomic64_read(&stat->aux_cnt);
	}
	rcu_read_unlock();

	return __blkg_prfill_u64(sf, pd, sum);
}

static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, false);
	return 0;
}

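/*
 * The "sectors" helpers below report cumulative I/O in 512-byte
 * sectors, hence the ">> 9" applied to the byte counters.
 */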
static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	struct bfq_group *bfqg = blkg_to_bfqg(pd->blkg);
	u64 sum = blkg_rwstat_total(&bfqg->stats.bytes);

	return __blkg_prfill_u64(sf, pd, sum >> 9);
}

static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
	return 0;
}

static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
					 struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat_sample tmp;

	blkg_rwstat_recursive_sum(pd->blkg, &blkcg_policy_bfq,
			offsetof(struct bfq_group, stats.bytes), &tmp);

	return __blkg_prfill_u64(sf, pd,
		(tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE]) >> 9);
}

static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
			  false);
	return 0;
}

static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	u64 samples = bfq_stat_read(&bfqg->stats.avg_queue_size_samples);
	u64 v = 0;

	if (samples) {
		v = bfq_stat_read(&bfqg->stats.avg_queue_size_sum);
		v = div64_u64(v, samples);
	}
	__blkg_prfill_u64(sf, pd, v);
	return 0;
}

/* print avg_queue_size */
static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_avg_queue_size, &blkcg_policy_bfq,
			  0, false);
	return 0;
}
#endif /* CONFIG_BFQ_CGROUP_DEBUG */

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	int ret;

	ret = blkcg_activate_policy(bfqd->queue, &blkcg_policy_bfq);
	if (ret)
		return NULL;

	return blkg_to_bfqg(bfqd->queue->root_blkg);
}

struct blkcg_policy blkcg_policy_bfq = {
	.dfl_cftypes		= bfq_blkg_files,
	.legacy_cftypes		= bfq_blkcg_legacy_files,

	.cpd_alloc_fn		= bfq_cpd_alloc,
	.cpd_init_fn		= bfq_cpd_init,
	.cpd_bind_fn		= bfq_cpd_init,
	.cpd_free_fn		= bfq_cpd_free,

	.pd_alloc_fn		= bfq_pd_alloc,
	.pd_init_fn		= bfq_pd_init,
	.pd_offline_fn		= bfq_pd_offline,
	.pd_free_fn		= bfq_pd_free,
	.pd_reset_stats_fn	= bfq_pd_reset_stats,
};

struct cftype bfq_blkcg_legacy_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight_legacy,
		.write_u64 = bfq_io_set_weight_legacy,
	},
	{
		.name = "bfq.weight_device",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write = bfq_io_set_weight,
	},

	/* statistics, covering only the tasks in the bfqg */
	{
		.name = "bfq.io_service_bytes",
		.private = offsetof(struct bfq_group, stats.bytes),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_serviced",
		.private = offsetof(struct bfq_group, stats.ios),
		.seq_show = bfqg_print_rwstat,
	},
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	{
		.name = "bfq.time",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.sectors",
		.seq_show = bfqg_print_stat_sectors,
	},
	{
		.name = "bfq.io_service_time",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_wait_time",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_merged",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_queued",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat,
	},
#endif /* CONFIG_BFQ_CGROUP_DEBUG */

	/* the same statistics which cover the bfqg and its descendants */
	{
		.name = "bfq.io_service_bytes_recursive",
		.private = offsetof(struct bfq_group, stats.bytes),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_serviced_recursive",
		.private = offsetof(struct bfq_group, stats.ios),
		.seq_show = bfqg_print_rwstat_recursive,
	},
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	{
		.name = "bfq.time_recursive",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat_recursive,
	},
	{
		.name = "bfq.sectors_recursive",
		.seq_show = bfqg_print_stat_sectors_recursive,
	},
	{
		.name = "bfq.io_service_time_recursive",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_wait_time_recursive",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_merged_recursive",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_queued_recursive",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.avg_queue_size",
		.seq_show = bfqg_print_avg_queue_size,
	},
	{
		.name = "bfq.group_wait_time",
		.private = offsetof(struct bfq_group, stats.group_wait_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.idle_time",
		.private = offsetof(struct bfq_group, stats.idle_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.empty_time",
		.private = offsetof(struct bfq_group, stats.empty_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.dequeue",
		.private = offsetof(struct bfq_group, stats.dequeue),
		.seq_show = bfqg_print_stat,
	},
#endif /* CONFIG_BFQ_CGROUP_DEBUG */
	{ }	/* terminate */
};

struct cftype bfq_blkg_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write = bfq_io_set_weight,
	},
	{} /* terminate */
};

#else	/* CONFIG_BFQ_GROUP_IOSCHED */

void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg) {}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
	}
	entity->sched_data = &bfqg->sched_data;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
{
	return bfqd->root_group;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	return bfqq->bfqd->root_group;
}

void bfqg_and_blkg_put(struct bfq_group *bfqg) {}

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	struct bfq_group *bfqg;
	int i;

	bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
	if (!bfqg)
		return NULL;

	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
		bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;

	return bfqg;
}
#endif	/* CONFIG_BFQ_GROUP_IOSCHED */