// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * cgroups support for the BFQ I/O scheduler.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/cgroup.h>
#include <linux/ktime.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "elevator.h"
#include "bfq-iosched.h"

#ifdef CONFIG_BFQ_CGROUP_DEBUG
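/*
 * A bfq_stat pairs a per-CPU counter (@cpu_cnt) with an auxiliary
 * atomic count (@aux_cnt); the aux count absorbs the totals of groups
 * that go offline, so that recursive statistics survive their death
 * (see bfq_stat_add_aux() and bfqg_stats_xfer_dead() below).
 */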
static int bfq_stat_init(struct bfq_stat *stat, gfp_t gfp)
{
	int ret;

	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
	if (ret)
		return ret;

	atomic64_set(&stat->aux_cnt, 0);
	return 0;
}

static void bfq_stat_exit(struct bfq_stat *stat)
{
	percpu_counter_destroy(&stat->cpu_cnt);
}

/**
 * bfq_stat_add - add a value to a bfq_stat
 * @stat: target bfq_stat
 * @val: value to add
 *
 * Add @val to @stat. The caller must ensure that IRQs on the same CPU
 * don't re-enter this function for the same counter.
 */
static inline void bfq_stat_add(struct bfq_stat *stat, uint64_t val)
{
	percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * bfq_stat_read - read the current value of a bfq_stat
 * @stat: bfq_stat to read
 */
static inline uint64_t bfq_stat_read(struct bfq_stat *stat)
{
	return percpu_counter_sum_positive(&stat->cpu_cnt);
}

/**
 * bfq_stat_reset - reset a bfq_stat
 * @stat: bfq_stat to reset
 */
static inline void bfq_stat_reset(struct bfq_stat *stat)
{
	percpu_counter_set(&stat->cpu_cnt, 0);
	atomic64_set(&stat->aux_cnt, 0);
}

/**
 * bfq_stat_add_aux - add a bfq_stat into another's aux count
 * @to: the destination bfq_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void bfq_stat_add_aux(struct bfq_stat *to,
				    struct bfq_stat *from)
{
	atomic64_add(bfq_stat_read(from) + atomic64_read(&from->aux_cnt),
		     &to->aux_cnt);
}

/**
 * blkg_prfill_stat - prfill callback for bfq_stat
 * @sf: seq_file to print to
 * @pd: policy private data of interest
 * @off: offset to the bfq_stat in @pd
 *
 * prfill callback for printing a bfq_stat.
 */
static u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd,
			    int off)
{
	return __blkg_prfill_u64(sf, pd, bfq_stat_read((void *)pd + off));
}

/* bfqg stats flags */
enum bfqg_stats_flags {
	BFQG_stats_waiting = 0,
	BFQG_stats_idling,
	BFQG_stats_empty,
};

#define BFQG_FLAG_FNS(name)						\
static void bfqg_stats_mark_##name(struct bfqg_stats *stats)		\
{									\
	stats->flags |= (1 << BFQG_stats_##name);			\
}									\
static void bfqg_stats_clear_##name(struct bfqg_stats *stats)		\
{									\
	stats->flags &= ~(1 << BFQG_stats_##name);			\
}									\
static int bfqg_stats_##name(struct bfqg_stats *stats)			\
{									\
	return (stats->flags & (1 << BFQG_stats_##name)) != 0;		\
}									\

BFQG_FLAG_FNS(waiting)
BFQG_FLAG_FNS(idling)
BFQG_FLAG_FNS(empty)
#undef BFQG_FLAG_FNS

/* This should be called with the scheduler lock held. */
static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
{
	u64 now;

	if (!bfqg_stats_waiting(stats))
		return;

	now = blk_time_get_ns();
	if (now > stats->start_group_wait_time)
		bfq_stat_add(&stats->group_wait_time,
			     now - stats->start_group_wait_time);
	bfqg_stats_clear_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
						 struct bfq_group *curr_bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_waiting(stats))
		return;
	if (bfqg == curr_bfqg)
		return;
	stats->start_group_wait_time = blk_time_get_ns();
	bfqg_stats_mark_waiting(stats);
}

/* This should be called with the scheduler lock held. */
static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
{
	u64 now;

	if (!bfqg_stats_empty(stats))
		return;

	now = blk_time_get_ns();
	if (now > stats->start_empty_time)
		bfq_stat_add(&stats->empty_time,
			     now - stats->start_empty_time);
	bfqg_stats_clear_empty(stats);
}

void bfqg_stats_update_dequeue(struct bfq_group *bfqg)
{
	bfq_stat_add(&bfqg->stats.dequeue, 1);
}

void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (blkg_rwstat_total(&stats->queued))
		return;

	/*
	 * group is already marked empty. This can happen if bfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (bfqg_stats_empty(stats))
		return;

	stats->start_empty_time = blk_time_get_ns();
	bfqg_stats_mark_empty(stats);
}

void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_idling(stats)) {
		u64 now = blk_time_get_ns();

		if (now > stats->start_idle_time)
			bfq_stat_add(&stats->idle_time,
				     now - stats->start_idle_time);
		bfqg_stats_clear_idling(stats);
	}
}

void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	stats->start_idle_time = blk_time_get_ns();
	bfqg_stats_mark_idling(stats);
}

void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

	bfq_stat_add(&stats->avg_queue_size_sum,
		     blkg_rwstat_total(&stats->queued));
	bfq_stat_add(&stats->avg_queue_size_samples, 1);
	bfqg_stats_update_group_wait_time(stats);
}

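/*
 * Hooks invoked on I/O events (request queued, removed, merged or
 * completed); they feed the statistics exported only under
 * CONFIG_BFQ_CGROUP_DEBUG.
 */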
void bfqg_stats_update_io_add(struct bfq_group *bfqg, struct bfq_queue *bfqq,
			      blk_opf_t opf)
{
	blkg_rwstat_add(&bfqg->stats.queued, opf, 1);
	bfqg_stats_end_empty_time(&bfqg->stats);
	if (!(bfqq == bfqg->bfqd->in_service_queue))
		bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq));
}

void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf)
{
	blkg_rwstat_add(&bfqg->stats.queued, opf, -1);
}

void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf)
{
	blkg_rwstat_add(&bfqg->stats.merged, opf, 1);
}

void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, blk_opf_t opf)
{
	struct bfqg_stats *stats = &bfqg->stats;
	u64 now = blk_time_get_ns();

	if (now > io_start_time_ns)
		blkg_rwstat_add(&stats->service_time, opf,
				now - io_start_time_ns);
	if (io_start_time_ns > start_time_ns)
		blkg_rwstat_add(&stats->wait_time, opf,
				io_start_time_ns - start_time_ns);
}

#else /* CONFIG_BFQ_CGROUP_DEBUG */

void bfqg_stats_update_io_remove(struct bfq_group *bfqg, blk_opf_t opf) { }
void bfqg_stats_update_io_merged(struct bfq_group *bfqg, blk_opf_t opf) { }
void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, blk_opf_t opf) { }
void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { }
void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { }

#endif /* CONFIG_BFQ_CGROUP_DEBUG */

#ifdef CONFIG_BFQ_GROUP_IOSCHED

/*
 * blk-cgroup policy-related handlers
 * The following functions help in converting between blk-cgroup
 * internal structures and BFQ-specific structures.
 */

static struct bfq_group *pd_to_bfqg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct bfq_group, pd) : NULL;
}

struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
{
	return pd_to_blkg(&bfqg->pd);
}

static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
{
	return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
}

/*
 * bfq_group handlers
 * The following functions help in navigating the bfq_group hierarchy
 * by making it possible to find the parent of a bfq_group or the
 * bfq_group associated with a bfq_queue.
 */

static struct bfq_group *bfqg_parent(struct bfq_group *bfqg)
{
	struct blkcg_gq *pblkg = bfqg_to_blkg(bfqg)->parent;

	return pblkg ? blkg_to_bfqg(pblkg) : NULL;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	struct bfq_entity *group_entity = bfqq->entity.parent;

	return group_entity ? container_of(group_entity, struct bfq_group,
					   entity) :
			      bfqq->bfqd->root_group;
}

/*
 * The following two functions handle get and put of a bfq_group by
 * wrapping the related blk-cgroup hooks.
 */

static void bfqg_get(struct bfq_group *bfqg)
{
	refcount_inc(&bfqg->ref);
}

static void bfqg_put(struct bfq_group *bfqg)
{
	if (refcount_dec_and_test(&bfqg->ref))
		kfree(bfqg);
}

static void bfqg_and_blkg_get(struct bfq_group *bfqg)
{
	/* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
	bfqg_get(bfqg);

	blkg_get(bfqg_to_blkg(bfqg));
}

void bfqg_and_blkg_put(struct bfq_group *bfqg)
{
	blkg_put(bfqg_to_blkg(bfqg));

	bfqg_put(bfqg);
}

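/*
 * Account the size and count of @rq against @bfqg; these counters back
 * the bfq.io_service_bytes and bfq.io_serviced interface files.
 */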
void bfqg_stats_update_legacy_io(struct request_queue *q, struct request *rq)
{
	struct bfq_group *bfqg = blkg_to_bfqg(rq->bio->bi_blkg);

	if (!bfqg)
		return;

	blkg_rwstat_add(&bfqg->stats.bytes, rq->cmd_flags, blk_rq_bytes(rq));
	blkg_rwstat_add(&bfqg->stats.ios, rq->cmd_flags, 1);
}

/* @stats = 0 */
static void bfqg_stats_reset(struct bfqg_stats *stats)
{
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	/* queued stats shouldn't be cleared */
	blkg_rwstat_reset(&stats->merged);
	blkg_rwstat_reset(&stats->service_time);
	blkg_rwstat_reset(&stats->wait_time);
	bfq_stat_reset(&stats->time);
	bfq_stat_reset(&stats->avg_queue_size_sum);
	bfq_stat_reset(&stats->avg_queue_size_samples);
	bfq_stat_reset(&stats->dequeue);
	bfq_stat_reset(&stats->group_wait_time);
	bfq_stat_reset(&stats->idle_time);
	bfq_stat_reset(&stats->empty_time);
#endif
}

/* @to += @from */
static void bfqg_stats_add_aux(struct bfqg_stats *to, struct bfqg_stats *from)
{
	if (!to || !from)
		return;

#ifdef CONFIG_BFQ_CGROUP_DEBUG
	/* queued stats aren't transferred: they only reflect current state */
	blkg_rwstat_add_aux(&to->merged, &from->merged);
	blkg_rwstat_add_aux(&to->service_time, &from->service_time);
	blkg_rwstat_add_aux(&to->wait_time, &from->wait_time);
	bfq_stat_add_aux(&to->time, &from->time);
	bfq_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
	bfq_stat_add_aux(&to->avg_queue_size_samples,
			 &from->avg_queue_size_samples);
	bfq_stat_add_aux(&to->dequeue, &from->dequeue);
	bfq_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
	bfq_stat_add_aux(&to->idle_time, &from->idle_time);
	bfq_stat_add_aux(&to->empty_time, &from->empty_time);
#endif
}

/*
 * Transfer @bfqg's stats to its parent's aux counts so that the ancestors'
 * recursive stats can still account for the amount used by this bfqg after
 * it's gone.
 */
static void bfqg_stats_xfer_dead(struct bfq_group *bfqg)
{
	struct bfq_group *parent;

	if (!bfqg) /* root_group */
		return;

	parent = bfqg_parent(bfqg);

	lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);

	if (unlikely(!parent))
		return;

	bfqg_stats_add_aux(&parent->stats, &bfqg->stats);
	bfqg_stats_reset(&bfqg->stats);
}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
		/*
		 * Make sure that bfqg and its associated blkg do not
		 * disappear before entity.
		 */
		bfqg_and_blkg_get(bfqg);
	}
	entity->parent = bfqg->my_entity; /* NULL for root group */
	entity->sched_data = &bfqg->sched_data;
}

static void bfqg_stats_exit(struct bfqg_stats *stats)
{
	blkg_rwstat_exit(&stats->bytes);
	blkg_rwstat_exit(&stats->ios);
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	blkg_rwstat_exit(&stats->merged);
	blkg_rwstat_exit(&stats->service_time);
	blkg_rwstat_exit(&stats->wait_time);
	blkg_rwstat_exit(&stats->queued);
	bfq_stat_exit(&stats->time);
	bfq_stat_exit(&stats->avg_queue_size_sum);
	bfq_stat_exit(&stats->avg_queue_size_samples);
	bfq_stat_exit(&stats->dequeue);
	bfq_stat_exit(&stats->group_wait_time);
	bfq_stat_exit(&stats->idle_time);
	bfq_stat_exit(&stats->empty_time);
#endif
}

static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp)
{
	if (blkg_rwstat_init(&stats->bytes, gfp) ||
	    blkg_rwstat_init(&stats->ios, gfp))
		goto error;

#ifdef CONFIG_BFQ_CGROUP_DEBUG
	if (blkg_rwstat_init(&stats->merged, gfp) ||
	    blkg_rwstat_init(&stats->service_time, gfp) ||
	    blkg_rwstat_init(&stats->wait_time, gfp) ||
	    blkg_rwstat_init(&stats->queued, gfp) ||
	    bfq_stat_init(&stats->time, gfp) ||
	    bfq_stat_init(&stats->avg_queue_size_sum, gfp) ||
	    bfq_stat_init(&stats->avg_queue_size_samples, gfp) ||
	    bfq_stat_init(&stats->dequeue, gfp) ||
	    bfq_stat_init(&stats->group_wait_time, gfp) ||
	    bfq_stat_init(&stats->idle_time, gfp) ||
	    bfq_stat_init(&stats->empty_time, gfp))
		goto error;
#endif

	return 0;

error:
	bfqg_stats_exit(stats);
	return -ENOMEM;
}

static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
{
	return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
}

static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
{
	return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq));
}

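/*
 * blkcg_policy_data (cpd) is per-blkcg and carries the default weight;
 * blkg_policy_data (pd) is per-(blkcg, device) and is embedded in the
 * bfq_group itself.
 */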
static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
{
	struct bfq_group_data *bgd;

	bgd = kzalloc(sizeof(*bgd), gfp);
	if (!bgd)
		return NULL;

	bgd->weight = CGROUP_WEIGHT_DFL;
	return &bgd->pd;
}

static void bfq_cpd_free(struct blkcg_policy_data *cpd)
{
	kfree(cpd_to_bfqgd(cpd));
}

static struct blkg_policy_data *bfq_pd_alloc(struct gendisk *disk,
		struct blkcg *blkcg, gfp_t gfp)
{
	struct bfq_group *bfqg;

	bfqg = kzalloc_node(sizeof(*bfqg), gfp, disk->node_id);
	if (!bfqg)
		return NULL;

	if (bfqg_stats_init(&bfqg->stats, gfp)) {
		kfree(bfqg);
		return NULL;
	}

	/* see comments in bfq_bic_update_cgroup for why refcounting */
	refcount_set(&bfqg->ref, 1);
	return &bfqg->pd;
}

static void bfq_pd_init(struct blkg_policy_data *pd)
{
	struct blkcg_gq *blkg = pd_to_blkg(pd);
	struct bfq_group *bfqg = blkg_to_bfqg(blkg);
	struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
	struct bfq_entity *entity = &bfqg->entity;
	struct bfq_group_data *d = blkcg_to_bfqgd(blkg->blkcg);

	entity->orig_weight = entity->weight = entity->new_weight = d->weight;
	entity->my_sched_data = &bfqg->sched_data;
	entity->last_bfqq_created = NULL;

	bfqg->my_entity = entity; /*
				   * the root_group's will be set to NULL
				   * in bfq_init_queue()
				   */
	bfqg->bfqd = bfqd;
	bfqg->active_entities = 0;
	bfqg->num_queues_with_pending_reqs = 0;
	bfqg->rq_pos_tree = RB_ROOT;
}

static void bfq_pd_free(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_exit(&bfqg->stats);
	bfqg_put(bfqg);
}

static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	bfqg_stats_reset(&bfqg->stats);
}

static void bfq_group_set_parent(struct bfq_group *bfqg,
				 struct bfq_group *parent)
{
	struct bfq_entity *entity;

	entity = &bfqg->entity;
	entity->parent = parent->my_entity;
	entity->sched_data = &parent->sched_data;
}

static void bfq_link_bfqg(struct bfq_data *bfqd, struct bfq_group *bfqg)
{
	struct bfq_group *parent;
	struct bfq_entity *entity;

	/*
	 * Update chain of bfq_groups as we might be handling a leaf group
	 * which, along with some of its relatives, has not been hooked yet
	 * to the private hierarchy of BFQ.
	 */
	entity = &bfqg->entity;
	for_each_entity(entity) {
		struct bfq_group *curr_bfqg = container_of(entity,
						struct bfq_group, entity);
		if (curr_bfqg != bfqd->root_group) {
			parent = bfqg_parent(curr_bfqg);
			if (!parent)
				parent = bfqd->root_group;
			bfq_group_set_parent(curr_bfqg, parent);
		}
	}
}

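/*
 * Look up the bfq_group that @bio should be charged to: walk up the
 * blkg hierarchy past offline groups, falling back to the root group
 * if no online group is found.
 */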
struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
{
	struct blkcg_gq *blkg = bio->bi_blkg;
	struct bfq_group *bfqg;

	while (blkg) {
		if (!blkg->online) {
			blkg = blkg->parent;
			continue;
		}
		bfqg = blkg_to_bfqg(blkg);
		if (bfqg->pd.online) {
			bio_associate_blkg_from_css(bio, &blkg->blkcg->css);
			return bfqg;
		}
		blkg = blkg->parent;
	}
	bio_associate_blkg_from_css(bio,
				&bfqg_to_blkg(bfqd->root_group)->blkcg->css);
	return bfqd->root_group;
}

/**
 * bfq_bfqq_move - migrate @bfqq to @bfqg.
 * @bfqd: queue descriptor.
 * @bfqq: the queue to move.
 * @bfqg: the group to move to.
 *
 * Move @bfqq to @bfqg, deactivating it from its old group and reactivating
 * it on the new one. Avoid putting the entity on the old group idle tree.
 *
 * Must be called under the scheduler lock, to make sure that the blkg
 * owning @bfqg does not disappear (see comments in
 * bfq_bic_update_cgroup on guaranteeing the consistency of blkg
 * objects).
 */
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg)
{
	struct bfq_entity *entity = &bfqq->entity;
	struct bfq_group *old_parent = bfqq_group(bfqq);
	bool has_pending_reqs = false;

	/*
	 * There is no point in moving bfqq to the same group, which can
	 * happen when the root group is offlined.
	 */
	if (old_parent == bfqg)
		return;

	/*
	 * oom_bfqq is not allowed to move: it holds a reference to the
	 * root_group until elevator exit.
	 */
	if (bfqq == &bfqd->oom_bfqq)
		return;
	/*
	 * Get an extra reference to prevent bfqq from being freed in
	 * the next possible expire or deactivate.
	 */
	bfqq->ref++;

	if (entity->in_groups_with_pending_reqs) {
		has_pending_reqs = true;
		bfq_del_bfqq_in_groups_with_pending_reqs(bfqq);
	}

	/* If bfqq is empty, then bfq_bfqq_expire also invokes
	 * bfq_del_bfqq_busy, thereby removing bfqq and its entity
	 * from data structures related to the current group. Otherwise we
	 * need to remove bfqq explicitly with bfq_deactivate_bfqq, as
	 * we do below.
	 */
	if (bfqq == bfqd->in_service_queue)
		bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
				false, BFQQE_PREEMPTED);

	if (bfq_bfqq_busy(bfqq))
		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
	else if (entity->on_st_or_in_serv)
		bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
	bfqg_and_blkg_put(old_parent);

	bfq_reassign_last_bfqq(bfqq, NULL);
	entity->parent = bfqg->my_entity;
	entity->sched_data = &bfqg->sched_data;
	/* pin down bfqg and its associated blkg */
	bfqg_and_blkg_get(bfqg);

	if (has_pending_reqs)
		bfq_add_bfqq_in_groups_with_pending_reqs(bfqq);

	if (bfq_bfqq_busy(bfqq)) {
		if (unlikely(!bfqd->nonrot_with_queueing))
			bfq_pos_tree_add_move(bfqd, bfqq);
		bfq_activate_bfqq(bfqd, bfqq);
	}

	if (!bfqd->in_service_queue && !bfqd->tot_rq_in_driver)
		bfq_schedule_dispatch(bfqd);
	/* release the extra ref taken above; bfqq may be freed now */
	bfq_put_queue(bfqq);
}

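/*
 * Move the sync queue @sync_bfqq to @bfqg, unless it has been merged
 * into a queue chain; in that case, if some queue in the chain already
 * changed cgroup, detach @bic from the chain instead.
 */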
static void bfq_sync_bfqq_move(struct bfq_data *bfqd,
			       struct bfq_queue *sync_bfqq,
			       struct bfq_io_cq *bic,
			       struct bfq_group *bfqg,
			       unsigned int act_idx)
{
	struct bfq_queue *bfqq;

	if (!sync_bfqq->new_bfqq && !bfq_bfqq_coop(sync_bfqq)) {
		/* We are the only user of this bfqq, just move it */
		if (sync_bfqq->entity.sched_data != &bfqg->sched_data)
			bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
		return;
	}

	/*
	 * The queue was merged to a different queue. Check
	 * that the merge chain still belongs to the same
	 * cgroup.
	 */
	for (bfqq = sync_bfqq; bfqq; bfqq = bfqq->new_bfqq)
		if (bfqq->entity.sched_data != &bfqg->sched_data)
			break;
	if (bfqq) {
		/*
		 * Some queue changed cgroup so the merge is not valid
		 * anymore. We cannot easily just cancel the merge (by
		 * clearing new_bfqq) as there may be other processes
		 * using this queue and holding refs to all queues
		 * below sync_bfqq->new_bfqq. Similarly if the merge
		 * already happened, we need to detach from bfqq now
		 * so that we cannot merge bio to a request from the
		 * old cgroup.
		 */
		bfq_put_cooperator(sync_bfqq);
		bic_set_bfqq(bic, NULL, true, act_idx);
		bfq_release_process_ref(bfqd, sync_bfqq);
	}
}

/**
 * __bfq_bic_change_cgroup - move @bic to @bfqg.
 * @bfqd: the queue descriptor.
 * @bic: the bic to move.
 * @bfqg: the group to move to.
 *
 * Move bic to bfqg, assuming that bfqd->lock is held; this makes sure
 * that the reference to the cgroup is valid across the call (see
 * comments in bfq_bic_update_cgroup on this issue).
 */
static void __bfq_bic_change_cgroup(struct bfq_data *bfqd,
				    struct bfq_io_cq *bic,
				    struct bfq_group *bfqg)
{
	unsigned int act_idx;

	for (act_idx = 0; act_idx < bfqd->num_actuators; act_idx++) {
		struct bfq_queue *async_bfqq = bic_to_bfqq(bic, false, act_idx);
		struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, true, act_idx);

		if (async_bfqq &&
		    async_bfqq->entity.sched_data != &bfqg->sched_data) {
			bic_set_bfqq(bic, NULL, false, act_idx);
			bfq_release_process_ref(bfqd, async_bfqq);
		}

		if (sync_bfqq)
			bfq_sync_bfqq_move(bfqd, sync_bfqq, bic, bfqg, act_idx);
	}
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
{
	struct bfq_data *bfqd = bic_to_bfqd(bic);
	struct bfq_group *bfqg = bfq_bio_bfqg(bfqd, bio);
	uint64_t serial_nr;

	serial_nr = bfqg_to_blkg(bfqg)->blkcg->css.serial_nr;

	/*
	 * Check whether blkcg has changed. The condition may trigger
	 * spuriously on a newly created cic but there's no harm.
	 */
	if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
		return;

	/*
	 * New cgroup for this process. Make sure it is linked to bfq internal
	 * cgroup hierarchy.
	 */
	bfq_link_bfqg(bfqd, bfqg);
	__bfq_bic_change_cgroup(bfqd, bic, bfqg);
	bic->blkcg_serial_nr = serial_nr;
}

/**
 * bfq_flush_idle_tree - deactivate any entity on the idle tree of @st.
 * @st: the service tree being flushed.
 */
static void bfq_flush_idle_tree(struct bfq_service_tree *st)
{
	struct bfq_entity *entity = st->first_idle;

	for (; entity ; entity = st->first_idle)
		__bfq_deactivate_entity(entity, false);
}

/**
 * bfq_reparent_leaf_entity - move leaf entity to the root_group.
 * @bfqd: the device data structure with the root group.
 * @entity: the entity to move, if entity is a leaf; or the parent entity
 *	    of an active leaf entity to move, if entity is not a leaf.
 * @ioprio_class: I/O priority class to reparent.
 */
static void bfq_reparent_leaf_entity(struct bfq_data *bfqd,
				     struct bfq_entity *entity,
				     int ioprio_class)
{
	struct bfq_queue *bfqq;
	struct bfq_entity *child_entity = entity;

	while (child_entity->my_sched_data) { /* leaf not reached yet */
		struct bfq_sched_data *child_sd = child_entity->my_sched_data;
		struct bfq_service_tree *child_st = child_sd->service_tree +
			ioprio_class;
		struct rb_root *child_active = &child_st->active;

		child_entity = bfq_entity_of(rb_first(child_active));

		if (!child_entity)
			child_entity = child_sd->in_service_entity;
	}

	bfqq = bfq_entity_to_bfqq(child_entity);
	bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);
}

/**
 * bfq_reparent_active_queues - move to the root group all active queues.
 * @bfqd: the device data structure with the root group.
 * @bfqg: the group to move from.
 * @st: the service tree to start the search from.
 * @ioprio_class: I/O priority class to reparent.
 */
static void bfq_reparent_active_queues(struct bfq_data *bfqd,
				       struct bfq_group *bfqg,
				       struct bfq_service_tree *st,
				       int ioprio_class)
{
	struct rb_root *active = &st->active;
	struct bfq_entity *entity;

	while ((entity = bfq_entity_of(rb_first(active))))
		bfq_reparent_leaf_entity(bfqd, entity, ioprio_class);

	if (bfqg->sched_data.in_service_entity)
		bfq_reparent_leaf_entity(bfqd,
					 bfqg->sched_data.in_service_entity,
					 ioprio_class);
}

/**
 * bfq_pd_offline - deactivate the entity associated with @pd,
 *		    and reparent its children entities.
 * @pd: descriptor of the policy going offline.
 *
 * blkio already grabs the queue_lock for us, so no need to use
 * RCU-based magic
 */
static void bfq_pd_offline(struct blkg_policy_data *pd)
{
	struct bfq_service_tree *st;
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	struct bfq_data *bfqd = bfqg->bfqd;
	struct bfq_entity *entity = bfqg->my_entity;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&bfqd->lock, flags);

	if (!entity) /* root group */
		goto put_async_queues;

	/*
	 * Empty all service_trees belonging to this group before
	 * deactivating the group itself.
	 */
	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
		st = bfqg->sched_data.service_tree + i;

		/*
		 * It may happen that some queues are still active
		 * (busy) upon group destruction (if the corresponding
		 * processes have been forced to terminate). We move
		 * all the leaf entities corresponding to these queues
		 * to the root_group.
		 * Also, it may happen that the group has an entity
		 * in service, which is disconnected from the active
		 * tree: it must be moved, too.
		 * There is no need to put the sync queues, as the
		 * scheduler has taken no reference.
		 */
		bfq_reparent_active_queues(bfqd, bfqg, st, i);

		/*
		 * The idle tree may still contain bfq_queues
		 * belonging to exited task because they never
		 * migrated to a different cgroup from the one being
		 * destroyed now. In addition, even
		 * bfq_reparent_active_queues() may happen to add some
		 * entities to the idle tree. It happens if, in some
		 * of the calls to bfq_bfqq_move() performed by
		 * bfq_reparent_active_queues(), the queue to move is
		 * empty and gets expired.
		 */
		bfq_flush_idle_tree(st);
	}

	__bfq_deactivate_entity(entity, false);

put_async_queues:
	bfq_put_async_queues(bfqd, bfqg);

	spin_unlock_irqrestore(&bfqd->lock, flags);
	/*
	 * @blkg is going offline and will be ignored by
	 * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
	 * that they don't get lost. If IOs complete after this point, the
	 * stats for them will be lost. Oh well...
	 */
	bfqg_stats_xfer_dead(bfqg);
}

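/*
 * End weight-raising for the async queues of all the groups on the
 * device, including the root group.
 */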
void bfq_end_wr_async(struct bfq_data *bfqd)
{
	struct blkcg_gq *blkg;

	list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		bfq_end_wr_async_queues(bfqd, bfqg);
	}
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

static int bfq_io_show_weight_legacy(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	unsigned int val = 0;

	if (bfqgd)
		val = bfqgd->weight;

	seq_printf(sf, "%u\n", val);

	return 0;
}

static u64 bfqg_prfill_weight_device(struct seq_file *sf,
				     struct blkg_policy_data *pd, int off)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);

	if (!bfqg->entity.dev_weight)
		return 0;
	return __blkg_prfill_u64(sf, pd, bfqg->entity.dev_weight);
}

static int bfq_io_show_weight(struct seq_file *sf, void *v)
{
	struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);

	seq_printf(sf, "default %u\n", bfqgd->weight);
	blkcg_print_blkgs(sf, blkcg, bfqg_prfill_weight_device,
			  &blkcg_policy_bfq, 0, false);
	return 0;
}

static void bfq_group_set_weight(struct bfq_group *bfqg, u64 weight, u64 dev_weight)
{
	weight = dev_weight ?: weight;

	bfqg->entity.dev_weight = dev_weight;
	/*
	 * Setting the prio_changed flag of the entity
	 * to 1 with new_weight == weight would re-set
	 * the value of the weight to its ioprio mapping.
	 * Set the flag only if necessary.
	 */
	if ((unsigned short)weight != bfqg->entity.new_weight) {
		bfqg->entity.new_weight = (unsigned short)weight;
		/*
		 * Make sure that the above new value has been
		 * stored in bfqg->entity.new_weight before
		 * setting the prio_changed flag. In fact,
		 * this flag may be read asynchronously (in
		 * critical sections protected by a different
		 * lock than that held here), and finding this
		 * flag set may cause the execution of the code
		 * for updating parameters whose value may
		 * depend also on bfqg->entity.new_weight (in
		 * __bfq_entity_update_weight_prio).
		 * This barrier makes sure that the new value
		 * of bfqg->entity.new_weight is correctly
		 * seen in that code.
		 */
		smp_wmb();
		bfqg->entity.prio_changed = 1;
	}
}

static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
				    struct cftype *cftype,
				    u64 val)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
	struct blkcg_gq *blkg;
	int ret = -ERANGE;

	if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
		return ret;

	ret = 0;
	spin_lock_irq(&blkcg->lock);
	bfqgd->weight = (unsigned short)val;
	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		struct bfq_group *bfqg = blkg_to_bfqg(blkg);

		if (bfqg)
			bfq_group_set_weight(bfqg, val, 0);
	}
	spin_unlock_irq(&blkcg->lock);

	return ret;
}

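/*
 * Set a per-device weight: the device part of the input is consumed by
 * blkg_conf_prep(), then a non-zero value in the body sets the weight
 * for that device, while "default" clears it.
 */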
static ssize_t bfq_io_set_device_weight(struct kernfs_open_file *of,
					char *buf, size_t nbytes,
					loff_t off)
{
	int ret;
	struct blkg_conf_ctx ctx;
	struct blkcg *blkcg = css_to_blkcg(of_css(of));
	struct bfq_group *bfqg;
	u64 v;

	blkg_conf_init(&ctx, buf);

	ret = blkg_conf_prep(blkcg, &blkcg_policy_bfq, &ctx);
	if (ret)
		goto out;

	if (sscanf(ctx.body, "%llu", &v) == 1) {
		/* require "default" on dfl */
		ret = -ERANGE;
		if (!v)
			goto out;
	} else if (!strcmp(strim(ctx.body), "default")) {
		v = 0;
	} else {
		ret = -EINVAL;
		goto out;
	}

	bfqg = blkg_to_bfqg(ctx.blkg);

	ret = -ERANGE;
	if (!v || (v >= BFQ_MIN_WEIGHT && v <= BFQ_MAX_WEIGHT)) {
		bfq_group_set_weight(bfqg, bfqg->entity.weight, v);
		ret = 0;
	}
out:
	blkg_conf_exit(&ctx);
	return ret ?: nbytes;
}

static ssize_t bfq_io_set_weight(struct kernfs_open_file *of,
				 char *buf, size_t nbytes,
				 loff_t off)
{
	char *endp;
	int ret;
	u64 v;

	buf = strim(buf);

	/* "WEIGHT" or "default WEIGHT" sets the default weight */
	v = simple_strtoull(buf, &endp, 0);
	if (*endp == '\0' || sscanf(buf, "default %llu", &v) == 1) {
		ret = bfq_io_set_weight_legacy(of_css(of), NULL, v);
		return ret ?: nbytes;
	}

	return bfq_io_set_device_weight(of, buf, nbytes, off);
}

static int bfqg_print_rwstat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_rwstat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, true);
	return 0;
}

static u64 bfqg_prfill_rwstat_recursive(struct seq_file *sf,
					struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat_sample sum;

	blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_bfq, off, &sum);
	return __blkg_prfill_rwstat(sf, pd, &sum);
}

static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_rwstat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, true);
	return 0;
}

#ifdef CONFIG_BFQ_CGROUP_DEBUG
static int bfqg_print_stat(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), blkg_prfill_stat,
			  &blkcg_policy_bfq, seq_cft(sf)->private, false);
	return 0;
}

static u64 bfqg_prfill_stat_recursive(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct blkcg_gq *blkg = pd_to_blkg(pd);
	struct blkcg_gq *pos_blkg;
	struct cgroup_subsys_state *pos_css;
	u64 sum = 0;

	lockdep_assert_held(&blkg->q->queue_lock);

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
		struct bfq_stat *stat;

		if (!pos_blkg->online)
			continue;

		stat = (void *)blkg_to_pd(pos_blkg, &blkcg_policy_bfq) + off;
		sum += bfq_stat_read(stat) + atomic64_read(&stat->aux_cnt);
	}
	rcu_read_unlock();

	return __blkg_prfill_u64(sf, pd, sum);
}

static int bfqg_print_stat_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_stat_recursive, &blkcg_policy_bfq,
			  seq_cft(sf)->private, false);
	return 0;
}

static u64 bfqg_prfill_sectors(struct seq_file *sf, struct blkg_policy_data *pd,
			       int off)
{
	struct bfq_group *bfqg = blkg_to_bfqg(pd->blkg);
	u64 sum = blkg_rwstat_total(&bfqg->stats.bytes);

	return __blkg_prfill_u64(sf, pd, sum >> 9);
}

static int bfqg_print_stat_sectors(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors, &blkcg_policy_bfq, 0, false);
	return 0;
}

static u64 bfqg_prfill_sectors_recursive(struct seq_file *sf,
					 struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat_sample tmp;

	blkg_rwstat_recursive_sum(pd->blkg, &blkcg_policy_bfq,
			offsetof(struct bfq_group, stats.bytes), &tmp);

	return __blkg_prfill_u64(sf, pd,
		(tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE]) >> 9);
}

static int bfqg_print_stat_sectors_recursive(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_sectors_recursive, &blkcg_policy_bfq, 0,
			  false);
	return 0;
}

static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct bfq_group *bfqg = pd_to_bfqg(pd);
	u64 samples = bfq_stat_read(&bfqg->stats.avg_queue_size_samples);
	u64 v = 0;

	if (samples) {
		v = bfq_stat_read(&bfqg->stats.avg_queue_size_sum);
		v = div64_u64(v, samples);
	}
	__blkg_prfill_u64(sf, pd, v);
	return 0;
}

/* print avg_queue_size */
static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v)
{
	blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
			  bfqg_prfill_avg_queue_size, &blkcg_policy_bfq,
			  0, false);
	return 0;
}
#endif /* CONFIG_BFQ_CGROUP_DEBUG */

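/*
 * Activate blkcg_policy_bfq on the device and return the bfq_group
 * attached to the root blkg; it becomes the scheduler's root group.
 */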
struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	int ret;

	ret = blkcg_activate_policy(bfqd->queue->disk, &blkcg_policy_bfq);
	if (ret)
		return NULL;

	return blkg_to_bfqg(bfqd->queue->root_blkg);
}

struct blkcg_policy blkcg_policy_bfq = {
	.dfl_cftypes		= bfq_blkg_files,
	.legacy_cftypes		= bfq_blkcg_legacy_files,

	.cpd_alloc_fn		= bfq_cpd_alloc,
	.cpd_free_fn		= bfq_cpd_free,

	.pd_alloc_fn		= bfq_pd_alloc,
	.pd_init_fn		= bfq_pd_init,
	.pd_offline_fn		= bfq_pd_offline,
	.pd_free_fn		= bfq_pd_free,
	.pd_reset_stats_fn	= bfq_pd_reset_stats,
};

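/* interface files for the legacy (cgroup v1) hierarchy */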
struct cftype bfq_blkcg_legacy_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight_legacy,
		.write_u64 = bfq_io_set_weight_legacy,
	},
	{
		.name = "bfq.weight_device",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write = bfq_io_set_weight,
	},

	/* statistics, covers only the tasks in the bfqg */
	{
		.name = "bfq.io_service_bytes",
		.private = offsetof(struct bfq_group, stats.bytes),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_serviced",
		.private = offsetof(struct bfq_group, stats.ios),
		.seq_show = bfqg_print_rwstat,
	},
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	{
		.name = "bfq.time",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.sectors",
		.seq_show = bfqg_print_stat_sectors,
	},
	{
		.name = "bfq.io_service_time",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_wait_time",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_merged",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat,
	},
	{
		.name = "bfq.io_queued",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat,
	},
#endif /* CONFIG_BFQ_CGROUP_DEBUG */

	/* the same statistics which cover the bfqg and its descendants */
	{
		.name = "bfq.io_service_bytes_recursive",
		.private = offsetof(struct bfq_group, stats.bytes),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_serviced_recursive",
		.private = offsetof(struct bfq_group, stats.ios),
		.seq_show = bfqg_print_rwstat_recursive,
	},
#ifdef CONFIG_BFQ_CGROUP_DEBUG
	{
		.name = "bfq.time_recursive",
		.private = offsetof(struct bfq_group, stats.time),
		.seq_show = bfqg_print_stat_recursive,
	},
	{
		.name = "bfq.sectors_recursive",
		.seq_show = bfqg_print_stat_sectors_recursive,
	},
	{
		.name = "bfq.io_service_time_recursive",
		.private = offsetof(struct bfq_group, stats.service_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_wait_time_recursive",
		.private = offsetof(struct bfq_group, stats.wait_time),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_merged_recursive",
		.private = offsetof(struct bfq_group, stats.merged),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.io_queued_recursive",
		.private = offsetof(struct bfq_group, stats.queued),
		.seq_show = bfqg_print_rwstat_recursive,
	},
	{
		.name = "bfq.avg_queue_size",
		.seq_show = bfqg_print_avg_queue_size,
	},
	{
		.name = "bfq.group_wait_time",
		.private = offsetof(struct bfq_group, stats.group_wait_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.idle_time",
		.private = offsetof(struct bfq_group, stats.idle_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.empty_time",
		.private = offsetof(struct bfq_group, stats.empty_time),
		.seq_show = bfqg_print_stat,
	},
	{
		.name = "bfq.dequeue",
		.private = offsetof(struct bfq_group, stats.dequeue),
		.seq_show = bfqg_print_stat,
	},
#endif /* CONFIG_BFQ_CGROUP_DEBUG */
	{ } /* terminate */
};

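/* interface files for the default (cgroup v2) hierarchy: only the weight */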
struct cftype bfq_blkg_files[] = {
	{
		.name = "bfq.weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.seq_show = bfq_io_show_weight,
		.write = bfq_io_set_weight,
	},
	{} /* terminate */
};

#else /* CONFIG_BFQ_GROUP_IOSCHED */

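/*
 * Stubs used when group scheduling is disabled: every queue belongs to
 * a single root group and the cgroup-related hooks become no-ops.
 */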
void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		   struct bfq_group *bfqg) {}

void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg)
{
	struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

	entity->weight = entity->new_weight;
	entity->orig_weight = entity->new_weight;
	if (bfqq) {
		bfqq->ioprio = bfqq->new_ioprio;
		bfqq->ioprio_class = bfqq->new_ioprio_class;
	}
	entity->sched_data = &bfqg->sched_data;
}

void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {}

void bfq_end_wr_async(struct bfq_data *bfqd)
{
	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
}

struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
{
	return bfqd->root_group;
}

struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
{
	return bfqq->bfqd->root_group;
}

void bfqg_and_blkg_put(struct bfq_group *bfqg) {}

struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
{
	struct bfq_group *bfqg;
	int i;

	bfqg = kmalloc_node(sizeof(*bfqg), GFP_KERNEL | __GFP_ZERO, node);
	if (!bfqg)
		return NULL;

	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
		bfqg->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;

	return bfqg;
}
#endif /* CONFIG_BFQ_GROUP_IOSCHED */