1 /* SPDX-License-Identifier: GPL-2.0
3 * IO cost model based controller.
7 * Copyright (C) 2019 Facebook
9 * One challenge of controlling IO resources is the lack of trivially
10 * observable cost metric. This is distinguished from CPU and memory where
11 * wallclock time and the number of bytes can serve as accurate enough
14 * Bandwidth and iops are the most commonly used metrics for IO devices but
15 * depending on the type and specifics of the device, different IO patterns
16 * easily lead to multiple orders of magnitude variations rendering them
17 * useless for the purpose of IO capacity distribution. While on-device
18 * time, with a lot of clutches, could serve as a useful approximation for
19 * non-queued rotational devices, this is no longer viable with modern
20 * devices, even the rotational ones.
22 * While there is no cost metric we can trivially observe, it isn't a
23 * complete mystery. For example, on a rotational device, seek cost
24 * dominates while a contiguous transfer contributes a smaller amount
25 * proportional to the size. If we can characterize at least the relative
26 * costs of these different types of IOs, it should be possible to
27 * implement a reasonable work-conserving proportional IO resource
32 * IO cost model estimates the cost of an IO given its basic parameters and
33 * history (e.g. the end sector of the last IO). The cost is measured in
34 * device time. If a given IO is estimated to cost 10ms, the device should
35 * be able to process ~100 of those IOs in a second.
37 * Currently, there's only one builtin cost model - linear. Each IO is
38 * classified as sequential or random and given a base cost accordingly.
39 * On top of that, a size cost proportional to the length of the IO is
40 * added. While simple, this model captures the operational
41 * characteristics of a wide varienty of devices well enough. Default
42 * parameters for several different classes of devices are provided and the
43 * parameters can be configured from userspace via
44 * /sys/fs/cgroup/io.cost.model.
46 * If needed, tools/cgroup/iocost_coef_gen.py can be used to generate
47 * device-specific coefficients.
51 * The device virtual time (vtime) is used as the primary control metric.
52 * The control strategy is composed of the following three parts.
54 * 2-1. Vtime Distribution
56 * When a cgroup becomes active in terms of IOs, its hierarchical share is
57 * calculated. Please consider the following hierarchy where the numbers
58 * inside parentheses denote the configured weights.
64 * A0 (w:100) A1 (w:100)
66 * If B is idle and only A0 and A1 are actively issuing IOs, as the two are
67 * of equal weight, each gets 50% share. If then B starts issuing IOs, B
68 * gets 300/(100+300) or 75% share, and A0 and A1 equally splits the rest,
69 * 12.5% each. The distribution mechanism only cares about these flattened
70 * shares. They're called hweights (hierarchical weights) and always add
71 * upto 1 (WEIGHT_ONE).
73 * A given cgroup's vtime runs slower in inverse proportion to its hweight.
74 * For example, with 12.5% weight, A0's time runs 8 times slower (100/12.5)
75 * against the device vtime - an IO which takes 10ms on the underlying
76 * device is considered to take 80ms on A0.
78 * This constitutes the basis of IO capacity distribution. Each cgroup's
79 * vtime is running at a rate determined by its hweight. A cgroup tracks
80 * the vtime consumed by past IOs and can issue a new IO if doing so
81 * wouldn't outrun the current device vtime. Otherwise, the IO is
82 * suspended until the vtime has progressed enough to cover it.
84 * 2-2. Vrate Adjustment
86 * It's unrealistic to expect the cost model to be perfect. There are too
87 * many devices and even on the same device the overall performance
88 * fluctuates depending on numerous factors such as IO mixture and device
89 * internal garbage collection. The controller needs to adapt dynamically.
91 * This is achieved by adjusting the overall IO rate according to how busy
92 * the device is. If the device becomes overloaded, we're sending down too
93 * many IOs and should generally slow down. If there are waiting issuers
94 * but the device isn't saturated, we're issuing too few and should
97 * To slow down, we lower the vrate - the rate at which the device vtime
98 * passes compared to the wall clock. For example, if the vtime is running
99 * at the vrate of 75%, all cgroups added up would only be able to issue
100 * 750ms worth of IOs per second, and vice-versa for speeding up.
102 * Device business is determined using two criteria - rq wait and
103 * completion latencies.
105 * When a device gets saturated, the on-device and then the request queues
106 * fill up and a bio which is ready to be issued has to wait for a request
107 * to become available. When this delay becomes noticeable, it's a clear
108 * indication that the device is saturated and we lower the vrate. This
109 * saturation signal is fairly conservative as it only triggers when both
110 * hardware and software queues are filled up, and is used as the default
113 * As devices can have deep queues and be unfair in how the queued commands
114 * are executed, soley depending on rq wait may not result in satisfactory
115 * control quality. For a better control quality, completion latency QoS
116 * parameters can be configured so that the device is considered saturated
117 * if N'th percentile completion latency rises above the set point.
119 * The completion latency requirements are a function of both the
120 * underlying device characteristics and the desired IO latency quality of
121 * service. There is an inherent trade-off - the tighter the latency QoS,
122 * the higher the bandwidth lossage. Latency QoS is disabled by default
123 * and can be set through /sys/fs/cgroup/io.cost.qos.
125 * 2-3. Work Conservation
127 * Imagine two cgroups A and B with equal weights. A is issuing a small IO
128 * periodically while B is sending out enough parallel IOs to saturate the
129 * device on its own. Let's say A's usage amounts to 100ms worth of IO
130 * cost per second, i.e., 10% of the device capacity. The naive
131 * distribution of half and half would lead to 60% utilization of the
132 * device, a significant reduction in the total amount of work done
133 * compared to free-for-all competition. This is too high a cost to pay
136 * To conserve the total amount of work done, we keep track of how much
137 * each active cgroup is actually using and yield part of its weight if
138 * there are other cgroups which can make use of it. In the above case,
139 * A's weight will be lowered so that it hovers above the actual usage and
140 * B would be able to use the rest.
142 * As we don't want to penalize a cgroup for donating its weight, the
143 * surplus weight adjustment factors in a margin and has an immediate
144 * snapback mechanism in case the cgroup needs more IO vtime for itself.
146 * Note that adjusting down surplus weights has the same effects as
147 * accelerating vtime for other cgroups and work conservation can also be
148 * implemented by adjusting vrate dynamically. However, squaring who can
149 * donate and should take back how much requires hweight propagations
150 * anyway making it easier to implement and understand as a separate
155 * Instead of debugfs or other clumsy monitoring mechanisms, this
156 * controller uses a drgn based monitoring script -
157 * tools/cgroup/iocost_monitor.py. For details on drgn, please see
158 * https://github.com/osandov/drgn. The output looks like the following.
160 * sdb RUN per=300ms cur_per=234.218:v203.695 busy= +1 vrate= 62.12%
161 * active weight hweight% inflt% dbt delay usages%
162 * test/a * 50/ 50 33.33/ 33.33 27.65 2 0*041 033:033:033
163 * test/b * 100/ 100 66.67/ 66.67 17.56 0 0*000 066:079:077
165 * - per : Timer period
166 * - cur_per : Internal wall and device vtime clock
167 * - vrate : Device virtual time rate against wall clock
168 * - weight : Surplus-adjusted and configured weights
169 * - hweight : Surplus-adjusted and configured hierarchical weights
170 * - inflt : The percentage of in-flight IO cost at the end of last period
171 * - del_ms : Deferred issuer delay induction level and duration
172 * - usages : Usage history
175 #include <linux/kernel.h>
176 #include <linux/module.h>
177 #include <linux/timer.h>
178 #include <linux/time64.h>
179 #include <linux/parser.h>
180 #include <linux/sched/signal.h>
181 #include <linux/blk-cgroup.h>
182 #include <asm/local.h>
183 #include <asm/local64.h>
184 #include "blk-rq-qos.h"
185 #include "blk-stat.h"
188 #ifdef CONFIG_TRACEPOINTS
190 /* copied from TRACE_CGROUP_PATH, see cgroup-internal.h */
191 #define TRACE_IOCG_PATH_LEN 1024
192 static DEFINE_SPINLOCK(trace_iocg_path_lock);
193 static char trace_iocg_path[TRACE_IOCG_PATH_LEN];
195 #define TRACE_IOCG_PATH(type, iocg, ...) \
197 unsigned long flags; \
198 if (trace_iocost_##type##_enabled()) { \
199 spin_lock_irqsave(&trace_iocg_path_lock, flags); \
200 cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup, \
201 trace_iocg_path, TRACE_IOCG_PATH_LEN); \
202 trace_iocost_##type(iocg, trace_iocg_path, \
204 spin_unlock_irqrestore(&trace_iocg_path_lock, flags); \
208 #else /* CONFIG_TRACE_POINTS */
209 #define TRACE_IOCG_PATH(type, iocg, ...) do { } while (0)
210 #endif /* CONFIG_TRACE_POINTS */
215 /* timer period is calculated from latency requirements, bound it */
216 MIN_PERIOD = USEC_PER_MSEC,
217 MAX_PERIOD = USEC_PER_SEC,
220 * iocg->vtime is targeted at 50% behind the device vtime, which
221 * serves as its IO credit buffer. Surplus weight adjustment is
222 * immediately canceled if the vtime margin runs below 10%.
226 MARGIN_TARGET_PCT = 50,
228 INUSE_ADJ_STEP_PCT = 25,
230 /* Have some play in timer operations */
233 /* 1/64k is granular enough and can easily be handled w/ u32 */
234 WEIGHT_ONE = 1 << 16,
237 * As vtime is used to calculate the cost of each IO, it needs to
238 * be fairly high precision. For example, it should be able to
239 * represent the cost of a single page worth of discard with
240 * suffificient accuracy. At the same time, it should be able to
241 * represent reasonably long enough durations to be useful and
242 * convenient during operation.
244 * 1s worth of vtime is 2^37. This gives us both sub-nanosecond
245 * granularity and days of wrap-around time even at extreme vrates.
247 VTIME_PER_SEC_SHIFT = 37,
248 VTIME_PER_SEC = 1LLU << VTIME_PER_SEC_SHIFT,
249 VTIME_PER_USEC = VTIME_PER_SEC / USEC_PER_SEC,
250 VTIME_PER_NSEC = VTIME_PER_SEC / NSEC_PER_SEC,
252 /* bound vrate adjustments within two orders of magnitude */
253 VRATE_MIN_PPM = 10000, /* 1% */
254 VRATE_MAX_PPM = 100000000, /* 10000% */
256 VRATE_MIN = VTIME_PER_USEC * VRATE_MIN_PPM / MILLION,
257 VRATE_CLAMP_ADJ_PCT = 4,
259 /* if IOs end up waiting for requests, issue less */
260 RQ_WAIT_BUSY_PCT = 5,
262 /* unbusy hysterisis */
266 * The effect of delay is indirect and non-linear and a huge amount of
267 * future debt can accumulate abruptly while unthrottled. Linearly scale
268 * up delay as debt is going up and then let it decay exponentially.
269 * This gives us quick ramp ups while delay is accumulating and long
270 * tails which can help reducing the frequency of debt explosions on
271 * unthrottle. The parameters are experimentally determined.
273 * The delay mechanism provides adequate protection and behavior in many
274 * cases. However, this is far from ideal and falls shorts on both
275 * fronts. The debtors are often throttled too harshly costing a
276 * significant level of fairness and possibly total work while the
277 * protection against their impacts on the system can be choppy and
280 * The shortcoming primarily stems from the fact that, unlike for page
281 * cache, the kernel doesn't have well-defined back-pressure propagation
282 * mechanism and policies for anonymous memory. Fully addressing this
283 * issue will likely require substantial improvements in the area.
285 MIN_DELAY_THR_PCT = 500,
286 MAX_DELAY_THR_PCT = 25000,
288 MAX_DELAY = 250 * USEC_PER_MSEC,
290 /* halve debts if avg usage over 100ms is under 50% */
292 DFGV_PERIOD = 100 * USEC_PER_MSEC,
294 /* don't let cmds which take a very long time pin lagging for too long */
295 MAX_LAGGING_PERIODS = 10,
297 /* switch iff the conditions are met for longer than this */
298 AUTOP_CYCLE_NSEC = 10LLU * NSEC_PER_SEC,
301 * Count IO size in 4k pages. The 12bit shift helps keeping
302 * size-proportional components of cost calculation in closer
303 * numbers of digits to per-IO cost components.
306 IOC_PAGE_SIZE = 1 << IOC_PAGE_SHIFT,
307 IOC_SECT_TO_PAGE_SHIFT = IOC_PAGE_SHIFT - SECTOR_SHIFT,
309 /* if apart further than 16M, consider randio for linear model */
310 LCOEF_RANDIO_PAGES = 4096,
319 /* io.cost.qos controls including per-dev enable of the whole controller */
326 /* io.cost.qos params */
337 /* io.cost.model controls */
344 /* builtin linear cost model coefficients */
374 u32 qos[NR_QOS_PARAMS];
375 u64 i_lcoefs[NR_I_LCOEFS];
376 u64 lcoefs[NR_LCOEFS];
377 u32 too_fast_vrate_pct;
378 u32 too_slow_vrate_pct;
394 struct ioc_pcpu_stat {
395 struct ioc_missed missed[2];
397 local64_t rq_wait_ns;
407 struct ioc_params params;
408 struct ioc_margins margins;
415 struct timer_list timer;
416 struct list_head active_iocgs; /* active cgroups */
417 struct ioc_pcpu_stat __percpu *pcpu_stat;
419 enum ioc_running running;
420 atomic64_t vtime_rate;
424 seqcount_spinlock_t period_seqcount;
425 u64 period_at; /* wallclock starttime */
426 u64 period_at_vtime; /* vtime starttime */
428 atomic64_t cur_period; /* inc'd each period */
429 int busy_level; /* saturation history */
431 bool weights_updated;
432 atomic_t hweight_gen; /* for lazy hweights */
434 /* debt forgivness */
437 u64 dfgv_usage_us_sum;
439 u64 autop_too_fast_at;
440 u64 autop_too_slow_at;
442 bool user_qos_params:1;
443 bool user_cost_model:1;
446 struct iocg_pcpu_stat {
447 local64_t abs_vusage;
457 /* per device-cgroup pair */
459 struct blkg_policy_data pd;
463 * A iocg can get its weight from two sources - an explicit
464 * per-device-cgroup configuration or the default weight of the
465 * cgroup. `cfg_weight` is the explicit per-device-cgroup
466 * configuration. `weight` is the effective considering both
469 * When an idle cgroup becomes active its `active` goes from 0 to
470 * `weight`. `inuse` is the surplus adjusted active weight.
471 * `active` and `inuse` are used to calculate `hweight_active` and
474 * `last_inuse` remembers `inuse` while an iocg is idle to persist
475 * surplus adjustments.
477 * `inuse` may be adjusted dynamically during period. `saved_*` are used
478 * to determine and track adjustments.
488 sector_t cursor; /* to detect randio */
491 * `vtime` is this iocg's vtime cursor which progresses as IOs are
492 * issued. If lagging behind device vtime, the delta represents
493 * the currently available IO budget. If running ahead, the
496 * `vtime_done` is the same but progressed on completion rather
497 * than issue. The delta behind `vtime` represents the cost of
498 * currently in-flight IOs.
501 atomic64_t done_vtime;
504 /* current delay in effect and when it started */
509 * The period this iocg was last active in. Used for deactivation
510 * and invalidating `vtime`.
512 atomic64_t active_period;
513 struct list_head active_list;
515 /* see __propagate_weights() and current_hweight() for details */
516 u64 child_active_sum;
518 u64 child_adjusted_sum;
522 u32 hweight_donating;
523 u32 hweight_after_donation;
525 struct list_head walk_list;
526 struct list_head surplus_list;
528 struct wait_queue_head waitq;
529 struct hrtimer waitq_timer;
531 /* timestamp at the latest activation */
535 struct iocg_pcpu_stat __percpu *pcpu_stat;
536 struct iocg_stat local_stat;
537 struct iocg_stat desc_stat;
538 struct iocg_stat last_stat;
539 u64 last_stat_abs_vusage;
545 /* this iocg's depth in the hierarchy and ancestors including self */
547 struct ioc_gq *ancestors[];
552 struct blkcg_policy_data cpd;
553 unsigned int dfl_weight;
564 struct wait_queue_entry wait;
570 struct iocg_wake_ctx {
576 static const struct ioc_params autop[] = {
579 [QOS_RLAT] = 250000, /* 250ms */
581 [QOS_MIN] = VRATE_MIN_PPM,
582 [QOS_MAX] = VRATE_MAX_PPM,
585 [I_LCOEF_RBPS] = 174019176,
586 [I_LCOEF_RSEQIOPS] = 41708,
587 [I_LCOEF_RRANDIOPS] = 370,
588 [I_LCOEF_WBPS] = 178075866,
589 [I_LCOEF_WSEQIOPS] = 42705,
590 [I_LCOEF_WRANDIOPS] = 378,
595 [QOS_RLAT] = 25000, /* 25ms */
597 [QOS_MIN] = VRATE_MIN_PPM,
598 [QOS_MAX] = VRATE_MAX_PPM,
601 [I_LCOEF_RBPS] = 245855193,
602 [I_LCOEF_RSEQIOPS] = 61575,
603 [I_LCOEF_RRANDIOPS] = 6946,
604 [I_LCOEF_WBPS] = 141365009,
605 [I_LCOEF_WSEQIOPS] = 33716,
606 [I_LCOEF_WRANDIOPS] = 26796,
611 [QOS_RLAT] = 25000, /* 25ms */
613 [QOS_MIN] = VRATE_MIN_PPM,
614 [QOS_MAX] = VRATE_MAX_PPM,
617 [I_LCOEF_RBPS] = 488636629,
618 [I_LCOEF_RSEQIOPS] = 8932,
619 [I_LCOEF_RRANDIOPS] = 8518,
620 [I_LCOEF_WBPS] = 427891549,
621 [I_LCOEF_WSEQIOPS] = 28755,
622 [I_LCOEF_WRANDIOPS] = 21940,
624 .too_fast_vrate_pct = 500,
628 [QOS_RLAT] = 5000, /* 5ms */
630 [QOS_MIN] = VRATE_MIN_PPM,
631 [QOS_MAX] = VRATE_MAX_PPM,
634 [I_LCOEF_RBPS] = 3102524156LLU,
635 [I_LCOEF_RSEQIOPS] = 724816,
636 [I_LCOEF_RRANDIOPS] = 778122,
637 [I_LCOEF_WBPS] = 1742780862LLU,
638 [I_LCOEF_WSEQIOPS] = 425702,
639 [I_LCOEF_WRANDIOPS] = 443193,
641 .too_slow_vrate_pct = 10,
646 * vrate adjust percentages indexed by ioc->busy_level. We adjust up on
647 * vtime credit shortage and down on device saturation.
649 static u32 vrate_adj_pct[] =
651 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
652 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
653 4, 4, 4, 4, 4, 4, 4, 4, 8, 8, 8, 8, 8, 8, 8, 8, 16 };
655 static struct blkcg_policy blkcg_policy_iocost;
657 /* accessors and helpers */
658 static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
660 return container_of(rqos, struct ioc, rqos);
663 static struct ioc *q_to_ioc(struct request_queue *q)
665 return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
668 static const char *q_name(struct request_queue *q)
670 if (blk_queue_registered(q))
671 return kobject_name(q->kobj.parent);
676 static const char __maybe_unused *ioc_name(struct ioc *ioc)
678 return q_name(ioc->rqos.q);
681 static struct ioc_gq *pd_to_iocg(struct blkg_policy_data *pd)
683 return pd ? container_of(pd, struct ioc_gq, pd) : NULL;
686 static struct ioc_gq *blkg_to_iocg(struct blkcg_gq *blkg)
688 return pd_to_iocg(blkg_to_pd(blkg, &blkcg_policy_iocost));
691 static struct blkcg_gq *iocg_to_blkg(struct ioc_gq *iocg)
693 return pd_to_blkg(&iocg->pd);
696 static struct ioc_cgrp *blkcg_to_iocc(struct blkcg *blkcg)
698 return container_of(blkcg_to_cpd(blkcg, &blkcg_policy_iocost),
699 struct ioc_cgrp, cpd);
703 * Scale @abs_cost to the inverse of @hw_inuse. The lower the hierarchical
704 * weight, the more expensive each IO. Must round up.
706 static u64 abs_cost_to_cost(u64 abs_cost, u32 hw_inuse)
708 return DIV64_U64_ROUND_UP(abs_cost * WEIGHT_ONE, hw_inuse);
712 * The inverse of abs_cost_to_cost(). Must round up.
714 static u64 cost_to_abs_cost(u64 cost, u32 hw_inuse)
716 return DIV64_U64_ROUND_UP(cost * hw_inuse, WEIGHT_ONE);
719 static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio,
720 u64 abs_cost, u64 cost)
722 struct iocg_pcpu_stat *gcs;
724 bio->bi_iocost_cost = cost;
725 atomic64_add(cost, &iocg->vtime);
727 gcs = get_cpu_ptr(iocg->pcpu_stat);
728 local64_add(abs_cost, &gcs->abs_vusage);
732 static void iocg_lock(struct ioc_gq *iocg, bool lock_ioc, unsigned long *flags)
735 spin_lock_irqsave(&iocg->ioc->lock, *flags);
736 spin_lock(&iocg->waitq.lock);
738 spin_lock_irqsave(&iocg->waitq.lock, *flags);
742 static void iocg_unlock(struct ioc_gq *iocg, bool unlock_ioc, unsigned long *flags)
745 spin_unlock(&iocg->waitq.lock);
746 spin_unlock_irqrestore(&iocg->ioc->lock, *flags);
748 spin_unlock_irqrestore(&iocg->waitq.lock, *flags);
752 #define CREATE_TRACE_POINTS
753 #include <trace/events/iocost.h>
755 static void ioc_refresh_margins(struct ioc *ioc)
757 struct ioc_margins *margins = &ioc->margins;
758 u32 period_us = ioc->period_us;
759 u64 vrate = ioc->vtime_base_rate;
761 margins->min = (period_us * MARGIN_MIN_PCT / 100) * vrate;
762 margins->low = (period_us * MARGIN_LOW_PCT / 100) * vrate;
763 margins->target = (period_us * MARGIN_TARGET_PCT / 100) * vrate;
766 /* latency Qos params changed, update period_us and all the dependent params */
767 static void ioc_refresh_period_us(struct ioc *ioc)
769 u32 ppm, lat, multi, period_us;
771 lockdep_assert_held(&ioc->lock);
773 /* pick the higher latency target */
774 if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) {
775 ppm = ioc->params.qos[QOS_RPPM];
776 lat = ioc->params.qos[QOS_RLAT];
778 ppm = ioc->params.qos[QOS_WPPM];
779 lat = ioc->params.qos[QOS_WLAT];
783 * We want the period to be long enough to contain a healthy number
784 * of IOs while short enough for granular control. Define it as a
785 * multiple of the latency target. Ideally, the multiplier should
786 * be scaled according to the percentile so that it would nominally
787 * contain a certain number of requests. Let's be simpler and
788 * scale it linearly so that it's 2x >= pct(90) and 10x at pct(50).
791 multi = max_t(u32, (MILLION - ppm) / 50000, 2);
794 period_us = multi * lat;
795 period_us = clamp_t(u32, period_us, MIN_PERIOD, MAX_PERIOD);
797 /* calculate dependent params */
798 ioc->period_us = period_us;
799 ioc->timer_slack_ns = div64_u64(
800 (u64)period_us * NSEC_PER_USEC * TIMER_SLACK_PCT,
802 ioc_refresh_margins(ioc);
805 static int ioc_autop_idx(struct ioc *ioc)
807 int idx = ioc->autop_idx;
808 const struct ioc_params *p = &autop[idx];
813 if (!blk_queue_nonrot(ioc->rqos.q))
816 /* handle SATA SSDs w/ broken NCQ */
817 if (blk_queue_depth(ioc->rqos.q) == 1)
818 return AUTOP_SSD_QD1;
820 /* use one of the normal ssd sets */
821 if (idx < AUTOP_SSD_DFL)
822 return AUTOP_SSD_DFL;
824 /* if user is overriding anything, maintain what was there */
825 if (ioc->user_qos_params || ioc->user_cost_model)
828 /* step up/down based on the vrate */
829 vrate_pct = div64_u64(ioc->vtime_base_rate * 100, VTIME_PER_USEC);
830 now_ns = ktime_get_ns();
832 if (p->too_fast_vrate_pct && p->too_fast_vrate_pct <= vrate_pct) {
833 if (!ioc->autop_too_fast_at)
834 ioc->autop_too_fast_at = now_ns;
835 if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC)
838 ioc->autop_too_fast_at = 0;
841 if (p->too_slow_vrate_pct && p->too_slow_vrate_pct >= vrate_pct) {
842 if (!ioc->autop_too_slow_at)
843 ioc->autop_too_slow_at = now_ns;
844 if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC)
847 ioc->autop_too_slow_at = 0;
854 * Take the followings as input
856 * @bps maximum sequential throughput
857 * @seqiops maximum sequential 4k iops
858 * @randiops maximum random 4k iops
860 * and calculate the linear model cost coefficients.
862 * *@page per-page cost 1s / (@bps / 4096)
863 * *@seqio base cost of a seq IO max((1s / @seqiops) - *@page, 0)
864 * @randiops base cost of a rand IO max((1s / @randiops) - *@page, 0)
866 static void calc_lcoefs(u64 bps, u64 seqiops, u64 randiops,
867 u64 *page, u64 *seqio, u64 *randio)
871 *page = *seqio = *randio = 0;
874 *page = DIV64_U64_ROUND_UP(VTIME_PER_SEC,
875 DIV_ROUND_UP_ULL(bps, IOC_PAGE_SIZE));
878 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, seqiops);
884 v = DIV64_U64_ROUND_UP(VTIME_PER_SEC, randiops);
890 static void ioc_refresh_lcoefs(struct ioc *ioc)
892 u64 *u = ioc->params.i_lcoefs;
893 u64 *c = ioc->params.lcoefs;
895 calc_lcoefs(u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
896 &c[LCOEF_RPAGE], &c[LCOEF_RSEQIO], &c[LCOEF_RRANDIO]);
897 calc_lcoefs(u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS],
898 &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]);
901 static bool ioc_refresh_params(struct ioc *ioc, bool force)
903 const struct ioc_params *p;
906 lockdep_assert_held(&ioc->lock);
908 idx = ioc_autop_idx(ioc);
911 if (idx == ioc->autop_idx && !force)
914 if (idx != ioc->autop_idx)
915 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
917 ioc->autop_idx = idx;
918 ioc->autop_too_fast_at = 0;
919 ioc->autop_too_slow_at = 0;
921 if (!ioc->user_qos_params)
922 memcpy(ioc->params.qos, p->qos, sizeof(p->qos));
923 if (!ioc->user_cost_model)
924 memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs));
926 ioc_refresh_period_us(ioc);
927 ioc_refresh_lcoefs(ioc);
929 ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] *
930 VTIME_PER_USEC, MILLION);
931 ioc->vrate_max = div64_u64((u64)ioc->params.qos[QOS_MAX] *
932 VTIME_PER_USEC, MILLION);
938 * When an iocg accumulates too much vtime or gets deactivated, we throw away
939 * some vtime, which lowers the overall device utilization. As the exact amount
940 * which is being thrown away is known, we can compensate by accelerating the
941 * vrate accordingly so that the extra vtime generated in the current period
942 * matches what got lost.
944 static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now)
946 s64 pleft = ioc->period_at + ioc->period_us - now->now;
947 s64 vperiod = ioc->period_us * ioc->vtime_base_rate;
948 s64 vcomp, vcomp_min, vcomp_max;
950 lockdep_assert_held(&ioc->lock);
952 /* we need some time left in this period */
957 * Calculate how much vrate should be adjusted to offset the error.
958 * Limit the amount of adjustment and deduct the adjusted amount from
961 vcomp = -div64_s64(ioc->vtime_err, pleft);
962 vcomp_min = -(ioc->vtime_base_rate >> 1);
963 vcomp_max = ioc->vtime_base_rate;
964 vcomp = clamp(vcomp, vcomp_min, vcomp_max);
966 ioc->vtime_err += vcomp * pleft;
968 atomic64_set(&ioc->vtime_rate, ioc->vtime_base_rate + vcomp);
970 /* bound how much error can accumulate */
971 ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod);
974 static void ioc_adjust_base_vrate(struct ioc *ioc, u32 rq_wait_pct,
975 int nr_lagging, int nr_shortages,
976 int prev_busy_level, u32 *missed_ppm)
978 u64 vrate = ioc->vtime_base_rate;
979 u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
981 if (!ioc->busy_level || (ioc->busy_level < 0 && nr_lagging)) {
982 if (ioc->busy_level != prev_busy_level || nr_lagging)
983 trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
984 missed_ppm, rq_wait_pct,
985 nr_lagging, nr_shortages);
991 * If vrate is out of bounds, apply clamp gradually as the
992 * bounds can change abruptly. Otherwise, apply busy_level
995 if (vrate < vrate_min) {
996 vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT), 100);
997 vrate = min(vrate, vrate_min);
998 } else if (vrate > vrate_max) {
999 vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT), 100);
1000 vrate = max(vrate, vrate_max);
1002 int idx = min_t(int, abs(ioc->busy_level),
1003 ARRAY_SIZE(vrate_adj_pct) - 1);
1004 u32 adj_pct = vrate_adj_pct[idx];
1006 if (ioc->busy_level > 0)
1007 adj_pct = 100 - adj_pct;
1009 adj_pct = 100 + adj_pct;
1011 vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
1012 vrate_min, vrate_max);
1015 trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
1016 nr_lagging, nr_shortages);
1018 ioc->vtime_base_rate = vrate;
1019 ioc_refresh_margins(ioc);
1022 /* take a snapshot of the current [v]time and vrate */
1023 static void ioc_now(struct ioc *ioc, struct ioc_now *now)
1027 now->now_ns = ktime_get();
1028 now->now = ktime_to_us(now->now_ns);
1029 now->vrate = atomic64_read(&ioc->vtime_rate);
1032 * The current vtime is
1034 * vtime at period start + (wallclock time since the start) * vrate
1036 * As a consistent snapshot of `period_at_vtime` and `period_at` is
1037 * needed, they're seqcount protected.
1040 seq = read_seqcount_begin(&ioc->period_seqcount);
1041 now->vnow = ioc->period_at_vtime +
1042 (now->now - ioc->period_at) * now->vrate;
1043 } while (read_seqcount_retry(&ioc->period_seqcount, seq));
1046 static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
1048 WARN_ON_ONCE(ioc->running != IOC_RUNNING);
1050 write_seqcount_begin(&ioc->period_seqcount);
1051 ioc->period_at = now->now;
1052 ioc->period_at_vtime = now->vnow;
1053 write_seqcount_end(&ioc->period_seqcount);
1055 ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us);
1056 add_timer(&ioc->timer);
1060 * Update @iocg's `active` and `inuse` to @active and @inuse, update level
1061 * weight sums and propagate upwards accordingly. If @save, the current margin
1062 * is saved to be used as reference for later inuse in-period adjustments.
1064 static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1065 bool save, struct ioc_now *now)
1067 struct ioc *ioc = iocg->ioc;
1070 lockdep_assert_held(&ioc->lock);
1073 * For an active leaf node, its inuse shouldn't be zero or exceed
1074 * @active. An active internal node's inuse is solely determined by the
1075 * inuse to active ratio of its children regardless of @inuse.
1077 if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
1078 inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
1079 iocg->child_active_sum);
1081 inuse = clamp_t(u32, inuse, 1, active);
1084 iocg->last_inuse = iocg->inuse;
1086 iocg->saved_margin = now->vnow - atomic64_read(&iocg->vtime);
1088 if (active == iocg->active && inuse == iocg->inuse)
1091 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1092 struct ioc_gq *parent = iocg->ancestors[lvl];
1093 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1094 u32 parent_active = 0, parent_inuse = 0;
1096 /* update the level sums */
1097 parent->child_active_sum += (s32)(active - child->active);
1098 parent->child_inuse_sum += (s32)(inuse - child->inuse);
1099 /* apply the updates */
1100 child->active = active;
1101 child->inuse = inuse;
1104 * The delta between inuse and active sums indicates that
1105 * much of weight is being given away. Parent's inuse
1106 * and active should reflect the ratio.
1108 if (parent->child_active_sum) {
1109 parent_active = parent->weight;
1110 parent_inuse = DIV64_U64_ROUND_UP(
1111 parent_active * parent->child_inuse_sum,
1112 parent->child_active_sum);
1115 /* do we need to keep walking up? */
1116 if (parent_active == parent->active &&
1117 parent_inuse == parent->inuse)
1120 active = parent_active;
1121 inuse = parent_inuse;
1124 ioc->weights_updated = true;
1127 static void commit_weights(struct ioc *ioc)
1129 lockdep_assert_held(&ioc->lock);
1131 if (ioc->weights_updated) {
1132 /* paired with rmb in current_hweight(), see there */
1134 atomic_inc(&ioc->hweight_gen);
1135 ioc->weights_updated = false;
1139 static void propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
1140 bool save, struct ioc_now *now)
1142 __propagate_weights(iocg, active, inuse, save, now);
1143 commit_weights(iocg->ioc);
1146 static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep)
1148 struct ioc *ioc = iocg->ioc;
1153 /* hot path - if uptodate, use cached */
1154 ioc_gen = atomic_read(&ioc->hweight_gen);
1155 if (ioc_gen == iocg->hweight_gen)
1159 * Paired with wmb in commit_weights(). If we saw the updated
1160 * hweight_gen, all the weight updates from __propagate_weights() are
1163 * We can race with weight updates during calculation and get it
1164 * wrong. However, hweight_gen would have changed and a future
1165 * reader will recalculate and we're guaranteed to discard the
1166 * wrong result soon.
1170 hwa = hwi = WEIGHT_ONE;
1171 for (lvl = 0; lvl <= iocg->level - 1; lvl++) {
1172 struct ioc_gq *parent = iocg->ancestors[lvl];
1173 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1174 u64 active_sum = READ_ONCE(parent->child_active_sum);
1175 u64 inuse_sum = READ_ONCE(parent->child_inuse_sum);
1176 u32 active = READ_ONCE(child->active);
1177 u32 inuse = READ_ONCE(child->inuse);
1179 /* we can race with deactivations and either may read as zero */
1180 if (!active_sum || !inuse_sum)
1183 active_sum = max_t(u64, active, active_sum);
1184 hwa = div64_u64((u64)hwa * active, active_sum);
1186 inuse_sum = max_t(u64, inuse, inuse_sum);
1187 hwi = div64_u64((u64)hwi * inuse, inuse_sum);
1190 iocg->hweight_active = max_t(u32, hwa, 1);
1191 iocg->hweight_inuse = max_t(u32, hwi, 1);
1192 iocg->hweight_gen = ioc_gen;
1195 *hw_activep = iocg->hweight_active;
1197 *hw_inusep = iocg->hweight_inuse;
1201 * Calculate the hweight_inuse @iocg would get with max @inuse assuming all the
1202 * other weights stay unchanged.
1204 static u32 current_hweight_max(struct ioc_gq *iocg)
1206 u32 hwm = WEIGHT_ONE;
1207 u32 inuse = iocg->active;
1208 u64 child_inuse_sum;
1211 lockdep_assert_held(&iocg->ioc->lock);
1213 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1214 struct ioc_gq *parent = iocg->ancestors[lvl];
1215 struct ioc_gq *child = iocg->ancestors[lvl + 1];
1217 child_inuse_sum = parent->child_inuse_sum + inuse - child->inuse;
1218 hwm = div64_u64((u64)hwm * inuse, child_inuse_sum);
1219 inuse = DIV64_U64_ROUND_UP(parent->active * child_inuse_sum,
1220 parent->child_active_sum);
1223 return max_t(u32, hwm, 1);
1226 static void weight_updated(struct ioc_gq *iocg, struct ioc_now *now)
1228 struct ioc *ioc = iocg->ioc;
1229 struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1230 struct ioc_cgrp *iocc = blkcg_to_iocc(blkg->blkcg);
1233 lockdep_assert_held(&ioc->lock);
1235 weight = iocg->cfg_weight ?: iocc->dfl_weight;
1236 if (weight != iocg->weight && iocg->active)
1237 propagate_weights(iocg, weight, iocg->inuse, true, now);
1238 iocg->weight = weight;
1241 static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now)
1243 struct ioc *ioc = iocg->ioc;
1244 u64 last_period, cur_period;
1249 * If seem to be already active, just update the stamp to tell the
1250 * timer that we're still active. We don't mind occassional races.
1252 if (!list_empty(&iocg->active_list)) {
1254 cur_period = atomic64_read(&ioc->cur_period);
1255 if (atomic64_read(&iocg->active_period) != cur_period)
1256 atomic64_set(&iocg->active_period, cur_period);
1260 /* racy check on internal node IOs, treat as root level IOs */
1261 if (iocg->child_active_sum)
1264 spin_lock_irq(&ioc->lock);
1269 cur_period = atomic64_read(&ioc->cur_period);
1270 last_period = atomic64_read(&iocg->active_period);
1271 atomic64_set(&iocg->active_period, cur_period);
1273 /* already activated or breaking leaf-only constraint? */
1274 if (!list_empty(&iocg->active_list))
1275 goto succeed_unlock;
1276 for (i = iocg->level - 1; i > 0; i--)
1277 if (!list_empty(&iocg->ancestors[i]->active_list))
1280 if (iocg->child_active_sum)
1284 * Always start with the target budget. On deactivation, we throw away
1285 * anything above it.
1287 vtarget = now->vnow - ioc->margins.target;
1288 vtime = atomic64_read(&iocg->vtime);
1290 atomic64_add(vtarget - vtime, &iocg->vtime);
1291 atomic64_add(vtarget - vtime, &iocg->done_vtime);
1295 * Activate, propagate weight and start period timer if not
1296 * running. Reset hweight_gen to avoid accidental match from
1299 iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1;
1300 list_add(&iocg->active_list, &ioc->active_iocgs);
1302 propagate_weights(iocg, iocg->weight,
1303 iocg->last_inuse ?: iocg->weight, true, now);
1305 TRACE_IOCG_PATH(iocg_activate, iocg, now,
1306 last_period, cur_period, vtime);
1308 iocg->activated_at = now->now;
1310 if (ioc->running == IOC_IDLE) {
1311 ioc->running = IOC_RUNNING;
1312 ioc->dfgv_period_at = now->now;
1313 ioc->dfgv_period_rem = 0;
1314 ioc_start_period(ioc, now);
1318 spin_unlock_irq(&ioc->lock);
1322 spin_unlock_irq(&ioc->lock);
1326 static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now)
1328 struct ioc *ioc = iocg->ioc;
1329 struct blkcg_gq *blkg = iocg_to_blkg(iocg);
1330 u64 tdelta, delay, new_delay;
1331 s64 vover, vover_pct;
1334 lockdep_assert_held(&iocg->waitq.lock);
1336 /* calculate the current delay in effect - 1/2 every second */
1337 tdelta = now->now - iocg->delay_at;
1339 delay = iocg->delay >> div64_u64(tdelta, USEC_PER_SEC);
1343 /* calculate the new delay from the debt amount */
1344 current_hweight(iocg, &hwa, NULL);
1345 vover = atomic64_read(&iocg->vtime) +
1346 abs_cost_to_cost(iocg->abs_vdebt, hwa) - now->vnow;
1347 vover_pct = div64_s64(100 * vover,
1348 ioc->period_us * ioc->vtime_base_rate);
1350 if (vover_pct <= MIN_DELAY_THR_PCT)
1352 else if (vover_pct >= MAX_DELAY_THR_PCT)
1353 new_delay = MAX_DELAY;
1355 new_delay = MIN_DELAY +
1356 div_u64((MAX_DELAY - MIN_DELAY) *
1357 (vover_pct - MIN_DELAY_THR_PCT),
1358 MAX_DELAY_THR_PCT - MIN_DELAY_THR_PCT);
1360 /* pick the higher one and apply */
1361 if (new_delay > delay) {
1362 iocg->delay = new_delay;
1363 iocg->delay_at = now->now;
1367 if (delay >= MIN_DELAY) {
1368 if (!iocg->indelay_since)
1369 iocg->indelay_since = now->now;
1370 blkcg_set_delay(blkg, delay * NSEC_PER_USEC);
1373 if (iocg->indelay_since) {
1374 iocg->local_stat.indelay_us += now->now - iocg->indelay_since;
1375 iocg->indelay_since = 0;
1378 blkcg_clear_delay(blkg);
1383 static void iocg_incur_debt(struct ioc_gq *iocg, u64 abs_cost,
1384 struct ioc_now *now)
1386 struct iocg_pcpu_stat *gcs;
1388 lockdep_assert_held(&iocg->ioc->lock);
1389 lockdep_assert_held(&iocg->waitq.lock);
1390 WARN_ON_ONCE(list_empty(&iocg->active_list));
1393 * Once in debt, debt handling owns inuse. @iocg stays at the minimum
1394 * inuse donating all of it share to others until its debt is paid off.
1396 if (!iocg->abs_vdebt && abs_cost) {
1397 iocg->indebt_since = now->now;
1398 propagate_weights(iocg, iocg->active, 0, false, now);
1401 iocg->abs_vdebt += abs_cost;
1403 gcs = get_cpu_ptr(iocg->pcpu_stat);
1404 local64_add(abs_cost, &gcs->abs_vusage);
1408 static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay,
1409 struct ioc_now *now)
1411 lockdep_assert_held(&iocg->ioc->lock);
1412 lockdep_assert_held(&iocg->waitq.lock);
1414 /* make sure that nobody messed with @iocg */
1415 WARN_ON_ONCE(list_empty(&iocg->active_list));
1416 WARN_ON_ONCE(iocg->inuse > 1);
1418 iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt);
1420 /* if debt is paid in full, restore inuse */
1421 if (!iocg->abs_vdebt) {
1422 iocg->local_stat.indebt_us += now->now - iocg->indebt_since;
1423 iocg->indebt_since = 0;
1425 propagate_weights(iocg, iocg->active, iocg->last_inuse,
1430 static int iocg_wake_fn(struct wait_queue_entry *wq_entry, unsigned mode,
1431 int flags, void *key)
1433 struct iocg_wait *wait = container_of(wq_entry, struct iocg_wait, wait);
1434 struct iocg_wake_ctx *ctx = (struct iocg_wake_ctx *)key;
1435 u64 cost = abs_cost_to_cost(wait->abs_cost, ctx->hw_inuse);
1437 ctx->vbudget -= cost;
1439 if (ctx->vbudget < 0)
1442 iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost);
1445 * autoremove_wake_function() removes the wait entry only when it
1446 * actually changed the task state. We want the wait always
1447 * removed. Remove explicitly and use default_wake_function().
1449 list_del_init(&wq_entry->entry);
1450 wait->committed = true;
1452 default_wake_function(wq_entry, mode, flags, key);
1457 * Calculate the accumulated budget, pay debt if @pay_debt and wake up waiters
1458 * accordingly. When @pay_debt is %true, the caller must be holding ioc->lock in
1459 * addition to iocg->waitq.lock.
1461 static void iocg_kick_waitq(struct ioc_gq *iocg, bool pay_debt,
1462 struct ioc_now *now)
1464 struct ioc *ioc = iocg->ioc;
1465 struct iocg_wake_ctx ctx = { .iocg = iocg };
1466 u64 vshortage, expires, oexpires;
1470 lockdep_assert_held(&iocg->waitq.lock);
1472 current_hweight(iocg, &hwa, NULL);
1473 vbudget = now->vnow - atomic64_read(&iocg->vtime);
1476 if (pay_debt && iocg->abs_vdebt && vbudget > 0) {
1477 u64 abs_vbudget = cost_to_abs_cost(vbudget, hwa);
1478 u64 abs_vpay = min_t(u64, abs_vbudget, iocg->abs_vdebt);
1479 u64 vpay = abs_cost_to_cost(abs_vpay, hwa);
1481 lockdep_assert_held(&ioc->lock);
1483 atomic64_add(vpay, &iocg->vtime);
1484 atomic64_add(vpay, &iocg->done_vtime);
1485 iocg_pay_debt(iocg, abs_vpay, now);
1489 if (iocg->abs_vdebt || iocg->delay)
1490 iocg_kick_delay(iocg, now);
1493 * Debt can still be outstanding if we haven't paid all yet or the
1494 * caller raced and called without @pay_debt. Shouldn't wake up waiters
1495 * under debt. Make sure @vbudget reflects the outstanding amount and is
1498 if (iocg->abs_vdebt) {
1499 s64 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hwa);
1500 vbudget = min_t(s64, 0, vbudget - vdebt);
1504 * Wake up the ones which are due and see how much vtime we'll need for
1505 * the next one. As paying off debt restores hw_inuse, it must be read
1506 * after the above debt payment.
1508 ctx.vbudget = vbudget;
1509 current_hweight(iocg, NULL, &ctx.hw_inuse);
1511 __wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx);
1513 if (!waitqueue_active(&iocg->waitq)) {
1514 if (iocg->wait_since) {
1515 iocg->local_stat.wait_us += now->now - iocg->wait_since;
1516 iocg->wait_since = 0;
1521 if (!iocg->wait_since)
1522 iocg->wait_since = now->now;
1524 if (WARN_ON_ONCE(ctx.vbudget >= 0))
1527 /* determine next wakeup, add a timer margin to guarantee chunking */
1528 vshortage = -ctx.vbudget;
1529 expires = now->now_ns +
1530 DIV64_U64_ROUND_UP(vshortage, ioc->vtime_base_rate) *
1532 expires += ioc->timer_slack_ns;
1534 /* if already active and close enough, don't bother */
1535 oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer));
1536 if (hrtimer_is_queued(&iocg->waitq_timer) &&
1537 abs(oexpires - expires) <= ioc->timer_slack_ns)
1540 hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires),
1541 ioc->timer_slack_ns, HRTIMER_MODE_ABS);
1544 static enum hrtimer_restart iocg_waitq_timer_fn(struct hrtimer *timer)
1546 struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer);
1547 bool pay_debt = READ_ONCE(iocg->abs_vdebt);
1549 unsigned long flags;
1551 ioc_now(iocg->ioc, &now);
1553 iocg_lock(iocg, pay_debt, &flags);
1554 iocg_kick_waitq(iocg, pay_debt, &now);
1555 iocg_unlock(iocg, pay_debt, &flags);
1557 return HRTIMER_NORESTART;
1560 static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p)
1562 u32 nr_met[2] = { };
1563 u32 nr_missed[2] = { };
1567 for_each_online_cpu(cpu) {
1568 struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu);
1569 u64 this_rq_wait_ns;
1571 for (rw = READ; rw <= WRITE; rw++) {
1572 u32 this_met = local_read(&stat->missed[rw].nr_met);
1573 u32 this_missed = local_read(&stat->missed[rw].nr_missed);
1575 nr_met[rw] += this_met - stat->missed[rw].last_met;
1576 nr_missed[rw] += this_missed - stat->missed[rw].last_missed;
1577 stat->missed[rw].last_met = this_met;
1578 stat->missed[rw].last_missed = this_missed;
1581 this_rq_wait_ns = local64_read(&stat->rq_wait_ns);
1582 rq_wait_ns += this_rq_wait_ns - stat->last_rq_wait_ns;
1583 stat->last_rq_wait_ns = this_rq_wait_ns;
1586 for (rw = READ; rw <= WRITE; rw++) {
1587 if (nr_met[rw] + nr_missed[rw])
1589 DIV64_U64_ROUND_UP((u64)nr_missed[rw] * MILLION,
1590 nr_met[rw] + nr_missed[rw]);
1592 missed_ppm_ar[rw] = 0;
1595 *rq_wait_pct_p = div64_u64(rq_wait_ns * 100,
1596 ioc->period_us * NSEC_PER_USEC);
1599 /* was iocg idle this period? */
1600 static bool iocg_is_idle(struct ioc_gq *iocg)
1602 struct ioc *ioc = iocg->ioc;
1604 /* did something get issued this period? */
1605 if (atomic64_read(&iocg->active_period) ==
1606 atomic64_read(&ioc->cur_period))
1609 /* is something in flight? */
1610 if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
1617 * Call this function on the target leaf @iocg's to build pre-order traversal
1618 * list of all the ancestors in @inner_walk. The inner nodes are linked through
1619 * ->walk_list and the caller is responsible for dissolving the list after use.
1621 static void iocg_build_inner_walk(struct ioc_gq *iocg,
1622 struct list_head *inner_walk)
1626 WARN_ON_ONCE(!list_empty(&iocg->walk_list));
1628 /* find the first ancestor which hasn't been visited yet */
1629 for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
1630 if (!list_empty(&iocg->ancestors[lvl]->walk_list))
1634 /* walk down and visit the inner nodes to get pre-order traversal */
1635 while (++lvl <= iocg->level - 1) {
1636 struct ioc_gq *inner = iocg->ancestors[lvl];
1638 /* record traversal order */
1639 list_add_tail(&inner->walk_list, inner_walk);
1643 /* collect per-cpu counters and propagate the deltas to the parent */
1644 static void iocg_flush_stat_one(struct ioc_gq *iocg, struct ioc_now *now)
1646 struct ioc *ioc = iocg->ioc;
1647 struct iocg_stat new_stat;
1652 lockdep_assert_held(&iocg->ioc->lock);
1654 /* collect per-cpu counters */
1655 for_each_possible_cpu(cpu) {
1656 abs_vusage += local64_read(
1657 per_cpu_ptr(&iocg->pcpu_stat->abs_vusage, cpu));
1659 vusage_delta = abs_vusage - iocg->last_stat_abs_vusage;
1660 iocg->last_stat_abs_vusage = abs_vusage;
1662 iocg->usage_delta_us = div64_u64(vusage_delta, ioc->vtime_base_rate);
1663 iocg->local_stat.usage_us += iocg->usage_delta_us;
1665 /* propagate upwards */
1667 iocg->local_stat.usage_us + iocg->desc_stat.usage_us;
1669 iocg->local_stat.wait_us + iocg->desc_stat.wait_us;
1670 new_stat.indebt_us =
1671 iocg->local_stat.indebt_us + iocg->desc_stat.indebt_us;
1672 new_stat.indelay_us =
1673 iocg->local_stat.indelay_us + iocg->desc_stat.indelay_us;
1675 /* propagate the deltas to the parent */
1676 if (iocg->level > 0) {
1677 struct iocg_stat *parent_stat =
1678 &iocg->ancestors[iocg->level - 1]->desc_stat;
1680 parent_stat->usage_us +=
1681 new_stat.usage_us - iocg->last_stat.usage_us;
1682 parent_stat->wait_us +=
1683 new_stat.wait_us - iocg->last_stat.wait_us;
1684 parent_stat->indebt_us +=
1685 new_stat.indebt_us - iocg->last_stat.indebt_us;
1686 parent_stat->indelay_us +=
1687 new_stat.indelay_us - iocg->last_stat.indelay_us;
1690 iocg->last_stat = new_stat;
1693 /* get stat counters ready for reading on all active iocgs */
1694 static void iocg_flush_stat(struct list_head *target_iocgs, struct ioc_now *now)
1696 LIST_HEAD(inner_walk);
1697 struct ioc_gq *iocg, *tiocg;
1699 /* flush leaves and build inner node walk list */
1700 list_for_each_entry(iocg, target_iocgs, active_list) {
1701 iocg_flush_stat_one(iocg, now);
1702 iocg_build_inner_walk(iocg, &inner_walk);
1705 /* keep flushing upwards by walking the inner list backwards */
1706 list_for_each_entry_safe_reverse(iocg, tiocg, &inner_walk, walk_list) {
1707 iocg_flush_stat_one(iocg, now);
1708 list_del_init(&iocg->walk_list);
1713 * Determine what @iocg's hweight_inuse should be after donating unused
1714 * capacity. @hwm is the upper bound and used to signal no donation. This
1715 * function also throws away @iocg's excess budget.
1717 static u32 hweight_after_donation(struct ioc_gq *iocg, u32 old_hwi, u32 hwm,
1718 u32 usage, struct ioc_now *now)
1720 struct ioc *ioc = iocg->ioc;
1721 u64 vtime = atomic64_read(&iocg->vtime);
1722 s64 excess, delta, target, new_hwi;
1724 /* debt handling owns inuse for debtors */
1725 if (iocg->abs_vdebt)
1728 /* see whether minimum margin requirement is met */
1729 if (waitqueue_active(&iocg->waitq) ||
1730 time_after64(vtime, now->vnow - ioc->margins.min))
1733 /* throw away excess above target */
1734 excess = now->vnow - vtime - ioc->margins.target;
1736 atomic64_add(excess, &iocg->vtime);
1737 atomic64_add(excess, &iocg->done_vtime);
1739 ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE);
1743 * Let's say the distance between iocg's and device's vtimes as a
1744 * fraction of period duration is delta. Assuming that the iocg will
1745 * consume the usage determined above, we want to determine new_hwi so
1746 * that delta equals MARGIN_TARGET at the end of the next period.
1748 * We need to execute usage worth of IOs while spending the sum of the
1749 * new budget (1 - MARGIN_TARGET) and the leftover from the last period
1752 * usage = (1 - MARGIN_TARGET + delta) * new_hwi
1754 * Therefore, the new_hwi is:
1756 * new_hwi = usage / (1 - MARGIN_TARGET + delta)
1758 delta = div64_s64(WEIGHT_ONE * (now->vnow - vtime),
1759 now->vnow - ioc->period_at_vtime);
1760 target = WEIGHT_ONE * MARGIN_TARGET_PCT / 100;
1761 new_hwi = div64_s64(WEIGHT_ONE * usage, WEIGHT_ONE - target + delta);
1763 return clamp_t(s64, new_hwi, 1, hwm);
1767 * For work-conservation, an iocg which isn't using all of its share should
1768 * donate the leftover to other iocgs. There are two ways to achieve this - 1.
1769 * bumping up vrate accordingly 2. lowering the donating iocg's inuse weight.
1771 * #1 is mathematically simpler but has the drawback of requiring synchronous
1772 * global hweight_inuse updates when idle iocg's get activated or inuse weights
1773 * change due to donation snapbacks as it has the possibility of grossly
1774 * overshooting what's allowed by the model and vrate.
1776 * #2 is inherently safe with local operations. The donating iocg can easily
1777 * snap back to higher weights when needed without worrying about impacts on
1778 * other nodes as the impacts will be inherently correct. This also makes idle
1779 * iocg activations safe. The only effect activations have is decreasing
1780 * hweight_inuse of others, the right solution to which is for those iocgs to
1781 * snap back to higher weights.
1783 * So, we go with #2. The challenge is calculating how each donating iocg's
1784 * inuse should be adjusted to achieve the target donation amounts. This is done
1785 * using Andy's method described in the following pdf.
1787 * https://drive.google.com/file/d/1PsJwxPFtjUnwOY1QJ5AeICCcsL7BM3bo
1789 * Given the weights and target after-donation hweight_inuse values, Andy's
1790 * method determines how the proportional distribution should look like at each
1791 * sibling level to maintain the relative relationship between all non-donating
1792 * pairs. To roughly summarize, it divides the tree into donating and
1793 * non-donating parts, calculates global donation rate which is used to
1794 * determine the target hweight_inuse for each node, and then derives per-level
1797 * The following pdf shows that global distribution calculated this way can be
1798 * achieved by scaling inuse weights of donating leaves and propagating the
1799 * adjustments upwards proportionally.
1801 * https://drive.google.com/file/d/1vONz1-fzVO7oY5DXXsLjSxEtYYQbOvsE
1803 * Combining the above two, we can determine how each leaf iocg's inuse should
1804 * be adjusted to achieve the target donation.
1806 * https://drive.google.com/file/d/1WcrltBOSPN0qXVdBgnKm4mdp9FhuEFQN
1808 * The inline comments use symbols from the last pdf.
1810 * b is the sum of the absolute budgets in the subtree. 1 for the root node.
1811 * f is the sum of the absolute budgets of non-donating nodes in the subtree.
1812 * t is the sum of the absolute budgets of donating nodes in the subtree.
1813 * w is the weight of the node. w = w_f + w_t
1814 * w_f is the non-donating portion of w. w_f = w * f / b
1815 * w_b is the donating portion of w. w_t = w * t / b
1816 * s is the sum of all sibling weights. s = Sum(w) for siblings
1817 * s_f and s_t are the non-donating and donating portions of s.
1819 * Subscript p denotes the parent's counterpart and ' the adjusted value - e.g.
1820 * w_pt is the donating portion of the parent's weight and w'_pt the same value
1821 * after adjustments. Subscript r denotes the root node's values.
1823 static void transfer_surpluses(struct list_head *surpluses, struct ioc_now *now)
1825 LIST_HEAD(over_hwa);
1826 LIST_HEAD(inner_walk);
1827 struct ioc_gq *iocg, *tiocg, *root_iocg;
1828 u32 after_sum, over_sum, over_target, gamma;
1831 * It's pretty unlikely but possible for the total sum of
1832 * hweight_after_donation's to be higher than WEIGHT_ONE, which will
1833 * confuse the following calculations. If such condition is detected,
1834 * scale down everyone over its full share equally to keep the sum below
1839 list_for_each_entry(iocg, surpluses, surplus_list) {
1842 current_hweight(iocg, &hwa, NULL);
1843 after_sum += iocg->hweight_after_donation;
1845 if (iocg->hweight_after_donation > hwa) {
1846 over_sum += iocg->hweight_after_donation;
1847 list_add(&iocg->walk_list, &over_hwa);
1851 if (after_sum >= WEIGHT_ONE) {
1853 * The delta should be deducted from the over_sum, calculate
1854 * target over_sum value.
1856 u32 over_delta = after_sum - (WEIGHT_ONE - 1);
1857 WARN_ON_ONCE(over_sum <= over_delta);
1858 over_target = over_sum - over_delta;
1863 list_for_each_entry_safe(iocg, tiocg, &over_hwa, walk_list) {
1865 iocg->hweight_after_donation =
1866 div_u64((u64)iocg->hweight_after_donation *
1867 over_target, over_sum);
1868 list_del_init(&iocg->walk_list);
1872 * Build pre-order inner node walk list and prepare for donation
1873 * adjustment calculations.
1875 list_for_each_entry(iocg, surpluses, surplus_list) {
1876 iocg_build_inner_walk(iocg, &inner_walk);
1879 root_iocg = list_first_entry(&inner_walk, struct ioc_gq, walk_list);
1880 WARN_ON_ONCE(root_iocg->level > 0);
1882 list_for_each_entry(iocg, &inner_walk, walk_list) {
1883 iocg->child_adjusted_sum = 0;
1884 iocg->hweight_donating = 0;
1885 iocg->hweight_after_donation = 0;
1889 * Propagate the donating budget (b_t) and after donation budget (b'_t)
1892 list_for_each_entry(iocg, surpluses, surplus_list) {
1893 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1895 parent->hweight_donating += iocg->hweight_donating;
1896 parent->hweight_after_donation += iocg->hweight_after_donation;
1899 list_for_each_entry_reverse(iocg, &inner_walk, walk_list) {
1900 if (iocg->level > 0) {
1901 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1903 parent->hweight_donating += iocg->hweight_donating;
1904 parent->hweight_after_donation += iocg->hweight_after_donation;
1909 * Calculate inner hwa's (b) and make sure the donation values are
1910 * within the accepted ranges as we're doing low res calculations with
1913 list_for_each_entry(iocg, &inner_walk, walk_list) {
1915 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
1917 iocg->hweight_active = DIV64_U64_ROUND_UP(
1918 (u64)parent->hweight_active * iocg->active,
1919 parent->child_active_sum);
1923 iocg->hweight_donating = min(iocg->hweight_donating,
1924 iocg->hweight_active);
1925 iocg->hweight_after_donation = min(iocg->hweight_after_donation,
1926 iocg->hweight_donating - 1);
1927 if (WARN_ON_ONCE(iocg->hweight_active <= 1 ||
1928 iocg->hweight_donating <= 1 ||
1929 iocg->hweight_after_donation == 0)) {
1930 pr_warn("iocg: invalid donation weights in ");
1931 pr_cont_cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup);
1932 pr_cont(": active=%u donating=%u after=%u\n",
1933 iocg->hweight_active, iocg->hweight_donating,
1934 iocg->hweight_after_donation);
1939 * Calculate the global donation rate (gamma) - the rate to adjust
1940 * non-donating budgets by.
1942 * No need to use 64bit multiplication here as the first operand is
1943 * guaranteed to be smaller than WEIGHT_ONE (1<<16).
1945 * We know that there are beneficiary nodes and the sum of the donating
1946 * hweights can't be whole; however, due to the round-ups during hweight
1947 * calculations, root_iocg->hweight_donating might still end up equal to
1948 * or greater than whole. Limit the range when calculating the divider.
1950 * gamma = (1 - t_r') / (1 - t_r)
1952 gamma = DIV_ROUND_UP(
1953 (WEIGHT_ONE - root_iocg->hweight_after_donation) * WEIGHT_ONE,
1954 WEIGHT_ONE - min_t(u32, root_iocg->hweight_donating, WEIGHT_ONE - 1));
1957 * Calculate adjusted hwi, child_adjusted_sum and inuse for the inner
1960 list_for_each_entry(iocg, &inner_walk, walk_list) {
1961 struct ioc_gq *parent;
1962 u32 inuse, wpt, wptp;
1965 if (iocg->level == 0) {
1966 /* adjusted weight sum for 1st level: s' = s * b_pf / b'_pf */
1967 iocg->child_adjusted_sum = DIV64_U64_ROUND_UP(
1968 iocg->child_active_sum * (WEIGHT_ONE - iocg->hweight_donating),
1969 WEIGHT_ONE - iocg->hweight_after_donation);
1973 parent = iocg->ancestors[iocg->level - 1];
1975 /* b' = gamma * b_f + b_t' */
1976 iocg->hweight_inuse = DIV64_U64_ROUND_UP(
1977 (u64)gamma * (iocg->hweight_active - iocg->hweight_donating),
1978 WEIGHT_ONE) + iocg->hweight_after_donation;
1980 /* w' = s' * b' / b'_p */
1981 inuse = DIV64_U64_ROUND_UP(
1982 (u64)parent->child_adjusted_sum * iocg->hweight_inuse,
1983 parent->hweight_inuse);
1985 /* adjusted weight sum for children: s' = s_f + s_t * w'_pt / w_pt */
1986 st = DIV64_U64_ROUND_UP(
1987 iocg->child_active_sum * iocg->hweight_donating,
1988 iocg->hweight_active);
1989 sf = iocg->child_active_sum - st;
1990 wpt = DIV64_U64_ROUND_UP(
1991 (u64)iocg->active * iocg->hweight_donating,
1992 iocg->hweight_active);
1993 wptp = DIV64_U64_ROUND_UP(
1994 (u64)inuse * iocg->hweight_after_donation,
1995 iocg->hweight_inuse);
1997 iocg->child_adjusted_sum = sf + DIV64_U64_ROUND_UP(st * wptp, wpt);
2001 * All inner nodes now have ->hweight_inuse and ->child_adjusted_sum and
2002 * we can finally determine leaf adjustments.
2004 list_for_each_entry(iocg, surpluses, surplus_list) {
2005 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1];
2009 * In-debt iocgs participated in the donation calculation with
2010 * the minimum target hweight_inuse. Configuring inuse
2011 * accordingly would work fine but debt handling expects
2012 * @iocg->inuse stay at the minimum and we don't wanna
2015 if (iocg->abs_vdebt) {
2016 WARN_ON_ONCE(iocg->inuse > 1);
2020 /* w' = s' * b' / b'_p, note that b' == b'_t for donating leaves */
2021 inuse = DIV64_U64_ROUND_UP(
2022 parent->child_adjusted_sum * iocg->hweight_after_donation,
2023 parent->hweight_inuse);
2025 TRACE_IOCG_PATH(inuse_transfer, iocg, now,
2027 iocg->hweight_inuse,
2028 iocg->hweight_after_donation);
2030 __propagate_weights(iocg, iocg->active, inuse, true, now);
2033 /* walk list should be dissolved after use */
2034 list_for_each_entry_safe(iocg, tiocg, &inner_walk, walk_list)
2035 list_del_init(&iocg->walk_list);
2039 * A low weight iocg can amass a large amount of debt, for example, when
2040 * anonymous memory gets reclaimed aggressively. If the system has a lot of
2041 * memory paired with a slow IO device, the debt can span multiple seconds or
2042 * more. If there are no other subsequent IO issuers, the in-debt iocg may end
2043 * up blocked paying its debt while the IO device is idle.
2045 * The following protects against such cases. If the device has been
2046 * sufficiently idle for a while, the debts are halved and delays are
2049 static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors,
2050 struct ioc_now *now)
2052 struct ioc_gq *iocg;
2053 u64 dur, usage_pct, nr_cycles;
2055 /* if no debtor, reset the cycle */
2057 ioc->dfgv_period_at = now->now;
2058 ioc->dfgv_period_rem = 0;
2059 ioc->dfgv_usage_us_sum = 0;
2064 * Debtors can pass through a lot of writes choking the device and we
2065 * don't want to be forgiving debts while the device is struggling from
2066 * write bursts. If we're missing latency targets, consider the device
2069 if (ioc->busy_level > 0)
2070 usage_us_sum = max_t(u64, usage_us_sum, ioc->period_us);
2072 ioc->dfgv_usage_us_sum += usage_us_sum;
2073 if (time_before64(now->now, ioc->dfgv_period_at + DFGV_PERIOD))
2077 * At least DFGV_PERIOD has passed since the last period. Calculate the
2078 * average usage and reset the period counters.
2080 dur = now->now - ioc->dfgv_period_at;
2081 usage_pct = div64_u64(100 * ioc->dfgv_usage_us_sum, dur);
2083 ioc->dfgv_period_at = now->now;
2084 ioc->dfgv_usage_us_sum = 0;
2086 /* if was too busy, reset everything */
2087 if (usage_pct > DFGV_USAGE_PCT) {
2088 ioc->dfgv_period_rem = 0;
2093 * Usage is lower than threshold. Let's forgive some debts. Debt
2094 * forgiveness runs off of the usual ioc timer but its period usually
2095 * doesn't match ioc's. Compensate the difference by performing the
2096 * reduction as many times as would fit in the duration since the last
2097 * run and carrying over the left-over duration in @ioc->dfgv_period_rem
2098 * - if ioc period is 75% of DFGV_PERIOD, one out of three consecutive
2099 * reductions is doubled.
2101 nr_cycles = dur + ioc->dfgv_period_rem;
2102 ioc->dfgv_period_rem = do_div(nr_cycles, DFGV_PERIOD);
2104 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2105 u64 __maybe_unused old_debt, __maybe_unused old_delay;
2107 if (!iocg->abs_vdebt && !iocg->delay)
2110 spin_lock(&iocg->waitq.lock);
2112 old_debt = iocg->abs_vdebt;
2113 old_delay = iocg->delay;
2115 if (iocg->abs_vdebt)
2116 iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles ?: 1;
2118 iocg->delay = iocg->delay >> nr_cycles ?: 1;
2120 iocg_kick_waitq(iocg, true, now);
2122 TRACE_IOCG_PATH(iocg_forgive_debt, iocg, now, usage_pct,
2123 old_debt, iocg->abs_vdebt,
2124 old_delay, iocg->delay);
2126 spin_unlock(&iocg->waitq.lock);
2131 * Check the active iocgs' state to avoid oversleeping and deactive
2134 * Since waiters determine the sleep durations based on the vrate
2135 * they saw at the time of sleep, if vrate has increased, some
2136 * waiters could be sleeping for too long. Wake up tardy waiters
2137 * which should have woken up in the last period and expire idle
2140 static int ioc_check_iocgs(struct ioc *ioc, struct ioc_now *now)
2143 struct ioc_gq *iocg, *tiocg;
2145 list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
2146 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2147 !iocg->delay && !iocg_is_idle(iocg))
2150 spin_lock(&iocg->waitq.lock);
2152 /* flush wait and indebt stat deltas */
2153 if (iocg->wait_since) {
2154 iocg->local_stat.wait_us += now->now - iocg->wait_since;
2155 iocg->wait_since = now->now;
2157 if (iocg->indebt_since) {
2158 iocg->local_stat.indebt_us +=
2159 now->now - iocg->indebt_since;
2160 iocg->indebt_since = now->now;
2162 if (iocg->indelay_since) {
2163 iocg->local_stat.indelay_us +=
2164 now->now - iocg->indelay_since;
2165 iocg->indelay_since = now->now;
2168 if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt ||
2170 /* might be oversleeping vtime / hweight changes, kick */
2171 iocg_kick_waitq(iocg, true, now);
2172 if (iocg->abs_vdebt || iocg->delay)
2174 } else if (iocg_is_idle(iocg)) {
2175 /* no waiter and idle, deactivate */
2176 u64 vtime = atomic64_read(&iocg->vtime);
2180 * @iocg has been inactive for a full duration and will
2181 * have a high budget. Account anything above target as
2182 * error and throw away. On reactivation, it'll start
2183 * with the target budget.
2185 excess = now->vnow - vtime - ioc->margins.target;
2189 current_hweight(iocg, NULL, &old_hwi);
2190 ioc->vtime_err -= div64_u64(excess * old_hwi,
2194 TRACE_IOCG_PATH(iocg_idle, iocg, now,
2195 atomic64_read(&iocg->active_period),
2196 atomic64_read(&ioc->cur_period), vtime);
2197 __propagate_weights(iocg, 0, 0, false, now);
2198 list_del_init(&iocg->active_list);
2201 spin_unlock(&iocg->waitq.lock);
2204 commit_weights(ioc);
2208 static void ioc_timer_fn(struct timer_list *timer)
2210 struct ioc *ioc = container_of(timer, struct ioc, timer);
2211 struct ioc_gq *iocg, *tiocg;
2213 LIST_HEAD(surpluses);
2214 int nr_debtors, nr_shortages = 0, nr_lagging = 0;
2215 u64 usage_us_sum = 0;
2216 u32 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
2217 u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];
2218 u32 missed_ppm[2], rq_wait_pct;
2220 int prev_busy_level;
2222 /* how were the latencies during the period? */
2223 ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct);
2225 /* take care of active iocgs */
2226 spin_lock_irq(&ioc->lock);
2230 period_vtime = now.vnow - ioc->period_at_vtime;
2231 if (WARN_ON_ONCE(!period_vtime)) {
2232 spin_unlock_irq(&ioc->lock);
2236 nr_debtors = ioc_check_iocgs(ioc, &now);
2239 * Wait and indebt stat are flushed above and the donation calculation
2240 * below needs updated usage stat. Let's bring stat up-to-date.
2242 iocg_flush_stat(&ioc->active_iocgs, &now);
2244 /* calc usage and see whether some weights need to be moved around */
2245 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
2246 u64 vdone, vtime, usage_us;
2247 u32 hw_active, hw_inuse;
2250 * Collect unused and wind vtime closer to vnow to prevent
2251 * iocgs from accumulating a large amount of budget.
2253 vdone = atomic64_read(&iocg->done_vtime);
2254 vtime = atomic64_read(&iocg->vtime);
2255 current_hweight(iocg, &hw_active, &hw_inuse);
2258 * Latency QoS detection doesn't account for IOs which are
2259 * in-flight for longer than a period. Detect them by
2260 * comparing vdone against period start. If lagging behind
2261 * IOs from past periods, don't increase vrate.
2263 if ((ppm_rthr != MILLION || ppm_wthr != MILLION) &&
2264 !atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
2265 time_after64(vtime, vdone) &&
2266 time_after64(vtime, now.vnow -
2267 MAX_LAGGING_PERIODS * period_vtime) &&
2268 time_before64(vdone, now.vnow - period_vtime))
2272 * Determine absolute usage factoring in in-flight IOs to avoid
2273 * high-latency completions appearing as idle.
2275 usage_us = iocg->usage_delta_us;
2276 usage_us_sum += usage_us;
2278 /* see whether there's surplus vtime */
2279 WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
2280 if (hw_inuse < hw_active ||
2281 (!waitqueue_active(&iocg->waitq) &&
2282 time_before64(vtime, now.vnow - ioc->margins.low))) {
2283 u32 hwa, old_hwi, hwm, new_hwi, usage;
2286 if (vdone != vtime) {
2287 u64 inflight_us = DIV64_U64_ROUND_UP(
2288 cost_to_abs_cost(vtime - vdone, hw_inuse),
2289 ioc->vtime_base_rate);
2291 usage_us = max(usage_us, inflight_us);
2294 /* convert to hweight based usage ratio */
2295 if (time_after64(iocg->activated_at, ioc->period_at))
2296 usage_dur = max_t(u64, now.now - iocg->activated_at, 1);
2298 usage_dur = max_t(u64, now.now - ioc->period_at, 1);
2300 usage = clamp_t(u32,
2301 DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE,
2306 * Already donating or accumulated enough to start.
2307 * Determine the donation amount.
2309 current_hweight(iocg, &hwa, &old_hwi);
2310 hwm = current_hweight_max(iocg);
2311 new_hwi = hweight_after_donation(iocg, old_hwi, hwm,
2313 if (new_hwi < hwm) {
2314 iocg->hweight_donating = hwa;
2315 iocg->hweight_after_donation = new_hwi;
2316 list_add(&iocg->surplus_list, &surpluses);
2318 TRACE_IOCG_PATH(inuse_shortage, iocg, &now,
2319 iocg->inuse, iocg->active,
2320 iocg->hweight_inuse, new_hwi);
2322 __propagate_weights(iocg, iocg->active,
2323 iocg->active, true, &now);
2327 /* genuinely short on vtime */
2332 if (!list_empty(&surpluses) && nr_shortages)
2333 transfer_surpluses(&surpluses, &now);
2335 commit_weights(ioc);
2337 /* surplus list should be dissolved after use */
2338 list_for_each_entry_safe(iocg, tiocg, &surpluses, surplus_list)
2339 list_del_init(&iocg->surplus_list);
2342 * If q is getting clogged or we're missing too much, we're issuing
2343 * too much IO and should lower vtime rate. If we're not missing
2344 * and experiencing shortages but not surpluses, we're too stingy
2345 * and should increase vtime rate.
2347 prev_busy_level = ioc->busy_level;
2348 if (rq_wait_pct > RQ_WAIT_BUSY_PCT ||
2349 missed_ppm[READ] > ppm_rthr ||
2350 missed_ppm[WRITE] > ppm_wthr) {
2351 /* clearly missing QoS targets, slow down vrate */
2352 ioc->busy_level = max(ioc->busy_level, 0);
2354 } else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
2355 missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
2356 missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
2357 /* QoS targets are being met with >25% margin */
2360 * We're throttling while the device has spare
2361 * capacity. If vrate was being slowed down, stop.
2363 ioc->busy_level = min(ioc->busy_level, 0);
2366 * If there are IOs spanning multiple periods, wait
2367 * them out before pushing the device harder.
2373 * Nobody is being throttled and the users aren't
2374 * issuing enough IOs to saturate the device. We
2375 * simply don't know how close the device is to
2376 * saturation. Coast.
2378 ioc->busy_level = 0;
2381 /* inside the hysterisis margin, we're good */
2382 ioc->busy_level = 0;
2385 ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);
2387 ioc_adjust_base_vrate(ioc, rq_wait_pct, nr_lagging, nr_shortages,
2388 prev_busy_level, missed_ppm);
2390 ioc_refresh_params(ioc, false);
2392 ioc_forgive_debts(ioc, usage_us_sum, nr_debtors, &now);
2395 * This period is done. Move onto the next one. If nothing's
2396 * going on with the device, stop the timer.
2398 atomic64_inc(&ioc->cur_period);
2400 if (ioc->running != IOC_STOP) {
2401 if (!list_empty(&ioc->active_iocgs)) {
2402 ioc_start_period(ioc, &now);
2404 ioc->busy_level = 0;
2406 ioc->running = IOC_IDLE;
2409 ioc_refresh_vrate(ioc, &now);
2412 spin_unlock_irq(&ioc->lock);
2415 static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime,
2416 u64 abs_cost, struct ioc_now *now)
2418 struct ioc *ioc = iocg->ioc;
2419 struct ioc_margins *margins = &ioc->margins;
2420 u32 __maybe_unused old_inuse = iocg->inuse, __maybe_unused old_hwi;
2423 u64 cost, new_inuse;
2425 current_hweight(iocg, NULL, &hwi);
2427 cost = abs_cost_to_cost(abs_cost, hwi);
2428 margin = now->vnow - vtime - cost;
2430 /* debt handling owns inuse for debtors */
2431 if (iocg->abs_vdebt)
2435 * We only increase inuse during period and do so if the margin has
2436 * deteriorated since the previous adjustment.
2438 if (margin >= iocg->saved_margin || margin >= margins->low ||
2439 iocg->inuse == iocg->active)
2442 spin_lock_irq(&ioc->lock);
2444 /* we own inuse only when @iocg is in the normal active state */
2445 if (iocg->abs_vdebt || list_empty(&iocg->active_list)) {
2446 spin_unlock_irq(&ioc->lock);
2451 * Bump up inuse till @abs_cost fits in the existing budget.
2452 * adj_step must be determined after acquiring ioc->lock - we might
2453 * have raced and lost to another thread for activation and could
2454 * be reading 0 iocg->active before ioc->lock which will lead to
2457 new_inuse = iocg->inuse;
2458 adj_step = DIV_ROUND_UP(iocg->active * INUSE_ADJ_STEP_PCT, 100);
2460 new_inuse = new_inuse + adj_step;
2461 propagate_weights(iocg, iocg->active, new_inuse, true, now);
2462 current_hweight(iocg, NULL, &hwi);
2463 cost = abs_cost_to_cost(abs_cost, hwi);
2464 } while (time_after64(vtime + cost, now->vnow) &&
2465 iocg->inuse != iocg->active);
2467 spin_unlock_irq(&ioc->lock);
2469 TRACE_IOCG_PATH(inuse_adjust, iocg, now,
2470 old_inuse, iocg->inuse, old_hwi, hwi);
2475 static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg,
2476 bool is_merge, u64 *costp)
2478 struct ioc *ioc = iocg->ioc;
2479 u64 coef_seqio, coef_randio, coef_page;
2480 u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1);
2484 switch (bio_op(bio)) {
2486 coef_seqio = ioc->params.lcoefs[LCOEF_RSEQIO];
2487 coef_randio = ioc->params.lcoefs[LCOEF_RRANDIO];
2488 coef_page = ioc->params.lcoefs[LCOEF_RPAGE];
2491 coef_seqio = ioc->params.lcoefs[LCOEF_WSEQIO];
2492 coef_randio = ioc->params.lcoefs[LCOEF_WRANDIO];
2493 coef_page = ioc->params.lcoefs[LCOEF_WPAGE];
2500 seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor);
2501 seek_pages >>= IOC_SECT_TO_PAGE_SHIFT;
2505 if (seek_pages > LCOEF_RANDIO_PAGES) {
2506 cost += coef_randio;
2511 cost += pages * coef_page;
2516 static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge)
2520 calc_vtime_cost_builtin(bio, iocg, is_merge, &cost);
2524 static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc,
2527 unsigned int pages = blk_rq_stats_sectors(rq) >> IOC_SECT_TO_PAGE_SHIFT;
2529 switch (req_op(rq)) {
2531 *costp = pages * ioc->params.lcoefs[LCOEF_RPAGE];
2534 *costp = pages * ioc->params.lcoefs[LCOEF_WPAGE];
2541 static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc)
2545 calc_size_vtime_cost_builtin(rq, ioc, &cost);
2549 static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
2551 struct blkcg_gq *blkg = bio->bi_blkg;
2552 struct ioc *ioc = rqos_to_ioc(rqos);
2553 struct ioc_gq *iocg = blkg_to_iocg(blkg);
2555 struct iocg_wait wait;
2556 u64 abs_cost, cost, vtime;
2557 bool use_debt, ioc_locked;
2558 unsigned long flags;
2560 /* bypass IOs if disabled, still initializing, or for root cgroup */
2561 if (!ioc->enabled || !iocg || !iocg->level)
2564 /* calculate the absolute vtime cost */
2565 abs_cost = calc_vtime_cost(bio, iocg, false);
2569 if (!iocg_activate(iocg, &now))
2572 iocg->cursor = bio_end_sector(bio);
2573 vtime = atomic64_read(&iocg->vtime);
2574 cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2577 * If no one's waiting and within budget, issue right away. The
2578 * tests are racy but the races aren't systemic - we only miss once
2579 * in a while which is fine.
2581 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
2582 time_before_eq64(vtime + cost, now.vnow)) {
2583 iocg_commit_bio(iocg, bio, abs_cost, cost);
2588 * We're over budget. This can be handled in two ways. IOs which may
2589 * cause priority inversions are punted to @ioc->aux_iocg and charged as
2590 * debt. Otherwise, the issuer is blocked on @iocg->waitq. Debt handling
2591 * requires @ioc->lock, waitq handling @iocg->waitq.lock. Determine
2592 * whether debt handling is needed and acquire locks accordingly.
2594 use_debt = bio_issue_as_root_blkg(bio) || fatal_signal_pending(current);
2595 ioc_locked = use_debt || READ_ONCE(iocg->abs_vdebt);
2597 iocg_lock(iocg, ioc_locked, &flags);
2600 * @iocg must stay activated for debt and waitq handling. Deactivation
2601 * is synchronized against both ioc->lock and waitq.lock and we won't
2602 * get deactivated as long as we're waiting or has debt, so we're good
2603 * if we're activated here. In the unlikely cases that we aren't, just
2606 if (unlikely(list_empty(&iocg->active_list))) {
2607 iocg_unlock(iocg, ioc_locked, &flags);
2608 iocg_commit_bio(iocg, bio, abs_cost, cost);
2613 * We're over budget. If @bio has to be issued regardless, remember
2614 * the abs_cost instead of advancing vtime. iocg_kick_waitq() will pay
2615 * off the debt before waking more IOs.
2617 * This way, the debt is continuously paid off each period with the
2618 * actual budget available to the cgroup. If we just wound vtime, we
2619 * would incorrectly use the current hw_inuse for the entire amount
2620 * which, for example, can lead to the cgroup staying blocked for a
2621 * long time even with substantially raised hw_inuse.
2623 * An iocg with vdebt should stay online so that the timer can keep
2624 * deducting its vdebt and [de]activate use_delay mechanism
2625 * accordingly. We don't want to race against the timer trying to
2626 * clear them and leave @iocg inactive w/ dangling use_delay heavily
2627 * penalizing the cgroup and its descendants.
2630 iocg_incur_debt(iocg, abs_cost, &now);
2631 if (iocg_kick_delay(iocg, &now))
2632 blkcg_schedule_throttle(rqos->q,
2633 (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2634 iocg_unlock(iocg, ioc_locked, &flags);
2638 /* guarantee that iocgs w/ waiters have maximum inuse */
2639 if (!iocg->abs_vdebt && iocg->inuse != iocg->active) {
2641 iocg_unlock(iocg, false, &flags);
2645 propagate_weights(iocg, iocg->active, iocg->active, true,
2650 * Append self to the waitq and schedule the wakeup timer if we're
2651 * the first waiter. The timer duration is calculated based on the
2652 * current vrate. vtime and hweight changes can make it too short
2653 * or too long. Each wait entry records the absolute cost it's
2654 * waiting for to allow re-evaluation using a custom wait entry.
2656 * If too short, the timer simply reschedules itself. If too long,
2657 * the period timer will notice and trigger wakeups.
2659 * All waiters are on iocg->waitq and the wait states are
2660 * synchronized using waitq.lock.
2662 init_waitqueue_func_entry(&wait.wait, iocg_wake_fn);
2663 wait.wait.private = current;
2665 wait.abs_cost = abs_cost;
2666 wait.committed = false; /* will be set true by waker */
2668 __add_wait_queue_entry_tail(&iocg->waitq, &wait.wait);
2669 iocg_kick_waitq(iocg, ioc_locked, &now);
2671 iocg_unlock(iocg, ioc_locked, &flags);
2674 set_current_state(TASK_UNINTERRUPTIBLE);
2680 /* waker already committed us, proceed */
2681 finish_wait(&iocg->waitq, &wait.wait);
2684 static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
2687 struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2688 struct ioc *ioc = rqos_to_ioc(rqos);
2689 sector_t bio_end = bio_end_sector(bio);
2691 u64 vtime, abs_cost, cost;
2692 unsigned long flags;
2694 /* bypass if disabled, still initializing, or for root cgroup */
2695 if (!ioc->enabled || !iocg || !iocg->level)
2698 abs_cost = calc_vtime_cost(bio, iocg, true);
2704 vtime = atomic64_read(&iocg->vtime);
2705 cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now);
2707 /* update cursor if backmerging into the request at the cursor */
2708 if (blk_rq_pos(rq) < bio_end &&
2709 blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor)
2710 iocg->cursor = bio_end;
2713 * Charge if there's enough vtime budget and the existing request has
2716 if (rq->bio && rq->bio->bi_iocost_cost &&
2717 time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) {
2718 iocg_commit_bio(iocg, bio, abs_cost, cost);
2723 * Otherwise, account it as debt if @iocg is online, which it should
2724 * be for the vast majority of cases. See debt handling in
2725 * ioc_rqos_throttle() for details.
2727 spin_lock_irqsave(&ioc->lock, flags);
2728 spin_lock(&iocg->waitq.lock);
2730 if (likely(!list_empty(&iocg->active_list))) {
2731 iocg_incur_debt(iocg, abs_cost, &now);
2732 if (iocg_kick_delay(iocg, &now))
2733 blkcg_schedule_throttle(rqos->q,
2734 (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
2736 iocg_commit_bio(iocg, bio, abs_cost, cost);
2739 spin_unlock(&iocg->waitq.lock);
2740 spin_unlock_irqrestore(&ioc->lock, flags);
2743 static void ioc_rqos_done_bio(struct rq_qos *rqos, struct bio *bio)
2745 struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg);
2747 if (iocg && bio->bi_iocost_cost)
2748 atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime);
2751 static void ioc_rqos_done(struct rq_qos *rqos, struct request *rq)
2753 struct ioc *ioc = rqos_to_ioc(rqos);
2754 struct ioc_pcpu_stat *ccs;
2755 u64 on_q_ns, rq_wait_ns, size_nsec;
2758 if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns)
2761 switch (req_op(rq) & REQ_OP_MASK) {
2774 on_q_ns = ktime_get_ns() - rq->alloc_time_ns;
2775 rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;
2776 size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC);
2778 ccs = get_cpu_ptr(ioc->pcpu_stat);
2780 if (on_q_ns <= size_nsec ||
2781 on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC)
2782 local_inc(&ccs->missed[rw].nr_met);
2784 local_inc(&ccs->missed[rw].nr_missed);
2786 local64_add(rq_wait_ns, &ccs->rq_wait_ns);
2791 static void ioc_rqos_queue_depth_changed(struct rq_qos *rqos)
2793 struct ioc *ioc = rqos_to_ioc(rqos);
2795 spin_lock_irq(&ioc->lock);
2796 ioc_refresh_params(ioc, false);
2797 spin_unlock_irq(&ioc->lock);
2800 static void ioc_rqos_exit(struct rq_qos *rqos)
2802 struct ioc *ioc = rqos_to_ioc(rqos);
2804 blkcg_deactivate_policy(rqos->q, &blkcg_policy_iocost);
2806 spin_lock_irq(&ioc->lock);
2807 ioc->running = IOC_STOP;
2808 spin_unlock_irq(&ioc->lock);
2810 del_timer_sync(&ioc->timer);
2811 free_percpu(ioc->pcpu_stat);
2815 static struct rq_qos_ops ioc_rqos_ops = {
2816 .throttle = ioc_rqos_throttle,
2817 .merge = ioc_rqos_merge,
2818 .done_bio = ioc_rqos_done_bio,
2819 .done = ioc_rqos_done,
2820 .queue_depth_changed = ioc_rqos_queue_depth_changed,
2821 .exit = ioc_rqos_exit,
2824 static int blk_iocost_init(struct request_queue *q)
2827 struct rq_qos *rqos;
2830 ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
2834 ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat);
2835 if (!ioc->pcpu_stat) {
2840 for_each_possible_cpu(cpu) {
2841 struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu);
2843 for (i = 0; i < ARRAY_SIZE(ccs->missed); i++) {
2844 local_set(&ccs->missed[i].nr_met, 0);
2845 local_set(&ccs->missed[i].nr_missed, 0);
2847 local64_set(&ccs->rq_wait_ns, 0);
2851 rqos->id = RQ_QOS_COST;
2852 rqos->ops = &ioc_rqos_ops;
2855 spin_lock_init(&ioc->lock);
2856 timer_setup(&ioc->timer, ioc_timer_fn, 0);
2857 INIT_LIST_HEAD(&ioc->active_iocgs);
2859 ioc->running = IOC_IDLE;
2860 ioc->vtime_base_rate = VTIME_PER_USEC;
2861 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
2862 seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
2863 ioc->period_at = ktime_to_us(ktime_get());
2864 atomic64_set(&ioc->cur_period, 0);
2865 atomic_set(&ioc->hweight_gen, 0);
2867 spin_lock_irq(&ioc->lock);
2868 ioc->autop_idx = AUTOP_INVALID;
2869 ioc_refresh_params(ioc, true);
2870 spin_unlock_irq(&ioc->lock);
2873 * rqos must be added before activation to allow iocg_pd_init() to
2874 * lookup the ioc from q. This means that the rqos methods may get
2875 * called before policy activation completion, can't assume that the
2876 * target bio has an iocg associated and need to test for NULL iocg.
2878 rq_qos_add(q, rqos);
2879 ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
2881 rq_qos_del(q, rqos);
2882 free_percpu(ioc->pcpu_stat);
2889 static struct blkcg_policy_data *ioc_cpd_alloc(gfp_t gfp)
2891 struct ioc_cgrp *iocc;
2893 iocc = kzalloc(sizeof(struct ioc_cgrp), gfp);
2897 iocc->dfl_weight = CGROUP_WEIGHT_DFL * WEIGHT_ONE;
2901 static void ioc_cpd_free(struct blkcg_policy_data *cpd)
2903 kfree(container_of(cpd, struct ioc_cgrp, cpd));
2906 static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q,
2907 struct blkcg *blkcg)
2909 int levels = blkcg->css.cgroup->level + 1;
2910 struct ioc_gq *iocg;
2912 iocg = kzalloc_node(struct_size(iocg, ancestors, levels), gfp, q->node);
2916 iocg->pcpu_stat = alloc_percpu_gfp(struct iocg_pcpu_stat, gfp);
2917 if (!iocg->pcpu_stat) {
2925 static void ioc_pd_init(struct blkg_policy_data *pd)
2927 struct ioc_gq *iocg = pd_to_iocg(pd);
2928 struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd);
2929 struct ioc *ioc = q_to_ioc(blkg->q);
2931 struct blkcg_gq *tblkg;
2932 unsigned long flags;
2937 atomic64_set(&iocg->vtime, now.vnow);
2938 atomic64_set(&iocg->done_vtime, now.vnow);
2939 atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period));
2940 INIT_LIST_HEAD(&iocg->active_list);
2941 INIT_LIST_HEAD(&iocg->walk_list);
2942 INIT_LIST_HEAD(&iocg->surplus_list);
2943 iocg->hweight_active = WEIGHT_ONE;
2944 iocg->hweight_inuse = WEIGHT_ONE;
2946 init_waitqueue_head(&iocg->waitq);
2947 hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
2948 iocg->waitq_timer.function = iocg_waitq_timer_fn;
2950 iocg->level = blkg->blkcg->css.cgroup->level;
2952 for (tblkg = blkg; tblkg; tblkg = tblkg->parent) {
2953 struct ioc_gq *tiocg = blkg_to_iocg(tblkg);
2954 iocg->ancestors[tiocg->level] = tiocg;
2957 spin_lock_irqsave(&ioc->lock, flags);
2958 weight_updated(iocg, &now);
2959 spin_unlock_irqrestore(&ioc->lock, flags);
2962 static void ioc_pd_free(struct blkg_policy_data *pd)
2964 struct ioc_gq *iocg = pd_to_iocg(pd);
2965 struct ioc *ioc = iocg->ioc;
2966 unsigned long flags;
2969 spin_lock_irqsave(&ioc->lock, flags);
2971 if (!list_empty(&iocg->active_list)) {
2975 propagate_weights(iocg, 0, 0, false, &now);
2976 list_del_init(&iocg->active_list);
2979 WARN_ON_ONCE(!list_empty(&iocg->walk_list));
2980 WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
2982 spin_unlock_irqrestore(&ioc->lock, flags);
2984 hrtimer_cancel(&iocg->waitq_timer);
2986 free_percpu(iocg->pcpu_stat);
2990 static size_t ioc_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size)
2992 struct ioc_gq *iocg = pd_to_iocg(pd);
2993 struct ioc *ioc = iocg->ioc;
2999 if (iocg->level == 0) {
3000 unsigned vp10k = DIV64_U64_ROUND_CLOSEST(
3001 ioc->vtime_base_rate * 10000,
3003 pos += scnprintf(buf + pos, size - pos, " cost.vrate=%u.%02u",
3004 vp10k / 100, vp10k % 100);
3007 pos += scnprintf(buf + pos, size - pos, " cost.usage=%llu",
3008 iocg->last_stat.usage_us);
3010 if (blkcg_debug_stats)
3011 pos += scnprintf(buf + pos, size - pos,
3012 " cost.wait=%llu cost.indebt=%llu cost.indelay=%llu",
3013 iocg->last_stat.wait_us,
3014 iocg->last_stat.indebt_us,
3015 iocg->last_stat.indelay_us);
3020 static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3023 const char *dname = blkg_dev_name(pd->blkg);
3024 struct ioc_gq *iocg = pd_to_iocg(pd);
3026 if (dname && iocg->cfg_weight)
3027 seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight / WEIGHT_ONE);
3032 static int ioc_weight_show(struct seq_file *sf, void *v)
3034 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3035 struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3037 seq_printf(sf, "default %u\n", iocc->dfl_weight / WEIGHT_ONE);
3038 blkcg_print_blkgs(sf, blkcg, ioc_weight_prfill,
3039 &blkcg_policy_iocost, seq_cft(sf)->private, false);
3043 static ssize_t ioc_weight_write(struct kernfs_open_file *of, char *buf,
3044 size_t nbytes, loff_t off)
3046 struct blkcg *blkcg = css_to_blkcg(of_css(of));
3047 struct ioc_cgrp *iocc = blkcg_to_iocc(blkcg);
3048 struct blkg_conf_ctx ctx;
3050 struct ioc_gq *iocg;
3054 if (!strchr(buf, ':')) {
3055 struct blkcg_gq *blkg;
3057 if (!sscanf(buf, "default %u", &v) && !sscanf(buf, "%u", &v))
3060 if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3063 spin_lock(&blkcg->lock);
3064 iocc->dfl_weight = v * WEIGHT_ONE;
3065 hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
3066 struct ioc_gq *iocg = blkg_to_iocg(blkg);
3069 spin_lock_irq(&iocg->ioc->lock);
3070 ioc_now(iocg->ioc, &now);
3071 weight_updated(iocg, &now);
3072 spin_unlock_irq(&iocg->ioc->lock);
3075 spin_unlock(&blkcg->lock);
3080 ret = blkg_conf_prep(blkcg, &blkcg_policy_iocost, buf, &ctx);
3084 iocg = blkg_to_iocg(ctx.blkg);
3086 if (!strncmp(ctx.body, "default", 7)) {
3089 if (!sscanf(ctx.body, "%u", &v))
3091 if (v < CGROUP_WEIGHT_MIN || v > CGROUP_WEIGHT_MAX)
3095 spin_lock(&iocg->ioc->lock);
3096 iocg->cfg_weight = v * WEIGHT_ONE;
3097 ioc_now(iocg->ioc, &now);
3098 weight_updated(iocg, &now);
3099 spin_unlock(&iocg->ioc->lock);
3101 blkg_conf_finish(&ctx);
3105 blkg_conf_finish(&ctx);
3109 static u64 ioc_qos_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
3112 const char *dname = blkg_dev_name(pd->blkg);
3113 struct ioc *ioc = pd_to_iocg(pd)->ioc;
3118 seq_printf(sf, "%s enable=%d ctrl=%s rpct=%u.%02u rlat=%u wpct=%u.%02u wlat=%u min=%u.%02u max=%u.%02u\n",
3119 dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto",
3120 ioc->params.qos[QOS_RPPM] / 10000,
3121 ioc->params.qos[QOS_RPPM] % 10000 / 100,
3122 ioc->params.qos[QOS_RLAT],
3123 ioc->params.qos[QOS_WPPM] / 10000,
3124 ioc->params.qos[QOS_WPPM] % 10000 / 100,
3125 ioc->params.qos[QOS_WLAT],
3126 ioc->params.qos[QOS_MIN] / 10000,
3127 ioc->params.qos[QOS_MIN] % 10000 / 100,
3128 ioc->params.qos[QOS_MAX] / 10000,
3129 ioc->params.qos[QOS_MAX] % 10000 / 100);
3133 static int ioc_qos_show(struct seq_file *sf, void *v)
3135 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3137 blkcg_print_blkgs(sf, blkcg, ioc_qos_prfill,
3138 &blkcg_policy_iocost, seq_cft(sf)->private, false);
3142 static const match_table_t qos_ctrl_tokens = {
3143 { QOS_ENABLE, "enable=%u" },
3144 { QOS_CTRL, "ctrl=%s" },
3145 { NR_QOS_CTRL_PARAMS, NULL },
3148 static const match_table_t qos_tokens = {
3149 { QOS_RPPM, "rpct=%s" },
3150 { QOS_RLAT, "rlat=%u" },
3151 { QOS_WPPM, "wpct=%s" },
3152 { QOS_WLAT, "wlat=%u" },
3153 { QOS_MIN, "min=%s" },
3154 { QOS_MAX, "max=%s" },
3155 { NR_QOS_PARAMS, NULL },
3158 static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
3159 size_t nbytes, loff_t off)
3161 struct block_device *bdev;
3163 u32 qos[NR_QOS_PARAMS];
3168 bdev = blkcg_conf_open_bdev(&input);
3170 return PTR_ERR(bdev);
3172 ioc = q_to_ioc(bdev->bd_disk->queue);
3174 ret = blk_iocost_init(bdev->bd_disk->queue);
3177 ioc = q_to_ioc(bdev->bd_disk->queue);
3180 spin_lock_irq(&ioc->lock);
3181 memcpy(qos, ioc->params.qos, sizeof(qos));
3182 enable = ioc->enabled;
3183 user = ioc->user_qos_params;
3184 spin_unlock_irq(&ioc->lock);
3186 while ((p = strsep(&input, " \t\n"))) {
3187 substring_t args[MAX_OPT_ARGS];
3195 switch (match_token(p, qos_ctrl_tokens, args)) {
3197 match_u64(&args[0], &v);
3201 match_strlcpy(buf, &args[0], sizeof(buf));
3202 if (!strcmp(buf, "auto"))
3204 else if (!strcmp(buf, "user"))
3211 tok = match_token(p, qos_tokens, args);
3215 if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3218 if (cgroup_parse_float(buf, 2, &v))
3220 if (v < 0 || v > 10000)
3226 if (match_u64(&args[0], &v))
3232 if (match_strlcpy(buf, &args[0], sizeof(buf)) >=
3235 if (cgroup_parse_float(buf, 2, &v))
3239 qos[tok] = clamp_t(s64, v * 100,
3240 VRATE_MIN_PPM, VRATE_MAX_PPM);
3248 if (qos[QOS_MIN] > qos[QOS_MAX])
3251 spin_lock_irq(&ioc->lock);
3254 blk_stat_enable_accounting(ioc->rqos.q);
3255 blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
3256 ioc->enabled = true;
3258 blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
3259 ioc->enabled = false;
3263 memcpy(ioc->params.qos, qos, sizeof(qos));
3264 ioc->user_qos_params = true;
3266 ioc->user_qos_params = false;
3269 ioc_refresh_params(ioc, true);
3270 spin_unlock_irq(&ioc->lock);
3272 blkdev_put_no_open(bdev);
3277 blkdev_put_no_open(bdev);
3281 static u64 ioc_cost_model_prfill(struct seq_file *sf,
3282 struct blkg_policy_data *pd, int off)
3284 const char *dname = blkg_dev_name(pd->blkg);
3285 struct ioc *ioc = pd_to_iocg(pd)->ioc;
3286 u64 *u = ioc->params.i_lcoefs;
3291 seq_printf(sf, "%s ctrl=%s model=linear "
3292 "rbps=%llu rseqiops=%llu rrandiops=%llu "
3293 "wbps=%llu wseqiops=%llu wrandiops=%llu\n",
3294 dname, ioc->user_cost_model ? "user" : "auto",
3295 u[I_LCOEF_RBPS], u[I_LCOEF_RSEQIOPS], u[I_LCOEF_RRANDIOPS],
3296 u[I_LCOEF_WBPS], u[I_LCOEF_WSEQIOPS], u[I_LCOEF_WRANDIOPS]);
3300 static int ioc_cost_model_show(struct seq_file *sf, void *v)
3302 struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
3304 blkcg_print_blkgs(sf, blkcg, ioc_cost_model_prfill,
3305 &blkcg_policy_iocost, seq_cft(sf)->private, false);
3309 static const match_table_t cost_ctrl_tokens = {
3310 { COST_CTRL, "ctrl=%s" },
3311 { COST_MODEL, "model=%s" },
3312 { NR_COST_CTRL_PARAMS, NULL },
3315 static const match_table_t i_lcoef_tokens = {
3316 { I_LCOEF_RBPS, "rbps=%u" },
3317 { I_LCOEF_RSEQIOPS, "rseqiops=%u" },
3318 { I_LCOEF_RRANDIOPS, "rrandiops=%u" },
3319 { I_LCOEF_WBPS, "wbps=%u" },
3320 { I_LCOEF_WSEQIOPS, "wseqiops=%u" },
3321 { I_LCOEF_WRANDIOPS, "wrandiops=%u" },
3322 { NR_I_LCOEFS, NULL },
3325 static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
3326 size_t nbytes, loff_t off)
3328 struct block_device *bdev;
3335 bdev = blkcg_conf_open_bdev(&input);
3337 return PTR_ERR(bdev);
3339 ioc = q_to_ioc(bdev->bd_disk->queue);
3341 ret = blk_iocost_init(bdev->bd_disk->queue);
3344 ioc = q_to_ioc(bdev->bd_disk->queue);
3347 spin_lock_irq(&ioc->lock);
3348 memcpy(u, ioc->params.i_lcoefs, sizeof(u));
3349 user = ioc->user_cost_model;
3350 spin_unlock_irq(&ioc->lock);
3352 while ((p = strsep(&input, " \t\n"))) {
3353 substring_t args[MAX_OPT_ARGS];
3361 switch (match_token(p, cost_ctrl_tokens, args)) {
3363 match_strlcpy(buf, &args[0], sizeof(buf));
3364 if (!strcmp(buf, "auto"))
3366 else if (!strcmp(buf, "user"))
3372 match_strlcpy(buf, &args[0], sizeof(buf));
3373 if (strcmp(buf, "linear"))
3378 tok = match_token(p, i_lcoef_tokens, args);
3379 if (tok == NR_I_LCOEFS)
3381 if (match_u64(&args[0], &v))
3387 spin_lock_irq(&ioc->lock);
3389 memcpy(ioc->params.i_lcoefs, u, sizeof(u));
3390 ioc->user_cost_model = true;
3392 ioc->user_cost_model = false;
3394 ioc_refresh_params(ioc, true);
3395 spin_unlock_irq(&ioc->lock);
3397 blkdev_put_no_open(bdev);
3403 blkdev_put_no_open(bdev);
3407 static struct cftype ioc_files[] = {
3410 .flags = CFTYPE_NOT_ON_ROOT,
3411 .seq_show = ioc_weight_show,
3412 .write = ioc_weight_write,
3416 .flags = CFTYPE_ONLY_ON_ROOT,
3417 .seq_show = ioc_qos_show,
3418 .write = ioc_qos_write,
3421 .name = "cost.model",
3422 .flags = CFTYPE_ONLY_ON_ROOT,
3423 .seq_show = ioc_cost_model_show,
3424 .write = ioc_cost_model_write,
3429 static struct blkcg_policy blkcg_policy_iocost = {
3430 .dfl_cftypes = ioc_files,
3431 .cpd_alloc_fn = ioc_cpd_alloc,
3432 .cpd_free_fn = ioc_cpd_free,
3433 .pd_alloc_fn = ioc_pd_alloc,
3434 .pd_init_fn = ioc_pd_init,
3435 .pd_free_fn = ioc_pd_free,
3436 .pd_stat_fn = ioc_pd_stat,
3439 static int __init ioc_init(void)
3441 return blkcg_policy_register(&blkcg_policy_iocost);
3444 static void __exit ioc_exit(void)
3446 blkcg_policy_unregister(&blkcg_policy_iocost);
3449 module_init(ioc_init);
3450 module_exit(ioc_exit);