// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/base/power/domain_governor.c - Governors for device PM domains.
 */
7 #include <linux/kernel.h>
8 #include <linux/pm_domain.h>
9 #include <linux/pm_qos.h>
10 #include <linux/hrtimer.h>
11 #include <linux/cpuidle.h>
12 #include <linux/cpumask.h>
13 #include <linux/ktime.h>
15 static int dev_update_qos_constraint(struct device *dev, void *data)
17 s64 *constraint_ns_p = data;
20 if (dev->power.subsys_data && dev->power.subsys_data->domain_data) {
22 * Only take suspend-time QoS constraints of devices into
23 * account, because constraints updated after the device has
24 * been suspended are not guaranteed to be taken into account
25 * anyway. In order for them to take effect, the device has to
26 * be resumed and suspended again.
28 constraint_ns = dev_gpd_data(dev)->td.effective_constraint_ns;
31 * The child is not in a domain and there's no info on its
32 * suspend/resume latencies, so assume them to be negligible and
33 * take its current PM QoS constraint (that's the only thing
34 * known at this point anyway).
36 constraint_ns = dev_pm_qos_read_value(dev, DEV_PM_QOS_RESUME_LATENCY);
37 constraint_ns *= NSEC_PER_USEC;
40 if (constraint_ns < *constraint_ns_p)
41 *constraint_ns_p = constraint_ns;
47 * default_suspend_ok - Default PM domain governor routine to suspend devices.
48 * @dev: Device to check.
50 static bool default_suspend_ok(struct device *dev)
52 struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
56 dev_dbg(dev, "%s()\n", __func__);
58 spin_lock_irqsave(&dev->power.lock, flags);
60 if (!td->constraint_changed) {
61 bool ret = td->cached_suspend_ok;
63 spin_unlock_irqrestore(&dev->power.lock, flags);
66 td->constraint_changed = false;
67 td->cached_suspend_ok = false;
68 td->effective_constraint_ns = 0;
69 constraint_ns = __dev_pm_qos_resume_latency(dev);
71 spin_unlock_irqrestore(&dev->power.lock, flags);
73 if (constraint_ns == 0)
76 constraint_ns *= NSEC_PER_USEC;
78 * We can walk the children without any additional locking, because
79 * they all have been suspended at this point and their
80 * effective_constraint_ns fields won't be modified in parallel with us.
82 if (!dev->power.ignore_children)
83 device_for_each_child(dev, &constraint_ns,
84 dev_update_qos_constraint);
86 if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS) {
87 /* "No restriction", so the device is allowed to suspend. */
88 td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS;
89 td->cached_suspend_ok = true;
90 } else if (constraint_ns == 0) {
92 * This triggers if one of the children that don't belong to a
93 * domain has a zero PM QoS constraint and it's better not to
94 * suspend then. effective_constraint_ns is zero already and
95 * cached_suspend_ok is false, so bail out.
99 constraint_ns -= td->suspend_latency_ns +
100 td->resume_latency_ns;
102 * effective_constraint_ns is zero already and cached_suspend_ok
103 * is false, so if the computed value is not positive, return
106 if (constraint_ns <= 0)
109 td->effective_constraint_ns = constraint_ns;
110 td->cached_suspend_ok = true;
114 * The children have been suspended already, so we don't need to take
115 * their suspend latencies into account here.
117 return td->cached_suspend_ok;
120 static void update_domain_next_wakeup(struct generic_pm_domain *genpd, ktime_t now)
122 ktime_t domain_wakeup = KTIME_MAX;
124 struct pm_domain_data *pdd;
125 struct gpd_link *link;
127 if (!(genpd->flags & GENPD_FLAG_MIN_RESIDENCY))
131 * Devices that have a predictable wakeup pattern, may specify
132 * their next wakeup. Let's find the next wakeup from all the
133 * devices attached to this domain and from all the sub-domains.
134 * It is possible that component's a next wakeup may have become
135 * stale when we read that here. We will ignore to ensure the domain
136 * is able to enter its optimal idle state.
138 list_for_each_entry(pdd, &genpd->dev_list, list_node) {
139 next_wakeup = to_gpd_data(pdd)->next_wakeup;
140 if (next_wakeup != KTIME_MAX && !ktime_before(next_wakeup, now))
141 if (ktime_before(next_wakeup, domain_wakeup))
142 domain_wakeup = next_wakeup;
145 list_for_each_entry(link, &genpd->parent_links, parent_node) {
146 next_wakeup = link->child->next_wakeup;
147 if (next_wakeup != KTIME_MAX && !ktime_before(next_wakeup, now))
148 if (ktime_before(next_wakeup, domain_wakeup))
149 domain_wakeup = next_wakeup;
152 genpd->next_wakeup = domain_wakeup;
155 static bool next_wakeup_allows_state(struct generic_pm_domain *genpd,
156 unsigned int state, ktime_t now)
158 ktime_t domain_wakeup = genpd->next_wakeup;
159 s64 idle_time_ns, min_sleep_ns;
161 min_sleep_ns = genpd->states[state].power_off_latency_ns +
162 genpd->states[state].residency_ns;
164 idle_time_ns = ktime_to_ns(ktime_sub(domain_wakeup, now));
166 return idle_time_ns >= min_sleep_ns;
169 static bool __default_power_down_ok(struct dev_pm_domain *pd,
172 struct generic_pm_domain *genpd = pd_to_genpd(pd);
173 struct gpd_link *link;
174 struct pm_domain_data *pdd;
178 off_on_time_ns = genpd->states[state].power_off_latency_ns +
179 genpd->states[state].power_on_latency_ns;
181 min_off_time_ns = -1;
183 * Check if subdomains can be off for enough time.
185 * All subdomains have been powered off already at this point.
187 list_for_each_entry(link, &genpd->parent_links, parent_node) {
188 struct generic_pm_domain *sd = link->child;
189 s64 sd_max_off_ns = sd->max_off_time_ns;
191 if (sd_max_off_ns < 0)
195 * Check if the subdomain is allowed to be off long enough for
196 * the current domain to turn off and on (that's how much time
197 * it will have to wait worst case).
199 if (sd_max_off_ns <= off_on_time_ns)
202 if (min_off_time_ns > sd_max_off_ns || min_off_time_ns < 0)
203 min_off_time_ns = sd_max_off_ns;
207 * Check if the devices in the domain can be off enough time.
209 list_for_each_entry(pdd, &genpd->dev_list, list_node) {
210 struct gpd_timing_data *td;
214 * Check if the device is allowed to be off long enough for the
215 * domain to turn off and on (that's how much time it will
216 * have to wait worst case).
218 td = &to_gpd_data(pdd)->td;
219 constraint_ns = td->effective_constraint_ns;
221 * Zero means "no suspend at all" and this runs only when all
222 * devices in the domain are suspended, so it must be positive.
224 if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS)
227 if (constraint_ns <= off_on_time_ns)
230 if (min_off_time_ns > constraint_ns || min_off_time_ns < 0)
231 min_off_time_ns = constraint_ns;
235 * If the computed minimum device off time is negative, there are no
236 * latency constraints, so the domain can spend arbitrary time in the
239 if (min_off_time_ns < 0)
243 * The difference between the computed minimum subdomain or device off
244 * time and the time needed to turn the domain on is the maximum
245 * theoretical time this domain can spend in the "off" state.
247 genpd->max_off_time_ns = min_off_time_ns -
248 genpd->states[state].power_on_latency_ns;
253 * _default_power_down_ok - Default generic PM domain power off governor routine.
254 * @pd: PM domain to check.
256 * This routine must be executed under the PM domain's lock.
258 static bool _default_power_down_ok(struct dev_pm_domain *pd, ktime_t now)
260 struct generic_pm_domain *genpd = pd_to_genpd(pd);
261 int state_idx = genpd->state_count - 1;
262 struct gpd_link *link;
265 * Find the next wakeup from devices that can determine their own wakeup
266 * to find when the domain would wakeup and do it for every device down
267 * the hierarchy. It is not worth while to sleep if the state's residency
270 update_domain_next_wakeup(genpd, now);
271 if ((genpd->flags & GENPD_FLAG_MIN_RESIDENCY) && (genpd->next_wakeup != KTIME_MAX)) {
272 /* Let's find out the deepest domain idle state, the devices prefer */
273 while (state_idx >= 0) {
274 if (next_wakeup_allows_state(genpd, state_idx, now)) {
275 genpd->max_off_time_changed = true;
283 genpd->cached_power_down_ok = false;
288 if (!genpd->max_off_time_changed) {
289 genpd->state_idx = genpd->cached_power_down_state_idx;
290 return genpd->cached_power_down_ok;
294 * We have to invalidate the cached results for the parents, so
295 * use the observation that default_power_down_ok() is not
296 * going to be called for any parent until this instance
299 list_for_each_entry(link, &genpd->child_links, child_node)
300 link->parent->max_off_time_changed = true;
302 genpd->max_off_time_ns = -1;
303 genpd->max_off_time_changed = false;
304 genpd->cached_power_down_ok = true;
307 * Find a state to power down to, starting from the state
308 * determined by the next wakeup.
310 while (!__default_power_down_ok(pd, state_idx)) {
311 if (state_idx == 0) {
312 genpd->cached_power_down_ok = false;
319 genpd->state_idx = state_idx;
320 genpd->cached_power_down_state_idx = genpd->state_idx;
321 return genpd->cached_power_down_ok;
324 static bool default_power_down_ok(struct dev_pm_domain *pd)
326 return _default_power_down_ok(pd, ktime_get());
329 static bool always_on_power_down_ok(struct dev_pm_domain *domain)
334 #ifdef CONFIG_CPU_IDLE
335 static bool cpu_power_down_ok(struct dev_pm_domain *pd)
337 struct generic_pm_domain *genpd = pd_to_genpd(pd);
338 struct cpuidle_device *dev;
339 ktime_t domain_wakeup, next_hrtimer;
340 ktime_t now = ktime_get();
341 s64 idle_duration_ns;
344 /* Validate dev PM QoS constraints. */
345 if (!_default_power_down_ok(pd, now))
348 if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN))
352 * Find the next wakeup for any of the online CPUs within the PM domain
353 * and its subdomains. Note, we only need the genpd->cpus, as it already
354 * contains a mask of all CPUs from subdomains.
356 domain_wakeup = ktime_set(KTIME_SEC_MAX, 0);
357 for_each_cpu_and(cpu, genpd->cpus, cpu_online_mask) {
358 dev = per_cpu(cpuidle_devices, cpu);
360 next_hrtimer = READ_ONCE(dev->next_hrtimer);
361 if (ktime_before(next_hrtimer, domain_wakeup))
362 domain_wakeup = next_hrtimer;
366 /* The minimum idle duration is from now - until the next wakeup. */
367 idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, now));
368 if (idle_duration_ns <= 0)
372 * Find the deepest idle state that has its residency value satisfied
373 * and by also taking into account the power off latency for the state.
374 * Start at the state picked by the dev PM QoS constraint validation.
376 i = genpd->state_idx;
378 if (idle_duration_ns >= (genpd->states[i].residency_ns +
379 genpd->states[i].power_off_latency_ns)) {
380 genpd->state_idx = i;
388 struct dev_power_governor pm_domain_cpu_gov = {
389 .suspend_ok = default_suspend_ok,
390 .power_down_ok = cpu_power_down_ok,
394 struct dev_power_governor simple_qos_governor = {
395 .suspend_ok = default_suspend_ok,
396 .power_down_ok = default_power_down_ok,
400 * pm_genpd_gov_always_on - A governor implementing an always-on policy
402 struct dev_power_governor pm_domain_always_on_gov = {
403 .power_down_ok = always_on_power_down_ok,
404 .suspend_ok = default_suspend_ok,