// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
 *
 *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
 *	Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
 *	Fix handling for CPU hotplug -- affected CPUs
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <linux/units.h>
#include <trace/events/power.h>

static LIST_HEAD(cpufreq_policy_list);

/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)			 \
	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
		if ((__active) == !policy_is_inactive(__policy))

#define for_each_active_policy(__policy)		\
	for_each_suitable_policy(__policy, true)
#define for_each_inactive_policy(__policy)		\
	for_each_suitable_policy(__policy, false)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)				\
	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)

static char default_governor[CPUFREQ_NAME_LEN];

/*
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its rwlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);

static DEFINE_STATIC_KEY_FALSE(cpufreq_freq_invariance);
bool cpufreq_supports_freq_invariance(void)
{
	return static_branch_likely(&cpufreq_freq_invariance);
}

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
	return cpufreq_driver->target_index || cpufreq_driver->target;
}

bool has_target_index(void)
{
	return !!cpufreq_driver->target_index;
}

/* internal prototypes */
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static int cpufreq_init_governor(struct cpufreq_policy *policy);
static void cpufreq_exit_governor(struct cpufreq_policy *policy);
static void cpufreq_governor_limits(struct cpufreq_policy *policy);
static int cpufreq_set_policy(struct cpufreq_policy *policy,
			      struct cpufreq_governor *new_gov,
			      unsigned int new_pol);
static bool cpufreq_boost_supported(void);

/*
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list is for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
	return off;
}
void disable_cpufreq(void)
{
	off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

static struct kobject *cpufreq_global_kobject;

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
	if (have_governor_per_policy())
		return &policy->kobj;
	else
		return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	struct kernel_cpustat kcpustat;
	u64 cur_wall_time;
	u64 idle_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());

	kcpustat_cpu_fetch(&kcpustat, cpu);

	busy_time = kcpustat.cpustat[CPUTIME_USER];
	busy_time += kcpustat.cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat.cpustat[CPUTIME_IRQ];
	busy_time += kcpustat.cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat.cpustat[CPUTIME_STEAL];
	busy_time += kcpustat.cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = div_u64(cur_wall_time, NSEC_PER_USEC);

	return div_u64(idle_time, NSEC_PER_USEC);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
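
/*
 * Example (illustrative sketch only): deriving a busy percentage from two
 * samples of get_cpu_idle_time(), the way sampling governors do.  The helper
 * name foo_busy_percent is an assumption for illustration, not a real symbol.
 *
 *	static unsigned int foo_busy_percent(unsigned int cpu, int io_busy)
 *	{
 *		u64 wall1, wall2, idle1, idle2, dwall, didle;
 *
 *		idle1 = get_cpu_idle_time(cpu, &wall1, io_busy);
 *		msleep(100);
 *		idle2 = get_cpu_idle_time(cpu, &wall2, io_busy);
 *
 *		dwall = wall2 - wall1;	// elapsed wall time, in us
 *		didle = idle2 - idle1;	// idle time in that window, in us
 *
 *		return dwall ? div64_u64(100 * (dwall - didle), dwall) : 0;
 *	}
 */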

/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It will do the following:
 * - validate & show the freq table passed
 * - set the policy's transition latency
 * - populate policy->cpus with all possible CPUs
 */
void cpufreq_generic_init(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table,
		unsigned int transition_latency)
{
	policy->freq_table = table;
	policy->cpuinfo.transition_latency = transition_latency;

	/*
	 * The driver only supports the SMP configuration where all processors
	 * share the clock and voltage.
	 */
	cpumask_setall(policy->cpus);
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
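
/*
 * Example (illustrative sketch only): a hypothetical driver ->init()
 * callback built on cpufreq_generic_init().  foo_freq_table and
 * foo_cpufreq_init are assumed names, not real kernel symbols.
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		// All CPUs share one clock; the latency argument is in ns.
 *		cpufreq_generic_init(policy, foo_freq_table, 300 * NSEC_PER_USEC);
 *		return 0;
 *	}
 */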

struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);

unsigned int cpufreq_generic_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

	if (!policy || IS_ERR(policy->clk)) {
		pr_err("%s: No %s associated to cpu: %d\n",
		       __func__, policy ? "clk" : "policy", cpu);
		return 0;
	}

	return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);

/**
 * cpufreq_cpu_get - Return policy for a CPU and mark it as busy.
 * @cpu: CPU to find the policy for.
 *
 * Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment
 * the kobject reference counter of that policy.  Return a valid policy on
 * success or NULL on failure.
 *
 * The policy returned by this function has to be released with the help of
 * cpufreq_cpu_put() to balance its kobject reference counter properly.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = NULL;
	unsigned long flags;

	if (WARN_ON(cpu >= nr_cpu_ids))
		return NULL;

	/* get the cpufreq driver */
	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver) {
		/* get the CPU */
		policy = cpufreq_cpu_get_raw(cpu);
		if (policy)
			kobject_get(&policy->kobj);
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

/**
 * cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
 * @policy: cpufreq policy returned by cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
	kobject_put(&policy->kobj);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
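
/*
 * Example (illustrative sketch only) of the get/put reference pattern that
 * consumers of this API are expected to follow:
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		pr_info("CPU%u max: %u kHz\n", cpu, policy->max);
 *		cpufreq_cpu_put(policy);	// balance the kobject reference
 *	}
 */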

/**
 * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
 * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
 */
void cpufreq_cpu_release(struct cpufreq_policy *policy)
{
	if (WARN_ON(!policy))
		return;

	lockdep_assert_held(&policy->rwsem);

	up_write(&policy->rwsem);

	cpufreq_cpu_put(policy);
}

/**
 * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
 * @cpu: CPU to find the policy for.
 *
 * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
 * if the policy returned by it is not NULL, acquire its rwsem for writing.
 * Return the policy if it is active or release it and return NULL otherwise.
 *
 * The policy returned by this function has to be released with the help of
 * cpufreq_cpu_release() in order to release its rwsem and balance its usage
 * counter properly.
 */
struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return NULL;

	down_write(&policy->rwsem);

	if (policy_is_inactive(policy)) {
		cpufreq_cpu_release(policy);
		return NULL;
	}

	return policy;
}
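
/*
 * Example (illustrative sketch only): cpufreq_cpu_acquire() pairs a
 * reference with the policy rwsem held for writing, so the caller may
 * modify the policy before cpufreq_cpu_release() drops both:
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
 *
 *	if (policy) {
 *		refresh_frequency_limits(policy);
 *		cpufreq_cpu_release(policy);	// unlocks and drops the reference
 *	}
 */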

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/**
 * adjust_jiffies - Adjust the system "loops_per_jiffy".
 * @val: CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
 * @ci: Frequency change information.
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
	static unsigned long l_p_j_ref;
	static unsigned int l_p_j_ref_freq;

	if (ci->flags & CPUFREQ_CONST_LOOPS)
		return;

	if (!l_p_j_ref_freq) {
		l_p_j_ref = loops_per_jiffy;
		l_p_j_ref_freq = ci->old;
		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
			 l_p_j_ref, l_p_j_ref_freq);
	}
	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
								ci->new);
		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
			 loops_per_jiffy, ci->new);
	}
#endif
}

/**
 * cpufreq_notify_transition - Notify frequency transition and adjust jiffies.
 * @policy: cpufreq policy the transition applies to.
 * @freqs: contain details of the frequency update.
 * @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
 *
 * This function calls the transition notifiers and adjust_jiffies().
 *
 * It is called twice on all CPU frequency changes that have external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
				      struct cpufreq_freqs *freqs,
				      unsigned int state)
{
	int cpu;

	BUG_ON(irqs_disabled());

	if (cpufreq_disabled())
		return;

	freqs->policy = policy;
	freqs->flags = cpufreq_driver->flags;
	pr_debug("notification %u of frequency transition to %u kHz\n",
		 state, freqs->new);

	switch (state) {
	case CPUFREQ_PRECHANGE:
		/*
		 * Detect if the driver reported a value as "old frequency"
		 * which is not equal to what the cpufreq core thinks is
		 * "old frequency".
		 */
		if (policy->cur && policy->cur != freqs->old) {
			pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
				 freqs->old, policy->cur);
			freqs->old = policy->cur;
		}

		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
					 CPUFREQ_PRECHANGE, freqs);

		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
		break;

	case CPUFREQ_POSTCHANGE:
		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
		pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
			 cpumask_pr_args(policy->cpus));

		for_each_cpu(cpu, policy->cpus)
			trace_cpu_frequency(freqs->new, cpu);

		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
					 CPUFREQ_POSTCHANGE, freqs);

		cpufreq_stats_record_transition(policy, freqs->new);
		policy->cur = freqs->new;
	}
}

/* Do post notifications when there is a chance that the transition has failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
	if (!transition_failed)
		return;

	swap(freqs->old, freqs->new);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs)
{
	/*
	 * Catch double invocations of _begin() which lead to self-deadlock.
	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
	 * doesn't invoke _begin() on their behalf, and hence the chances of
	 * double invocations are very low. Moreover, there are scenarios
	 * where these checks can emit false-positive warnings in these
	 * drivers; so we avoid that by skipping them altogether.
	 */
	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
				&& current == policy->transition_task);

wait:
	wait_event(policy->transition_wait, !policy->transition_ongoing);

	spin_lock(&policy->transition_lock);

	if (unlikely(policy->transition_ongoing)) {
		spin_unlock(&policy->transition_lock);
		goto wait;
	}

	policy->transition_ongoing = true;
	policy->transition_task = current;

	spin_unlock(&policy->transition_lock);

	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
		struct cpufreq_freqs *freqs, int transition_failed)
{
	if (WARN_ON(!policy->transition_ongoing))
		return;

	cpufreq_notify_post_transition(policy, freqs, transition_failed);

	arch_set_freq_scale(policy->related_cpus,
			    policy->cur,
			    arch_scale_freq_ref(policy->cpu));

	spin_lock(&policy->transition_lock);
	policy->transition_ongoing = false;
	policy->transition_task = NULL;
	spin_unlock(&policy->transition_lock);

	wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
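
/*
 * Example (illustrative sketch only): how a non-fast-switch driver's
 * ->target() implementation brackets the actual hardware programming with
 * the transition notifications.  foo_target and foo_write_freq_register are
 * assumed names, not real kernel symbols.
 *
 *	static int foo_target(struct cpufreq_policy *policy,
 *			      unsigned int target_freq, unsigned int relation)
 *	{
 *		struct cpufreq_freqs freqs = {
 *			.old = policy->cur,
 *			.new = target_freq,
 *		};
 *		int ret;
 *
 *		cpufreq_freq_transition_begin(policy, &freqs);
 *		ret = foo_write_freq_register(target_freq);
 *		cpufreq_freq_transition_end(policy, &freqs, ret);
 *
 *		return ret;
 *	}
 */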

/*
 * Fast frequency switching status count.  Positive means "enabled", negative
 * means "disabled" and 0 means "not decided yet".
 */
static int cpufreq_fast_switch_count;
static DEFINE_MUTEX(cpufreq_fast_switch_lock);

static void cpufreq_list_transition_notifiers(void)
{
	struct notifier_block *nb;

	pr_info("Registered transition notifiers:\n");

	mutex_lock(&cpufreq_transition_notifier_list.mutex);

	for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
		pr_info("%pS\n", nb->notifier_call);

	mutex_unlock(&cpufreq_transition_notifier_list.mutex);
}

/**
 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
 * @policy: cpufreq policy to enable fast frequency switching for.
 *
 * Try to enable fast frequency switching for @policy.
 *
 * The attempt will fail if there is at least one transition notifier registered
 * at this point, as fast frequency switching is quite fundamentally at odds
 * with transition notifiers. Thus if successful, it will make registration of
 * transition notifiers fail going forward.
 */
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
{
	lockdep_assert_held(&policy->rwsem);

	if (!policy->fast_switch_possible)
		return;

	mutex_lock(&cpufreq_fast_switch_lock);
	if (cpufreq_fast_switch_count >= 0) {
		cpufreq_fast_switch_count++;
		policy->fast_switch_enabled = true;
	} else {
		pr_warn("CPU%u: Fast frequency switching not enabled\n",
			policy->cpu);
		cpufreq_list_transition_notifiers();
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);

/**
 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
 * @policy: cpufreq policy to disable fast frequency switching for.
 */
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
{
	mutex_lock(&cpufreq_fast_switch_lock);
	if (policy->fast_switch_enabled) {
		policy->fast_switch_enabled = false;
		if (!WARN_ON(cpufreq_fast_switch_count <= 0))
			cpufreq_fast_switch_count--;
	}
	mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);

static unsigned int __resolve_freq(struct cpufreq_policy *policy,
		unsigned int target_freq, unsigned int relation)
{
	unsigned int idx;

	target_freq = clamp_val(target_freq, policy->min, policy->max);

	if (!policy->freq_table)
		return target_freq;

	idx = cpufreq_frequency_table_target(policy, target_freq, relation);
	policy->cached_resolved_idx = idx;
	policy->cached_target_freq = target_freq;
	return policy->freq_table[idx].frequency;
}

/**
 * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
 * frequency.
 * @policy: associated policy to interrogate
 * @target_freq: target frequency to resolve.
 *
 * The target to driver frequency mapping is cached in the policy.
 *
 * Return: Lowest driver-supported frequency greater than or equal to the
 * given target_freq, subject to policy (min/max) and driver limitations.
 */
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
		unsigned int target_freq)
{
	return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_LE);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
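
/*
 * Example (illustrative sketch only): a governor can pre-resolve a raw
 * frequency request so that the subsequent target() call hits the cached
 * index instead of searching the table again:
 *
 *	unsigned int next_freq = cpufreq_driver_resolve_freq(policy, raw_freq);
 *
 *	if (next_freq != policy->cur)
 *		__cpufreq_driver_target(policy, next_freq, CPUFREQ_RELATION_LE);
 */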

unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
{
	unsigned int latency;

	if (policy->transition_delay_us)
		return policy->transition_delay_us;

	latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
	if (latency) {
		unsigned int max_delay_us = 2 * MSEC_PER_SEC;

		/*
		 * If the platform already has a high transition_latency, use it
		 * as-is.
		 */
		if (latency > max_delay_us)
			return latency;

		/*
		 * For platforms that can change the frequency very fast (< 2
		 * us), the formula below gives a decent transition delay. But
		 * for platforms where transition_latency is in milliseconds, it
		 * ends up giving unrealistic values.
		 *
		 * Cap the default transition delay to 2 ms, which seems to be
		 * a reasonable amount of time after which we should reevaluate
		 * the frequency.
		 */
		return min(latency * LATENCY_MULTIPLIER, max_delay_us);
	}

	return LATENCY_MULTIPLIER;
}
EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
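
/*
 * Worked example (assuming LATENCY_MULTIPLIER is 1000, as in mainline): a
 * platform reporting cpuinfo.transition_latency = 500000 ns yields
 * latency = 500 us; 500 * 1000 = 500000 us exceeds the 2000 us cap, so the
 * default rate limit becomes 2 ms.  A 1 us platform gets 1 * 1000 = 1000 us
 * instead.
 */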

/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/

static ssize_t show_boost(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t count)
{
	bool enable;

	if (kstrtobool(buf, &enable))
		return -EINVAL;

	if (cpufreq_boost_trigger_state(enable)) {
		pr_err("%s: Cannot %s BOOST!\n",
		       __func__, enable ? "enable" : "disable");
		return -EINVAL;
	}

	pr_debug("%s: cpufreq BOOST %s\n",
		 __func__, enable ? "enabled" : "disabled");

	return count;
}
define_one_global_rw(boost);

static ssize_t show_local_boost(struct cpufreq_policy *policy, char *buf)
{
	return sysfs_emit(buf, "%d\n", policy->boost_enabled);
}

static ssize_t store_local_boost(struct cpufreq_policy *policy,
				 const char *buf, size_t count)
{
	int ret;
	bool enable;

	if (kstrtobool(buf, &enable))
		return -EINVAL;

	if (!cpufreq_driver->boost_enabled)
		return -EINVAL;

	if (policy->boost_enabled == enable)
		return count;

	policy->boost_enabled = enable;

	cpus_read_lock();
	ret = cpufreq_driver->set_boost(policy, enable);
	cpus_read_unlock();

	if (ret) {
		policy->boost_enabled = !policy->boost_enabled;
		return ret;
	}

	return count;
}

static struct freq_attr local_boost = __ATTR(boost, 0644, show_local_boost, store_local_boost);

static struct cpufreq_governor *find_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	for_each_governor(t)
		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
			return t;

	return NULL;
}

static struct cpufreq_governor *get_governor(const char *str_governor)
{
	struct cpufreq_governor *t;

	mutex_lock(&cpufreq_governor_mutex);
	t = find_governor(str_governor);
	if (!t)
		goto unlock;

	if (!try_module_get(t->owner))
		t = NULL;

unlock:
	mutex_unlock(&cpufreq_governor_mutex);

	return t;
}

static unsigned int cpufreq_parse_policy(char *str_governor)
{
	if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
		return CPUFREQ_POLICY_PERFORMANCE;

	if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
		return CPUFREQ_POLICY_POWERSAVE;

	return CPUFREQ_POLICY_UNKNOWN;
}

/**
 * cpufreq_parse_governor - parse a governor string only for has_target()
 * @str_governor: Governor name.
 */
static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
{
	struct cpufreq_governor *t;

	t = get_governor(str_governor);
	if (t)
		return t;

	if (request_module("cpufreq_%s", str_governor))
		return NULL;

	return get_governor(str_governor);
}

/*
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)			\
static ssize_t show_##file_name				\
(struct cpufreq_policy *policy, char *buf)		\
{							\
	return sysfs_emit(buf, "%u\n", policy->object);	\
}

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);

__weak unsigned int arch_freq_get_on_cpu(int cpu)
{
	return 0;
}

static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
	ssize_t ret;
	unsigned int freq;

	freq = arch_freq_get_on_cpu(policy->cpu);
	if (freq)
		ret = sysfs_emit(buf, "%u\n", freq);
	else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
		ret = sysfs_emit(buf, "%u\n", cpufreq_driver->get(policy->cpu));
	else
		ret = sysfs_emit(buf, "%u\n", policy->cur);
	return ret;
}

/*
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)					\
static ssize_t store_##file_name					\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	unsigned long val;						\
	int ret;							\
									\
	ret = kstrtoul(buf, 0, &val);					\
	if (ret)							\
		return ret;						\
									\
	ret = freq_qos_update_request(policy->object##_freq_req, val);	\
	return ret >= 0 ? count : ret;					\
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/*
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
				     char *buf)
{
	unsigned int cur_freq = __cpufreq_get(policy);

	if (cur_freq)
		return sysfs_emit(buf, "%u\n", cur_freq);

	return sysfs_emit(buf, "<unknown>\n");
}

/*
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
		return sysfs_emit(buf, "powersave\n");
	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
		return sysfs_emit(buf, "performance\n");
	else if (policy->governor)
		return sysfs_emit(buf, "%s\n", policy->governor->name);
	return -EINVAL;
}

/*
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
				      const char *buf, size_t count)
{
	char str_governor[16];
	int ret;

	ret = sscanf(buf, "%15s", str_governor);
	if (ret != 1)
		return -EINVAL;

	if (cpufreq_driver->setpolicy) {
		unsigned int new_pol;

		new_pol = cpufreq_parse_policy(str_governor);
		if (!new_pol)
			return -EINVAL;

		ret = cpufreq_set_policy(policy, NULL, new_pol);
	} else {
		struct cpufreq_governor *new_gov;

		new_gov = cpufreq_parse_governor(str_governor);
		if (!new_gov)
			return -EINVAL;

		ret = cpufreq_set_policy(policy, new_gov,
					 CPUFREQ_POLICY_UNKNOWN);

		module_put(new_gov->owner);
	}

	return ret ? ret : count;
}

/*
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/*
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
						char *buf)
{
	ssize_t i = 0;
	struct cpufreq_governor *t;

	if (!has_target()) {
		i += sysfs_emit(buf, "performance powersave");
		goto out;
	}

	mutex_lock(&cpufreq_governor_mutex);
	for_each_governor(t) {
		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
		    - (CPUFREQ_NAME_LEN + 2)))
			break;
		i += sysfs_emit_at(buf, i, "%s ", t->name);
	}
	mutex_unlock(&cpufreq_governor_mutex);
out:
	i += sysfs_emit_at(buf, i, "\n");
	return i;
}

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
	ssize_t i = 0;
	unsigned int cpu;

	for_each_cpu(cpu, mask) {
		i += sysfs_emit_at(buf, i, "%u ", cpu);
		if (i >= (PAGE_SIZE - 5))
			break;
	}

	/* Remove the extra space at the end */
	i--;

	i += sysfs_emit_at(buf, i, "\n");
	return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
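
/*
 * Example (illustrative): for a policy spanning CPUs 0-3, the buffer
 * produced here reads "0 1 2 3\n"; the trailing space emitted by the loop
 * is overwritten by the final newline.
 */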

/*
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->related_cpus, buf);
}

/*
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
	return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
				      const char *buf, size_t count)
{
	unsigned int freq = 0;
	unsigned int ret;

	if (!policy->governor || !policy->governor->store_setspeed)
		return -EINVAL;

	ret = sscanf(buf, "%u", &freq);
	if (ret != 1)
		return -EINVAL;

	policy->governor->store_setspeed(policy, freq);

	return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
	if (!policy->governor || !policy->governor->show_setspeed)
		return sysfs_emit(buf, "<unsupported>\n");

	return policy->governor->show_setspeed(policy, buf);
}

/*
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
	unsigned int limit;
	int ret;

	ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
	if (!ret)
		return sysfs_emit(buf, "%u\n", limit);
	return sysfs_emit(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *cpufreq_attrs[] = {
	&cpuinfo_min_freq.attr,
	&cpuinfo_max_freq.attr,
	&cpuinfo_transition_latency.attr,
	&scaling_min_freq.attr,
	&scaling_max_freq.attr,
	&affected_cpus.attr,
	&related_cpus.attr,
	&scaling_governor.attr,
	&scaling_driver.attr,
	&scaling_available_governors.attr,
	&scaling_setspeed.attr,
	NULL
};
ATTRIBUTE_GROUPS(cpufreq);

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EBUSY;

	if (!fattr->show)
		return -EIO;

	down_read(&policy->rwsem);
	if (likely(!policy_is_inactive(policy)))
		ret = fattr->show(policy, buf);
	up_read(&policy->rwsem);

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct cpufreq_policy *policy = to_policy(kobj);
	struct freq_attr *fattr = to_attr(attr);
	ssize_t ret = -EBUSY;

	if (!fattr->store)
		return -EIO;

	down_write(&policy->rwsem);
	if (likely(!policy_is_inactive(policy)))
		ret = fattr->store(policy, buf, count);
	up_write(&policy->rwsem);

	return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
	struct cpufreq_policy *policy = to_policy(kobj);

	pr_debug("last reference is dropped\n");
	complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
	.show	= show,
	.store	= store,
};

static const struct kobj_type ktype_cpufreq = {
	.sysfs_ops	= &sysfs_ops,
	.default_groups	= cpufreq_groups,
	.release	= cpufreq_sysfs_release,
};

static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
				struct device *dev)
{
	if (unlikely(!dev))
		return;

	if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
		return;

	dev_dbg(dev, "%s: Adding symlink\n", __func__);
	if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
		dev_err(dev, "cpufreq symlink creation failed\n");
}

static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu,
				   struct device *dev)
{
	dev_dbg(dev, "%s: Removing symlink\n", __func__);
	sysfs_remove_link(&dev->kobj, "cpufreq");
	cpumask_clear_cpu(cpu, policy->real_cpus);
}

static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
{
	struct freq_attr **drv_attr;
	int ret = 0;

	/* set up files for this cpu device */
	drv_attr = cpufreq_driver->attr;
	while (drv_attr && *drv_attr) {
		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
		if (ret)
			return ret;
		drv_attr++;
	}
	if (cpufreq_driver->get) {
		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
		if (ret)
			return ret;
	}

	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
	if (ret)
		return ret;

	if (cpufreq_driver->bios_limit) {
		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
		if (ret)
			return ret;
	}

	if (cpufreq_boost_supported()) {
		ret = sysfs_create_file(&policy->kobj, &local_boost.attr);
		if (ret)
			return ret;
	}

	return 0;
}

static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
	struct cpufreq_governor *gov = NULL;
	unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
	int ret;

	if (has_target()) {
		/* Update policy governor to the one used before hotplug. */
		gov = get_governor(policy->last_governor);
		if (gov) {
			pr_debug("Restoring governor %s for cpu %d\n",
				 gov->name, policy->cpu);
		} else {
			gov = get_governor(default_governor);
		}

		if (!gov) {
			gov = cpufreq_default_governor();
			__module_get(gov->owner);
		}

	} else {

		/* Use the default policy if there is no last_policy. */
		if (policy->last_policy) {
			pol = policy->last_policy;
		} else {
			pol = cpufreq_parse_policy(default_governor);
			/*
			 * In case the default governor is neither "performance"
			 * nor "powersave", fall back to the initial policy
			 * value set by the driver.
			 */
			if (pol == CPUFREQ_POLICY_UNKNOWN)
				pol = policy->policy;
		}
		if (pol != CPUFREQ_POLICY_PERFORMANCE &&
		    pol != CPUFREQ_POLICY_POWERSAVE)
			return -ENODATA;
	}

	ret = cpufreq_set_policy(policy, gov, pol);
	if (gov)
		module_put(gov->owner);

	return ret;
}

static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
	int ret = 0;

	/* Has this CPU been taken care of already? */
	if (cpumask_test_cpu(cpu, policy->cpus))
		return 0;

	down_write(&policy->rwsem);
	if (has_target())
		cpufreq_stop_governor(policy);

	cpumask_set_cpu(cpu, policy->cpus);

	if (has_target()) {
		ret = cpufreq_start_governor(policy);
		if (ret)
			pr_err("%s: Failed to start governor\n", __func__);
	}
	up_write(&policy->rwsem);
	return ret;
}

void refresh_frequency_limits(struct cpufreq_policy *policy)
{
	if (!policy_is_inactive(policy)) {
		pr_debug("updating policy for CPU %u\n", policy->cpu);

		cpufreq_set_policy(policy, policy->governor, policy->policy);
	}
}
EXPORT_SYMBOL(refresh_frequency_limits);

static void handle_update(struct work_struct *work)
{
	struct cpufreq_policy *policy =
		container_of(work, struct cpufreq_policy, update);

	pr_debug("handle_update for cpu %u called\n", policy->cpu);
	down_write(&policy->rwsem);
	refresh_frequency_limits(policy);
	up_write(&policy->rwsem);
}

static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
				void *data)
{
	struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min);

	schedule_work(&policy->update);
	return 0;
}

static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq,
				void *data)
{
	struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max);

	schedule_work(&policy->update);
	return 0;
}

static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
	struct kobject *kobj;
	struct completion *cmp;

	down_write(&policy->rwsem);
	cpufreq_stats_free_table(policy);
	kobj = &policy->kobj;
	cmp = &policy->kobj_unregister;
	up_write(&policy->rwsem);
	kobject_put(kobj);

	/*
	 * We need to make sure that the underlying kobj is
	 * actually not referenced anymore by anybody before we
	 * proceed with unloading.
	 */
	pr_debug("waiting for dropping of refcount\n");
	wait_for_completion(cmp);
	pr_debug("wait complete\n");
}

static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	struct device *dev = get_cpu_device(cpu);
	int ret;

	if (!dev)
		return NULL;

	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
	if (!policy)
		return NULL;

	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
		goto err_free_policy;

	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
		goto err_free_cpumask;

	if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
		goto err_free_rcpumask;

	init_completion(&policy->kobj_unregister);
	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
				   cpufreq_global_kobject, "policy%u", cpu);
	if (ret) {
		dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret);
		/*
		 * The entire policy object will be freed below, but the extra
		 * memory allocated for the kobject name needs to be freed by
		 * releasing the kobject.
		 */
		kobject_put(&policy->kobj);
		goto err_free_real_cpus;
	}

	freq_constraints_init(&policy->constraints);

	policy->nb_min.notifier_call = cpufreq_notifier_min;
	policy->nb_max.notifier_call = cpufreq_notifier_max;

	ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
				    &policy->nb_min);
	if (ret) {
		dev_err(dev, "Failed to register MIN QoS notifier: %d (CPU%u)\n",
			ret, cpu);
		goto err_kobj_remove;
	}

	ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
				    &policy->nb_max);
	if (ret) {
		dev_err(dev, "Failed to register MAX QoS notifier: %d (CPU%u)\n",
			ret, cpu);
		goto err_min_qos_notifier;
	}

	INIT_LIST_HEAD(&policy->policy_list);
	init_rwsem(&policy->rwsem);
	spin_lock_init(&policy->transition_lock);
	init_waitqueue_head(&policy->transition_wait);
	INIT_WORK(&policy->update, handle_update);

	policy->cpu = cpu;
	return policy;

err_min_qos_notifier:
	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
				 &policy->nb_min);
err_kobj_remove:
	cpufreq_policy_put_kobj(policy);
err_free_real_cpus:
	free_cpumask_var(policy->real_cpus);
err_free_rcpumask:
	free_cpumask_var(policy->related_cpus);
err_free_cpumask:
	free_cpumask_var(policy->cpus);
err_free_policy:
	kfree(policy);

	return NULL;
}

static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
	unsigned long flags;
	int cpu;

	/*
	 * The callers must ensure the policy is inactive by now, to avoid any
	 * races with show()/store() callbacks.
	 */
	if (unlikely(!policy_is_inactive(policy)))
		pr_warn("%s: Freeing active policy\n", __func__);

	/* Remove policy from list */
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	list_del(&policy->policy_list);

	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(cpufreq_cpu_data, cpu) = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
				 &policy->nb_max);
	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
				 &policy->nb_min);

	/* Cancel any pending policy->update work before freeing the policy. */
	cancel_work_sync(&policy->update);

	if (policy->max_freq_req) {
		/*
		 * Remove max_freq_req after sending CPUFREQ_REMOVE_POLICY
		 * notification, since CPUFREQ_CREATE_POLICY notification was
		 * sent after adding max_freq_req earlier.
		 */
		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
					     CPUFREQ_REMOVE_POLICY, policy);
		freq_qos_remove_request(policy->max_freq_req);
	}

	freq_qos_remove_request(policy->min_freq_req);
	kfree(policy->min_freq_req);

	cpufreq_policy_put_kobj(policy);
	free_cpumask_var(policy->real_cpus);
	free_cpumask_var(policy->related_cpus);
	free_cpumask_var(policy->cpus);
	kfree(policy);
}

static int cpufreq_online(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	bool new_policy;
	unsigned long flags;
	unsigned int j;
	int ret;

	pr_debug("%s: bringing CPU%u online\n", __func__, cpu);

	/* Check if this CPU already has a policy to manage it */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy) {
		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
		if (!policy_is_inactive(policy))
			return cpufreq_add_policy_cpu(policy, cpu);

		/* This is the only online CPU for the policy.  Start over. */
		new_policy = false;
		down_write(&policy->rwsem);
		policy->cpu = cpu;
		policy->governor = NULL;
	} else {
		new_policy = true;
		policy = cpufreq_policy_alloc(cpu);
		if (!policy)
			return -ENOMEM;
		down_write(&policy->rwsem);
	}

	if (!new_policy && cpufreq_driver->online) {
		/* Recover policy->cpus using related_cpus */
		cpumask_copy(policy->cpus, policy->related_cpus);

		ret = cpufreq_driver->online(policy);
		if (ret) {
			pr_debug("%s: %d: initialization failed\n", __func__,
				 __LINE__);
			goto out_exit_policy;
		}
	} else {
		cpumask_copy(policy->cpus, cpumask_of(cpu));

		/*
		 * Call driver. From then on the cpufreq must be able
		 * to accept all calls to ->verify and ->setpolicy for this CPU.
		 */
		ret = cpufreq_driver->init(policy);
		if (ret) {
			pr_debug("%s: %d: initialization failed\n", __func__,
				 __LINE__);
			goto out_free_policy;
		}

		/* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
		if (cpufreq_boost_enabled() && policy_has_boost_freq(policy))
			policy->boost_enabled = true;

		/*
		 * The initialization has succeeded and the policy is online.
		 * If there is a problem with its frequency table, take it
		 * offline and drop it.
		 */
		ret = cpufreq_table_validate_and_sort(policy);
		if (ret)
			goto out_offline_policy;

		/* related_cpus should at least include policy->cpus. */
		cpumask_copy(policy->related_cpus, policy->cpus);
	}

	/*
	 * affected cpus must always be the ones which are online. We aren't
	 * managing offline cpus here.
	 */
	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

	if (new_policy) {
		for_each_cpu(j, policy->related_cpus) {
			per_cpu(cpufreq_cpu_data, j) = policy;
			add_cpu_dev_symlink(policy, j, get_cpu_device(j));
		}

		policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
					       GFP_KERNEL);
		if (!policy->min_freq_req) {
			ret = -ENOMEM;
			goto out_destroy_policy;
		}

		ret = freq_qos_add_request(&policy->constraints,
					   policy->min_freq_req, FREQ_QOS_MIN,
					   FREQ_QOS_MIN_DEFAULT_VALUE);
		if (ret < 0) {
			/*
			 * So we don't call freq_qos_remove_request() for an
			 * uninitialized request.
			 */
			kfree(policy->min_freq_req);
			policy->min_freq_req = NULL;
			goto out_destroy_policy;
		}

		/*
		 * This must be initialized right here to avoid calling
		 * freq_qos_remove_request() on an uninitialized request in
		 * case of errors.
		 */
		policy->max_freq_req = policy->min_freq_req + 1;

		ret = freq_qos_add_request(&policy->constraints,
					   policy->max_freq_req, FREQ_QOS_MAX,
					   FREQ_QOS_MAX_DEFAULT_VALUE);
		if (ret < 0) {
			policy->max_freq_req = NULL;
			goto out_destroy_policy;
		}

		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
				CPUFREQ_CREATE_POLICY, policy);
	}

	if (cpufreq_driver->get && has_target()) {
		policy->cur = cpufreq_driver->get(policy->cpu);
		if (!policy->cur) {
			ret = -EIO;
			pr_err("%s: ->get() failed\n", __func__);
			goto out_destroy_policy;
		}
	}

	/*
	 * Sometimes boot loaders set the CPU frequency to a value outside of
	 * the frequency table present with the cpufreq core. In such cases
	 * the CPU might be unstable if it has to run on that frequency for a
	 * long duration, so it's better to set it to a frequency which is
	 * specified in the freq-table. This also makes cpufreq stats
	 * inconsistent, as cpufreq-stats would fail to register because the
	 * current frequency of the CPU isn't found in the freq-table.
	 *
	 * Because we don't want this change to affect the boot process badly,
	 * we go for the next freq which is >= policy->cur ('cur' must be set
	 * by now, otherwise we will end up setting freq to the lowest of the
	 * table as 'cur' is initialized to zero).
	 *
	 * We are passing target-freq as "policy->cur - 1" otherwise
	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
	 * equal to target-freq.
	 */
	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
	    && has_target()) {
		unsigned int old_freq = policy->cur;

		/* Are we running at unknown frequency ? */
		ret = cpufreq_frequency_table_get_index(policy, old_freq);
		if (ret == -EINVAL) {
			ret = __cpufreq_driver_target(policy, old_freq - 1,
						      CPUFREQ_RELATION_L);

			/*
			 * Reaching here after boot in a few seconds may not
			 * mean that the system will remain stable at the
			 * "unknown" frequency for a longer duration. Hence,
			 * a BUG_ON().
			 */
			BUG_ON(ret);
			pr_info("%s: CPU%d: Running at unlisted initial frequency: %u kHz, changing to: %u kHz\n",
				__func__, policy->cpu, old_freq, policy->cur);
		}
	}

	if (new_policy) {
		ret = cpufreq_add_dev_interface(policy);
		if (ret)
			goto out_destroy_policy;

		cpufreq_stats_create_table(policy);

		write_lock_irqsave(&cpufreq_driver_lock, flags);
		list_add(&policy->policy_list, &cpufreq_policy_list);
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);

		/*
		 * Register with the energy model before
		 * sugov_eas_rebuild_sd() is called, which will result
		 * in rebuilding of the sched domains, which should only be done
		 * once the energy model is properly initialized for the policy
		 * first.
		 *
		 * Also, this should be called before the policy is registered
		 * with the cooling framework.
		 */
		if (cpufreq_driver->register_em)
			cpufreq_driver->register_em(policy);
	}

	ret = cpufreq_init_policy(policy);
	if (ret) {
		pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
		       __func__, cpu, ret);
		goto out_destroy_policy;
	}

	up_write(&policy->rwsem);

	kobject_uevent(&policy->kobj, KOBJ_ADD);

	/* Callback for handling stuff after policy is ready */
	if (cpufreq_driver->ready)
		cpufreq_driver->ready(policy);

	/* Register cpufreq cooling only for a new policy */
	if (new_policy && cpufreq_thermal_control_enabled(cpufreq_driver))
		policy->cdev = of_cpufreq_cooling_register(policy);

	pr_debug("initialization complete\n");

	return 0;

out_destroy_policy:
	for_each_cpu(j, policy->real_cpus)
		remove_cpu_dev_symlink(policy, j, get_cpu_device(j));

out_offline_policy:
	if (cpufreq_driver->offline)
		cpufreq_driver->offline(policy);

out_exit_policy:
	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);

out_free_policy:
	cpumask_clear(policy->cpus);
	up_write(&policy->rwsem);

	cpufreq_policy_free(policy);
	return ret;
}

/**
 * cpufreq_add_dev - the cpufreq interface for a CPU device.
 * @dev: CPU device.
 * @sif: Subsystem interface structure pointer (not used)
 */
static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
	struct cpufreq_policy *policy;
	unsigned cpu = dev->id;
	int ret;

	dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);

	if (cpu_online(cpu)) {
		ret = cpufreq_online(cpu);
		if (ret)
			return ret;
	}

	/* Create sysfs link on CPU registration */
	policy = per_cpu(cpufreq_cpu_data, cpu);
	if (policy)
		add_cpu_dev_symlink(policy, cpu, dev);

	return 0;
}

static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
{
	int ret;

	if (has_target())
		cpufreq_stop_governor(policy);

	cpumask_clear_cpu(cpu, policy->cpus);

	if (!policy_is_inactive(policy)) {
		/* Nominate a new CPU if necessary. */
		if (cpu == policy->cpu)
			policy->cpu = cpumask_any(policy->cpus);

		/* Start the governor again for the active policy. */
		if (has_target()) {
			ret = cpufreq_start_governor(policy);
			if (ret)
				pr_err("%s: Failed to start governor\n", __func__);
		}

		return;
	}

	if (has_target())
		strscpy(policy->last_governor, policy->governor->name,
			CPUFREQ_NAME_LEN);
	else
		policy->last_policy = policy->policy;

	if (has_target())
		cpufreq_exit_governor(policy);

	/*
	 * Perform the ->offline() during light-weight tear-down, as
	 * that allows fast recovery when the CPU comes back.
	 */
	if (cpufreq_driver->offline) {
		cpufreq_driver->offline(policy);
		return;
	}

	if (cpufreq_driver->exit)
		cpufreq_driver->exit(policy);

	policy->freq_table = NULL;
}

static int cpufreq_offline(unsigned int cpu)
{
	struct cpufreq_policy *policy;

	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (!policy) {
		pr_debug("%s: No cpu_data found\n", __func__);
		return 0;
	}

	down_write(&policy->rwsem);

	__cpufreq_offline(cpu, policy);

	up_write(&policy->rwsem);
	return 0;
}

/*
 * cpufreq_remove_dev - remove a CPU device
 *
 * Removes the cpufreq interface for a CPU device.
 */
static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	unsigned int cpu = dev->id;
	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

	if (!policy)
		return;

	down_write(&policy->rwsem);

	if (cpu_online(cpu))
		__cpufreq_offline(cpu, policy);

	remove_cpu_dev_symlink(policy, cpu, dev);

	if (!cpumask_empty(policy->real_cpus)) {
		up_write(&policy->rwsem);
		return;
	}

	/*
	 * Unregister cpufreq cooling once all the CPUs of the policy are
	 * removed.
	 */
	if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
		cpufreq_cooling_unregister(policy->cdev);
		policy->cdev = NULL;
	}

	/* We did light-weight exit earlier, do full tear down now */
	if (cpufreq_driver->offline && cpufreq_driver->exit)
		cpufreq_driver->exit(policy);

	up_write(&policy->rwsem);

	cpufreq_policy_free(policy);
}

/**
 * cpufreq_out_of_sync - Fix up actual and saved CPU frequency difference.
 * @policy: Policy managing CPUs.
 * @new_freq: New CPU frequency.
 *
 * Adjust to the current frequency first and clean up later by either calling
 * cpufreq_update_policy(), or scheduling handle_update().
 */
static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
				unsigned int new_freq)
{
	struct cpufreq_freqs freqs;

	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
		 policy->cur, new_freq);

	freqs.old = policy->cur;
	freqs.new = new_freq;

	cpufreq_freq_transition_begin(policy, &freqs);
	cpufreq_freq_transition_end(policy, &freqs, 0);
}

static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, bool update)
{
	unsigned int new_freq;

	new_freq = cpufreq_driver->get(policy->cpu);
	if (!new_freq)
		return 0;

	/*
	 * If fast frequency switching is used with the given policy, the check
	 * against policy->cur is pointless, so skip it in that case.
	 */
	if (policy->fast_switch_enabled || !has_target())
		return new_freq;

	if (policy->cur != new_freq) {
		/*
		 * For some platforms, the frequency returned by hardware may be
		 * slightly different from what is provided in the frequency
		 * table, for example hardware may return 499 MHz instead of 500
		 * MHz. In such cases it is better to avoid getting into
		 * unnecessary frequency updates.
		 */
		if (abs(policy->cur - new_freq) < KHZ_PER_MHZ)
			return policy->cur;

		cpufreq_out_of_sync(policy, new_freq);
		if (update)
			schedule_work(&policy->update);
	}

	return new_freq;
}

/**
 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
 * @cpu: CPU number
 *
 * This is the last known freq, without actually getting it from the driver.
 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
 */
unsigned int cpufreq_quick_get(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	unsigned int ret_freq = 0;
	unsigned long flags;

	read_lock_irqsave(&cpufreq_driver_lock, flags);

	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
		ret_freq = cpufreq_driver->get(cpu);
		read_unlock_irqrestore(&cpufreq_driver_lock, flags);
		return ret_freq;
	}

	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	policy = cpufreq_cpu_get(cpu);
	if (policy) {
		ret_freq = policy->cur;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get);

/**
 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
 * @cpu: CPU number
 *
 * Just return the max possible frequency for a given CPU.
 */
unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->max;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_quick_get_max);

/**
 * cpufreq_get_hw_max_freq - get the max hardware frequency of the CPU
 * @cpu: CPU number
 *
 * The default return value is the max_freq field of cpuinfo.
 */
__weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->cpuinfo.max_freq;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get_hw_max_freq);

static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
	if (unlikely(policy_is_inactive(policy)))
		return 0;

	return cpufreq_verify_current_freq(policy, true);
}

/**
 * cpufreq_get - get the current CPU frequency (in kHz)
 * @cpu: CPU number
 *
 * Get the CPU current (static) CPU frequency
 */
unsigned int cpufreq_get(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		down_read(&policy->rwsem);
		if (cpufreq_driver->get)
			ret_freq = __cpufreq_get(policy);
		up_read(&policy->rwsem);

		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);

static struct subsys_interface cpufreq_interface = {
	.name		= "cpufreq",
	.subsys		= &cpu_subsys,
	.add_dev	= cpufreq_add_dev,
	.remove_dev	= cpufreq_remove_dev,
};

/*
 * In case the platform wants some specific frequency to be configured
 * during suspend.
 */
int cpufreq_generic_suspend(struct cpufreq_policy *policy)
{
	int ret;

	if (!policy->suspend_freq) {
		pr_debug("%s: suspend_freq not defined\n", __func__);
		return 0;
	}

	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
		 policy->suspend_freq);

	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
				      CPUFREQ_RELATION_H);
	if (ret)
		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
		       __func__, policy->suspend_freq, ret);

	return ret;
}
EXPORT_SYMBOL(cpufreq_generic_suspend);
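
/*
 * Example (illustrative sketch only): a driver opting into this helper
 * sets policy->suspend_freq in its ->init() callback and wires the suspend
 * callback up in its cpufreq_driver; foo_driver is an assumed name.
 *
 *	static struct cpufreq_driver foo_driver = {
 *		...
 *		.suspend	= cpufreq_generic_suspend,
 *	};
 *
 *	// in foo_driver's init() callback:
 *	policy->suspend_freq = policy->max;
 */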

/**
 * cpufreq_suspend() - Suspend CPUFreq governors.
 *
 * Called during system wide Suspend/Hibernate cycles for suspending governors,
 * as some platforms can't change frequency after this point in the suspend
 * cycle, because some of the devices (i2c, regulators, etc.) used for changing
 * the frequency are suspended quickly after this point.
 */
void cpufreq_suspend(void)
{
	struct cpufreq_policy *policy;

	if (!cpufreq_driver)
		return;

	if (!has_target() && !cpufreq_driver->suspend)
		goto suspend;

	pr_debug("%s: Suspending Governors\n", __func__);

	for_each_active_policy(policy) {
		if (has_target()) {
			down_write(&policy->rwsem);
			cpufreq_stop_governor(policy);
			up_write(&policy->rwsem);
		}

		if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
			pr_err("%s: Failed to suspend driver: %s\n", __func__,
			       cpufreq_driver->name);
	}

suspend:
	cpufreq_suspended = true;
}

/**
 * cpufreq_resume() - Resume CPUFreq governors.
 *
 * Called during system wide Suspend/Hibernate cycle for resuming governors that
 * are suspended with cpufreq_suspend().
 */
void cpufreq_resume(void)
{
	struct cpufreq_policy *policy;
	int ret;

	if (!cpufreq_driver)
		return;

	if (unlikely(!cpufreq_suspended))
		return;

	cpufreq_suspended = false;

	if (!has_target() && !cpufreq_driver->resume)
		return;

	pr_debug("%s: Resuming Governors\n", __func__);

	for_each_active_policy(policy) {
		if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
			pr_err("%s: Failed to resume driver: %s\n", __func__,
			       cpufreq_driver->name);
		} else if (has_target()) {
			down_write(&policy->rwsem);
			ret = cpufreq_start_governor(policy);
			up_write(&policy->rwsem);

			if (ret)
				pr_err("%s: Failed to start governor for CPU%u's policy\n",
				       __func__, policy->cpu);
		}
	}
}

/**
 * cpufreq_driver_test_flags - Test cpufreq driver's flags against given ones.
 * @flags: Flags to test against the current cpufreq driver's flags.
 *
 * Assumes that the driver is there, so callers must ensure that this is the
 * case.
 */
bool cpufreq_driver_test_flags(u16 flags)
{
	return !!(cpufreq_driver->flags & flags);
}

/**
 * cpufreq_get_current_driver - Return the current driver's name.
 *
 * Return the name string of the currently registered cpufreq driver or NULL if
 * none.
 */
const char *cpufreq_get_current_driver(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->name;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);

/**
 * cpufreq_get_driver_data - Return current driver data.
 *
 * Return the private data of the currently registered cpufreq driver, or NULL
 * if no cpufreq driver has been registered.
 */
void *cpufreq_get_driver_data(void)
{
	if (cpufreq_driver)
		return cpufreq_driver->driver_data;

	return NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);

/*********************************************************************
 *                     NOTIFIER LISTS INTERFACE                      *
 *********************************************************************/

/**
 * cpufreq_register_notifier - Register a notifier with cpufreq.
 * @nb: notifier function to register.
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
 *
 * Add a notifier to one of two lists: either a list of notifiers that run on
 * clock rate changes (once before and once after every transition), or a list
 * of notifiers that run on cpufreq policy changes.
 *
 * This function may sleep and it has the same return values as
 * blocking_notifier_chain_register().
 */
int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		if (cpufreq_fast_switch_count > 0) {
			mutex_unlock(&cpufreq_fast_switch_lock);
			return -EBUSY;
		}
		ret = srcu_notifier_chain_register(
				&cpufreq_transition_notifier_list, nb);
		if (!ret)
			cpufreq_fast_switch_count--;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_register(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_register_notifier);
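
/*
 * Example (illustrative sketch only): a minimal transition notifier.  The
 * names foo_transition_cb and foo_nb are assumptions for illustration.
 *
 *	static int foo_transition_cb(struct notifier_block *nb,
 *				     unsigned long state, void *data)
 *	{
 *		struct cpufreq_freqs *freqs = data;
 *
 *		if (state == CPUFREQ_POSTCHANGE)
 *			pr_debug("now at %u kHz\n", freqs->new);
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = { .notifier_call = foo_transition_cb };
 *
 *	cpufreq_register_notifier(&foo_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */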

/**
 * cpufreq_unregister_notifier - Unregister a notifier from cpufreq.
 * @nb: notifier block to be unregistered.
 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
 *
 * Remove a notifier from one of the cpufreq notifier lists.
 *
 * This function may sleep and it has the same return values as
 * blocking_notifier_chain_unregister().
 */
int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
{
	int ret;

	if (cpufreq_disabled())
		return -EINVAL;

	switch (list) {
	case CPUFREQ_TRANSITION_NOTIFIER:
		mutex_lock(&cpufreq_fast_switch_lock);

		ret = srcu_notifier_chain_unregister(
				&cpufreq_transition_notifier_list, nb);
		if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
			cpufreq_fast_switch_count++;

		mutex_unlock(&cpufreq_fast_switch_lock);
		break;
	case CPUFREQ_POLICY_NOTIFIER:
		ret = blocking_notifier_chain_unregister(
				&cpufreq_policy_notifier_list, nb);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL(cpufreq_unregister_notifier);

/*********************************************************************
 *                              GOVERNORS                            *
 *********************************************************************/

/**
 * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
 * @policy: cpufreq policy to switch the frequency for.
 * @target_freq: New frequency to set (may be approximate).
 *
 * Carry out a fast frequency switch without sleeping.
 *
 * The driver's ->fast_switch() callback invoked by this function must be
 * suitable for being called from within RCU-sched read-side critical sections
 * and it is expected to select the minimum available frequency greater than or
 * equal to @target_freq (CPUFREQ_RELATION_L).
 *
 * This function must not be called if policy->fast_switch_enabled is unset.
 *
 * Governors calling this function must guarantee that it will never be invoked
 * twice in parallel for the same policy and that it will never be called in
 * parallel with either ->target() or ->target_index() for the same policy.
 *
 * Returns the actual frequency set for the CPU.
 *
 * If 0 is returned by the driver's ->fast_switch() callback to indicate an
 * error condition, the hardware configuration must be preserved.
 */
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
					unsigned int target_freq)
{
	unsigned int freq;
	int cpu;

	target_freq = clamp_val(target_freq, policy->min, policy->max);
	freq = cpufreq_driver->fast_switch(policy, target_freq);

	if (!freq)
		return 0;

	policy->cur = freq;
	arch_set_freq_scale(policy->related_cpus, freq,
			    arch_scale_freq_ref(policy->cpu));
	cpufreq_stats_record_transition(policy, freq);

	if (trace_cpu_frequency_enabled()) {
		for_each_cpu(cpu, policy->cpus)
			trace_cpu_frequency(freq, cpu);
	}

	return freq;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
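
/*
 * Example (illustrative sketch only): the shape of a governor's fast-switch
 * path, as used from scheduler context.  foo_next_freq stands for a value
 * the governor computed beforehand and is an assumed name.
 *
 *	if (policy->fast_switch_enabled) {
 *		unsigned int freq;
 *
 *		freq = cpufreq_driver_fast_switch(policy, foo_next_freq);
 *		if (!freq)
 *			return;		// hardware state unchanged on error
 *	}
 */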

/**
 * cpufreq_driver_adjust_perf - Adjust CPU performance level in one go.
 * @cpu: Target CPU.
 * @min_perf: Minimum (required) performance level (units of @capacity).
 * @target_perf: Target (desired) performance level (units of @capacity).
 * @capacity: Capacity of the target CPU.
 *
 * Carry out a fast performance level switch of @cpu without sleeping.
 *
 * The driver's ->adjust_perf() callback invoked by this function must be
 * suitable for being called from within RCU-sched read-side critical sections
 * and it is expected to select a suitable performance level equal to or above
 * @min_perf and preferably equal to or below @target_perf.
 *
 * This function must not be called if policy->fast_switch_enabled is unset.
 *
 * Governors calling this function must guarantee that it will never be invoked
 * twice in parallel for the same CPU and that it will never be called in
 * parallel with either ->target() or ->target_index() or ->fast_switch() for
 * the same CPU.
 */
void cpufreq_driver_adjust_perf(unsigned int cpu,
				unsigned long min_perf,
				unsigned long target_perf,
				unsigned long capacity)
{
	cpufreq_driver->adjust_perf(cpu, min_perf, target_perf, capacity);
}

/**
 * cpufreq_driver_has_adjust_perf - Check "direct fast switch" callback.
 *
 * Return 'true' if the ->adjust_perf callback is present for the
 * current driver or 'false' otherwise.
 */
bool cpufreq_driver_has_adjust_perf(void)
{
	return !!cpufreq_driver->adjust_perf;
}

/* Must set freqs->new to intermediate frequency */
static int __target_intermediate(struct cpufreq_policy *policy,
				 struct cpufreq_freqs *freqs, int index)
{
	int ret;

	freqs->new = cpufreq_driver->get_intermediate(policy, index);

	/* We don't need to switch to intermediate freq */
	if (!freqs->new)
		return 0;

	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
		 __func__, policy->cpu, freqs->old, freqs->new);

	cpufreq_freq_transition_begin(policy, freqs);
	ret = cpufreq_driver->target_intermediate(policy, index);
	cpufreq_freq_transition_end(policy, freqs, ret);

	if (ret)
		pr_err("%s: Failed to change to intermediate frequency: %d\n",
		       __func__, ret);

	return ret;
}
static int __target_index(struct cpufreq_policy *policy, int index)
{
	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
	unsigned int restore_freq, intermediate_freq = 0;
	unsigned int newfreq = policy->freq_table[index].frequency;
	int retval = -EINVAL;
	bool notify;

	if (newfreq == policy->cur)
		return 0;

	/* Save last value to restore later on errors */
	restore_freq = policy->cur;

	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
	if (notify) {
		/* Handle switching to intermediate frequency */
		if (cpufreq_driver->get_intermediate) {
			retval = __target_intermediate(policy, &freqs, index);
			if (retval)
				return retval;

			intermediate_freq = freqs.new;
			/* Set old freq to intermediate */
			if (intermediate_freq)
				freqs.old = freqs.new;
		}

		freqs.new = newfreq;
		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
			 __func__, policy->cpu, freqs.old, freqs.new);

		cpufreq_freq_transition_begin(policy, &freqs);
	}

	retval = cpufreq_driver->target_index(policy, index);
	if (retval)
		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
		       retval);

	if (notify) {
		cpufreq_freq_transition_end(policy, &freqs, retval);

		/*
		 * Failed after setting to intermediate freq? Driver should have
		 * reverted back to initial frequency and so should we. Check
		 * here for intermediate_freq instead of get_intermediate, in
		 * case we haven't switched to intermediate freq at all.
		 */
		if (unlikely(retval && intermediate_freq)) {
			freqs.old = intermediate_freq;
			freqs.new = restore_freq;
			cpufreq_freq_transition_begin(policy, &freqs);
			cpufreq_freq_transition_end(policy, &freqs, 0);
		}
	}

	return retval;
}
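
/*
 * Example (hypothetical driver, names invented): a platform that must pass
 * through a safe bypass rate before reprogramming its PLL would implement
 * the two optional callbacks consumed by __target_intermediate():
 *
 *	static unsigned int foo_get_intermediate(struct cpufreq_policy *policy,
 *						 unsigned int index)
 *	{
 *		// Returning 0 means no intermediate step is needed.
 *		return FOO_PLL_BYPASS_KHZ;
 *	}
 *
 *	static int foo_target_intermediate(struct cpufreq_policy *policy,
 *					   unsigned int index)
 *	{
 *		return foo_switch_to_bypass_clk(policy->cpu);
 *	}
 *
 * Both ->get_intermediate and ->target_intermediate must be set together;
 * cpufreq_register_driver() below rejects drivers providing only one.
 */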
int __cpufreq_driver_target(struct cpufreq_policy *policy,
			    unsigned int target_freq,
			    unsigned int relation)
{
	unsigned int old_target_freq = target_freq;

	if (cpufreq_disabled())
		return -ENODEV;

	target_freq = __resolve_freq(policy, target_freq, relation);

	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
		 policy->cpu, target_freq, relation, old_target_freq);

	/*
	 * This might look like a redundant call as we are checking it again
	 * after finding the index. But it is left intentionally for cases
	 * where exactly the same frequency is requested again, saving a few
	 * function calls.
	 */
	if (target_freq == policy->cur &&
	    !(cpufreq_driver->flags & CPUFREQ_NEED_UPDATE_LIMITS))
		return 0;

	if (cpufreq_driver->target) {
		/*
		 * If the driver hasn't set up a single inefficient frequency,
		 * it's unlikely it knows how to decode CPUFREQ_RELATION_E.
		 */
		if (!policy->efficiencies_available)
			relation &= ~CPUFREQ_RELATION_E;

		return cpufreq_driver->target(policy, target_freq, relation);
	}

	if (!cpufreq_driver->target_index)
		return -EINVAL;

	return __target_index(policy, policy->cached_resolved_idx);
}
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
int cpufreq_driver_target(struct cpufreq_policy *policy,
			  unsigned int target_freq,
			  unsigned int relation)
{
	int ret;

	down_write(&policy->rwsem);

	ret = __cpufreq_driver_target(policy, target_freq, relation);

	up_write(&policy->rwsem);

	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
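
/*
 * Example (sketch): governor code already holding policy->rwsem is expected
 * to use __cpufreq_driver_target() directly, while other callers take the
 * locked wrapper, e.g. to request the lowest frequency at or above a
 * computed value:
 *
 *	ret = cpufreq_driver_target(policy, wanted_khz, CPUFREQ_RELATION_L);
 *
 * CPUFREQ_RELATION_H selects the highest frequency at or below the request
 * instead, and CPUFREQ_RELATION_E may be OR'ed in to prefer efficient
 * frequencies where the driver registers them.
 */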
__weak struct cpufreq_governor *cpufreq_fallback_governor(void)
{
	return NULL;
}

static int cpufreq_init_governor(struct cpufreq_policy *policy)
{
	int ret;

	/* Don't start any governor operations if we are entering suspend */
	if (cpufreq_suspended)
		return 0;

	/*
	 * The governor might not have been initialized here if an ACPI _PPC
	 * change notification happened, so check it.
	 */
	if (!policy->governor)
		return -EINVAL;

	/* Platform doesn't want dynamic frequency switching? */
	if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING &&
	    cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
		struct cpufreq_governor *gov = cpufreq_fallback_governor();

		if (gov) {
			pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
				policy->governor->name, gov->name);
			policy->governor = gov;
		} else {
			return -EINVAL;
		}
	}

	if (!try_module_get(policy->governor->owner))
		return -EINVAL;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->init) {
		ret = policy->governor->init(policy);
		if (ret) {
			module_put(policy->governor->owner);
			return ret;
		}
	}

	policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET);

	return 0;
}
static void cpufreq_exit_governor(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->exit)
		policy->governor->exit(policy);

	module_put(policy->governor->owner);
}
int cpufreq_start_governor(struct cpufreq_policy *policy)
{
	int ret;

	if (cpufreq_suspended)
		return 0;

	if (!policy->governor)
		return -EINVAL;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (cpufreq_driver->get)
		cpufreq_verify_current_freq(policy, false);

	if (policy->governor->start) {
		ret = policy->governor->start(policy);
		if (ret)
			return ret;
	}

	if (policy->governor->limits)
		policy->governor->limits(policy);

	return 0;
}
void cpufreq_stop_governor(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->stop)
		policy->governor->stop(policy);
}
static void cpufreq_governor_limits(struct cpufreq_policy *policy)
{
	if (cpufreq_suspended || !policy->governor)
		return;

	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);

	if (policy->governor->limits)
		policy->governor->limits(policy);
}
int cpufreq_register_governor(struct cpufreq_governor *governor)
{
	int err;

	if (!governor)
		return -EINVAL;

	if (cpufreq_disabled())
		return -ENODEV;

	mutex_lock(&cpufreq_governor_mutex);

	err = -EBUSY;
	if (!find_governor(governor->name)) {
		err = 0;
		list_add(&governor->governor_list, &cpufreq_governor_list);
	}

	mutex_unlock(&cpufreq_governor_mutex);
	return err;
}
EXPORT_SYMBOL_GPL(cpufreq_register_governor);
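
/*
 * Example (minimal, illustrative module; all names invented): a governor is
 * registered by filling in struct cpufreq_governor and calling the function
 * above, typically from module init:
 *
 *	static void example_gov_limits(struct cpufreq_policy *policy)
 *	{
 *		__cpufreq_driver_target(policy, policy->max, CPUFREQ_RELATION_H);
 *	}
 *
 *	static struct cpufreq_governor example_governor = {
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.limits	= example_gov_limits,
 *	};
 *
 *	static int __init example_gov_init(void)
 *	{
 *		return cpufreq_register_governor(&example_governor);
 *	}
 *
 * This is essentially the shape of the in-tree performance and powersave
 * governors.
 */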
void cpufreq_unregister_governor(struct cpufreq_governor *governor)
{
	struct cpufreq_policy *policy;
	unsigned long flags;

	if (!governor)
		return;

	if (cpufreq_disabled())
		return;

	/* clear last_governor for all inactive policies */
	read_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_inactive_policy(policy) {
		if (!strcmp(policy->last_governor, governor->name)) {
			policy->governor = NULL;
			strcpy(policy->last_governor, "\0");
		}
	}
	read_unlock_irqrestore(&cpufreq_driver_lock, flags);

	mutex_lock(&cpufreq_governor_mutex);
	list_del(&governor->governor_list);
	mutex_unlock(&cpufreq_governor_mutex);
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
/*********************************************************************
 *                          POLICY INTERFACE                         *
 *********************************************************************/

/**
 * cpufreq_get_policy - get the current cpufreq_policy
 * @policy: struct cpufreq_policy into which the current cpufreq_policy
 *	is written
 * @cpu: CPU to find the policy for
 *
 * Reads the current cpufreq policy.
 */
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
{
	struct cpufreq_policy *cpu_policy;

	if (!policy)
		return -EINVAL;

	cpu_policy = cpufreq_cpu_get(cpu);
	if (!cpu_policy)
		return -EINVAL;

	memcpy(policy, cpu_policy, sizeof(*policy));

	cpufreq_cpu_put(cpu_policy);

	return 0;
}
EXPORT_SYMBOL(cpufreq_get_policy);
DEFINE_PER_CPU(unsigned long, cpufreq_pressure);

/**
 * cpufreq_update_pressure() - Update cpufreq pressure for CPUs
 * @policy: cpufreq policy of the CPUs.
 *
 * Update the value of cpufreq pressure for all @cpus in the policy.
 */
static void cpufreq_update_pressure(struct cpufreq_policy *policy)
{
	unsigned long max_capacity, capped_freq, pressure;
	u32 max_freq;
	int cpu;

	cpu = cpumask_first(policy->related_cpus);
	max_freq = arch_scale_freq_ref(cpu);
	capped_freq = policy->max;

	/*
	 * Properly handle boost frequencies, which should simply clear the
	 * cpufreq pressure value.
	 */
	if (max_freq <= capped_freq) {
		pressure = 0;
	} else {
		max_capacity = arch_scale_cpu_capacity(cpu);
		pressure = max_capacity -
			   mult_frac(max_capacity, capped_freq, max_freq);
	}

	for_each_cpu(cpu, policy->related_cpus)
		WRITE_ONCE(per_cpu(cpufreq_pressure, cpu), pressure);
}
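
/*
 * For example, with a reference frequency of 2000000 kHz reported by
 * arch_scale_freq_ref(), a policy capped at 1500000 kHz and a CPU capacity
 * of 1024, the code above computes:
 *
 *	pressure = 1024 - (1024 * 1500000 / 2000000) = 256
 *
 * i.e. a quarter of the CPU capacity is reported to the scheduler as
 * unavailable due to the frequency cap.
 */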
/**
 * cpufreq_set_policy - Modify cpufreq policy parameters.
 * @policy: Policy object to modify.
 * @new_gov: Policy governor pointer.
 * @new_pol: Policy value (for drivers with built-in governors).
 *
 * Invoke the cpufreq driver's ->verify() callback to sanity-check the frequency
 * limits to be set for the policy, update @policy with the verified limits
 * values and either invoke the driver's ->setpolicy() callback (if present) or
 * carry out a governor update for @policy.  That is, run the current governor's
 * ->limits() callback (if @new_gov points to the same object as the one in
 * @policy) or replace the governor for @policy with @new_gov.
 *
 * The cpuinfo part of @policy is not updated by this function.
 */
static int cpufreq_set_policy(struct cpufreq_policy *policy,
			      struct cpufreq_governor *new_gov,
			      unsigned int new_pol)
{
	struct cpufreq_policy_data new_data;
	struct cpufreq_governor *old_gov;
	int ret;

	memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
	new_data.freq_table = policy->freq_table;
	new_data.cpu = policy->cpu;
	/*
	 * The PM QoS framework collects all the requests from users and
	 * provides us with the final aggregated values here.
	 */
	new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
	new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);

	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
		 new_data.cpu, new_data.min, new_data.max);

	/*
	 * Verify that the CPU speed can be set within these limits and make
	 * sure that min <= max.
	 */
	ret = cpufreq_driver->verify(&new_data);
	if (ret)
		return ret;

	/*
	 * Resolve policy min/max to available frequencies, so that the
	 * resolved frequencies neither overshoot the requested maximum nor
	 * undershoot the requested minimum.
	 */
	policy->min = new_data.min;
	policy->max = new_data.max;
	policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L);
	policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H);
	trace_cpu_frequency_limits(policy);

	cpufreq_update_pressure(policy);

	policy->cached_target_freq = UINT_MAX;

	pr_debug("new min and max freqs are %u - %u kHz\n",
		 policy->min, policy->max);

	if (cpufreq_driver->setpolicy) {
		policy->policy = new_pol;
		pr_debug("setting range\n");
		return cpufreq_driver->setpolicy(policy);
	}

	if (new_gov == policy->governor) {
		pr_debug("governor limits update\n");
		cpufreq_governor_limits(policy);
		return 0;
	}

	pr_debug("governor switch\n");

	/* save old, working values */
	old_gov = policy->governor;
	/* end old governor */
	if (old_gov) {
		cpufreq_stop_governor(policy);
		cpufreq_exit_governor(policy);
	}

	/* start new governor */
	policy->governor = new_gov;
	ret = cpufreq_init_governor(policy);
	if (!ret) {
		ret = cpufreq_start_governor(policy);
		if (!ret) {
			pr_debug("governor change\n");
			return 0;
		}
		cpufreq_exit_governor(policy);
	}

	/* new governor failed, so re-start old one */
	pr_debug("starting governor %s failed\n", policy->governor->name);
	if (old_gov) {
		policy->governor = old_gov;
		if (cpufreq_init_governor(policy))
			policy->governor = NULL;
		else
			cpufreq_start_governor(policy);
	}

	return ret;
}
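
/*
 * Example (sketch, invented request name): the aggregated min/max values
 * read in cpufreq_set_policy() come from freq QoS requests, e.g. a kernel
 * user capping a policy at 1200000 kHz:
 *
 *	struct freq_qos_request req;
 *
 *	ret = freq_qos_add_request(&policy->constraints, &req,
 *				   FREQ_QOS_MAX, 1200000);
 *
 * From the next policy update onwards, new_data.max will not exceed
 * 1200000 kHz until the request is changed with freq_qos_update_request()
 * or dropped with freq_qos_remove_request().
 */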
/**
 * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
 * @cpu: CPU to re-evaluate the policy for.
 *
 * Update the current frequency for the cpufreq policy of @cpu and use
 * cpufreq_set_policy() to re-apply the min and max limits, which triggers the
 * evaluation of policy notifiers and the cpufreq driver's ->verify() callback
 * for the policy in question, among other things.
 */
void cpufreq_update_policy(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);

	if (!policy)
		return;

	/*
	 * The BIOS might change the frequency behind our back, so ask the
	 * driver for the current frequency and notify the governors about it.
	 */
	if (cpufreq_driver->get && has_target() &&
	    (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
		goto unlock;

	refresh_frequency_limits(policy);

unlock:
	cpufreq_cpu_release(policy);
}
EXPORT_SYMBOL(cpufreq_update_policy);
/**
 * cpufreq_update_limits - Update policy limits for a given CPU.
 * @cpu: CPU to update the policy limits for.
 *
 * Invoke the driver's ->update_limits callback if present or call
 * cpufreq_update_policy() for @cpu.
 */
void cpufreq_update_limits(unsigned int cpu)
{
	if (cpufreq_driver->update_limits)
		cpufreq_driver->update_limits(cpu);
	else
		cpufreq_update_policy(cpu);
}
EXPORT_SYMBOL_GPL(cpufreq_update_limits);
/*********************************************************************
 *                             BOOST                                 *
 *********************************************************************/
static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
{
	int ret;

	if (!policy->freq_table)
		return -ENXIO;

	ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
	if (ret) {
		pr_err("%s: Policy frequency update failed\n", __func__);
		return ret;
	}

	ret = freq_qos_update_request(policy->max_freq_req, policy->max);
	if (ret < 0)
		return ret;

	return 0;
}
int cpufreq_boost_trigger_state(int state)
{
	struct cpufreq_policy *policy;
	unsigned long flags;
	int ret = 0;

	if (cpufreq_driver->boost_enabled == state)
		return 0;

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpus_read_lock();
	for_each_active_policy(policy) {
		policy->boost_enabled = state;
		ret = cpufreq_driver->set_boost(policy, state);
		if (ret) {
			policy->boost_enabled = !policy->boost_enabled;
			goto err_reset_state;
		}
	}
	cpus_read_unlock();

	return 0;

err_reset_state:
	cpus_read_unlock();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver->boost_enabled = !state;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	pr_err("%s: Cannot %s BOOST\n",
	       __func__, state ? "enable" : "disable");

	return ret;
}
static bool cpufreq_boost_supported(void)
{
	return cpufreq_driver->set_boost;
}

static int create_boost_sysfs_file(void)
{
	int ret;

	ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
	if (ret)
		pr_err("%s: cannot register global BOOST sysfs file\n",
		       __func__);

	return ret;
}

static void remove_boost_sysfs_file(void)
{
	if (cpufreq_boost_supported())
		sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
}

int cpufreq_enable_boost_support(void)
{
	if (!cpufreq_driver)
		return -EINVAL;

	if (cpufreq_boost_supported())
		return 0;

	cpufreq_driver->set_boost = cpufreq_boost_set_sw;

	/* This will get removed on driver unregister */
	return create_boost_sysfs_file();
}
EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);

bool cpufreq_boost_enabled(void)
{
	return cpufreq_driver->boost_enabled;
}
EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
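
/*
 * Example (sketch, invented names): a driver without dedicated boost
 * hardware control can opt into the software fallback from its ->init()
 * callback once it knows the frequency table contains boost entries:
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		// ... set up policy->freq_table first ...
 *		if (policy_has_boost_freq(policy))
 *			return cpufreq_enable_boost_support();
 *		return 0;
 *	}
 *
 * After that, toggling the global "boost" sysfs knob ends up in
 * cpufreq_boost_set_sw(), which re-evaluates the table with or without the
 * boost entries.
 */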
/*********************************************************************
 *              REGISTER / UNREGISTER CPUFREQ DRIVER                 *
 *********************************************************************/
static enum cpuhp_state hp_online;

static int cpuhp_cpufreq_online(unsigned int cpu)
{
	cpufreq_online(cpu);
	return 0;
}

static int cpuhp_cpufreq_offline(unsigned int cpu)
{
	cpufreq_offline(cpu);
	return 0;
}
/**
 * cpufreq_register_driver - register a CPU Frequency driver
 * @driver_data: A struct cpufreq_driver containing the values
 * submitted by the CPU Frequency driver.
 *
 * Registers a CPU Frequency driver to this core code. This code
 * returns zero on success, -EEXIST when another driver got here first
 * (and isn't unregistered in the meantime).
 */
int cpufreq_register_driver(struct cpufreq_driver *driver_data)
{
	unsigned long flags;
	int ret;

	if (cpufreq_disabled())
		return -ENODEV;

	/*
	 * The cpufreq core depends heavily on the availability of device
	 * structures, so make sure they are available before proceeding
	 * further.
	 */
	if (!get_cpu_device(0))
		return -EPROBE_DEFER;

	if (!driver_data || !driver_data->verify || !driver_data->init ||
	    !(driver_data->setpolicy || driver_data->target_index ||
		    driver_data->target) ||
	     (driver_data->setpolicy && (driver_data->target_index ||
		    driver_data->target)) ||
	     (!driver_data->get_intermediate != !driver_data->target_intermediate) ||
	     (!driver_data->online != !driver_data->offline) ||
	     (driver_data->adjust_perf && !driver_data->fast_switch))
		return -EINVAL;

	pr_debug("trying to register driver %s\n", driver_data->name);

	/* Protect against concurrent CPU online/offline. */
	cpus_read_lock();

	write_lock_irqsave(&cpufreq_driver_lock, flags);
	if (cpufreq_driver) {
		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
		ret = -EEXIST;
		goto out;
	}
	cpufreq_driver = driver_data;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	/*
	 * Mark support for the scheduler's frequency invariance engine for
	 * drivers that implement target(), target_index() or fast_switch().
	 */
	if (!cpufreq_driver->setpolicy) {
		static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
		pr_debug("supports frequency invariance");
	}

	if (driver_data->setpolicy)
		driver_data->flags |= CPUFREQ_CONST_LOOPS;

	if (cpufreq_boost_supported()) {
		ret = create_boost_sysfs_file();
		if (ret)
			goto err_null_driver;
	}

	ret = subsys_interface_register(&cpufreq_interface);
	if (ret)
		goto err_boost_unreg;

	if (unlikely(list_empty(&cpufreq_policy_list))) {
		/* if all ->init() calls failed, unregister */
		ret = -ENODEV;
		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
			 driver_data->name);
		goto err_if_unreg;
	}

	ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
						   "cpufreq:online",
						   cpuhp_cpufreq_online,
						   cpuhp_cpufreq_offline);
	if (ret < 0)
		goto err_if_unreg;
	hp_online = ret;
	ret = 0;

	pr_debug("driver %s up and running\n", driver_data->name);
	goto out;

err_if_unreg:
	subsys_interface_unregister(&cpufreq_interface);
err_boost_unreg:
	remove_boost_sysfs_file();
err_null_driver:
	write_lock_irqsave(&cpufreq_driver_lock, flags);
	cpufreq_driver = NULL;
	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
out:
	cpus_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_register_driver);
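
/*
 * Example (minimal, illustrative; names invented): a frequency-table based
 * platform driver satisfies the validity checks above with ->init, ->verify
 * and ->target_index callbacks:
 *
 *	static struct cpufreq_driver foo_cpufreq_driver = {
 *		.name		= "foo-cpufreq",
 *		.flags		= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
 *		.init		= foo_cpufreq_init,
 *		.verify		= cpufreq_generic_frequency_table_verify,
 *		.target_index	= foo_cpufreq_target_index,
 *		.get		= cpufreq_generic_get,
 *	};
 *
 *	static int foo_cpufreq_probe(struct platform_device *pdev)
 *	{
 *		return cpufreq_register_driver(&foo_cpufreq_driver);
 *	}
 *
 * Note the pairing rules enforced above: ->setpolicy is mutually exclusive
 * with ->target/->target_index, ->get_intermediate requires
 * ->target_intermediate, and ->adjust_perf requires ->fast_switch.
 */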
/*
 * cpufreq_unregister_driver - unregister the current CPUFreq driver
 *
 * Unregister the current CPUFreq driver. Only call this if you have
 * the right to do so, i.e. if you have succeeded in initialising before!
 */
void cpufreq_unregister_driver(struct cpufreq_driver *driver)
{
	unsigned long flags;

	if (WARN_ON(!cpufreq_driver || (driver != cpufreq_driver)))
		return;

	pr_debug("unregistering driver %s\n", driver->name);

	/* Protect against concurrent cpu hotplug */
	cpus_read_lock();
	subsys_interface_unregister(&cpufreq_interface);
	remove_boost_sysfs_file();
	static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
	cpuhp_remove_state_nocalls_cpuslocked(hp_online);

	write_lock_irqsave(&cpufreq_driver_lock, flags);

	cpufreq_driver = NULL;

	write_unlock_irqrestore(&cpufreq_driver_lock, flags);

	cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
static int __init cpufreq_core_init(void)
{
	struct cpufreq_governor *gov = cpufreq_default_governor();
	struct device *dev_root;

	if (cpufreq_disabled())
		return -ENODEV;

	dev_root = bus_get_dev_root(&cpu_subsys);
	if (dev_root) {
		cpufreq_global_kobject = kobject_create_and_add("cpufreq", &dev_root->kobj);
		put_device(dev_root);
	}
	BUG_ON(!cpufreq_global_kobject);

	if (!strlen(default_governor))
		strscpy(default_governor, gov->name, CPUFREQ_NAME_LEN);

	return 0;
}
module_param(off, int, 0444);
module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
core_initcall(cpufreq_core_init);