2 * linux/drivers/cpufreq/cpufreq.c
4 * Copyright (C) 2001 Russell King
9 * Added handling for CPU hotplug
11 * Fix handling for CPU hotplug -- affected CPUs
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/init.h>
25 #include <linux/kernel_stat.h>
26 #include <linux/module.h>
27 #include <linux/mutex.h>
28 #include <linux/slab.h>
29 #include <linux/suspend.h>
30 #include <linux/syscore_ops.h>
31 #include <linux/tick.h>
32 #include <trace/events/power.h>
34 static LIST_HEAD(cpufreq_policy_list);
36 static inline bool policy_is_inactive(struct cpufreq_policy *policy)
38 return cpumask_empty(policy->cpus);
41 /* Macros to iterate over CPU policies */
42 #define for_each_suitable_policy(__policy, __active) \
43 list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
44 if ((__active) == !policy_is_inactive(__policy))
46 #define for_each_active_policy(__policy) \
47 for_each_suitable_policy(__policy, true)
48 #define for_each_inactive_policy(__policy) \
49 for_each_suitable_policy(__policy, false)
51 #define for_each_policy(__policy) \
52 list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)
54 /* Iterate over governors */
55 static LIST_HEAD(cpufreq_governor_list);
56 #define for_each_governor(__governor) \
57 list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
60 * The "cpufreq driver" - the arch- or hardware-dependent low
61 * level driver of CPUFreq support, and its read-write lock. This lock
62 * also protects the per-CPU cpufreq_cpu_data pointers.
64 static struct cpufreq_driver *cpufreq_driver;
65 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
66 static DEFINE_RWLOCK(cpufreq_driver_lock);
68 /* Flag to suspend/resume CPUFreq governors */
69 static bool cpufreq_suspended;
71 static inline bool has_target(void)
73 return cpufreq_driver->target_index || cpufreq_driver->target;
76 /* internal prototypes */
77 static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
78 static int cpufreq_init_governor(struct cpufreq_policy *policy);
79 static void cpufreq_exit_governor(struct cpufreq_policy *policy);
80 static int cpufreq_start_governor(struct cpufreq_policy *policy);
81 static void cpufreq_stop_governor(struct cpufreq_policy *policy);
82 static void cpufreq_governor_limits(struct cpufreq_policy *policy);
85 * Two notifier lists: the "policy" list is involved in the
86 * validation process for a new CPU frequency policy; the
87 * "transition" list for kernel code that needs to handle
88 * changes to devices when the CPU clock speed changes.
89 * The mutex locks both lists.
91 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
92 static struct srcu_notifier_head cpufreq_transition_notifier_list;
94 static bool init_cpufreq_transition_notifier_list_called;
95 static int __init init_cpufreq_transition_notifier_list(void)
97 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
98 init_cpufreq_transition_notifier_list_called = true;
101 pure_initcall(init_cpufreq_transition_notifier_list);
103 static int off __read_mostly;
104 static int cpufreq_disabled(void)
108 void disable_cpufreq(void)
112 static DEFINE_MUTEX(cpufreq_governor_mutex);
114 bool have_governor_per_policy(void)
116 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
118 EXPORT_SYMBOL_GPL(have_governor_per_policy);
120 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
122 if (have_governor_per_policy())
123 return &policy->kobj;
125 return cpufreq_global_kobject;
127 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
129 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
135 cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());
137 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
138 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
139 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
140 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
141 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
142 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
144 idle_time = cur_wall_time - busy_time;
146 *wall = div_u64(cur_wall_time, NSEC_PER_USEC);
148 return div_u64(idle_time, NSEC_PER_USEC);
151 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
153 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
155 if (idle_time == -1ULL)
156 return get_cpu_idle_time_jiffy(cpu, wall);
158 idle_time += get_cpu_iowait_time_us(cpu, wall);
162 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
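/*
 * Illustrative sketch (not part of the core): how a sampling governor
 * typically turns two get_cpu_idle_time() snapshots into a load figure.
 * example_cpu_load() and its prev_* parameters are hypothetical names.
 */
static inline unsigned int example_cpu_load(unsigned int cpu,
					    u64 *prev_idle, u64 *prev_wall)
{
	u64 wall, idle, wall_delta, idle_delta;
	unsigned int load = 0;

	idle = get_cpu_idle_time(cpu, &wall, 0);
	wall_delta = wall - *prev_wall;
	idle_delta = idle - *prev_idle;

	if (wall_delta && wall_delta > idle_delta)
		load = div64_u64(100 * (wall_delta - idle_delta), wall_delta);

	*prev_idle = idle;
	*prev_wall = wall;

	return load;	/* busy time as a percentage of the sampling window */
}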
165 * This is a generic cpufreq init() routine which can be used by cpufreq
166 * drivers of SMP systems. It will do the following:
167 * - validate & show the frequency table passed in
168 * - set the policy's transition latency
169 * - fill policy->cpus with all possible CPUs
171 int cpufreq_generic_init(struct cpufreq_policy *policy,
172 struct cpufreq_frequency_table *table,
173 unsigned int transition_latency)
177 ret = cpufreq_table_validate_and_show(policy, table);
179 pr_err("%s: invalid frequency table: %d\n", __func__, ret);
183 policy->cpuinfo.transition_latency = transition_latency;
186 * The driver only supports the SMP configuration where all processors
187 * share the clock and voltage.
189 cpumask_setall(policy->cpus);
193 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
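/*
 * Illustrative sketch: a driver ->init() callback built on top of
 * cpufreq_generic_init(). The table entries and the 200 us transition
 * latency are made-up values, and the example_* names are hypothetical;
 * a real driver would reference these from its struct cpufreq_driver.
 */
static struct cpufreq_frequency_table example_freq_table[] = {
	{ .frequency = 500000 },		/* 500 MHz, in kHz */
	{ .frequency = 1000000 },		/* 1 GHz */
	{ .frequency = CPUFREQ_TABLE_END },
};

static int example_cpufreq_init(struct cpufreq_policy *policy)
{
	/* validates the table, sets transition latency, fills policy->cpus */
	return cpufreq_generic_init(policy, example_freq_table, 200 * 1000);
}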
195 struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
197 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
199 return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
201 EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);
203 unsigned int cpufreq_generic_get(unsigned int cpu)
205 struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
207 if (!policy || IS_ERR(policy->clk)) {
208 pr_err("%s: No %s associated to cpu: %d\n",
209 __func__, policy ? "clk" : "policy", cpu);
213 return clk_get_rate(policy->clk) / 1000;
215 EXPORT_SYMBOL_GPL(cpufreq_generic_get);
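/*
 * Illustrative sketch: cpufreq_generic_get() relies on the driver's ->init()
 * having stashed the CPU clock in policy->clk. One minimal way to do that,
 * assuming the clk API is usable for the CPU device; example_attach_clk()
 * is a hypothetical helper.
 */
static inline int example_attach_clk(struct cpufreq_policy *policy)
{
	policy->clk = clk_get(get_cpu_device(policy->cpu), NULL);

	return PTR_ERR_OR_ZERO(policy->clk);
}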
218 * cpufreq_cpu_get: returns policy for a cpu and marks it busy.
220 * @cpu: cpu to find policy for.
222 * This returns the policy for 'cpu', or NULL if it doesn't exist.
223 * It also increments the kobject reference count to mark the policy busy,
224 * so a corresponding call to cpufreq_cpu_put() is required to drop that
225 * reference again. If the call to cpufreq_cpu_put() isn't made, the policy
226 * will never be freed, as freeing depends on the kobject count.
228 * Return: A valid policy on success, otherwise NULL on failure.
230 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
232 struct cpufreq_policy *policy = NULL;
235 if (WARN_ON(cpu >= nr_cpu_ids))
238 /* get the cpufreq driver */
239 read_lock_irqsave(&cpufreq_driver_lock, flags);
241 if (cpufreq_driver) {
243 policy = cpufreq_cpu_get_raw(cpu);
245 kobject_get(&policy->kobj);
248 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
252 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
255 * cpufreq_cpu_put: Decrements the usage count of a policy
257 * @policy: policy earlier returned by cpufreq_cpu_get().
259 * This decrements the kobject reference count incremented earlier by calling
260 * cpufreq_cpu_get().
262 void cpufreq_cpu_put(struct cpufreq_policy *policy)
264 kobject_put(&policy->kobj);
266 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
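/*
 * Illustrative sketch: the usual get/put pairing around a policy lookup.
 * example_read_cur_freq() is a hypothetical helper, not a core API.
 */
static inline unsigned int example_read_cur_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int cur = 0;

	if (policy) {
		cur = policy->cur;		/* last known frequency in kHz */
		cpufreq_cpu_put(policy);	/* drop the kobject reference */
	}

	return cur;
}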
268 /*********************************************************************
269 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
270 *********************************************************************/
273 * adjust_jiffies - adjust the system "loops_per_jiffy"
275 * This function alters the system "loops_per_jiffy" for the clock
276 * speed change. Note that loops_per_jiffy cannot be updated on SMP
277 * systems as each CPU might be scaled differently. So, use the arch
278 * per-CPU loops_per_jiffy value wherever possible.
280 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
283 static unsigned long l_p_j_ref;
284 static unsigned int l_p_j_ref_freq;
286 if (ci->flags & CPUFREQ_CONST_LOOPS)
289 if (!l_p_j_ref_freq) {
290 l_p_j_ref = loops_per_jiffy;
291 l_p_j_ref_freq = ci->old;
292 pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
293 l_p_j_ref, l_p_j_ref_freq);
295 if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
296 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
298 pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
299 loops_per_jiffy, ci->new);
304 static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
305 struct cpufreq_freqs *freqs, unsigned int state)
307 BUG_ON(irqs_disabled());
309 if (cpufreq_disabled())
312 freqs->flags = cpufreq_driver->flags;
313 pr_debug("notification %u of frequency transition to %u kHz\n",
318 case CPUFREQ_PRECHANGE:
319 /* detect if the driver reported a value as "old frequency"
320 * which is not equal to what the cpufreq core thinks is
321 * "old frequency".
323 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
324 if ((policy) && (policy->cpu == freqs->cpu) &&
325 (policy->cur) && (policy->cur != freqs->old)) {
326 pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
327 freqs->old, policy->cur);
328 freqs->old = policy->cur;
331 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
332 CPUFREQ_PRECHANGE, freqs);
333 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
336 case CPUFREQ_POSTCHANGE:
337 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
338 pr_debug("FREQ: %lu - CPU: %lu\n",
339 (unsigned long)freqs->new, (unsigned long)freqs->cpu);
340 trace_cpu_frequency(freqs->new, freqs->cpu);
341 cpufreq_stats_record_transition(policy, freqs->new);
342 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
343 CPUFREQ_POSTCHANGE, freqs);
344 if (likely(policy) && likely(policy->cpu == freqs->cpu))
345 policy->cur = freqs->new;
351 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
352 * on frequency transition.
354 * This function calls the transition notifiers and the "adjust_jiffies"
355 * function. It is called twice on all CPU frequency changes that have
356 * external effects.
358 static void cpufreq_notify_transition(struct cpufreq_policy *policy,
359 struct cpufreq_freqs *freqs, unsigned int state)
361 for_each_cpu(freqs->cpu, policy->cpus)
362 __cpufreq_notify_transition(policy, freqs, state);
365 /* Do post notifications when there is a chance that the transition has failed */
366 static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
367 struct cpufreq_freqs *freqs, int transition_failed)
369 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
370 if (!transition_failed)
373 swap(freqs->old, freqs->new);
374 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
375 cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
378 void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
379 struct cpufreq_freqs *freqs)
383 * Catch double invocations of _begin() which lead to self-deadlock.
384 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
385 * doesn't invoke _begin() on their behalf, and hence the chances of
386 * double invocations are very low. Moreover, there are scenarios
387 * where these checks can emit false-positive warnings in these
388 * drivers; so we avoid that by skipping them altogether.
390 WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
391 && current == policy->transition_task);
394 wait_event(policy->transition_wait, !policy->transition_ongoing);
396 spin_lock(&policy->transition_lock);
398 if (unlikely(policy->transition_ongoing)) {
399 spin_unlock(&policy->transition_lock);
403 policy->transition_ongoing = true;
404 policy->transition_task = current;
406 spin_unlock(&policy->transition_lock);
408 cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
410 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
412 void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
413 struct cpufreq_freqs *freqs, int transition_failed)
415 if (unlikely(WARN_ON(!policy->transition_ongoing)))
418 cpufreq_notify_post_transition(policy, freqs, transition_failed);
420 policy->transition_ongoing = false;
421 policy->transition_task = NULL;
423 wake_up(&policy->transition_wait);
425 EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
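/*
 * Illustrative sketch: a clk-based ->target_index() callback bracketing the
 * actual hardware switch with the transition helpers above. The function name
 * example_target_index() is hypothetical and error handling is trimmed down.
 */
static int example_target_index(struct cpufreq_policy *policy,
				unsigned int index)
{
	struct cpufreq_freqs freqs = {
		.old = policy->cur,
		.new = policy->freq_table[index].frequency,
	};
	int ret;

	cpufreq_freq_transition_begin(policy, &freqs);
	ret = clk_set_rate(policy->clk, freqs.new * 1000UL);	/* kHz -> Hz */
	cpufreq_freq_transition_end(policy, &freqs, ret);

	return ret;
}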
428 * Fast frequency switching status count. Positive means "enabled", negative
429 * means "disabled" and 0 means "not decided yet".
431 static int cpufreq_fast_switch_count;
432 static DEFINE_MUTEX(cpufreq_fast_switch_lock);
434 static void cpufreq_list_transition_notifiers(void)
436 struct notifier_block *nb;
438 pr_info("Registered transition notifiers:\n");
440 mutex_lock(&cpufreq_transition_notifier_list.mutex);
442 for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
443 pr_info("%pF\n", nb->notifier_call);
445 mutex_unlock(&cpufreq_transition_notifier_list.mutex);
449 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
450 * @policy: cpufreq policy to enable fast frequency switching for.
452 * Try to enable fast frequency switching for @policy.
454 * The attempt will fail if there is at least one transition notifier registered
455 * at this point, as fast frequency switching is quite fundamentally at odds
456 * with transition notifiers. Thus if successful, it will make registration of
457 * transition notifiers fail going forward.
459 void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
461 lockdep_assert_held(&policy->rwsem);
463 if (!policy->fast_switch_possible)
466 mutex_lock(&cpufreq_fast_switch_lock);
467 if (cpufreq_fast_switch_count >= 0) {
468 cpufreq_fast_switch_count++;
469 policy->fast_switch_enabled = true;
471 pr_warn("CPU%u: Fast frequency switching not enabled\n",
473 cpufreq_list_transition_notifiers();
475 mutex_unlock(&cpufreq_fast_switch_lock);
477 EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
480 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
481 * @policy: cpufreq policy to disable fast frequency switching for.
483 void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
485 mutex_lock(&cpufreq_fast_switch_lock);
486 if (policy->fast_switch_enabled) {
487 policy->fast_switch_enabled = false;
488 if (!WARN_ON(cpufreq_fast_switch_count <= 0))
489 cpufreq_fast_switch_count--;
491 mutex_unlock(&cpufreq_fast_switch_lock);
493 EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
496 * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
497 * one.
498 * @target_freq: target frequency to resolve.
500 * The target to driver frequency mapping is cached in the policy.
502 * Return: Lowest driver-supported frequency greater than or equal to the
503 * given target_freq, subject to policy (min/max) and driver limitations.
505 unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
506 unsigned int target_freq)
508 target_freq = clamp_val(target_freq, policy->min, policy->max);
509 policy->cached_target_freq = target_freq;
511 if (cpufreq_driver->target_index) {
514 idx = cpufreq_frequency_table_target(policy, target_freq,
516 policy->cached_resolved_idx = idx;
517 return policy->freq_table[idx].frequency;
520 if (cpufreq_driver->resolve_freq)
521 return cpufreq_driver->resolve_freq(policy, target_freq);
525 EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
527 /*********************************************************************
529 *********************************************************************/
530 static ssize_t show_boost(struct kobject *kobj,
531 struct attribute *attr, char *buf)
533 return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
536 static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
537 const char *buf, size_t count)
541 ret = sscanf(buf, "%d", &enable);
542 if (ret != 1 || enable < 0 || enable > 1)
545 if (cpufreq_boost_trigger_state(enable)) {
546 pr_err("%s: Cannot %s BOOST!\n",
547 __func__, enable ? "enable" : "disable");
551 pr_debug("%s: cpufreq BOOST %s\n",
552 __func__, enable ? "enabled" : "disabled");
556 define_one_global_rw(boost);
558 static struct cpufreq_governor *find_governor(const char *str_governor)
560 struct cpufreq_governor *t;
563 if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
570 * cpufreq_parse_governor - parse a governor string
572 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
573 struct cpufreq_governor **governor)
577 if (cpufreq_driver->setpolicy) {
578 if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
579 *policy = CPUFREQ_POLICY_PERFORMANCE;
581 } else if (!strncasecmp(str_governor, "powersave",
583 *policy = CPUFREQ_POLICY_POWERSAVE;
587 struct cpufreq_governor *t;
589 mutex_lock(&cpufreq_governor_mutex);
591 t = find_governor(str_governor);
596 mutex_unlock(&cpufreq_governor_mutex);
597 ret = request_module("cpufreq_%s", str_governor);
598 mutex_lock(&cpufreq_governor_mutex);
601 t = find_governor(str_governor);
609 mutex_unlock(&cpufreq_governor_mutex);
615 * cpufreq_per_cpu_attr_read() / show_##file_name() -
616 * print out cpufreq information
618 * Write out information from cpufreq_driver->policy[cpu]; object must be
619 * "unsigned int".
622 #define show_one(file_name, object) \
623 static ssize_t show_##file_name \
624 (struct cpufreq_policy *policy, char *buf) \
626 return sprintf(buf, "%u\n", policy->object); \
629 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
630 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
631 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
632 show_one(scaling_min_freq, min);
633 show_one(scaling_max_freq, max);
635 static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
639 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
640 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
642 ret = sprintf(buf, "%u\n", policy->cur);
646 static int cpufreq_set_policy(struct cpufreq_policy *policy,
647 struct cpufreq_policy *new_policy);
650 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
652 #define store_one(file_name, object) \
653 static ssize_t store_##file_name \
654 (struct cpufreq_policy *policy, const char *buf, size_t count) \
657 struct cpufreq_policy new_policy; \
659 memcpy(&new_policy, policy, sizeof(*policy)); \
661 ret = sscanf(buf, "%u", &new_policy.object); \
665 temp = new_policy.object; \
666 ret = cpufreq_set_policy(policy, &new_policy); \
668 policy->user_policy.object = temp; \
670 return ret ? ret : count; \
673 store_one(scaling_min_freq, min);
674 store_one(scaling_max_freq, max);
677 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
679 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
682 unsigned int cur_freq = __cpufreq_get(policy);
685 return sprintf(buf, "%u\n", cur_freq);
687 return sprintf(buf, "<unknown>\n");
691 * show_scaling_governor - show the current policy for the specified CPU
693 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
695 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
696 return sprintf(buf, "powersave\n");
697 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
698 return sprintf(buf, "performance\n");
699 else if (policy->governor)
700 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
701 policy->governor->name);
706 * store_scaling_governor - store policy for the specified CPU
708 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
709 const char *buf, size_t count)
712 char str_governor[16];
713 struct cpufreq_policy new_policy;
715 memcpy(&new_policy, policy, sizeof(*policy));
717 ret = sscanf(buf, "%15s", str_governor);
721 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
722 &new_policy.governor))
725 ret = cpufreq_set_policy(policy, &new_policy);
726 return ret ? ret : count;
730 * show_scaling_driver - show the cpufreq driver currently loaded
732 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
734 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
738 * show_scaling_available_governors - show the available CPUfreq governors
740 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
744 struct cpufreq_governor *t;
747 i += sprintf(buf, "performance powersave");
751 for_each_governor(t) {
752 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
753 - (CPUFREQ_NAME_LEN + 2)))
755 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
758 i += sprintf(&buf[i], "\n");
762 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
767 for_each_cpu(cpu, mask) {
769 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
770 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
771 if (i >= (PAGE_SIZE - 5))
774 i += sprintf(&buf[i], "\n");
777 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
780 * show_related_cpus - show the CPUs affected by each transition even if
781 * hw coordination is in use
783 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
785 return cpufreq_show_cpus(policy->related_cpus, buf);
789 * show_affected_cpus - show the CPUs affected by each transition
791 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
793 return cpufreq_show_cpus(policy->cpus, buf);
796 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
797 const char *buf, size_t count)
799 unsigned int freq = 0;
802 if (!policy->governor || !policy->governor->store_setspeed)
805 ret = sscanf(buf, "%u", &freq);
809 policy->governor->store_setspeed(policy, freq);
814 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
816 if (!policy->governor || !policy->governor->show_setspeed)
817 return sprintf(buf, "<unsupported>\n");
819 return policy->governor->show_setspeed(policy, buf);
823 * show_bios_limit - show the current cpufreq HW/BIOS limitation
825 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
829 if (cpufreq_driver->bios_limit) {
830 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
832 return sprintf(buf, "%u\n", limit);
834 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
837 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
838 cpufreq_freq_attr_ro(cpuinfo_min_freq);
839 cpufreq_freq_attr_ro(cpuinfo_max_freq);
840 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
841 cpufreq_freq_attr_ro(scaling_available_governors);
842 cpufreq_freq_attr_ro(scaling_driver);
843 cpufreq_freq_attr_ro(scaling_cur_freq);
844 cpufreq_freq_attr_ro(bios_limit);
845 cpufreq_freq_attr_ro(related_cpus);
846 cpufreq_freq_attr_ro(affected_cpus);
847 cpufreq_freq_attr_rw(scaling_min_freq);
848 cpufreq_freq_attr_rw(scaling_max_freq);
849 cpufreq_freq_attr_rw(scaling_governor);
850 cpufreq_freq_attr_rw(scaling_setspeed);
852 static struct attribute *default_attrs[] = {
853 &cpuinfo_min_freq.attr,
854 &cpuinfo_max_freq.attr,
855 &cpuinfo_transition_latency.attr,
856 &scaling_min_freq.attr,
857 &scaling_max_freq.attr,
860 &scaling_governor.attr,
861 &scaling_driver.attr,
862 &scaling_available_governors.attr,
863 &scaling_setspeed.attr,
867 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
868 #define to_attr(a) container_of(a, struct freq_attr, attr)
870 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
872 struct cpufreq_policy *policy = to_policy(kobj);
873 struct freq_attr *fattr = to_attr(attr);
876 down_read(&policy->rwsem);
877 ret = fattr->show(policy, buf);
878 up_read(&policy->rwsem);
883 static ssize_t store(struct kobject *kobj, struct attribute *attr,
884 const char *buf, size_t count)
886 struct cpufreq_policy *policy = to_policy(kobj);
887 struct freq_attr *fattr = to_attr(attr);
888 ssize_t ret = -EINVAL;
892 if (cpu_online(policy->cpu)) {
893 down_write(&policy->rwsem);
894 ret = fattr->store(policy, buf, count);
895 up_write(&policy->rwsem);
903 static void cpufreq_sysfs_release(struct kobject *kobj)
905 struct cpufreq_policy *policy = to_policy(kobj);
906 pr_debug("last reference is dropped\n");
907 complete(&policy->kobj_unregister);
910 static const struct sysfs_ops sysfs_ops = {
915 static struct kobj_type ktype_cpufreq = {
916 .sysfs_ops = &sysfs_ops,
917 .default_attrs = default_attrs,
918 .release = cpufreq_sysfs_release,
921 static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
923 struct device *dev = get_cpu_device(cpu);
928 if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
931 dev_dbg(dev, "%s: Adding symlink\n", __func__);
932 if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
933 dev_err(dev, "cpufreq symlink creation failed\n");
936 static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
939 dev_dbg(dev, "%s: Removing symlink\n", __func__);
940 sysfs_remove_link(&dev->kobj, "cpufreq");
943 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
945 struct freq_attr **drv_attr;
948 /* set up files for this cpu device */
949 drv_attr = cpufreq_driver->attr;
950 while (drv_attr && *drv_attr) {
951 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
956 if (cpufreq_driver->get) {
957 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
962 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
966 if (cpufreq_driver->bios_limit) {
967 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
975 __weak struct cpufreq_governor *cpufreq_default_governor(void)
980 static int cpufreq_init_policy(struct cpufreq_policy *policy)
982 struct cpufreq_governor *gov = NULL;
983 struct cpufreq_policy new_policy;
985 memcpy(&new_policy, policy, sizeof(*policy));
987 /* Update governor of new_policy to the governor used before hotplug */
988 gov = find_governor(policy->last_governor);
990 pr_debug("Restoring governor %s for cpu %d\n",
991 policy->governor->name, policy->cpu);
993 gov = cpufreq_default_governor();
998 new_policy.governor = gov;
1000 /* Use the default policy if there is no last_policy. */
1001 if (cpufreq_driver->setpolicy) {
1002 if (policy->last_policy)
1003 new_policy.policy = policy->last_policy;
1005 cpufreq_parse_governor(gov->name, &new_policy.policy,
1008 /* set default policy */
1009 return cpufreq_set_policy(policy, &new_policy);
1012 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1016 /* Has this CPU been taken care of already? */
1017 if (cpumask_test_cpu(cpu, policy->cpus))
1020 down_write(&policy->rwsem);
1022 cpufreq_stop_governor(policy);
1024 cpumask_set_cpu(cpu, policy->cpus);
1027 ret = cpufreq_start_governor(policy);
1029 pr_err("%s: Failed to start governor\n", __func__);
1031 up_write(&policy->rwsem);
1035 static void handle_update(struct work_struct *work)
1037 struct cpufreq_policy *policy =
1038 container_of(work, struct cpufreq_policy, update);
1039 unsigned int cpu = policy->cpu;
1040 pr_debug("handle_update for cpu %u called\n", cpu);
1041 cpufreq_update_policy(cpu);
1044 static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
1046 struct cpufreq_policy *policy;
1049 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1053 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1054 goto err_free_policy;
1056 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1057 goto err_free_cpumask;
1059 if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
1060 goto err_free_rcpumask;
1062 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
1063 cpufreq_global_kobject, "policy%u", cpu);
1065 pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
1066 goto err_free_real_cpus;
1069 INIT_LIST_HEAD(&policy->policy_list);
1070 init_rwsem(&policy->rwsem);
1071 spin_lock_init(&policy->transition_lock);
1072 init_waitqueue_head(&policy->transition_wait);
1073 init_completion(&policy->kobj_unregister);
1074 INIT_WORK(&policy->update, handle_update);
1080 free_cpumask_var(policy->real_cpus);
1082 free_cpumask_var(policy->related_cpus);
1084 free_cpumask_var(policy->cpus);
1091 static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
1093 struct kobject *kobj;
1094 struct completion *cmp;
1096 down_write(&policy->rwsem);
1097 cpufreq_stats_free_table(policy);
1098 kobj = &policy->kobj;
1099 cmp = &policy->kobj_unregister;
1100 up_write(&policy->rwsem);
1104 * We need to make sure that the underlying kobj is
1105 * actually not referenced anymore by anybody before we
1106 * proceed with unloading.
1108 pr_debug("waiting for dropping of refcount\n");
1109 wait_for_completion(cmp);
1110 pr_debug("wait complete\n");
1113 static void cpufreq_policy_free(struct cpufreq_policy *policy)
1115 unsigned long flags;
1118 /* Remove policy from list */
1119 write_lock_irqsave(&cpufreq_driver_lock, flags);
1120 list_del(&policy->policy_list);
1122 for_each_cpu(cpu, policy->related_cpus)
1123 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1124 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1126 cpufreq_policy_put_kobj(policy);
1127 free_cpumask_var(policy->real_cpus);
1128 free_cpumask_var(policy->related_cpus);
1129 free_cpumask_var(policy->cpus);
1133 static int cpufreq_online(unsigned int cpu)
1135 struct cpufreq_policy *policy;
1137 unsigned long flags;
1141 pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
1143 /* Check if this CPU already has a policy to manage it */
1144 policy = per_cpu(cpufreq_cpu_data, cpu);
1146 WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
1147 if (!policy_is_inactive(policy))
1148 return cpufreq_add_policy_cpu(policy, cpu);
1150 /* This is the only online CPU for the policy. Start over. */
1152 down_write(&policy->rwsem);
1154 policy->governor = NULL;
1155 up_write(&policy->rwsem);
1158 policy = cpufreq_policy_alloc(cpu);
1163 cpumask_copy(policy->cpus, cpumask_of(cpu));
1165 /* call driver. From then on the driver must be able
1166 * to accept all calls to ->verify and ->setpolicy for this CPU
1168 ret = cpufreq_driver->init(policy);
1170 pr_debug("initialization failed\n");
1171 goto out_free_policy;
1174 down_write(&policy->rwsem);
1177 /* related_cpus should at least include policy->cpus. */
1178 cpumask_copy(policy->related_cpus, policy->cpus);
1182 * affected cpus must always be the ones that are online. We aren't
1183 * managing offline cpus here.
1185 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1188 policy->user_policy.min = policy->min;
1189 policy->user_policy.max = policy->max;
1191 for_each_cpu(j, policy->related_cpus) {
1192 per_cpu(cpufreq_cpu_data, j) = policy;
1193 add_cpu_dev_symlink(policy, j);
1196 policy->min = policy->user_policy.min;
1197 policy->max = policy->user_policy.max;
1200 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
1201 policy->cur = cpufreq_driver->get(policy->cpu);
1203 pr_err("%s: ->get() failed\n", __func__);
1204 goto out_exit_policy;
1209 * Sometimes boot loaders set the CPU frequency to a value outside of
1210 * the frequency table known to the cpufreq core. In such cases the CPU
1211 * might be unstable if it has to run at that frequency for a long time,
1212 * so it's better to set it to a frequency which is specified in the
1213 * freq-table. Running at an unlisted frequency also makes cpufreq stats
1214 * inconsistent, as cpufreq-stats would fail to register because the
1215 * current frequency of the CPU isn't found in the freq-table.
1217 * Because we don't want this change to affect the boot process badly, we
1218 * go for the next freq which is >= policy->cur ('cur' must be set by now,
1219 * otherwise we would end up setting the freq to the lowest table entry,
1220 * as 'cur' is initialized to zero).
1222 * We pass the target freq as "policy->cur - 1", because otherwise
1223 * __cpufreq_driver_target() would simply return early, as policy->cur
1224 * would be equal to the target freq.
1226 if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1228 /* Are we running at unknown frequency ? */
1229 ret = cpufreq_frequency_table_get_index(policy, policy->cur);
1230 if (ret == -EINVAL) {
1231 /* Warn user and fix it */
1232 pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
1233 __func__, policy->cpu, policy->cur);
1234 ret = __cpufreq_driver_target(policy, policy->cur - 1,
1235 CPUFREQ_RELATION_L);
1238 * Reaching here a few seconds after boot may not
1239 * mean that the system will remain stable at the "unknown"
1240 * frequency for a longer duration. Hence, a BUG_ON().
1243 pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
1244 __func__, policy->cpu, policy->cur);
1249 ret = cpufreq_add_dev_interface(policy);
1251 goto out_exit_policy;
1253 cpufreq_stats_create_table(policy);
1255 write_lock_irqsave(&cpufreq_driver_lock, flags);
1256 list_add(&policy->policy_list, &cpufreq_policy_list);
1257 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1260 ret = cpufreq_init_policy(policy);
1262 pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
1263 __func__, cpu, ret);
1264 /* cpufreq_policy_free() will notify based on this */
1266 goto out_exit_policy;
1269 up_write(&policy->rwsem);
1271 kobject_uevent(&policy->kobj, KOBJ_ADD);
1273 /* Callback for handling stuff after policy is ready */
1274 if (cpufreq_driver->ready)
1275 cpufreq_driver->ready(policy);
1277 pr_debug("initialization complete\n");
1282 up_write(&policy->rwsem);
1284 if (cpufreq_driver->exit)
1285 cpufreq_driver->exit(policy);
1287 for_each_cpu(j, policy->real_cpus)
1288 remove_cpu_dev_symlink(policy, get_cpu_device(j));
1291 cpufreq_policy_free(policy);
1296 * cpufreq_add_dev - the cpufreq interface for a CPU device.
1298 * @sif: Subsystem interface structure pointer (not used)
1300 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1302 struct cpufreq_policy *policy;
1303 unsigned cpu = dev->id;
1306 dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
1308 if (cpu_online(cpu)) {
1309 ret = cpufreq_online(cpu);
1314 /* Create sysfs link on CPU registration */
1315 policy = per_cpu(cpufreq_cpu_data, cpu);
1317 add_cpu_dev_symlink(policy, cpu);
1322 static int cpufreq_offline(unsigned int cpu)
1324 struct cpufreq_policy *policy;
1327 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1329 policy = cpufreq_cpu_get_raw(cpu);
1331 pr_debug("%s: No cpu_data found\n", __func__);
1335 down_write(&policy->rwsem);
1337 cpufreq_stop_governor(policy);
1339 cpumask_clear_cpu(cpu, policy->cpus);
1341 if (policy_is_inactive(policy)) {
1343 strncpy(policy->last_governor, policy->governor->name,
1346 policy->last_policy = policy->policy;
1347 } else if (cpu == policy->cpu) {
1348 /* Nominate new CPU */
1349 policy->cpu = cpumask_any(policy->cpus);
1352 /* Start governor again for active policy */
1353 if (!policy_is_inactive(policy)) {
1355 ret = cpufreq_start_governor(policy);
1357 pr_err("%s: Failed to start governor\n", __func__);
1363 if (cpufreq_driver->stop_cpu)
1364 cpufreq_driver->stop_cpu(policy);
1367 cpufreq_exit_governor(policy);
1370 * Perform the ->exit() even during light-weight tear-down,
1371 * since this is a core component, and is essential for the
1372 * subsequent light-weight ->init() to succeed.
1374 if (cpufreq_driver->exit) {
1375 cpufreq_driver->exit(policy);
1376 policy->freq_table = NULL;
1380 up_write(&policy->rwsem);
1385 * cpufreq_remove_dev - remove a CPU device
1387 * Removes the cpufreq interface for a CPU device.
1389 static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1391 unsigned int cpu = dev->id;
1392 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1397 if (cpu_online(cpu))
1398 cpufreq_offline(cpu);
1400 cpumask_clear_cpu(cpu, policy->real_cpus);
1401 remove_cpu_dev_symlink(policy, dev);
1403 if (cpumask_empty(policy->real_cpus))
1404 cpufreq_policy_free(policy);
1408 * cpufreq_out_of_sync - If actual and saved CPU frequency differ, we're
1409 * in deep trouble.
1410 * @policy: policy managing CPUs
1411 * @new_freq: CPU frequency the CPU actually runs at
1413 * We adjust to the current frequency first, and need to clean up later.
1414 * So either call cpufreq_update_policy() or schedule handle_update().
1416 static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1417 unsigned int new_freq)
1419 struct cpufreq_freqs freqs;
1421 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1422 policy->cur, new_freq);
1424 freqs.old = policy->cur;
1425 freqs.new = new_freq;
1427 cpufreq_freq_transition_begin(policy, &freqs);
1428 cpufreq_freq_transition_end(policy, &freqs, 0);
1432 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1435 * This is the last known freq, without actually getting it from the driver.
1436 * The return value will be the same as what is shown in scaling_cur_freq in sysfs.
1438 unsigned int cpufreq_quick_get(unsigned int cpu)
1440 struct cpufreq_policy *policy;
1441 unsigned int ret_freq = 0;
1442 unsigned long flags;
1444 read_lock_irqsave(&cpufreq_driver_lock, flags);
1446 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
1447 ret_freq = cpufreq_driver->get(cpu);
1448 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1452 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1454 policy = cpufreq_cpu_get(cpu);
1456 ret_freq = policy->cur;
1457 cpufreq_cpu_put(policy);
1462 EXPORT_SYMBOL(cpufreq_quick_get);
1465 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1468 * Just return the max possible frequency for a given CPU.
1470 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1472 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1473 unsigned int ret_freq = 0;
1476 ret_freq = policy->max;
1477 cpufreq_cpu_put(policy);
1482 EXPORT_SYMBOL(cpufreq_quick_get_max);
1484 static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1486 unsigned int ret_freq = 0;
1488 if (!cpufreq_driver->get)
1491 ret_freq = cpufreq_driver->get(policy->cpu);
1494 * Updating inactive policies is invalid, so avoid doing that. Also
1495 * if fast frequency switching is used with the given policy, the check
1496 * against policy->cur is pointless, so skip it in that case too.
1498 if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
1501 if (ret_freq && policy->cur &&
1502 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1503 /* verify no discrepancy between actual and
1504 saved value exists */
1505 if (unlikely(ret_freq != policy->cur)) {
1506 cpufreq_out_of_sync(policy, ret_freq);
1507 schedule_work(&policy->update);
1515 * cpufreq_get - get the current CPU frequency (in kHz)
1518 * Get the current CPU frequency.
1520 unsigned int cpufreq_get(unsigned int cpu)
1522 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1523 unsigned int ret_freq = 0;
1526 down_read(&policy->rwsem);
1528 if (!policy_is_inactive(policy))
1529 ret_freq = __cpufreq_get(policy);
1531 up_read(&policy->rwsem);
1533 cpufreq_cpu_put(policy);
1538 EXPORT_SYMBOL(cpufreq_get);
1540 static unsigned int cpufreq_update_current_freq(struct cpufreq_policy *policy)
1542 unsigned int new_freq;
1544 new_freq = cpufreq_driver->get(policy->cpu);
1549 pr_debug("cpufreq: Driver did not initialize current freq\n");
1550 policy->cur = new_freq;
1551 } else if (policy->cur != new_freq && has_target()) {
1552 cpufreq_out_of_sync(policy, new_freq);
1558 static struct subsys_interface cpufreq_interface = {
1560 .subsys = &cpu_subsys,
1561 .add_dev = cpufreq_add_dev,
1562 .remove_dev = cpufreq_remove_dev,
1566 * In case the platform wants some specific frequency to be configured
1567 * during suspend.
1569 int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1573 if (!policy->suspend_freq) {
1574 pr_debug("%s: suspend_freq not defined\n", __func__);
1578 pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1579 policy->suspend_freq);
1581 ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1582 CPUFREQ_RELATION_H);
1584 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1585 __func__, policy->suspend_freq, ret);
1589 EXPORT_SYMBOL(cpufreq_generic_suspend);
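/*
 * Illustrative sketch: a driver opting in to cpufreq_generic_suspend() only
 * needs to pick policy->suspend_freq from its ->init() and point its .suspend
 * callback at cpufreq_generic_suspend. example_pick_suspend_freq() is a
 * hypothetical helper, and the choice of the lowest listed frequency is just
 * one possible policy.
 */
static inline void example_pick_suspend_freq(struct cpufreq_policy *policy)
{
	/* run at the lowest listed frequency while suspending */
	policy->suspend_freq = policy->cpuinfo.min_freq;
}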
1592 * cpufreq_suspend() - Suspend CPUFreq governors
1594 * Called during system-wide Suspend/Hibernate cycles for suspending governors,
1595 * as some platforms can't change frequency after this point in the suspend
1596 * cycle, because some of the devices (e.g. i2c, regulators) they use for
1597 * changing frequency are suspended quickly after this point.
1599 void cpufreq_suspend(void)
1601 struct cpufreq_policy *policy;
1603 if (!cpufreq_driver)
1606 if (!has_target() && !cpufreq_driver->suspend)
1609 pr_debug("%s: Suspending Governors\n", __func__);
1611 for_each_active_policy(policy) {
1613 down_write(&policy->rwsem);
1614 cpufreq_stop_governor(policy);
1615 up_write(&policy->rwsem);
1618 if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
1619 pr_err("%s: Failed to suspend driver: %p\n", __func__,
1624 cpufreq_suspended = true;
1628 * cpufreq_resume() - Resume CPUFreq governors
1630 * Called during system-wide Suspend/Hibernate cycles for resuming governors that
1631 * are suspended with cpufreq_suspend().
1633 void cpufreq_resume(void)
1635 struct cpufreq_policy *policy;
1638 if (!cpufreq_driver)
1641 cpufreq_suspended = false;
1643 if (!has_target() && !cpufreq_driver->resume)
1646 pr_debug("%s: Resuming Governors\n", __func__);
1648 for_each_active_policy(policy) {
1649 if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
1650 pr_err("%s: Failed to resume driver: %p\n", __func__,
1652 } else if (has_target()) {
1653 down_write(&policy->rwsem);
1654 ret = cpufreq_start_governor(policy);
1655 up_write(&policy->rwsem);
1658 pr_err("%s: Failed to start governor for policy: %p\n",
1665 * cpufreq_get_current_driver - return current driver's name
1667 * Return the name string of the currently loaded cpufreq driver,
1668 * or NULL if none is loaded.
1670 const char *cpufreq_get_current_driver(void)
1673 return cpufreq_driver->name;
1677 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1680 * cpufreq_get_driver_data - return current driver data
1682 * Return the private data of the currently loaded cpufreq
1683 * driver, or NULL if no cpufreq driver is loaded.
1685 void *cpufreq_get_driver_data(void)
1688 return cpufreq_driver->driver_data;
1692 EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1694 /*********************************************************************
1695 * NOTIFIER LISTS INTERFACE *
1696 *********************************************************************/
1699 * cpufreq_register_notifier - register a driver with cpufreq
1700 * @nb: notifier function to register
1701 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1703 * Add a driver to one of two lists: either a list of drivers that
1704 * are notified about clock rate changes (once before and once after
1705 * the transition), or a list of drivers that are notified about
1706 * changes in cpufreq policy.
1708 * This function may sleep, and has the same return conditions as
1709 * blocking_notifier_chain_register.
1711 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1715 if (cpufreq_disabled())
1718 WARN_ON(!init_cpufreq_transition_notifier_list_called);
1721 case CPUFREQ_TRANSITION_NOTIFIER:
1722 mutex_lock(&cpufreq_fast_switch_lock);
1724 if (cpufreq_fast_switch_count > 0) {
1725 mutex_unlock(&cpufreq_fast_switch_lock);
1728 ret = srcu_notifier_chain_register(
1729 &cpufreq_transition_notifier_list, nb);
1731 cpufreq_fast_switch_count--;
1733 mutex_unlock(&cpufreq_fast_switch_lock);
1735 case CPUFREQ_POLICY_NOTIFIER:
1736 ret = blocking_notifier_chain_register(
1737 &cpufreq_policy_notifier_list, nb);
1745 EXPORT_SYMBOL(cpufreq_register_notifier);
1748 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1749 * @nb: notifier block to be unregistered
1750 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1752 * Remove a driver from the CPU frequency notifier list.
1754 * This function may sleep, and has the same return conditions as
1755 * blocking_notifier_chain_unregister.
1757 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1761 if (cpufreq_disabled())
1765 case CPUFREQ_TRANSITION_NOTIFIER:
1766 mutex_lock(&cpufreq_fast_switch_lock);
1768 ret = srcu_notifier_chain_unregister(
1769 &cpufreq_transition_notifier_list, nb);
1770 if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
1771 cpufreq_fast_switch_count++;
1773 mutex_unlock(&cpufreq_fast_switch_lock);
1775 case CPUFREQ_POLICY_NOTIFIER:
1776 ret = blocking_notifier_chain_unregister(
1777 &cpufreq_policy_notifier_list, nb);
1785 EXPORT_SYMBOL(cpufreq_unregister_notifier);
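/*
 * Illustrative sketch: a minimal transition notifier. Registration will be
 * rejected if fast frequency switching is already enabled, as described
 * above. example_transition_cb and example_nb are hypothetical names.
 */
static int example_transition_cb(struct notifier_block *nb,
				 unsigned long val, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (val == CPUFREQ_POSTCHANGE)
		pr_debug("CPU%u now runs at %u kHz\n", freqs->cpu, freqs->new);

	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_transition_cb,
};

/*
 * Typically paired in module init/exit:
 *	cpufreq_register_notifier(&example_nb, CPUFREQ_TRANSITION_NOTIFIER);
 *	cpufreq_unregister_notifier(&example_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */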
1788 /*********************************************************************
1790 *********************************************************************/
1793 * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
1794 * @policy: cpufreq policy to switch the frequency for.
1795 * @target_freq: New frequency to set (may be approximate).
1797 * Carry out a fast frequency switch without sleeping.
1799 * The driver's ->fast_switch() callback invoked by this function must be
1800 * suitable for being called from within RCU-sched read-side critical sections
1801 * and it is expected to select the minimum available frequency greater than or
1802 * equal to @target_freq (CPUFREQ_RELATION_L).
1804 * This function must not be called if policy->fast_switch_enabled is unset.
1806 * Governors calling this function must guarantee that it will never be invoked
1807 * twice in parallel for the same policy and that it will never be called in
1808 * parallel with either ->target() or ->target_index() for the same policy.
1810 * If CPUFREQ_ENTRY_INVALID is returned by the driver's ->fast_switch()
1811 * callback to indicate an error condition, the hardware configuration must be
1812 * preserved.
1814 unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
1815 unsigned int target_freq)
1817 target_freq = clamp_val(target_freq, policy->min, policy->max);
1819 return cpufreq_driver->fast_switch(policy, target_freq);
1821 EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
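/*
 * Illustrative sketch: a schedutil-style fast path. The caller is assumed to
 * have already checked policy->fast_switch_enabled and to honour the
 * no-concurrent-calls rule documented above. example_fast_adjust() is a
 * hypothetical helper; both 0 and CPUFREQ_ENTRY_INVALID are treated as
 * failure to stay on the safe side.
 */
static inline void example_fast_adjust(struct cpufreq_policy *policy,
				       unsigned int next_freq)
{
	unsigned int freq = cpufreq_driver_fast_switch(policy, next_freq);

	if (freq && freq != CPUFREQ_ENTRY_INVALID)
		policy->cur = freq;	/* record the frequency actually set */
}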
1823 /* Must set freqs->new to intermediate frequency */
1824 static int __target_intermediate(struct cpufreq_policy *policy,
1825 struct cpufreq_freqs *freqs, int index)
1829 freqs->new = cpufreq_driver->get_intermediate(policy, index);
1831 /* We don't need to switch to intermediate freq */
1835 pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
1836 __func__, policy->cpu, freqs->old, freqs->new);
1838 cpufreq_freq_transition_begin(policy, freqs);
1839 ret = cpufreq_driver->target_intermediate(policy, index);
1840 cpufreq_freq_transition_end(policy, freqs, ret);
1843 pr_err("%s: Failed to change to intermediate frequency: %d\n",
1849 static int __target_index(struct cpufreq_policy *policy, int index)
1851 struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
1852 unsigned int intermediate_freq = 0;
1853 unsigned int newfreq = policy->freq_table[index].frequency;
1854 int retval = -EINVAL;
1857 if (newfreq == policy->cur)
1860 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
1862 /* Handle switching to intermediate frequency */
1863 if (cpufreq_driver->get_intermediate) {
1864 retval = __target_intermediate(policy, &freqs, index);
1868 intermediate_freq = freqs.new;
1869 /* Set old freq to intermediate */
1870 if (intermediate_freq)
1871 freqs.old = freqs.new;
1874 freqs.new = newfreq;
1875 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1876 __func__, policy->cpu, freqs.old, freqs.new);
1878 cpufreq_freq_transition_begin(policy, &freqs);
1881 retval = cpufreq_driver->target_index(policy, index);
1883 pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
1887 cpufreq_freq_transition_end(policy, &freqs, retval);
1890 * Failed after setting to intermediate freq? Driver should have
1891 * reverted to the initial frequency and so should we. Check
1892 * here for intermediate_freq instead of get_intermediate, in
1893 * case we haven't switched to intermediate freq at all.
1895 if (unlikely(retval && intermediate_freq)) {
1896 freqs.old = intermediate_freq;
1897 freqs.new = policy->restore_freq;
1898 cpufreq_freq_transition_begin(policy, &freqs);
1899 cpufreq_freq_transition_end(policy, &freqs, 0);
1906 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1907 unsigned int target_freq,
1908 unsigned int relation)
1910 unsigned int old_target_freq = target_freq;
1913 if (cpufreq_disabled())
1916 /* Make sure that target_freq is within supported range */
1917 target_freq = clamp_val(target_freq, policy->min, policy->max);
1919 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1920 policy->cpu, target_freq, relation, old_target_freq);
1923 * This might look like a redundant call, as we are checking it again
1924 * after finding the index. But it is left intentionally for cases where
1925 * exactly the same freq is requested again, so we can save a few
1926 * function calls.
1928 if (target_freq == policy->cur)
1931 /* Save last value to restore later on errors */
1932 policy->restore_freq = policy->cur;
1934 if (cpufreq_driver->target)
1935 return cpufreq_driver->target(policy, target_freq, relation);
1937 if (!cpufreq_driver->target_index)
1940 index = cpufreq_frequency_table_target(policy, target_freq, relation);
1942 return __target_index(policy, index);
1944 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
1946 int cpufreq_driver_target(struct cpufreq_policy *policy,
1947 unsigned int target_freq,
1948 unsigned int relation)
1952 down_write(&policy->rwsem);
1954 ret = __cpufreq_driver_target(policy, target_freq, relation);
1956 up_write(&policy->rwsem);
1960 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
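/*
 * Illustrative sketch: a non-fast-switch governor path. The raw frequency is
 * first mapped onto a driver-supported value and then applied through
 * cpufreq_driver_target(), which takes policy->rwsem itself.
 * example_apply_freq() is a hypothetical helper.
 */
static inline int example_apply_freq(struct cpufreq_policy *policy,
				     unsigned int raw_freq)
{
	unsigned int target = cpufreq_driver_resolve_freq(policy, raw_freq);

	return cpufreq_driver_target(policy, target, CPUFREQ_RELATION_L);
}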
1962 __weak struct cpufreq_governor *cpufreq_fallback_governor(void)
1967 static int cpufreq_init_governor(struct cpufreq_policy *policy)
1971 /* Don't start any governor operations if we are entering suspend */
1972 if (cpufreq_suspended)
1975 * The governor might not be initialized here if an ACPI _PPC change
1976 * notification happened, so check it.
1978 if (!policy->governor)
1981 if (policy->governor->max_transition_latency &&
1982 policy->cpuinfo.transition_latency >
1983 policy->governor->max_transition_latency) {
1984 struct cpufreq_governor *gov = cpufreq_fallback_governor();
1987 pr_warn("%s governor failed, too long transition latency of HW, fallback to %s governor\n",
1988 policy->governor->name, gov->name);
1989 policy->governor = gov;
1995 if (!try_module_get(policy->governor->owner))
1998 pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2000 if (policy->governor->init) {
2001 ret = policy->governor->init(policy);
2003 module_put(policy->governor->owner);
2011 static void cpufreq_exit_governor(struct cpufreq_policy *policy)
2013 if (cpufreq_suspended || !policy->governor)
2016 pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2018 if (policy->governor->exit)
2019 policy->governor->exit(policy);
2021 module_put(policy->governor->owner);
2024 static int cpufreq_start_governor(struct cpufreq_policy *policy)
2028 if (cpufreq_suspended)
2031 if (!policy->governor)
2034 pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2036 if (cpufreq_driver->get && !cpufreq_driver->setpolicy)
2037 cpufreq_update_current_freq(policy);
2039 if (policy->governor->start) {
2040 ret = policy->governor->start(policy);
2045 if (policy->governor->limits)
2046 policy->governor->limits(policy);
2051 static void cpufreq_stop_governor(struct cpufreq_policy *policy)
2053 if (cpufreq_suspended || !policy->governor)
2056 pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2058 if (policy->governor->stop)
2059 policy->governor->stop(policy);
2062 static void cpufreq_governor_limits(struct cpufreq_policy *policy)
2064 if (cpufreq_suspended || !policy->governor)
2067 pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2069 if (policy->governor->limits)
2070 policy->governor->limits(policy);
2073 int cpufreq_register_governor(struct cpufreq_governor *governor)
2080 if (cpufreq_disabled())
2083 mutex_lock(&cpufreq_governor_mutex);
2086 if (!find_governor(governor->name)) {
2088 list_add(&governor->governor_list, &cpufreq_governor_list);
2091 mutex_unlock(&cpufreq_governor_mutex);
2094 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
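/*
 * Illustrative sketch: the skeleton of an out-of-tree governor, showing only
 * the hooks used by the core above. All example_gov_* names are hypothetical;
 * the ->limits() body mirrors the simple governors that re-clamp the current
 * frequency when the policy limits change.
 */
static int example_gov_start(struct cpufreq_policy *policy)
{
	return 0;	/* set up per-policy state, start sampling, ... */
}

static void example_gov_stop(struct cpufreq_policy *policy)
{
	/* tear down whatever ->start() created */
}

static void example_gov_limits(struct cpufreq_policy *policy)
{
	/* re-evaluate the current frequency against policy->min/max */
	__cpufreq_driver_target(policy, policy->cur, CPUFREQ_RELATION_C);
}

static struct cpufreq_governor example_governor = {
	.name	= "example",
	.owner	= THIS_MODULE,
	.start	= example_gov_start,
	.stop	= example_gov_stop,
	.limits	= example_gov_limits,
};

/*
 * Registered/unregistered from module init/exit with
 * cpufreq_register_governor(&example_governor) and
 * cpufreq_unregister_governor(&example_governor).
 */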
2096 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2098 struct cpufreq_policy *policy;
2099 unsigned long flags;
2104 if (cpufreq_disabled())
2107 /* clear last_governor for all inactive policies */
2108 read_lock_irqsave(&cpufreq_driver_lock, flags);
2109 for_each_inactive_policy(policy) {
2110 if (!strcmp(policy->last_governor, governor->name)) {
2111 policy->governor = NULL;
2112 strcpy(policy->last_governor, "\0");
2115 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
2117 mutex_lock(&cpufreq_governor_mutex);
2118 list_del(&governor->governor_list);
2119 mutex_unlock(&cpufreq_governor_mutex);
2122 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2125 /*********************************************************************
2126 * POLICY INTERFACE *
2127 *********************************************************************/
2130 * cpufreq_get_policy - get the current cpufreq_policy
2131 * @policy: struct cpufreq_policy into which the current cpufreq_policy
2132 * is written
2134 * Reads the current cpufreq policy.
2136 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2138 struct cpufreq_policy *cpu_policy;
2142 cpu_policy = cpufreq_cpu_get(cpu);
2146 memcpy(policy, cpu_policy, sizeof(*policy));
2148 cpufreq_cpu_put(cpu_policy);
2151 EXPORT_SYMBOL(cpufreq_get_policy);
2154 * policy : current policy.
2155 * new_policy: policy to be set.
2157 static int cpufreq_set_policy(struct cpufreq_policy *policy,
2158 struct cpufreq_policy *new_policy)
2160 struct cpufreq_governor *old_gov;
2163 pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2164 new_policy->cpu, new_policy->min, new_policy->max);
2166 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2169 * This check works well when we store new min/max freq attributes,
2170 * because new_policy is a copy of policy with one field updated.
2172 if (new_policy->min > new_policy->max)
2175 /* verify the cpu speed can be set within this limit */
2176 ret = cpufreq_driver->verify(new_policy);
2180 /* adjust if necessary - all reasons */
2181 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2182 CPUFREQ_ADJUST, new_policy);
2185 * verify the cpu speed can be set within this limit, which might be
2186 * different from the first one
2188 ret = cpufreq_driver->verify(new_policy);
2192 /* notification of the new policy */
2193 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
2194 CPUFREQ_NOTIFY, new_policy);
2196 policy->min = new_policy->min;
2197 policy->max = new_policy->max;
2199 policy->cached_target_freq = UINT_MAX;
2201 pr_debug("new min and max freqs are %u - %u kHz\n",
2202 policy->min, policy->max);
2204 if (cpufreq_driver->setpolicy) {
2205 policy->policy = new_policy->policy;
2206 pr_debug("setting range\n");
2207 return cpufreq_driver->setpolicy(new_policy);
2210 if (new_policy->governor == policy->governor) {
2211 pr_debug("cpufreq: governor limits update\n");
2212 cpufreq_governor_limits(policy);
2216 pr_debug("governor switch\n");
2218 /* save old, working values */
2219 old_gov = policy->governor;
2220 /* end old governor */
2222 cpufreq_stop_governor(policy);
2223 cpufreq_exit_governor(policy);
2226 /* start new governor */
2227 policy->governor = new_policy->governor;
2228 ret = cpufreq_init_governor(policy);
2230 ret = cpufreq_start_governor(policy);
2232 pr_debug("cpufreq: governor change\n");
2235 cpufreq_exit_governor(policy);
2238 /* new governor failed, so re-start old one */
2239 pr_debug("starting governor %s failed\n", policy->governor->name);
2241 policy->governor = old_gov;
2242 if (cpufreq_init_governor(policy))
2243 policy->governor = NULL;
2245 cpufreq_start_governor(policy);
2252 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2253 * @cpu: CPU which shall be re-evaluated
2255 * Useful for policy notifiers which have different requirements
2256 * at different times.
2258 void cpufreq_update_policy(unsigned int cpu)
2260 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2261 struct cpufreq_policy new_policy;
2266 down_write(&policy->rwsem);
2268 if (policy_is_inactive(policy))
2271 pr_debug("updating policy for CPU %u\n", cpu);
2272 memcpy(&new_policy, policy, sizeof(*policy));
2273 new_policy.min = policy->user_policy.min;
2274 new_policy.max = policy->user_policy.max;
2277 * BIOS might change freq behind our back
2278 * -> ask driver for current freq and notify governors about a change
2280 if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
2281 if (cpufreq_suspended)
2284 new_policy.cur = cpufreq_update_current_freq(policy);
2285 if (WARN_ON(!new_policy.cur))
2289 cpufreq_set_policy(policy, &new_policy);
2292 up_write(&policy->rwsem);
2294 cpufreq_cpu_put(policy);
2296 EXPORT_SYMBOL(cpufreq_update_policy);
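/*
 * Illustrative sketch: platform code (e.g. an ACPI _PPC notification handler)
 * re-evaluates the limits of every affected CPU by calling
 * cpufreq_update_policy(). example_limits_changed() is a hypothetical helper.
 */
static inline void example_limits_changed(const struct cpumask *cpus)
{
	unsigned int cpu;

	for_each_cpu(cpu, cpus)
		cpufreq_update_policy(cpu);
}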
2298 /*********************************************************************
2300 *********************************************************************/
2301 static int cpufreq_boost_set_sw(int state)
2303 struct cpufreq_policy *policy;
2306 for_each_active_policy(policy) {
2307 if (!policy->freq_table)
2310 ret = cpufreq_frequency_table_cpuinfo(policy,
2311 policy->freq_table);
2313 pr_err("%s: Policy frequency update failed\n",
2318 down_write(&policy->rwsem);
2319 policy->user_policy.max = policy->max;
2320 cpufreq_governor_limits(policy);
2321 up_write(&policy->rwsem);
2327 int cpufreq_boost_trigger_state(int state)
2329 unsigned long flags;
2332 if (cpufreq_driver->boost_enabled == state)
2335 write_lock_irqsave(&cpufreq_driver_lock, flags);
2336 cpufreq_driver->boost_enabled = state;
2337 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2339 ret = cpufreq_driver->set_boost(state);
2341 write_lock_irqsave(&cpufreq_driver_lock, flags);
2342 cpufreq_driver->boost_enabled = !state;
2343 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2345 pr_err("%s: Cannot %s BOOST\n",
2346 __func__, state ? "enable" : "disable");
2352 static bool cpufreq_boost_supported(void)
2354 return likely(cpufreq_driver) && cpufreq_driver->set_boost;
2357 static int create_boost_sysfs_file(void)
2361 ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
2363 pr_err("%s: cannot register global BOOST sysfs file\n",
2369 static void remove_boost_sysfs_file(void)
2371 if (cpufreq_boost_supported())
2372 sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
2375 int cpufreq_enable_boost_support(void)
2377 if (!cpufreq_driver)
2380 if (cpufreq_boost_supported())
2383 cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2385 /* This will get removed on driver unregister */
2386 return create_boost_sysfs_file();
2388 EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
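/*
 * Illustrative sketch: a driver whose frequency table carries
 * CPUFREQ_BOOST_FREQ entries would typically turn on the software boost
 * handling from its ->init(). example_setup_boost() is a hypothetical helper
 * and assumes policy_has_boost_freq() is available for the table in use.
 */
static inline int example_setup_boost(struct cpufreq_policy *policy)
{
	if (policy_has_boost_freq(policy))
		return cpufreq_enable_boost_support();

	return 0;
}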
2390 int cpufreq_boost_enabled(void)
2392 return cpufreq_driver->boost_enabled;
2394 EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2396 /*********************************************************************
2397 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2398 *********************************************************************/
2399 static enum cpuhp_state hp_online;
2401 static int cpuhp_cpufreq_online(unsigned int cpu)
2403 cpufreq_online(cpu);
2408 static int cpuhp_cpufreq_offline(unsigned int cpu)
2410 cpufreq_offline(cpu);
2416 * cpufreq_register_driver - register a CPU Frequency driver
2417 * @driver_data: A struct cpufreq_driver containing the values
2418 * submitted by the CPU Frequency driver.
2420 * Registers a CPU Frequency driver to this core code. This code
2421 * returns zero on success, -EEXIST when another driver got here first
2422 * (and isn't unregistered in the meantime).
2425 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2427 unsigned long flags;
2430 if (cpufreq_disabled())
2433 if (!driver_data || !driver_data->verify || !driver_data->init ||
2434 !(driver_data->setpolicy || driver_data->target_index ||
2435 driver_data->target) ||
2436 (driver_data->setpolicy && (driver_data->target_index ||
2437 driver_data->target)) ||
2438 (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
2441 pr_debug("trying to register driver %s\n", driver_data->name);
2443 /* Protect against concurrent CPU online/offline. */
2446 write_lock_irqsave(&cpufreq_driver_lock, flags);
2447 if (cpufreq_driver) {
2448 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2452 cpufreq_driver = driver_data;
2453 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2455 if (driver_data->setpolicy)
2456 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2458 if (cpufreq_boost_supported()) {
2459 ret = create_boost_sysfs_file();
2461 goto err_null_driver;
2464 ret = subsys_interface_register(&cpufreq_interface);
2466 goto err_boost_unreg;
2468 if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2469 list_empty(&cpufreq_policy_list)) {
2470 /* if all ->init() calls failed, unregister */
2471 pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2476 ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpufreq:online",
2477 cpuhp_cpufreq_online,
2478 cpuhp_cpufreq_offline);
2484 pr_debug("driver %s up and running\n", driver_data->name);
2488 subsys_interface_unregister(&cpufreq_interface);
2490 remove_boost_sysfs_file();
2492 write_lock_irqsave(&cpufreq_driver_lock, flags);
2493 cpufreq_driver = NULL;
2494 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2499 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
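/*
 * Illustrative sketch: the minimal shape of a driver that would pass the
 * checks in cpufreq_register_driver() above (->init, ->verify and one of
 * ->target/->target_index/->setpolicy). It reuses the hypothetical
 * example_cpufreq_init() and example_target_index() sketches shown earlier;
 * a real driver lives in its own module and registers from its probe path.
 */
static struct cpufreq_driver example_driver = {
	.name		= "example",
	.flags		= CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
	.init		= example_cpufreq_init,		/* see sketch above */
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= example_target_index,		/* see sketch above */
	.get		= cpufreq_generic_get,
};

/*
 * A platform driver's probe() would then simply do:
 *	return cpufreq_register_driver(&example_driver);
 */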
2502 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2504 * Unregister the current CPUFreq driver. Only call this if you have
2505 * the right to do so, i.e. if you have succeeded in initialising before!
2506 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2507 * currently not initialised.
2509 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2511 unsigned long flags;
2513 if (!cpufreq_driver || (driver != cpufreq_driver))
2516 pr_debug("unregistering driver %s\n", driver->name);
2518 /* Protect against concurrent cpu hotplug */
2520 subsys_interface_unregister(&cpufreq_interface);
2521 remove_boost_sysfs_file();
2522 cpuhp_remove_state_nocalls(hp_online);
2524 write_lock_irqsave(&cpufreq_driver_lock, flags);
2526 cpufreq_driver = NULL;
2528 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2533 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2536 * Stop cpufreq at shutdown to make sure it isn't holding any locks
2537 * or mutexes when secondary CPUs are halted.
2539 static struct syscore_ops cpufreq_syscore_ops = {
2540 .shutdown = cpufreq_suspend,
2543 struct kobject *cpufreq_global_kobject;
2544 EXPORT_SYMBOL(cpufreq_global_kobject);
2546 static int __init cpufreq_core_init(void)
2548 if (cpufreq_disabled())
2551 cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
2552 BUG_ON(!cpufreq_global_kobject);
2554 register_syscore_ops(&cpufreq_syscore_ops);
2558 module_param(off, int, 0444);
2559 core_initcall(cpufreq_core_init);