2 * linux/drivers/cpufreq/cpufreq.c
4 * Copyright (C) 2001 Russell King
9 * Added handling for CPU hotplug
11 * Fix handling for CPU hotplug -- affected CPUs
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/delay.h>
23 #include <linux/device.h>
24 #include <linux/init.h>
25 #include <linux/kernel_stat.h>
26 #include <linux/module.h>
27 #include <linux/mutex.h>
28 #include <linux/slab.h>
29 #include <linux/suspend.h>
30 #include <linux/syscore_ops.h>
31 #include <linux/tick.h>
32 #include <trace/events/power.h>
35 * The "cpufreq driver" - the arch- or hardware-dependent low
36 * level driver of CPUFreq support, and its spinlock. This lock
37 * also protects the cpufreq_cpu_data array.
/* Active low-level driver; NULL until a driver registers. */
39 static struct cpufreq_driver *cpufreq_driver;
/* Per-CPU policy pointers; the fallback copy preserves a policy across a
 * light-weight (suspend/resume) tear-down so it can be restored later. */
40 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
41 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
42 static DEFINE_RWLOCK(cpufreq_driver_lock);
43 static DEFINE_MUTEX(cpufreq_governor_lock);
/* All active policies; additions/removals happen under cpufreq_driver_lock. */
44 static LIST_HEAD(cpufreq_policy_list);
46 #ifdef CONFIG_HOTPLUG_CPU
47 /* This one keeps track of the previously set governor of a removed CPU */
48 static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
51 /* Flag to suspend/resume CPUFreq governors */
52 static bool cpufreq_suspended;
/* True when the driver scales via a governor (->target/->target_index)
 * rather than an internal ->setpolicy implementation. */
54 static inline bool has_target(void)
56 return cpufreq_driver->target_index || cpufreq_driver->target;
60 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
63 static DECLARE_RWSEM(cpufreq_rwsem);
65 /* internal prototypes */
66 static int __cpufreq_governor(struct cpufreq_policy *policy,
68 static unsigned int __cpufreq_get(unsigned int cpu);
69 static void handle_update(struct work_struct *work);
72 * Two notifier lists: the "policy" list is involved in the
73 * validation process for a new CPU frequency policy; the
74 * "transition" list for kernel code that needs to handle
75 * changes to devices when the CPU clock speed changes.
76 * The mutex locks both lists.
78 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
79 static struct srcu_notifier_head cpufreq_transition_notifier_list;
/* SRCU notifier heads need runtime initialization; this flag records
 * that the pure_initcall below has run. */
81 static bool init_cpufreq_transition_notifier_list_called;
82 static int __init init_cpufreq_transition_notifier_list(void)
84 srcu_init_notifier_head(&cpufreq_transition_notifier_list);
85 init_cpufreq_transition_notifier_list_called = true;
88 pure_initcall(init_cpufreq_transition_notifier_list);
/* Global off switch; presumably set by disable_cpufreq() and read by
 * cpufreq_disabled() -- both bodies are elided in this chunk. */
90 static int off __read_mostly;
91 static int cpufreq_disabled(void)
95 void disable_cpufreq(void)
/* Registered governors, serialized by cpufreq_governor_mutex. */
99 static LIST_HEAD(cpufreq_governor_list);
100 static DEFINE_MUTEX(cpufreq_governor_mutex);
/* True when the driver requests an independent governor per policy. */
102 bool have_governor_per_policy(void)
104 return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
106 EXPORT_SYMBOL_GPL(have_governor_per_policy);
/* Parent kobject for governor sysfs entries: the policy's own kobject
 * when governors are per-policy, the global cpufreq kobject otherwise. */
108 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
110 if (have_governor_per_policy())
111 return &policy->kobj;
113 return cpufreq_global_kobject;
115 EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
/*
 * get_cpu_idle_time_jiffy - jiffies/cpustat based idle-time estimate.
 * Idle = wall time minus all busy (user/system/irq/softirq/steal/nice)
 * time; both values are converted to microseconds (*wall out-param).
 */
117 static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
123 cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
125 busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
126 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
127 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
128 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
129 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
130 busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
132 idle_time = cur_wall_time - busy_time;
134 *wall = cputime_to_usecs(cur_wall_time);
136 return cputime_to_usecs(idle_time);
/*
 * get_cpu_idle_time - idle time from the nohz core, falling back to the
 * jiffy-based estimate when the nohz value is unavailable (-1ULL).
 * NOTE(review): the iowait addition appears unconditional here; lines are
 * elided in this chunk -- confirm the !io_busy gating against mainline.
 */
139 u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
141 u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
143 if (idle_time == -1ULL)
144 return get_cpu_idle_time_jiffy(cpu, wall);
146 idle_time += get_cpu_iowait_time_us(cpu, wall);
150 EXPORT_SYMBOL_GPL(get_cpu_idle_time);
153 * This is a generic cpufreq init() routine which can be used by cpufreq
154 * drivers of SMP systems. It will do following:
155 * - validate & show freq table passed
156 * - set policies transition latency
157 * - policy->cpus with all possible CPUs
159 int cpufreq_generic_init(struct cpufreq_policy *policy,
160 struct cpufreq_frequency_table *table,
161 unsigned int transition_latency)
165 ret = cpufreq_table_validate_and_show(policy, table);
/* Bail out if the driver handed us an unusable frequency table. */
167 pr_err("%s: invalid frequency table: %d\n", __func__, ret);
171 policy->cpuinfo.transition_latency = transition_latency;
174 * The driver only supports the SMP configuration where all processors
175 * share the clock and voltage.
177 cpumask_setall(policy->cpus);
181 EXPORT_SYMBOL_GPL(cpufreq_generic_init);
/*
 * cpufreq_cpu_get - look up and pin the policy for a CPU.
 * Takes cpufreq_rwsem for reading (prevents the driver module from
 * unloading) and a reference on the policy kobject; every successful
 * call must be balanced by cpufreq_cpu_put().
 */
183 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
185 struct cpufreq_policy *policy = NULL;
188 if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
191 if (!down_read_trylock(&cpufreq_rwsem))
194 /* get the cpufreq driver */
195 read_lock_irqsave(&cpufreq_driver_lock, flags);
197 if (cpufreq_driver) {
199 policy = per_cpu(cpufreq_cpu_data, cpu);
201 kobject_get(&policy->kobj);
204 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
207 up_read(&cpufreq_rwsem);
211 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
/* Drop the kobject reference and rwsem taken by cpufreq_cpu_get(). */
213 void cpufreq_cpu_put(struct cpufreq_policy *policy)
215 if (cpufreq_disabled())
218 kobject_put(&policy->kobj);
219 up_read(&cpufreq_rwsem);
221 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
223 /*********************************************************************
224 * EXTERNALLY AFFECTING FREQUENCY CHANGES *
225 *********************************************************************/
228 * adjust_jiffies - adjust the system "loops_per_jiffy"
230 * This function alters the system "loops_per_jiffy" for the clock
231 * speed change. Note that loops_per_jiffy cannot be updated on SMP
232 * systems as each CPU might be scaled differently. So, use the arch
233 * per-CPU loops_per_jiffy value wherever possible.
/* Reference lpj/frequency pair, captured on the first transition. */
236 static unsigned long l_p_j_ref;
237 static unsigned int l_p_j_ref_freq;
239 static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
/* Nothing to do when the driver's delay loops are frequency-invariant. */
241 if (ci->flags & CPUFREQ_CONST_LOOPS)
244 if (!l_p_j_ref_freq) {
245 l_p_j_ref = loops_per_jiffy;
246 l_p_j_ref_freq = ci->old;
247 pr_debug("saving %lu as reference value for loops_per_jiffy; "
248 "freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
/* Rescale lpj after an actual change, or on suspend/resume events. */
250 if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
251 (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
252 loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
254 pr_debug("scaling loops_per_jiffy to %lu "
255 "for frequency %u kHz\n", loops_per_jiffy, ci->new);
/* Second definition is the no-op stub for the other configuration
 * (the surrounding #ifdef/#else lines are elided in this chunk). */
259 static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
/*
 * __cpufreq_notify_transition - notify listeners of one CPU's frequency
 * transition (PRECHANGE or POSTCHANGE) and keep loops_per_jiffy and
 * policy->cur in sync. Must be called with interrupts enabled.
 */
265 static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
266 struct cpufreq_freqs *freqs, unsigned int state)
268 BUG_ON(irqs_disabled());
270 if (cpufreq_disabled())
273 freqs->flags = cpufreq_driver->flags;
274 pr_debug("notification %u of frequency transition to %u kHz\n",
279 case CPUFREQ_PRECHANGE:
280 /* detect if the driver reported a value as "old frequency"
281 * which is not equal to what the cpufreq core thinks is
284 if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
285 if ((policy) && (policy->cpu == freqs->cpu) &&
286 (policy->cur) && (policy->cur != freqs->old)) {
287 pr_debug("Warning: CPU frequency is"
288 " %u, cpufreq assumed %u kHz.\n",
289 freqs->old, policy->cur);
/* Trust the core's view of the old frequency over the driver's. */
290 freqs->old = policy->cur;
293 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
294 CPUFREQ_PRECHANGE, freqs);
295 adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
298 case CPUFREQ_POSTCHANGE:
299 adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
300 pr_debug("FREQ: %lu - CPU: %lu", (unsigned long)freqs->new,
301 (unsigned long)freqs->cpu);
302 trace_cpu_frequency(freqs->new, freqs->cpu);
303 srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
304 CPUFREQ_POSTCHANGE, freqs);
/* Record the frequency actually reached. */
305 if (likely(policy) && likely(policy->cpu == freqs->cpu))
306 policy->cur = freqs->new;
312 * cpufreq_notify_transition - call notifier chain and adjust_jiffies
313 * on frequency transition.
315 * This function calls the transition notifiers and the "adjust_jiffies"
316 * function. It is called twice on all CPU frequency changes that have
319 void cpufreq_notify_transition(struct cpufreq_policy *policy,
320 struct cpufreq_freqs *freqs, unsigned int state)
/* Fan out: notify once for every CPU covered by this policy. */
322 for_each_cpu(freqs->cpu, policy->cpus)
323 __cpufreq_notify_transition(policy, freqs, state)
325 EXPORT_SYMBOL_GPL(cpufreq_notify_transition);
328 /*********************************************************************
330 *********************************************************************/
/* Find a registered governor by (case-insensitive) name. Callers hold
 * cpufreq_governor_mutex around the lookup. */
332 static struct cpufreq_governor *__find_governor(const char *str_governor)
334 struct cpufreq_governor *t;
336 list_for_each_entry(t, &cpufreq_governor_list, governor_list)
337 if (!strnicmp(str_governor, t->name, CPUFREQ_NAME_LEN))
344 * cpufreq_parse_governor - parse a governor string
/* For ->setpolicy drivers the string selects a policy constant; for
 * ->target drivers it selects a registered governor, attempting a
 * request_module("cpufreq_%s") when it is not yet registered. */
346 static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
347 struct cpufreq_governor **governor)
354 if (cpufreq_driver->setpolicy) {
355 if (!strnicmp(str_governor, "performance", CPUFREQ_NAME_LEN)) {
356 *policy = CPUFREQ_POLICY_PERFORMANCE;
358 } else if (!strnicmp(str_governor, "powersave",
360 *policy = CPUFREQ_POLICY_POWERSAVE;
363 } else if (has_target()) {
364 struct cpufreq_governor *t;
366 mutex_lock(&cpufreq_governor_mutex);
368 t = __find_governor(str_governor);
/* Drop the mutex while loading the module, then retry the lookup. */
373 mutex_unlock(&cpufreq_governor_mutex);
374 ret = request_module("cpufreq_%s", str_governor);
375 mutex_lock(&cpufreq_governor_mutex);
378 t = __find_governor(str_governor);
386 mutex_unlock(&cpufreq_governor_mutex);
393 * cpufreq_per_cpu_attr_read() / show_##file_name() -
394 * print out cpufreq information
396 * Write out information from cpufreq_driver->policy[cpu]; object must be
/* Generates a sysfs show() helper that prints one policy field. */
400 #define show_one(file_name, object) \
401 static ssize_t show_##file_name \
402 (struct cpufreq_policy *policy, char *buf) \
404 return sprintf(buf, "%u\n", policy->object); \
407 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
408 show_one(cpuinfo_max_freq, cpuinfo.max_freq);
409 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
410 show_one(scaling_min_freq, min);
411 show_one(scaling_max_freq, max);
412 show_one(scaling_cur_freq, cur);
414 static int cpufreq_set_policy(struct cpufreq_policy *policy,
415 struct cpufreq_policy *new_policy);
418 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
/* Generates a sysfs store() helper: parse an unsigned int into a copy of
 * the policy, apply it via cpufreq_set_policy(), and remember the value
 * as the user's requested setting. */
420 #define store_one(file_name, object) \
421 static ssize_t store_##file_name \
422 (struct cpufreq_policy *policy, const char *buf, size_t count) \
425 struct cpufreq_policy new_policy; \
427 ret = cpufreq_get_policy(&new_policy, policy->cpu); \
431 ret = sscanf(buf, "%u", &new_policy.object); \
435 ret = cpufreq_set_policy(policy, &new_policy); \
436 policy->user_policy.object = policy->object; \
438 return ret ? ret : count; \
441 store_one(scaling_min_freq, min);
442 store_one(scaling_max_freq, max);
445 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
447 static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
450 unsigned int cur_freq = __cpufreq_get(policy->cpu);
/* "<unknown>" branch: its condition line is elided in this chunk
 * (presumably !cur_freq -- confirm against mainline). */
452 return sprintf(buf, "<unknown>");
453 return sprintf(buf, "%u\n", cur_freq);
457 * show_scaling_governor - show the current policy for the specified CPU
459 static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
461 if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
462 return sprintf(buf, "powersave\n");
463 else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
464 return sprintf(buf, "performance\n");
465 else if (policy->governor)
466 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
467 policy->governor->name);
472 * store_scaling_governor - store policy for the specified CPU
474 static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
475 const char *buf, size_t count)
478 char str_governor[16];
479 struct cpufreq_policy new_policy;
481 ret = cpufreq_get_policy(&new_policy, policy->cpu);
/* %15s bounds the read to str_governor's 16-byte buffer. */
485 ret = sscanf(buf, "%15s", str_governor);
489 if (cpufreq_parse_governor(str_governor, &new_policy.policy,
490 &new_policy.governor))
493 ret = cpufreq_set_policy(policy, &new_policy);
/* Remember what the user asked for, for later re-evaluation. */
495 policy->user_policy.policy = policy->policy;
496 policy->user_policy.governor = policy->governor;
505 * show_scaling_driver - show the cpufreq driver currently loaded
507 static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
509 return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
513 * show_scaling_available_governors - show the available CPUfreq governors
/* For ->setpolicy drivers only the two builtin policies exist; otherwise
 * list registered governors, stopping before the PAGE_SIZE buffer fills. */
515 static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
519 struct cpufreq_governor *t;
522 i += sprintf(buf, "performance powersave");
526 list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
527 if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
528 - (CPUFREQ_NAME_LEN + 2)))
530 i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
533 i += sprintf(&buf[i], "\n");
/* Render a cpumask as a space-separated list of CPU ids, bounded to one
 * page; used by the affected_cpus/related_cpus attributes. */
537 ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
542 for_each_cpu(cpu, mask) {
544 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
545 i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
546 if (i >= (PAGE_SIZE - 5))
549 i += sprintf(&buf[i], "\n");
552 EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
555 * show_related_cpus - show the CPUs affected by each transition even if
556 * hw coordination is in use
558 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
560 return cpufreq_show_cpus(policy->related_cpus, buf);
564 * show_affected_cpus - show the CPUs affected by each transition
566 static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
568 return cpufreq_show_cpus(policy->cpus, buf);
/* Forward a user-requested speed to the governor's store_setspeed hook;
 * rejected when the current governor provides no such hook. */
571 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
572 const char *buf, size_t count)
574 unsigned int freq = 0;
577 if (!policy->governor || !policy->governor->store_setspeed)
580 ret = sscanf(buf, "%u", &freq);
584 policy->governor->store_setspeed(policy, freq);
/* Read back the governor's setspeed value, if the hook exists. */
589 static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
591 if (!policy->governor || !policy->governor->show_setspeed)
592 return sprintf(buf, "<unsupported>\n");
594 return policy->governor->show_setspeed(policy, buf);
598 * show_bios_limit - show the current cpufreq HW/BIOS limitation
/* Falls back to cpuinfo.max_freq when the driver has no bios_limit hook
 * (the hook's error check line is elided in this chunk). */
600 static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
604 if (cpufreq_driver->bios_limit) {
605 ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
607 return sprintf(buf, "%u\n", limit);
609 return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
/* sysfs attribute objects; _ro/_rw selects read-only vs read-write, and
 * cpuinfo_cur_freq is root-readable only (0400). */
612 cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
613 cpufreq_freq_attr_ro(cpuinfo_min_freq);
614 cpufreq_freq_attr_ro(cpuinfo_max_freq);
615 cpufreq_freq_attr_ro(cpuinfo_transition_latency);
616 cpufreq_freq_attr_ro(scaling_available_governors);
617 cpufreq_freq_attr_ro(scaling_driver);
618 cpufreq_freq_attr_ro(scaling_cur_freq);
619 cpufreq_freq_attr_ro(bios_limit);
620 cpufreq_freq_attr_ro(related_cpus);
621 cpufreq_freq_attr_ro(affected_cpus);
622 cpufreq_freq_attr_rw(scaling_min_freq);
623 cpufreq_freq_attr_rw(scaling_max_freq);
624 cpufreq_freq_attr_rw(scaling_governor);
625 cpufreq_freq_attr_rw(scaling_setspeed);
/* Attributes created for every policy kobject by ktype_cpufreq. */
627 static struct attribute *default_attrs[] = {
628 &cpuinfo_min_freq.attr,
629 &cpuinfo_max_freq.attr,
630 &cpuinfo_transition_latency.attr,
631 &scaling_min_freq.attr,
632 &scaling_max_freq.attr,
635 &scaling_governor.attr,
636 &scaling_driver.attr,
637 &scaling_available_governors.attr,
638 &scaling_setspeed.attr,
/* Map a policy kobject / freq attribute back to its container. */
642 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
643 #define to_attr(a) container_of(a, struct freq_attr, attr)
/* Generic sysfs read: pin the driver (cpufreq_rwsem) and the policy
 * (read lock on policy->rwsem), then dispatch to the attribute's show(). */
645 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
647 struct cpufreq_policy *policy = to_policy(kobj);
648 struct freq_attr *fattr = to_attr(attr);
651 if (!down_read_trylock(&cpufreq_rwsem))
654 down_read(&policy->rwsem);
657 ret = fattr->show(policy, buf);
661 up_read(&policy->rwsem);
662 up_read(&cpufreq_rwsem);
/* Generic sysfs write: like show(), but takes policy->rwsem for writing
 * and rejects writes while the policy's CPU is offline. */
667 static ssize_t store(struct kobject *kobj, struct attribute *attr,
668 const char *buf, size_t count)
670 struct cpufreq_policy *policy = to_policy(kobj);
671 struct freq_attr *fattr = to_attr(attr);
672 ssize_t ret = -EINVAL;
676 if (!cpu_online(policy->cpu))
679 if (!down_read_trylock(&cpufreq_rwsem))
682 down_write(&policy->rwsem);
685 ret = fattr->store(policy, buf, count);
689 up_write(&policy->rwsem);
691 up_read(&cpufreq_rwsem);
/* kobject release: wake whoever waits on kobj_unregister for the last
 * reference to drop (see cpufreq_add_dev_interface's error path and
 * __cpufreq_remove_dev_finish). */
698 static void cpufreq_sysfs_release(struct kobject *kobj)
700 struct cpufreq_policy *policy = to_policy(kobj);
701 pr_debug("last reference is dropped\n");
702 complete(&policy->kobj_unregister);
/* sysfs ops and kobject type backing every policy's "cpufreq" directory. */
705 static const struct sysfs_ops sysfs_ops = {
710 static struct kobj_type ktype_cpufreq = {
711 .sysfs_ops = &sysfs_ops,
712 .default_attrs = default_attrs,
713 .release = cpufreq_sysfs_release,
716 struct kobject *cpufreq_global_kobject;
717 EXPORT_SYMBOL(cpufreq_global_kobject);
/* Usage count for the global kobject: added under the cpu subsys root on
 * the first get, deleted again on the last put. */
719 static int cpufreq_global_kobject_usage;
721 int cpufreq_get_global_kobject(void)
723 if (!cpufreq_global_kobject_usage++)
724 return kobject_add(cpufreq_global_kobject,
725 &cpu_subsys.dev_root->kobj, "%s", "cpufreq");
729 EXPORT_SYMBOL(cpufreq_get_global_kobject);
731 void cpufreq_put_global_kobject(void)
733 if (!--cpufreq_global_kobject_usage)
734 kobject_del(cpufreq_global_kobject);
736 EXPORT_SYMBOL(cpufreq_put_global_kobject);
/* Create a file under the global kobject; drops the usage reference
 * again if file creation fails. */
738 int cpufreq_sysfs_create_file(const struct attribute *attr)
740 int ret = cpufreq_get_global_kobject();
743 ret = sysfs_create_file(cpufreq_global_kobject, attr);
745 cpufreq_put_global_kobject();
750 EXPORT_SYMBOL(cpufreq_sysfs_create_file);
/* Counterpart of cpufreq_sysfs_create_file(). */
752 void cpufreq_sysfs_remove_file(const struct attribute *attr)
754 sysfs_remove_file(cpufreq_global_kobject, attr);
755 cpufreq_put_global_kobject();
757 EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
759 /* symlink affected CPUs */
/* Create a "cpufreq" symlink for every sibling CPU managed by this
 * policy; the owner CPU (policy->cpu) has the real directory, not a link. */
760 static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
765 for_each_cpu(j, policy->cpus) {
766 struct device *cpu_dev;
768 if (j == policy->cpu)
771 pr_debug("Adding link for CPU: %u\n", j);
772 cpu_dev = get_cpu_device(j);
773 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
/* Build the policy's sysfs tree: the kobject itself, driver-specific
 * attributes, conditional core attributes, and sibling symlinks. */
781 static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
784 struct freq_attr **drv_attr;
787 /* prepare interface data */
788 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
789 &dev->kobj, "cpufreq");
793 /* set up files for this cpu device */
794 drv_attr = cpufreq_driver->attr;
795 while ((drv_attr) && (*drv_attr)) {
796 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
798 goto err_out_kobj_put;
/* cpuinfo_cur_freq only makes sense when the driver can read hardware. */
801 if (cpufreq_driver->get) {
802 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
804 goto err_out_kobj_put;
807 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
809 goto err_out_kobj_put;
811 if (cpufreq_driver->bios_limit) {
812 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
814 goto err_out_kobj_put;
817 ret = cpufreq_add_dev_symlink(policy);
819 goto err_out_kobj_put;
/* Error path: dropping the kobject fires cpufreq_sysfs_release(), which
 * completes kobj_unregister -- wait for that before returning. */
824 kobject_put(&policy->kobj);
825 wait_for_completion(&policy->kobj_unregister);
/* Apply the default policy/governor to a freshly initialized policy;
 * on failure, undo the driver's ->init() via ->exit(). */
829 static void cpufreq_init_policy(struct cpufreq_policy *policy)
831 struct cpufreq_policy new_policy;
834 memcpy(&new_policy, policy, sizeof(*policy));
835 /* assure that the starting sequence is run in cpufreq_set_policy */
836 policy->governor = NULL;
838 /* set default policy */
839 ret = cpufreq_set_policy(policy, &new_policy);
840 policy->user_policy.policy = policy->policy;
841 policy->user_policy.governor = policy->governor;
844 pr_debug("setting policy failed\n");
845 if (cpufreq_driver->exit)
846 cpufreq_driver->exit(policy);
850 #ifdef CONFIG_HOTPLUG_CPU
/* Attach an onlining CPU to an existing sibling policy: stop the
 * governor, add the CPU to policy->cpus and the per-cpu map, restart
 * governor + limits, and (outside light-weight init) create the CPU's
 * "cpufreq" sysfs link. */
851 static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
852 unsigned int cpu, struct device *dev,
859 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
861 pr_err("%s: Failed to stop governor\n", __func__);
866 down_write(&policy->rwsem);
868 write_lock_irqsave(&cpufreq_driver_lock, flags);
870 cpumask_set_cpu(cpu, policy->cpus);
871 per_cpu(cpufreq_cpu_data, cpu) = policy;
872 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
874 up_write(&policy->rwsem);
877 if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
878 (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
879 pr_err("%s: Failed to start governor\n", __func__);
884 /* Don't touch sysfs links during light-weight init */
886 ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
/* Retrieve the policy stashed in the fallback slot by
 * __cpufreq_remove_dev_prepare() during a light-weight tear-down. */
892 static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
894 struct cpufreq_policy *policy;
897 read_lock_irqsave(&cpufreq_driver_lock, flags);
899 policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
901 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
/* Allocate a zeroed policy plus its cpumasks; on failure, partial
 * allocations are released via the goto labels and NULL is returned. */
906 static struct cpufreq_policy *cpufreq_policy_alloc(void)
908 struct cpufreq_policy *policy;
910 policy = kzalloc(sizeof(*policy), GFP_KERNEL);
914 if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
915 goto err_free_policy;
917 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
918 goto err_free_cpumask;
920 INIT_LIST_HEAD(&policy->policy_list);
921 init_rwsem(&policy->rwsem);
926 free_cpumask_var(policy->cpus);
/* Counterpart of cpufreq_policy_alloc(). */
933 static void cpufreq_policy_free(struct cpufreq_policy *policy)
935 free_cpumask_var(policy->related_cpus);
936 free_cpumask_var(policy->cpus);
/* Transfer policy ownership to another CPU (e.g. when the current owner
 * goes offline) and notify listeners so per-policy state can follow. */
940 static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
942 if (WARN_ON(cpu == policy->cpu))
945 down_write(&policy->rwsem);
947 policy->last_cpu = policy->cpu;
950 up_write(&policy->rwsem);
952 cpufreq_frequency_table_update_policy_cpu(policy);
953 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
954 CPUFREQ_UPDATE_POLICY_CPU, policy);
/*
 * __cpufreq_add_dev - bring up cpufreq for one CPU device.
 * Either joins an existing sibling policy (hotplug path), restores a
 * saved policy (light-weight/resume path, "frozen"), or allocates and
 * fully initializes a new one: driver ->init(), sysfs interface,
 * policy list insertion, and default policy/governor.
 */
957 static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
960 unsigned int j, cpu = dev->id;
962 struct cpufreq_policy *policy;
964 #ifdef CONFIG_HOTPLUG_CPU
965 struct cpufreq_policy *tpolicy;
966 struct cpufreq_governor *gov;
969 if (cpu_is_offline(cpu))
972 pr_debug("adding CPU %u\n", cpu);
975 /* check whether a different CPU already registered this
976 * CPU because it is in the same boat. */
977 policy = cpufreq_cpu_get(cpu);
978 if (unlikely(policy)) {
979 cpufreq_cpu_put(policy);
984 if (!down_read_trylock(&cpufreq_rwsem))
987 #ifdef CONFIG_HOTPLUG_CPU
988 /* Check if this cpu was hot-unplugged earlier and has siblings */
989 read_lock_irqsave(&cpufreq_driver_lock, flags);
990 list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
991 if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
992 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
993 ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen);
994 up_read(&cpufreq_rwsem);
998 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1002 /* Restore the saved policy when doing light-weight init */
1003 policy = cpufreq_policy_restore(cpu);
1005 policy = cpufreq_policy_alloc();
1012 * In the resume path, since we restore a saved policy, the assignment
1013 * to policy->cpu is like an update of the existing policy, rather than
1014 * the creation of a brand new one. So we need to perform this update
1015 * by invoking update_policy_cpu().
1017 if (frozen && cpu != policy->cpu)
1018 update_policy_cpu(policy, cpu);
1022 policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
1023 cpumask_copy(policy->cpus, cpumask_of(cpu));
1025 init_completion(&policy->kobj_unregister);
1026 INIT_WORK(&policy->update, handle_update);
1028 /* call driver. From then on the cpufreq must be able
1029 * to accept all calls to ->verify and ->setpolicy for this CPU
1031 ret = cpufreq_driver->init(policy);
1033 pr_debug("initialization failed\n");
1034 goto err_set_policy_cpu;
/* Seed policy->cur from hardware when the driver can report it. */
1037 if (cpufreq_driver->get) {
1038 policy->cur = cpufreq_driver->get(policy->cpu);
1040 pr_err("%s: ->get() failed\n", __func__);
1045 /* related cpus should atleast have policy->cpus */
1046 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1049 * affected cpus must always be the one, which are online. We aren't
1050 * managing offline cpus here.
1052 cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1054 policy->user_policy.min = policy->min;
1055 policy->user_policy.max = policy->max;
1057 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1058 CPUFREQ_START, policy);
1060 #ifdef CONFIG_HOTPLUG_CPU
/* Re-use the governor this CPU had before it was last unplugged. */
1061 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
1063 policy->governor = gov;
1064 pr_debug("Restoring governor %s for cpu %d\n",
1065 policy->governor->name, cpu);
/* Publish the policy for all covered CPUs before creating sysfs. */
1069 write_lock_irqsave(&cpufreq_driver_lock, flags);
1070 for_each_cpu(j, policy->cpus)
1071 per_cpu(cpufreq_cpu_data, j) = policy;
1072 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1075 ret = cpufreq_add_dev_interface(policy, dev);
1077 goto err_out_unregister;
1080 write_lock_irqsave(&cpufreq_driver_lock, flags);
1081 list_add(&policy->policy_list, &cpufreq_policy_list);
1082 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1084 cpufreq_init_policy(policy);
1086 kobject_uevent(&policy->kobj, KOBJ_ADD);
1087 up_read(&cpufreq_rwsem);
1089 pr_debug("initialization complete\n");
/* Error unwinding: unpublish per-cpu pointers, driver ->exit(), free. */
1094 write_lock_irqsave(&cpufreq_driver_lock, flags);
1095 for_each_cpu(j, policy->cpus)
1096 per_cpu(cpufreq_cpu_data, j) = NULL;
1097 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1100 if (cpufreq_driver->exit)
1101 cpufreq_driver->exit(policy);
1103 cpufreq_policy_free(policy);
1105 up_read(&cpufreq_rwsem);
1111 * cpufreq_add_dev - add a CPU device
1113 * Adds the cpufreq interface for a CPU device.
1115 * The Oracle says: try running cpufreq registration/unregistration concurrently
1116 * with cpu hotplugging and all hell will break loose. Tried to clean this
1117 * mess up, but more thorough testing is needed. - Mathieu
/* subsys_interface add hook: full (non-frozen) initialization. */
1119 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1121 return __cpufreq_add_dev(dev, sif, false);
/* Hand policy ownership (the real sysfs directory) to another CPU in
 * policy->cpus when old_cpu goes away; returns the chosen CPU. Sysfs is
 * left untouched during light-weight (frozen) tear-down. */
1124 static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
1125 unsigned int old_cpu, bool frozen)
1127 struct device *cpu_dev;
1130 /* first sibling now owns the new sysfs dir */
1131 cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
1133 /* Don't touch sysfs files during light-weight tear-down */
1137 sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
1138 ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
1140 pr_err("%s: Failed to move kobj: %d", __func__, ret);
/* Move failed: put old_cpu back into the policy and restore the link. */
1142 down_write(&policy->rwsem);
1143 cpumask_set_cpu(old_cpu, policy->cpus);
1144 up_write(&policy->rwsem);
1146 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
/*
 * __cpufreq_remove_dev_prepare - first half of CPU removal: detach the
 * per-cpu policy pointer (saving it to the fallback slot on a
 * light-weight tear-down), stop the governor, remember the governor
 * name for a later re-add, and re-home the policy if this CPU owned it.
 */
1155 static int __cpufreq_remove_dev_prepare(struct device *dev,
1156 struct subsys_interface *sif,
1159 unsigned int cpu = dev->id, cpus;
1161 unsigned long flags;
1162 struct cpufreq_policy *policy;
1164 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1166 write_lock_irqsave(&cpufreq_driver_lock, flags);
1168 policy = per_cpu(cpufreq_cpu_data, cpu);
1170 /* Save the policy somewhere when doing a light-weight tear-down */
1172 per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
1174 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1177 pr_debug("%s: No cpu_data found\n", __func__);
1182 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1184 pr_err("%s: Failed to stop governor\n", __func__);
1189 #ifdef CONFIG_HOTPLUG_CPU
/* Remember the governor so a re-added CPU gets it back. */
1190 if (!cpufreq_driver->setpolicy)
1191 strncpy(per_cpu(cpufreq_cpu_governor, cpu),
1192 policy->governor->name, CPUFREQ_NAME_LEN);
1195 down_read(&policy->rwsem);
1196 cpus = cpumask_weight(policy->cpus);
1197 up_read(&policy->rwsem);
/* Non-owner CPUs only have a symlink; an owner with siblings must hand
 * the policy (and its sysfs directory) to one of them. */
1199 if (cpu != policy->cpu) {
1201 sysfs_remove_link(&dev->kobj, "cpufreq");
1202 } else if (cpus > 1) {
1203 new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
1205 update_policy_cpu(policy, new_cpu);
1208 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1209 __func__, new_cpu, cpu);
/*
 * __cpufreq_remove_dev_finish - second half of CPU removal: drop the CPU
 * from policy->cpus. If it was the last CPU, exit the governor, wait for
 * the kobject refcount to drain, call the driver's ->exit(), unlink the
 * policy and (unless this is a light-weight tear-down) free it;
 * otherwise just restart the governor for the remaining CPUs.
 */
1217 static int __cpufreq_remove_dev_finish(struct device *dev,
1218 struct subsys_interface *sif,
1221 unsigned int cpu = dev->id, cpus;
1223 unsigned long flags;
1224 struct cpufreq_policy *policy;
1225 struct kobject *kobj;
1226 struct completion *cmp;
1228 read_lock_irqsave(&cpufreq_driver_lock, flags);
1229 policy = per_cpu(cpufreq_cpu_data, cpu);
1230 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1233 pr_debug("%s: No cpu_data found\n", __func__);
1237 down_write(&policy->rwsem);
1238 cpus = cpumask_weight(policy->cpus);
1241 cpumask_clear_cpu(cpu, policy->cpus);
1242 up_write(&policy->rwsem);
1244 /* If cpu is last user of policy, free policy */
1247 ret = __cpufreq_governor(policy,
1248 CPUFREQ_GOV_POLICY_EXIT);
1250 pr_err("%s: Failed to exit governor\n",
/* Snapshot kobj/cmp under the lock; used after the put below. */
1257 down_read(&policy->rwsem);
1258 kobj = &policy->kobj;
1259 cmp = &policy->kobj_unregister;
1260 up_read(&policy->rwsem);
1264 * We need to make sure that the underlying kobj is
1265 * actually not referenced anymore by anybody before we
1266 * proceed with unloading.
1268 pr_debug("waiting for dropping of refcount\n");
1269 wait_for_completion(cmp);
1270 pr_debug("wait complete\n");
1274 * Perform the ->exit() even during light-weight tear-down,
1275 * since this is a core component, and is essential for the
1276 * subsequent light-weight ->init() to succeed.
1278 if (cpufreq_driver->exit)
1279 cpufreq_driver->exit(policy);
1281 /* Remove policy from list of active policies */
1282 write_lock_irqsave(&cpufreq_driver_lock, flags);
1283 list_del(&policy->policy_list);
1284 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1287 cpufreq_policy_free(policy);
/* Siblings remain: restart the governor and refresh its limits. */
1290 if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
1291 (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
1292 pr_err("%s: Failed to start governor\n",
1299 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1304 * cpufreq_remove_dev - remove a CPU device
1306 * Removes the cpufreq interface for a CPU device.
/* subsys_interface remove hook: full (non-frozen) two-phase tear-down. */
1308 static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1310 unsigned int cpu = dev->id;
1313 if (cpu_is_offline(cpu))
1316 ret = __cpufreq_remove_dev_prepare(dev, sif, false);
1319 ret = __cpufreq_remove_dev_finish(dev, sif, false);
/* Deferred work handler: re-evaluate the policy of a CPU whose
 * frequency was found to be out of sync (see cpufreq_out_of_sync). */
1324 static void handle_update(struct work_struct *work)
1326 struct cpufreq_policy *policy =
1327 container_of(work, struct cpufreq_policy, update);
1328 unsigned int cpu = policy->cpu;
1329 pr_debug("handle_update for cpu %u called\n", cpu);
1330 cpufreq_update_policy(cpu);
1334 * cpufreq_out_of_sync - If actual and saved CPU frequency differs, we're
1337 * @old_freq: CPU frequency the kernel thinks the CPU runs at
1338 * @new_freq: CPU frequency the CPU actually runs at
1340 * We adjust to current frequency first, and need to clean up later.
1341 * So either call to cpufreq_update_policy() or schedule handle_update()).
1343 static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
1344 unsigned int new_freq)
1346 struct cpufreq_policy *policy;
1347 struct cpufreq_freqs freqs;
1348 unsigned long flags;
1350 pr_debug("Warning: CPU frequency out of sync: cpufreq and timing "
1351 "core thinks of %u, is %u kHz.\n", old_freq, new_freq);
1353 freqs.old = old_freq;
1354 freqs.new = new_freq;
1356 read_lock_irqsave(&cpufreq_driver_lock, flags);
1357 policy = per_cpu(cpufreq_cpu_data, cpu);
1358 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
/* Emit a synthetic old->new transition so listeners and policy->cur
 * catch up with reality. */
1360 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
1361 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
1365 * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1368 * This is the last known freq, without actually getting it from the driver.
1369 * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1371 unsigned int cpufreq_quick_get(unsigned int cpu)
1373 struct cpufreq_policy *policy;
1374 unsigned int ret_freq = 0;
/*
 * setpolicy drivers manage the frequency themselves, so policy->cur is
 * not authoritative; ask the hardware directly via ->get() when possible.
 */
1376 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
1377 return cpufreq_driver->get(cpu);
/* cpufreq_cpu_get() takes a reference; it is dropped below via _put(). */
1379 policy = cpufreq_cpu_get(cpu);
1381 ret_freq = policy->cur;
1382 cpufreq_cpu_put(policy);
1387 EXPORT_SYMBOL(cpufreq_quick_get);
1390 * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1393 * Just return the max possible frequency for a given CPU.
1395 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1397 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
/* 0 is returned when no policy exists for @cpu. */
1398 unsigned int ret_freq = 0;
1401 ret_freq = policy->max;
1402 cpufreq_cpu_put(policy);
1407 EXPORT_SYMBOL(cpufreq_quick_get_max);
/*
 * __cpufreq_get - read the current frequency from the driver and repair
 * any discrepancy with the cached policy->cur.
 * Caller holds policy->rwsem (see cpufreq_get()).
 */
1409 static unsigned int __cpufreq_get(unsigned int cpu)
1411 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1412 unsigned int ret_freq = 0;
/* Drivers without a ->get() callback cannot report a frequency. */
1414 if (!cpufreq_driver->get)
1417 ret_freq = cpufreq_driver->get(cpu);
/* CONST_LOOPS drivers never change loops_per_jiffy, so no resync needed. */
1419 if (ret_freq && policy->cur &&
1420 !(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
1421 /* verify no discrepancy between actual and
1422 saved value exists */
1423 if (unlikely(ret_freq != policy->cur)) {
1424 cpufreq_out_of_sync(cpu, policy->cur, ret_freq);
/* Defer the full policy re-evaluation to handle_update(). */
1425 schedule_work(&policy->update);
1433 * cpufreq_get - get the current CPU frequency (in kHz)
1436 * Get the CPU current (static) CPU frequency
1438 unsigned int cpufreq_get(unsigned int cpu)
1440 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1441 unsigned int ret_freq = 0;
1443 if (cpufreq_disabled() || !cpufreq_driver)
/* Pin the driver module: cpufreq_rwsem blocks driver unregistration. */
1448 if (!down_read_trylock(&cpufreq_rwsem))
/* policy->rwsem serializes against policy updates while we read. */
1451 down_read(&policy->rwsem);
1453 ret_freq = __cpufreq_get(cpu);
1455 up_read(&policy->rwsem);
1456 up_read(&cpufreq_rwsem);
1460 EXPORT_SYMBOL(cpufreq_get);
/* Hooks cpufreq into the cpu subsystem: called for each CPU device add/remove. */
1462 static struct subsys_interface cpufreq_interface = {
1464 .subsys = &cpu_subsys,
1465 .add_dev = cpufreq_add_dev,
1466 .remove_dev = cpufreq_remove_dev,
/*
 * cpufreq_suspend - stop all governors and mark cpufreq suspended so that
 * __cpufreq_governor() refuses further governor operations until resume.
 */
1469 void cpufreq_suspend(void)
1471 struct cpufreq_policy *policy;
1476 pr_debug("%s: Suspending Governors\n", __func__);
1478 list_for_each_entry(policy, &cpufreq_policy_list, policy_list)
1479 if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
1480 pr_err("%s: Failed to stop governor for policy: %p\n",
/* Set last: GOV_STOP above must still be allowed through. */
1483 cpufreq_suspended = true;
/*
 * cpufreq_resume - clear the suspended flag and restart every governor,
 * re-applying policy limits (GOV_START followed by GOV_LIMITS).
 */
1486 void cpufreq_resume(void)
1488 struct cpufreq_policy *policy;
1493 pr_debug("%s: Resuming Governors\n", __func__);
/* Clear first: __cpufreq_governor() bails out while suspended. */
1495 cpufreq_suspended = false;
1497 list_for_each_entry(policy, &cpufreq_policy_list, policy_list)
1498 if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
1499 || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
1500 pr_err("%s: Failed to start governor for policy: %p\n",
1505 * cpufreq_bp_suspend - Prepare the boot CPU for system suspend.
1507 * This function is only executed for the boot processor. The other CPUs
1508 * have been put offline by means of CPU hotplug.
1510 static int cpufreq_bp_suspend(void)
/* syscore op: runs with only the boot CPU online, interrupts disabled. */
1514 int cpu = smp_processor_id();
1515 struct cpufreq_policy *policy;
1517 pr_debug("suspending cpu %u\n", cpu);
1519 /* If there's no policy for the boot CPU, we have nothing to do. */
1520 policy = cpufreq_cpu_get(cpu);
/* The ->suspend() driver callback is optional. */
1524 if (cpufreq_driver->suspend) {
1525 ret = cpufreq_driver->suspend(policy);
1527 printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
1528 "step on CPU %u\n", policy->cpu);
/* Drop the reference taken by cpufreq_cpu_get() above. */
1531 cpufreq_cpu_put(policy);
1536 * cpufreq_bp_resume - Restore proper frequency handling of the boot CPU.
1538 * 1.) resume CPUfreq hardware support (cpufreq_driver->resume())
1539 * 2.) schedule call cpufreq_update_policy() ASAP as interrupts are
1540 * restored. It will verify that the current freq is in sync with
1541 * what we believe it to be. This is a bit later than when it
1542 * should be, but nonetheless it's better than calling
1543 * cpufreq_driver->get() here which might re-enable interrupts...
1545 * This function is only executed for the boot CPU. The other CPUs have not
1546 * been turned on yet.
1548 static void cpufreq_bp_resume(void)
1552 int cpu = smp_processor_id();
1553 struct cpufreq_policy *policy;
1555 pr_debug("resuming cpu %u\n", cpu);
1557 /* If there's no policy for the boot CPU, we have nothing to do. */
1558 policy = cpufreq_cpu_get(cpu);
/* The ->resume() driver callback is optional. */
1562 if (cpufreq_driver->resume) {
1563 ret = cpufreq_driver->resume(policy);
1565 printk(KERN_ERR "cpufreq: resume failed in ->resume "
1566 "step on CPU %u\n", policy->cpu);
/* Defer the freq-sync check to handle_update() (step 2 above). */
1571 schedule_work(&policy->update);
1574 cpufreq_cpu_put(policy);
/* Registered in cpufreq_core_init(); handles boot-CPU suspend/resume. */
1577 static struct syscore_ops cpufreq_syscore_ops = {
1578 .suspend = cpufreq_bp_suspend,
1579 .resume = cpufreq_bp_resume,
1583 * cpufreq_get_current_driver - return current driver's name
1585 * Return the name string of the currently loaded cpufreq driver
/* NOTE(review): elided lines presumably return NULL when no driver is set. */
1588 const char *cpufreq_get_current_driver(void)
1591 return cpufreq_driver->name;
1595 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1597 /*********************************************************************
1598 * NOTIFIER LISTS INTERFACE *
1599 *********************************************************************/
1602 * cpufreq_register_notifier - register a driver with cpufreq
1603 * @nb: notifier function to register
1604 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1606 * Add a driver to one of two lists: either a list of drivers that
1607 * are notified about clock rate changes (once before and once after
1608 * the transition), or a list of drivers that are notified about
1609 * changes in cpufreq policy.
1611 * This function may sleep, and has the same return conditions as
1612 * blocking_notifier_chain_register.
1614 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1618 if (cpufreq_disabled())
1621 WARN_ON(!init_cpufreq_transition_notifier_list_called);
/* Transition notifiers use an SRCU chain; policy notifiers a blocking one. */
1624 case CPUFREQ_TRANSITION_NOTIFIER:
1625 ret = srcu_notifier_chain_register(
1626 &cpufreq_transition_notifier_list, nb);
1628 case CPUFREQ_POLICY_NOTIFIER:
1629 ret = blocking_notifier_chain_register(
1630 &cpufreq_policy_notifier_list, nb);
1638 EXPORT_SYMBOL(cpufreq_register_notifier);
1641 * cpufreq_unregister_notifier - unregister a driver with cpufreq
1642 * @nb: notifier block to be unregistered
1643 * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1645 * Remove a driver from the CPU frequency notifier list.
1647 * This function may sleep, and has the same return conditions as
1648 * blocking_notifier_chain_unregister.
1650 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
1654 if (cpufreq_disabled())
/* Mirror of cpufreq_register_notifier(): pick the matching chain. */
1658 case CPUFREQ_TRANSITION_NOTIFIER:
1659 ret = srcu_notifier_chain_unregister(
1660 &cpufreq_transition_notifier_list, nb);
1662 case CPUFREQ_POLICY_NOTIFIER:
1663 ret = blocking_notifier_chain_unregister(
1664 &cpufreq_policy_notifier_list, nb);
1672 EXPORT_SYMBOL(cpufreq_unregister_notifier);
1675 /*********************************************************************
1677 *********************************************************************/
/*
 * __cpufreq_driver_target - clamp @target_freq to the policy limits and
 * ask the driver to switch to it.  Caller holds policy->rwsem (see
 * cpufreq_driver_target()).  Drivers implement either ->target() (legacy,
 * handles notification itself) or ->target_index() (table-based; the
 * PRE/POSTCHANGE notifications are issued here unless the driver is
 * CPUFREQ_ASYNC_NOTIFICATION).
 */
1679 int __cpufreq_driver_target(struct cpufreq_policy *policy,
1680 unsigned int target_freq,
1681 unsigned int relation)
1683 int retval = -EINVAL;
1684 unsigned int old_target_freq = target_freq;
1686 if (cpufreq_disabled())
1689 /* Make sure that target_freq is within supported range */
1690 if (target_freq > policy->max)
1691 target_freq = policy->max;
1692 if (target_freq < policy->min)
1693 target_freq = policy->min;
1695 pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
1696 policy->cpu, target_freq, relation, old_target_freq);
1699 * This might look like a redundant call as we are checking it again
1700 * after finding index. But it is left intentionally for cases where
1701 * exactly same freq is called again and so we can save on few function
1704 if (target_freq == policy->cur)
1707 if (cpufreq_driver->target)
1708 retval = cpufreq_driver->target(policy, target_freq, relation);
1709 else if (cpufreq_driver->target_index) {
1710 struct cpufreq_frequency_table *freq_table;
1711 struct cpufreq_freqs freqs;
1715 freq_table = cpufreq_frequency_get_table(policy->cpu);
1716 if (unlikely(!freq_table)) {
1717 pr_err("%s: Unable to find freq_table\n", __func__);
/* Map (target_freq, relation) to a table index. */
1721 retval = cpufreq_frequency_table_target(policy, freq_table,
1722 target_freq, relation, &index);
1723 if (unlikely(retval)) {
1724 pr_err("%s: Unable to find matching freq\n", __func__);
/* Second same-freq check, now against the resolved table entry. */
1728 if (freq_table[index].frequency == policy->cur) {
/* Async-notification drivers send PRE/POSTCHANGE themselves. */
1733 notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
1736 freqs.old = policy->cur;
1737 freqs.new = freq_table[index].frequency;
1740 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
1741 __func__, policy->cpu, freqs.old,
1744 cpufreq_notify_transition(policy, &freqs,
1748 retval = cpufreq_driver->target_index(policy, index);
1750 pr_err("%s: Failed to change cpu frequency: %d\n",
1755 * Notify with old freq in case we failed to change
/* On failure, report new == old so notifiers see no net change. */
1759 freqs.new = freqs.old;
1761 cpufreq_notify_transition(policy, &freqs,
1762 CPUFREQ_POSTCHANGE);
1769 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
/*
 * cpufreq_driver_target - locked wrapper around __cpufreq_driver_target():
 * takes policy->rwsem for writing around the frequency change.
 */
1771 int cpufreq_driver_target(struct cpufreq_policy *policy,
1772 unsigned int target_freq,
1773 unsigned int relation)
1777 down_write(&policy->rwsem);
1779 ret = __cpufreq_driver_target(policy, target_freq, relation);
1781 up_write(&policy->rwsem);
1785 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
/*
 * __cpufreq_governor - dispatch a governor event (INIT/EXIT/START/STOP/
 * LIMITS) to policy->governor->governor(), with latency fallback, module
 * refcounting, and the governor_enabled state machine.
 */
1788 * when "event" is CPUFREQ_GOV_LIMITS
1791 static int __cpufreq_governor(struct cpufreq_policy *policy,
1796 /* Only must be defined when default governor is known to have latency
1797 restrictions, like e.g. conservative or ondemand.
1798 That this is the case is already ensured in Kconfig
1800 #ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
1801 struct cpufreq_governor *gov = &cpufreq_gov_performance;
1803 struct cpufreq_governor *gov = NULL;
1806 /* Don't start any governor operations if we are entering suspend */
1807 if (cpufreq_suspended)
/*
 * If the hardware's transition latency exceeds what the governor can
 * tolerate, fall back to the performance governor (when built in).
 */
1810 if (policy->governor->max_transition_latency &&
1811 policy->cpuinfo.transition_latency >
1812 policy->governor->max_transition_latency) {
1816 printk(KERN_WARNING "%s governor failed, too long"
1817 " transition latency of HW, fallback"
1818 " to %s governor\n",
1819 policy->governor->name,
1821 policy->governor = gov;
/* Pin the governor module for the lifetime of the policy binding. */
1825 if (event == CPUFREQ_GOV_POLICY_INIT)
1826 if (!try_module_get(policy->governor->owner))
1829 pr_debug("__cpufreq_governor for CPU %u, event %u\n",
1830 policy->cpu, event);
/* Reject redundant START, and STOP/LIMITS on a stopped governor. */
1832 mutex_lock(&cpufreq_governor_lock);
1833 if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
1834 || (!policy->governor_enabled
1835 && (event == CPUFREQ_GOV_LIMITS || event == CPUFREQ_GOV_STOP))) {
1836 mutex_unlock(&cpufreq_governor_lock);
/* Flip governor_enabled before calling into the governor... */
1840 if (event == CPUFREQ_GOV_STOP)
1841 policy->governor_enabled = false;
1842 else if (event == CPUFREQ_GOV_START)
1843 policy->governor_enabled = true;
1845 mutex_unlock(&cpufreq_governor_lock);
1847 ret = policy->governor->governor(policy, event);
1850 if (event == CPUFREQ_GOV_POLICY_INIT)
1851 policy->governor->initialized++;
1852 else if (event == CPUFREQ_GOV_POLICY_EXIT)
1853 policy->governor->initialized--;
1855 /* Restore original values */
/* ...and roll the flag back if the governor callback failed. */
1856 mutex_lock(&cpufreq_governor_lock);
1857 if (event == CPUFREQ_GOV_STOP)
1858 policy->governor_enabled = true;
1859 else if (event == CPUFREQ_GOV_START)
1860 policy->governor_enabled = false;
1861 mutex_unlock(&cpufreq_governor_lock);
/* Drop the module ref on failed INIT or successful EXIT. */
1864 if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
1865 ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
1866 module_put(policy->governor->owner);
/*
 * cpufreq_register_governor - add @governor to cpufreq_governor_list,
 * unless one with the same name is already registered.
 */
1871 int cpufreq_register_governor(struct cpufreq_governor *governor)
1878 if (cpufreq_disabled())
1881 mutex_lock(&cpufreq_governor_mutex);
1883 governor->initialized = 0;
/* Names must be unique; __find_governor() does the lookup. */
1885 if (__find_governor(governor->name) == NULL) {
1887 list_add(&governor->governor_list, &cpufreq_governor_list);
1890 mutex_unlock(&cpufreq_governor_mutex);
1893 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
/*
 * cpufreq_unregister_governor - remove @governor from the list; also wipe
 * any saved per-CPU "last governor" records (restored on CPU re-plug) that
 * still name it, so an offline CPU cannot come back asking for it.
 */
1895 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
1897 #ifdef CONFIG_HOTPLUG_CPU
1904 if (cpufreq_disabled())
1907 #ifdef CONFIG_HOTPLUG_CPU
1908 for_each_present_cpu(cpu) {
/* Only offline CPUs have a stale saved-governor record. */
1909 if (cpu_online(cpu))
1911 if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
1912 strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
1916 mutex_lock(&cpufreq_governor_mutex);
1917 list_del(&governor->governor_list);
1918 mutex_unlock(&cpufreq_governor_mutex);
1921 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
1924 /*********************************************************************
1925 * POLICY INTERFACE *
1926 *********************************************************************/
1929 * cpufreq_get_policy - get the current cpufreq_policy
1930 * @policy: struct cpufreq_policy into which the current cpufreq_policy
1933 * Reads the current cpufreq policy.
1935 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
1937 struct cpufreq_policy *cpu_policy;
1941 cpu_policy = cpufreq_cpu_get(cpu)(
/* Shallow copy into the caller-supplied buffer; ref dropped right after. */
1945 memcpy(policy, cpu_policy, sizeof(*policy));
1947 cpufreq_cpu_put(cpu_policy);
1950 EXPORT_SYMBOL(cpufreq_get_policy);
/*
 * cpufreq_set_policy - validate @new_policy (driver ->verify() plus the
 * ADJUST/INCOMPATIBLE policy-notifier passes), commit the new min/max,
 * and apply it either via the driver's ->setpolicy() or by switching /
 * re-limiting the governor.  Caller holds policy->rwsem for writing.
 */
1953 * policy : current policy.
1954 * new_policy: policy to be set.
1956 static int cpufreq_set_policy(struct cpufreq_policy *policy,
1957 struct cpufreq_policy *new_policy)
1959 int ret = 0, failed = 1;
1961 pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
1962 new_policy->min, new_policy->max);
1964 memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
/* Reject ranges that do not overlap the current policy at all. */
1966 if (new_policy->min > policy->max || new_policy->max < policy->min) {
1971 /* verify the cpu speed can be set within this limit */
1972 ret = cpufreq_driver->verify(new_policy);
1976 /* adjust if necessary - all reasons */
1977 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1978 CPUFREQ_ADJUST, new_policy);
1980 /* adjust if necessary - hardware incompatibility*/
1981 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1982 CPUFREQ_INCOMPATIBLE, new_policy);
1985 * verify the cpu speed can be set within this limit, which might be
1986 * different to the first one
1988 ret = cpufreq_driver->verify(new_policy);
1992 /* notification of the new policy */
1993 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1994 CPUFREQ_NOTIFY, new_policy);
1996 policy->min = new_policy->min;
1997 policy->max = new_policy->max;
1999 pr_debug("new min and max freqs are %u - %u kHz\n",
2000 policy->min, policy->max);
/* setpolicy drivers take over from here; no governor involved. */
2002 if (cpufreq_driver->setpolicy) {
2003 policy->policy = new_policy->policy;
2004 pr_debug("setting range\n");
2005 ret = cpufreq_driver->setpolicy(new_policy);
2007 if (new_policy->governor != policy->governor) {
2008 /* save old, working values */
2009 struct cpufreq_governor *old_gov = policy->governor;
2011 pr_debug("governor switch\n");
2013 /* end old governor */
2014 if (policy->governor) {
2015 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
/*
 * rwsem is dropped around POLICY_EXIT/INIT because the governor
 * callback may itself need policy->rwsem (lock-ordering).
 */
2016 up_write(&policy->rwsem);
2017 __cpufreq_governor(policy,
2018 CPUFREQ_GOV_POLICY_EXIT);
2019 down_write(&policy->rwsem);
2022 /* start new governor */
2023 policy->governor = new_policy->governor;
2024 if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
2025 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
2028 up_write(&policy->rwsem);
2029 __cpufreq_governor(policy,
2030 CPUFREQ_GOV_POLICY_EXIT);
2031 down_write(&policy->rwsem);
2036 /* new governor failed, so re-start old one */
2037 pr_debug("starting governor %s failed\n",
2038 policy->governor->name);
2040 policy->governor = old_gov;
2041 __cpufreq_governor(policy,
2042 CPUFREQ_GOV_POLICY_INIT);
2043 __cpufreq_governor(policy,
2049 /* might be a policy change, too, so fall through */
2051 pr_debug("governor: change or update limits\n");
2052 ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
2060 * cpufreq_update_policy - re-evaluate an existing cpufreq policy
2061 * @cpu: CPU which shall be re-evaluated
2063 * Useful for policy notifiers which have different necessities
2064 * at different times.
2066 int cpufreq_update_policy(unsigned int cpu)
2068 struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
2069 struct cpufreq_policy new_policy;
2077 down_write(&policy->rwsem);
2079 pr_debug("updating policy for CPU %u\n", cpu);
/* Rebuild the candidate policy from the user-requested settings. */
2080 memcpy(&new_policy, policy, sizeof(*policy));
2081 new_policy.min = policy->user_policy.min;
2082 new_policy.max = policy->user_policy.max;
2083 new_policy.policy = policy->user_policy.policy;
2084 new_policy.governor = policy->user_policy.governor;
2087 * BIOS might change freq behind our back
2088 * -> ask driver for current freq and notify governors about a change
2090 if (cpufreq_driver->get) {
2091 new_policy.cur = cpufreq_driver->get(cpu);
2093 pr_debug("Driver did not initialize current freq");
2094 policy->cur = new_policy.cur;
/* Only target-style drivers need the out-of-sync fixup. */
2096 if (policy->cur != new_policy.cur && has_target())
2097 cpufreq_out_of_sync(cpu, policy->cur,
2102 ret = cpufreq_set_policy(policy, &new_policy);
2104 up_write(&policy->rwsem);
2106 cpufreq_cpu_put(policy);
2110 EXPORT_SYMBOL(cpufreq_update_policy);
/*
 * cpufreq_cpu_callback - CPU hotplug notifier: add/remove the per-CPU
 * cpufreq state as CPUs come online / go down.  "frozen" stays false
 * here since CPU_TASKS_FROZEN is masked off the action.
 */
2112 static int cpufreq_cpu_callback(struct notifier_block *nfb,
2113 unsigned long action, void *hcpu)
2115 unsigned int cpu = (unsigned long)hcpu;
2117 bool frozen = false;
2119 dev = get_cpu_device(cpu);
2122 switch (action & ~CPU_TASKS_FROZEN) {
2124 __cpufreq_add_dev(dev, NULL, frozen);
2125 cpufreq_update_policy(cpu);
/* Down path mirrors cpufreq_remove_dev(): prepare, then finish. */
2128 case CPU_DOWN_PREPARE:
2129 __cpufreq_remove_dev_prepare(dev, NULL, frozen);
2133 __cpufreq_remove_dev_finish(dev, NULL, frozen);
/* An aborted offline re-adds the device. */
2136 case CPU_DOWN_FAILED:
2137 __cpufreq_add_dev(dev, NULL, frozen);
2144 static struct notifier_block __refdata cpufreq_cpu_notifier = {
2145 .notifier_call = cpufreq_cpu_callback,
2148 /*********************************************************************
2149 * REGISTER / UNREGISTER CPUFREQ DRIVER *
2150 *********************************************************************/
2153 * cpufreq_register_driver - register a CPU Frequency driver
2154 * @driver_data: A struct cpufreq_driver containing the values#
2155 * submitted by the CPU Frequency driver.
2157 * Registers a CPU Frequency driver to this core code. This code
2158 * returns zero on success, -EBUSY when another driver got here first
2159 * (and isn't unregistered in the meantime).
2162 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2164 unsigned long flags;
2167 if (cpufreq_disabled())
/* A driver must provide verify+init and exactly one set/target method. */
2170 if (!driver_data || !driver_data->verify || !driver_data->init ||
2171 !(driver_data->setpolicy || driver_data->target_index ||
2172 driver_data->target))
2175 pr_debug("trying to register driver %s\n", driver_data->name);
/* setpolicy drivers imply constant loops_per_jiffy semantics. */
2177 if (driver_data->setpolicy)
2178 driver_data->flags |= CPUFREQ_CONST_LOOPS;
/* Only one driver may be registered at a time (-EBUSY otherwise). */
2180 write_lock_irqsave(&cpufreq_driver_lock, flags);
2181 if (cpufreq_driver) {
2182 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2185 cpufreq_driver = driver_data;
2186 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2188 ret = subsys_interface_register(&cpufreq_interface);
2190 goto err_null_driver;
/* Non-sticky drivers must have initialized at least one CPU to stay. */
2192 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
2196 /* check for at least one working CPU */
2197 for (i = 0; i < nr_cpu_ids; i++)
2198 if (cpu_possible(i) && per_cpu(cpufreq_cpu_data, i)) {
2203 /* if all ->init() calls failed, unregister */
2205 pr_debug("no CPU initialized for driver %s\n",
2211 register_hotcpu_notifier(&cpufreq_cpu_notifier);
2212 pr_debug("driver %s up and running\n", driver_data->name);
/* Error unwind: undo subsys registration, then clear cpufreq_driver. */
2216 subsys_interface_unregister(&cpufreq_interface);
2218 write_lock_irqsave(&cpufreq_driver_lock, flags);
2219 cpufreq_driver = NULL;
2220 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2223 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2226 * cpufreq_unregister_driver - unregister the current CPUFreq driver
2228 * Unregister the current CPUFreq driver. Only call this if you have
2229 * the right to do so, i.e. if you have succeeded in initialising before!
2230 * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2231 * currently not initialised.
2233 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2235 unsigned long flags;
/* Only the driver that registered may unregister. */
2237 if (!cpufreq_driver || (driver != cpufreq_driver))
2240 pr_debug("unregistering driver %s\n", driver->name);
2242 subsys_interface_unregister(&cpufreq_interface);
2243 unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
/*
 * cpufreq_rwsem write lock waits out every reader (e.g. cpufreq_get())
 * before the driver pointer is cleared.
 */
2245 down_write(&cpufreq_rwsem);
2246 write_lock_irqsave(&cpufreq_driver_lock, flags);
2248 cpufreq_driver = NULL;
2250 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2251 up_write(&cpufreq_rwsem);
2255 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
/*
 * cpufreq_core_init - core bring-up: create the global cpufreq kobject
 * and register the boot-CPU suspend/resume syscore ops.
 */
2257 static int __init cpufreq_core_init(void)
2259 if (cpufreq_disabled())
2262 cpufreq_global_kobject = kobject_create();
2263 BUG_ON(!cpufreq_global_kobject);
2264 register_syscore_ops(&cpufreq_syscore_ops);
2268 core_initcall(cpufreq_core_init);