// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/drivers/cpufreq/cpufreq.c
 *
 *  Copyright (C) 2001 Russell King
 *            (C) 2002 - 2003 Dominik Brodowski <[email protected]>
 *            (C) 2013 Viresh Kumar <[email protected]>
 *
 *  Oct 2005 - Ashok Raj <[email protected]>
 *      Added handling for CPU hotplug
 *  Feb 2006 - Jacob Shin <[email protected]>
 *      Fix handling for CPU hotplug -- affected CPUs
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
#include <trace/events/power.h>

static LIST_HEAD(cpufreq_policy_list);

/* Macros to iterate over CPU policies */
#define for_each_suitable_policy(__policy, __active)                     \
        list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
                if ((__active) == !policy_is_inactive(__policy))

#define for_each_active_policy(__policy)                \
        for_each_suitable_policy(__policy, true)
#define for_each_inactive_policy(__policy)              \
        for_each_suitable_policy(__policy, false)

#define for_each_policy(__policy)                       \
        list_for_each_entry(__policy, &cpufreq_policy_list, policy_list)

/* Iterate over governors */
static LIST_HEAD(cpufreq_governor_list);
#define for_each_governor(__governor)                           \
        list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)

static char default_governor[CPUFREQ_NAME_LEN];

/*
 * The "cpufreq driver" - the arch- or hardware-dependent low
 * level driver of CPUFreq support, and its spinlock. This lock
 * also protects the cpufreq_cpu_data array.
 */
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
static DEFINE_RWLOCK(cpufreq_driver_lock);

/* Flag to suspend/resume CPUFreq governors */
static bool cpufreq_suspended;

static inline bool has_target(void)
{
        return cpufreq_driver->target_index || cpufreq_driver->target;
}

/* internal prototypes */
static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
static int cpufreq_init_governor(struct cpufreq_policy *policy);
static void cpufreq_exit_governor(struct cpufreq_policy *policy);
static void cpufreq_governor_limits(struct cpufreq_policy *policy);
static int cpufreq_set_policy(struct cpufreq_policy *policy,
                              struct cpufreq_governor *new_gov,
                              unsigned int new_pol);

/*
 * Two notifier lists: the "policy" list is involved in the
 * validation process for a new CPU frequency policy; the
 * "transition" list for kernel code that needs to handle
 * changes to devices when the CPU clock speed changes.
 * The mutex locks both lists.
 */
static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);

static int off __read_mostly;
static int cpufreq_disabled(void)
{
        return off;
}
void disable_cpufreq(void)
{
        off = 1;
}
static DEFINE_MUTEX(cpufreq_governor_mutex);

bool have_governor_per_policy(void)
{
        return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

static struct kobject *cpufreq_global_kobject;

struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
        if (have_governor_per_policy())
                return &policy->kobj;
        else
                return cpufreq_global_kobject;
}
EXPORT_SYMBOL_GPL(get_governor_parent_kobj);

static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
        struct kernel_cpustat kcpustat;
        u64 cur_wall_time;
        u64 idle_time;
        u64 busy_time;

        cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());

        kcpustat_cpu_fetch(&kcpustat, cpu);

        busy_time = kcpustat.cpustat[CPUTIME_USER];
        busy_time += kcpustat.cpustat[CPUTIME_SYSTEM];
        busy_time += kcpustat.cpustat[CPUTIME_IRQ];
        busy_time += kcpustat.cpustat[CPUTIME_SOFTIRQ];
        busy_time += kcpustat.cpustat[CPUTIME_STEAL];
        busy_time += kcpustat.cpustat[CPUTIME_NICE];

        idle_time = cur_wall_time - busy_time;
        if (wall)
                *wall = div_u64(cur_wall_time, NSEC_PER_USEC);

        return div_u64(idle_time, NSEC_PER_USEC);
}

u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
{
        u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);

        if (idle_time == -1ULL)
                return get_cpu_idle_time_jiffy(cpu, wall);
        else if (!io_busy)
                idle_time += get_cpu_iowait_time_us(cpu, wall);

        return idle_time;
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);

__weak void arch_set_freq_scale(struct cpumask *cpus, unsigned long cur_freq,
                unsigned long max_freq)
{
}
EXPORT_SYMBOL_GPL(arch_set_freq_scale);
/*
 * This is a generic cpufreq init() routine which can be used by cpufreq
 * drivers of SMP systems. It does the following:
 * - sets the frequency table passed in
 * - sets the policy's transition latency
 * - populates policy->cpus with all possible CPUs
 */
void cpufreq_generic_init(struct cpufreq_policy *policy,
                struct cpufreq_frequency_table *table,
                unsigned int transition_latency)
{
        policy->freq_table = table;
        policy->cpuinfo.transition_latency = transition_latency;

        /*
         * The driver only supports the SMP configuration where all processors
         * share the clock and voltage.
         */
        cpumask_setall(policy->cpus);
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
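
/*
 * Usage sketch (illustrative, not part of this file): a minimal driver
 * ->init() callback built on cpufreq_generic_init().  The table name
 * "foo_freq_table" and the 100 us (100000 ns) transition latency are
 * made-up values.
 *
 *	static int foo_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		cpufreq_generic_init(policy, foo_freq_table, 100000);
 *		return 0;
 *	}
 */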

struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
        struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);

        return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);

unsigned int cpufreq_generic_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);

        if (!policy || IS_ERR(policy->clk)) {
                pr_err("%s: No %s associated to cpu: %d\n",
                       __func__, policy ? "clk" : "policy", cpu);
                return 0;
        }

        return clk_get_rate(policy->clk) / 1000;
}
EXPORT_SYMBOL_GPL(cpufreq_generic_get);
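
/*
 * Illustrative only: a driver whose CPUs are clocked through the common
 * clk framework can plug this in directly as its ->get() callback, e.g.
 * ".get = cpufreq_generic_get" in its struct cpufreq_driver, provided it
 * stored the clk handle in policy->clk during ->init().
 */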

/**
 * cpufreq_cpu_get - Return policy for a CPU and mark it as busy.
 * @cpu: CPU to find the policy for.
 *
 * Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment
 * the kobject reference counter of that policy.  Return a valid policy on
 * success or NULL on failure.
 *
 * The policy returned by this function has to be released with the help of
 * cpufreq_cpu_put() to balance its kobject reference counter properly.
 */
struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
        struct cpufreq_policy *policy = NULL;
        unsigned long flags;

        if (WARN_ON(cpu >= nr_cpu_ids))
                return NULL;

        /* get the cpufreq driver */
        read_lock_irqsave(&cpufreq_driver_lock, flags);

        if (cpufreq_driver) {
                /* get the CPU */
                policy = cpufreq_cpu_get_raw(cpu);
                if (policy)
                        kobject_get(&policy->kobj);
        }

        read_unlock_irqrestore(&cpufreq_driver_lock, flags);

        return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);

/**
 * cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
 * @policy: cpufreq policy returned by cpufreq_cpu_get().
 */
void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
        kobject_put(&policy->kobj);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
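
/*
 * Typical usage pattern (a sketch): every successful cpufreq_cpu_get()
 * must be balanced by a cpufreq_cpu_put().  "use_policy()" is a
 * hypothetical consumer.
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
 *
 *	if (policy) {
 *		use_policy(policy);
 *		cpufreq_cpu_put(policy);
 *	}
 */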

/**
 * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
 * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
 */
void cpufreq_cpu_release(struct cpufreq_policy *policy)
{
        if (WARN_ON(!policy))
                return;

        lockdep_assert_held(&policy->rwsem);

        up_write(&policy->rwsem);

        cpufreq_cpu_put(policy);
}

/**
 * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
 * @cpu: CPU to find the policy for.
 *
 * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
 * if the policy returned by it is not NULL, acquire its rwsem for writing.
 * Return the policy if it is active or release it and return NULL otherwise.
 *
 * The policy returned by this function has to be released with the help of
 * cpufreq_cpu_release() in order to release its rwsem and balance its usage
 * counter properly.
 */
struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

        if (!policy)
                return NULL;

        down_write(&policy->rwsem);

        if (policy_is_inactive(policy)) {
                cpufreq_cpu_release(policy);
                return NULL;
        }

        return policy;
}
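
/*
 * Sketch of the acquire/release pairing described above; the body between
 * the two calls runs with the policy rwsem held for writing.
 * "update_policy_limits()" is a hypothetical helper.
 *
 *	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
 *
 *	if (policy) {
 *		update_policy_limits(policy);
 *		cpufreq_cpu_release(policy);
 *	}
 */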

/*********************************************************************
 *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
 *********************************************************************/

/*
 * adjust_jiffies - adjust the system "loops_per_jiffy"
 *
 * This function alters the system "loops_per_jiffy" for the clock
 * speed change. Note that loops_per_jiffy cannot be updated on SMP
 * systems as each CPU might be scaled differently. So, use the arch
 * per-CPU loops_per_jiffy value wherever possible.
 */
static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
{
#ifndef CONFIG_SMP
        static unsigned long l_p_j_ref;
        static unsigned int l_p_j_ref_freq;

        if (ci->flags & CPUFREQ_CONST_LOOPS)
                return;

        if (!l_p_j_ref_freq) {
                l_p_j_ref = loops_per_jiffy;
                l_p_j_ref_freq = ci->old;
                pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
                         l_p_j_ref, l_p_j_ref_freq);
        }
        if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
                loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
                                                                ci->new);
                pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
                         loops_per_jiffy, ci->new);
        }
#endif
}

/**
 * cpufreq_notify_transition - Notify frequency transition and adjust_jiffies.
 * @policy: cpufreq policy the frequency transition applies to.
 * @freqs: contain details of the frequency update.
 * @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
 *
 * This function calls the transition notifiers and the "adjust_jiffies"
 * function. It is called twice on all CPU frequency changes that have
 * external effects.
 */
static void cpufreq_notify_transition(struct cpufreq_policy *policy,
                                      struct cpufreq_freqs *freqs,
                                      unsigned int state)
{
        int cpu;

        BUG_ON(irqs_disabled());

        if (cpufreq_disabled())
                return;

        freqs->policy = policy;
        freqs->flags = cpufreq_driver->flags;
        pr_debug("notification %u of frequency transition to %u kHz\n",
                 state, freqs->new);

        switch (state) {
        case CPUFREQ_PRECHANGE:
                /*
                 * Detect if the driver reported a value as "old frequency"
                 * which is not equal to what the cpufreq core thinks is
                 * "old frequency".
                 */
                if (policy->cur && policy->cur != freqs->old) {
                        pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
                                 freqs->old, policy->cur);
                        freqs->old = policy->cur;
                }

                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                         CPUFREQ_PRECHANGE, freqs);

                adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
                break;

        case CPUFREQ_POSTCHANGE:
                adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
                pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
                         cpumask_pr_args(policy->cpus));

                for_each_cpu(cpu, policy->cpus)
                        trace_cpu_frequency(freqs->new, cpu);

                srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
                                         CPUFREQ_POSTCHANGE, freqs);

                cpufreq_stats_record_transition(policy, freqs->new);
                policy->cur = freqs->new;
        }
}

/* Do post notifications when there is a chance that the transition failed */
static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, int transition_failed)
{
        cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
        if (!transition_failed)
                return;

        swap(freqs->old, freqs->new);
        cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
        cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
}

void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs)
{

        /*
         * Catch double invocations of _begin() which lead to self-deadlock.
         * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
         * doesn't invoke _begin() on their behalf, and hence the chances of
         * double invocations are very low. Moreover, there are scenarios
         * where these checks can emit false-positive warnings in these
         * drivers; so we avoid that by skipping them altogether.
         */
        WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
                                && current == policy->transition_task);

wait:
        wait_event(policy->transition_wait, !policy->transition_ongoing);

        spin_lock(&policy->transition_lock);

        if (unlikely(policy->transition_ongoing)) {
                spin_unlock(&policy->transition_lock);
                goto wait;
        }

        policy->transition_ongoing = true;
        policy->transition_task = current;

        spin_unlock(&policy->transition_lock);

        cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);

void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
                struct cpufreq_freqs *freqs, int transition_failed)
{
        if (WARN_ON(!policy->transition_ongoing))
                return;

        cpufreq_notify_post_transition(policy, freqs, transition_failed);

        policy->transition_ongoing = false;
        policy->transition_task = NULL;

        wake_up(&policy->transition_wait);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
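
/*
 * Illustrative driver-side pattern (a sketch, not taken from any specific
 * driver): each hardware frequency change is bracketed by _begin()/_end(),
 * with the third argument of _end() indicating failure.  "foo_write_freq()"
 * is a hypothetical hardware accessor.
 *
 *	struct cpufreq_freqs freqs = { .old = policy->cur, .new = new_khz };
 *	int ret;
 *
 *	cpufreq_freq_transition_begin(policy, &freqs);
 *	ret = foo_write_freq(new_khz);
 *	cpufreq_freq_transition_end(policy, &freqs, ret != 0);
 */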

/*
 * Fast frequency switching status count.  Positive means "enabled", negative
 * means "disabled" and 0 means "not decided yet".
 */
static int cpufreq_fast_switch_count;
static DEFINE_MUTEX(cpufreq_fast_switch_lock);

static void cpufreq_list_transition_notifiers(void)
{
        struct notifier_block *nb;

        pr_info("Registered transition notifiers:\n");

        mutex_lock(&cpufreq_transition_notifier_list.mutex);

        for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
                pr_info("%pS\n", nb->notifier_call);

        mutex_unlock(&cpufreq_transition_notifier_list.mutex);
}

/**
 * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
 * @policy: cpufreq policy to enable fast frequency switching for.
 *
 * Try to enable fast frequency switching for @policy.
 *
 * The attempt will fail if there is at least one transition notifier registered
 * at this point, as fast frequency switching is quite fundamentally at odds
 * with transition notifiers.  Thus if successful, it will make registration of
 * transition notifiers fail going forward.
 */
void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
{
        lockdep_assert_held(&policy->rwsem);

        if (!policy->fast_switch_possible)
                return;

        mutex_lock(&cpufreq_fast_switch_lock);
        if (cpufreq_fast_switch_count >= 0) {
                cpufreq_fast_switch_count++;
                policy->fast_switch_enabled = true;
        } else {
                pr_warn("CPU%u: Fast frequency switching not enabled\n",
                        policy->cpu);
                cpufreq_list_transition_notifiers();
        }
        mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);

/**
 * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
 * @policy: cpufreq policy to disable fast frequency switching for.
 */
void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
{
        mutex_lock(&cpufreq_fast_switch_lock);
        if (policy->fast_switch_enabled) {
                policy->fast_switch_enabled = false;
                if (!WARN_ON(cpufreq_fast_switch_count <= 0))
                        cpufreq_fast_switch_count--;
        }
        mutex_unlock(&cpufreq_fast_switch_lock);
}
EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);

/**
 * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
 * one.
 * @policy: associated policy to interrogate
 * @target_freq: target frequency to resolve.
 *
 * The target to driver frequency mapping is cached in the policy.
 *
 * Return: Lowest driver-supported frequency greater than or equal to the
 * given target_freq, subject to policy (min/max) and driver limitations.
 */
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
                                         unsigned int target_freq)
{
        target_freq = clamp_val(target_freq, policy->min, policy->max);
        policy->cached_target_freq = target_freq;

        if (cpufreq_driver->target_index) {
                unsigned int idx;

                idx = cpufreq_frequency_table_target(policy, target_freq,
                                                     CPUFREQ_RELATION_L);
                policy->cached_resolved_idx = idx;
                return policy->freq_table[idx].frequency;
        }

        if (cpufreq_driver->resolve_freq)
                return cpufreq_driver->resolve_freq(policy, target_freq);

        return target_freq;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
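
/*
 * Worked example (hypothetical table): with driver-supported frequencies
 * { 500000, 1000000, 1500000 } kHz and policy->min/max spanning the whole
 * range, a target_freq of 600000 kHz resolves to 1000000 kHz, since
 * CPUFREQ_RELATION_L selects the lowest supported frequency at or above
 * the clamped target.
 */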

unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
{
        unsigned int latency;

        if (policy->transition_delay_us)
                return policy->transition_delay_us;

        latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
        if (latency) {
                /*
                 * For platforms that can change the frequency very fast (< 10
                 * us), multiplying the latency by LATENCY_MULTIPLIER below
                 * gives a decent transition delay. But for platforms where
                 * transition_latency is in milliseconds, it ends up giving
                 * unrealistic values.
                 *
                 * Cap the default transition delay to 10 ms, which seems to be
                 * a reasonable amount of time after which we should reevaluate
                 * the frequency.
                 */
                return min(latency * LATENCY_MULTIPLIER, (unsigned int)10000);
        }

        return LATENCY_MULTIPLIER;
}
EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
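
/*
 * Worked example, assuming LATENCY_MULTIPLIER is 1000: a transition_latency
 * of 20000 ns gives latency = 20 us, so 20 * 1000 = 20000 us, which is
 * capped to 10000 us (10 ms).  A 2 us latency yields an uncapped 2000 us
 * delay.
 */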

/*********************************************************************
 *                          SYSFS INTERFACE                          *
 *********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
                          struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}

static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
                           const char *buf, size_t count)
{
        int ret, enable;

        ret = sscanf(buf, "%d", &enable);
        if (ret != 1 || enable < 0 || enable > 1)
                return -EINVAL;

        if (cpufreq_boost_trigger_state(enable)) {
                pr_err("%s: Cannot %s BOOST!\n",
                       __func__, enable ? "enable" : "disable");
                return -EINVAL;
        }

        pr_debug("%s: cpufreq BOOST %s\n",
                 __func__, enable ? "enabled" : "disabled");

        return count;
}
define_one_global_rw(boost);

static struct cpufreq_governor *find_governor(const char *str_governor)
{
        struct cpufreq_governor *t;

        for_each_governor(t)
                if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
                        return t;

        return NULL;
}

static struct cpufreq_governor *get_governor(const char *str_governor)
{
        struct cpufreq_governor *t;

        mutex_lock(&cpufreq_governor_mutex);
        t = find_governor(str_governor);
        if (!t)
                goto unlock;

        if (!try_module_get(t->owner))
                t = NULL;

unlock:
        mutex_unlock(&cpufreq_governor_mutex);

        return t;
}

static unsigned int cpufreq_parse_policy(char *str_governor)
{
        if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
                return CPUFREQ_POLICY_PERFORMANCE;

        if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
                return CPUFREQ_POLICY_POWERSAVE;

        return CPUFREQ_POLICY_UNKNOWN;
}

/**
 * cpufreq_parse_governor - parse a governor string only for has_target()
 * @str_governor: Governor name.
 */
static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
{
        struct cpufreq_governor *t;

        t = get_governor(str_governor);
        if (t)
                return t;

        if (request_module("cpufreq_%s", str_governor))
                return NULL;

        return get_governor(str_governor);
}

/*
 * cpufreq_per_cpu_attr_read() / show_##file_name() -
 * print out cpufreq information
 *
 * Write out information from cpufreq_driver->policy[cpu]; object must be
 * "unsigned int".
 */

#define show_one(file_name, object)                     \
static ssize_t show_##file_name                         \
(struct cpufreq_policy *policy, char *buf)              \
{                                                       \
        return sprintf(buf, "%u\n", policy->object);    \
}
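
/*
 * For reference, show_one(scaling_min_freq, min) below expands to:
 *
 *	static ssize_t show_scaling_min_freq
 *	(struct cpufreq_policy *policy, char *buf)
 *	{
 *		return sprintf(buf, "%u\n", policy->min);
 *	}
 */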

show_one(cpuinfo_min_freq, cpuinfo.min_freq);
show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);

__weak unsigned int arch_freq_get_on_cpu(int cpu)
{
        return 0;
}

static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
{
        ssize_t ret;
        unsigned int freq;

        freq = arch_freq_get_on_cpu(policy->cpu);
        if (freq)
                ret = sprintf(buf, "%u\n", freq);
        else if (cpufreq_driver && cpufreq_driver->setpolicy &&
                        cpufreq_driver->get)
                ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
        else
                ret = sprintf(buf, "%u\n", policy->cur);
        return ret;
}

/*
 * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
 */
#define store_one(file_name, object)                    \
static ssize_t store_##file_name                                        \
(struct cpufreq_policy *policy, const char *buf, size_t count)          \
{                                                                       \
        unsigned long val;                                              \
        int ret;                                                        \
                                                                        \
        ret = sscanf(buf, "%lu", &val);                                 \
        if (ret != 1)                                                   \
                return -EINVAL;                                         \
                                                                        \
        ret = freq_qos_update_request(policy->object##_freq_req, val);\
        return ret >= 0 ? count : ret;                                  \
}

store_one(scaling_min_freq, min);
store_one(scaling_max_freq, max);

/*
 * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
 */
static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
                                        char *buf)
{
        unsigned int cur_freq = __cpufreq_get(policy);

        if (cur_freq)
                return sprintf(buf, "%u\n", cur_freq);

        return sprintf(buf, "<unknown>\n");
}

/*
 * show_scaling_governor - show the current policy for the specified CPU
 */
static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
{
        if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
                return sprintf(buf, "powersave\n");
        else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
                return sprintf(buf, "performance\n");
        else if (policy->governor)
                return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n",
                                policy->governor->name);
        return -EINVAL;
}

/*
 * store_scaling_governor - store policy for the specified CPU
 */
static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        char str_governor[16];
        int ret;

        ret = sscanf(buf, "%15s", str_governor);
        if (ret != 1)
                return -EINVAL;

        if (cpufreq_driver->setpolicy) {
                unsigned int new_pol;

                new_pol = cpufreq_parse_policy(str_governor);
                if (!new_pol)
                        return -EINVAL;

                ret = cpufreq_set_policy(policy, NULL, new_pol);
        } else {
                struct cpufreq_governor *new_gov;

                new_gov = cpufreq_parse_governor(str_governor);
                if (!new_gov)
                        return -EINVAL;

                ret = cpufreq_set_policy(policy, new_gov,
                                         CPUFREQ_POLICY_UNKNOWN);

                module_put(new_gov->owner);
        }

        return ret ? ret : count;
}

/*
 * show_scaling_driver - show the cpufreq driver currently loaded
 */
static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
{
        return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
}

/*
 * show_scaling_available_governors - show the available CPUfreq governors
 */
static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
                                                char *buf)
{
        ssize_t i = 0;
        struct cpufreq_governor *t;

        if (!has_target()) {
                i += sprintf(buf, "performance powersave");
                goto out;
        }

        mutex_lock(&cpufreq_governor_mutex);
        for_each_governor(t) {
                if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
                    - (CPUFREQ_NAME_LEN + 2)))
                        break;
                i += scnprintf(&buf[i], CPUFREQ_NAME_PLEN, "%s ", t->name);
        }
        mutex_unlock(&cpufreq_governor_mutex);
out:
        i += sprintf(&buf[i], "\n");
        return i;
}

ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
{
        ssize_t i = 0;
        unsigned int cpu;

        for_each_cpu(cpu, mask) {
                if (i)
                        i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
                i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
                if (i >= (PAGE_SIZE - 5))
                        break;
        }
        i += sprintf(&buf[i], "\n");
        return i;
}
EXPORT_SYMBOL_GPL(cpufreq_show_cpus);

/*
 * show_related_cpus - show the CPUs affected by each transition even if
 * hw coordination is in use
 */
static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
{
        return cpufreq_show_cpus(policy->related_cpus, buf);
}

/*
 * show_affected_cpus - show the CPUs affected by each transition
 */
static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
{
        return cpufreq_show_cpus(policy->cpus, buf);
}

static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
                                        const char *buf, size_t count)
{
        unsigned int freq = 0;
        unsigned int ret;

        if (!policy->governor || !policy->governor->store_setspeed)
                return -EINVAL;

        ret = sscanf(buf, "%u", &freq);
        if (ret != 1)
                return -EINVAL;

        policy->governor->store_setspeed(policy, freq);

        return count;
}

static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
{
        if (!policy->governor || !policy->governor->show_setspeed)
                return sprintf(buf, "<unsupported>\n");

        return policy->governor->show_setspeed(policy, buf);
}

/*
 * show_bios_limit - show the current cpufreq HW/BIOS limitation
 */
static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
{
        unsigned int limit;
        int ret;
        ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
        if (!ret)
                return sprintf(buf, "%u\n", limit);
        return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
}

cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
cpufreq_freq_attr_ro(cpuinfo_min_freq);
cpufreq_freq_attr_ro(cpuinfo_max_freq);
cpufreq_freq_attr_ro(cpuinfo_transition_latency);
cpufreq_freq_attr_ro(scaling_available_governors);
cpufreq_freq_attr_ro(scaling_driver);
cpufreq_freq_attr_ro(scaling_cur_freq);
cpufreq_freq_attr_ro(bios_limit);
cpufreq_freq_attr_ro(related_cpus);
cpufreq_freq_attr_ro(affected_cpus);
cpufreq_freq_attr_rw(scaling_min_freq);
cpufreq_freq_attr_rw(scaling_max_freq);
cpufreq_freq_attr_rw(scaling_governor);
cpufreq_freq_attr_rw(scaling_setspeed);

static struct attribute *default_attrs[] = {
        &cpuinfo_min_freq.attr,
        &cpuinfo_max_freq.attr,
        &cpuinfo_transition_latency.attr,
        &scaling_min_freq.attr,
        &scaling_max_freq.attr,
        &affected_cpus.attr,
        &related_cpus.attr,
        &scaling_governor.attr,
        &scaling_driver.attr,
        &scaling_available_governors.attr,
        &scaling_setspeed.attr,
        NULL
};

#define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
#define to_attr(a) container_of(a, struct freq_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret;

        if (!fattr->show)
                return -EIO;

        down_read(&policy->rwsem);
        ret = fattr->show(policy, buf);
        up_read(&policy->rwsem);

        return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
                     const char *buf, size_t count)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        struct freq_attr *fattr = to_attr(attr);
        ssize_t ret = -EINVAL;

        if (!fattr->store)
                return -EIO;

        /*
         * cpus_read_trylock() is used here to work around a circular lock
         * dependency problem with cpufreq_register_driver().
         */
        if (!cpus_read_trylock())
                return -EBUSY;

        if (cpu_online(policy->cpu)) {
                down_write(&policy->rwsem);
                ret = fattr->store(policy, buf, count);
                up_write(&policy->rwsem);
        }

        cpus_read_unlock();

        return ret;
}

static void cpufreq_sysfs_release(struct kobject *kobj)
{
        struct cpufreq_policy *policy = to_policy(kobj);
        pr_debug("last reference is dropped\n");
        complete(&policy->kobj_unregister);
}

static const struct sysfs_ops sysfs_ops = {
        .show   = show,
        .store  = store,
};

static struct kobj_type ktype_cpufreq = {
        .sysfs_ops      = &sysfs_ops,
        .default_attrs  = default_attrs,
        .release        = cpufreq_sysfs_release,
};

static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
{
        struct device *dev = get_cpu_device(cpu);

        if (unlikely(!dev))
                return;

        if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
                return;

        dev_dbg(dev, "%s: Adding symlink\n", __func__);
        if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
                dev_err(dev, "cpufreq symlink creation failed\n");
}

static void remove_cpu_dev_symlink(struct cpufreq_policy *policy,
                                   struct device *dev)
{
        dev_dbg(dev, "%s: Removing symlink\n", __func__);
        sysfs_remove_link(&dev->kobj, "cpufreq");
}

static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
{
        struct freq_attr **drv_attr;
        int ret = 0;

        /* set up files for this cpu device */
        drv_attr = cpufreq_driver->attr;
        while (drv_attr && *drv_attr) {
                ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
                if (ret)
                        return ret;
                drv_attr++;
        }
        if (cpufreq_driver->get) {
                ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
                if (ret)
                        return ret;
        }

        ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
        if (ret)
                return ret;

        if (cpufreq_driver->bios_limit) {
                ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
                if (ret)
                        return ret;
        }

        return 0;
}

static int cpufreq_init_policy(struct cpufreq_policy *policy)
{
        struct cpufreq_governor *gov = NULL;
        unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
        int ret;

        if (has_target()) {
                /* Update policy governor to the one used before hotplug. */
                gov = get_governor(policy->last_governor);
                if (gov) {
                        pr_debug("Restoring governor %s for cpu %d\n",
                                 gov->name, policy->cpu);
                } else {
                        gov = get_governor(default_governor);
                }

                if (!gov) {
                        gov = cpufreq_default_governor();
                        __module_get(gov->owner);
                }

        } else {

                /* Use the default policy if there is no last_policy. */
                if (policy->last_policy) {
                        pol = policy->last_policy;
                } else {
                        pol = cpufreq_parse_policy(default_governor);
                        /*
                         * In case the default governor is neither "performance"
                         * nor "powersave", fall back to the initial policy
                         * value set by the driver.
                         */
                        if (pol == CPUFREQ_POLICY_UNKNOWN)
                                pol = policy->policy;
                }
                if (pol != CPUFREQ_POLICY_PERFORMANCE &&
                    pol != CPUFREQ_POLICY_POWERSAVE)
                        return -ENODATA;
        }

        ret = cpufreq_set_policy(policy, gov, pol);
        if (gov)
                module_put(gov->owner);

        return ret;
}

static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
{
        int ret = 0;

        /* Has this CPU been taken care of already? */
        if (cpumask_test_cpu(cpu, policy->cpus))
                return 0;

        down_write(&policy->rwsem);
        if (has_target())
                cpufreq_stop_governor(policy);

        cpumask_set_cpu(cpu, policy->cpus);

        if (has_target()) {
                ret = cpufreq_start_governor(policy);
                if (ret)
                        pr_err("%s: Failed to start governor\n", __func__);
        }
        up_write(&policy->rwsem);
        return ret;
}

void refresh_frequency_limits(struct cpufreq_policy *policy)
{
        if (!policy_is_inactive(policy)) {
                pr_debug("updating policy for CPU %u\n", policy->cpu);

                cpufreq_set_policy(policy, policy->governor, policy->policy);
        }
}
EXPORT_SYMBOL(refresh_frequency_limits);

static void handle_update(struct work_struct *work)
{
        struct cpufreq_policy *policy =
                container_of(work, struct cpufreq_policy, update);

        pr_debug("handle_update for cpu %u called\n", policy->cpu);
        down_write(&policy->rwsem);
        refresh_frequency_limits(policy);
        up_write(&policy->rwsem);
}

static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
                                void *data)
{
        struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min);

        schedule_work(&policy->update);
        return 0;
}

static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq,
                                void *data)
{
        struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max);

        schedule_work(&policy->update);
        return 0;
}

static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
        struct kobject *kobj;
        struct completion *cmp;

        down_write(&policy->rwsem);
        cpufreq_stats_free_table(policy);
        kobj = &policy->kobj;
        cmp = &policy->kobj_unregister;
        up_write(&policy->rwsem);
        kobject_put(kobj);

        /*
         * We need to make sure that the underlying kobj is
         * actually not referenced anymore by anybody before we
         * proceed with unloading.
         */
        pr_debug("waiting for dropping of refcount\n");
        wait_for_completion(cmp);
        pr_debug("wait complete\n");
}

static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        struct device *dev = get_cpu_device(cpu);
        int ret;

        if (!dev)
                return NULL;

        policy = kzalloc(sizeof(*policy), GFP_KERNEL);
        if (!policy)
                return NULL;

        if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
                goto err_free_policy;

        if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
                goto err_free_cpumask;

        if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
                goto err_free_rcpumask;

        ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
                                   cpufreq_global_kobject, "policy%u", cpu);
        if (ret) {
                dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret);
                /*
                 * The entire policy object will be freed below, but the extra
                 * memory allocated for the kobject name needs to be freed by
                 * releasing the kobject.
                 */
                kobject_put(&policy->kobj);
                goto err_free_real_cpus;
        }

        freq_constraints_init(&policy->constraints);

        policy->nb_min.notifier_call = cpufreq_notifier_min;
        policy->nb_max.notifier_call = cpufreq_notifier_max;

        ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
                                    &policy->nb_min);
        if (ret) {
                dev_err(dev, "Failed to register MIN QoS notifier: %d (%*pbl)\n",
                        ret, cpumask_pr_args(policy->cpus));
                goto err_kobj_remove;
        }

        ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
                                    &policy->nb_max);
        if (ret) {
                dev_err(dev, "Failed to register MAX QoS notifier: %d (%*pbl)\n",
                        ret, cpumask_pr_args(policy->cpus));
                goto err_min_qos_notifier;
        }

        INIT_LIST_HEAD(&policy->policy_list);
        init_rwsem(&policy->rwsem);
        spin_lock_init(&policy->transition_lock);
        init_waitqueue_head(&policy->transition_wait);
        init_completion(&policy->kobj_unregister);
        INIT_WORK(&policy->update, handle_update);

        policy->cpu = cpu;
        return policy;

err_min_qos_notifier:
        freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
                                 &policy->nb_min);
err_kobj_remove:
        cpufreq_policy_put_kobj(policy);
err_free_real_cpus:
        free_cpumask_var(policy->real_cpus);
err_free_rcpumask:
        free_cpumask_var(policy->related_cpus);
err_free_cpumask:
        free_cpumask_var(policy->cpus);
err_free_policy:
        kfree(policy);

        return NULL;
}

static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
        unsigned long flags;
        int cpu;

        /* Remove policy from list */
        write_lock_irqsave(&cpufreq_driver_lock, flags);
        list_del(&policy->policy_list);

        for_each_cpu(cpu, policy->related_cpus)
                per_cpu(cpufreq_cpu_data, cpu) = NULL;
        write_unlock_irqrestore(&cpufreq_driver_lock, flags);

        freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
                                 &policy->nb_max);
        freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
                                 &policy->nb_min);

        /* Cancel any pending policy->update work before freeing the policy. */
        cancel_work_sync(&policy->update);

        if (policy->max_freq_req) {
                /*
                 * CPUFREQ_CREATE_POLICY notification is sent only after
                 * successfully adding max_freq_req request.
                 */
                blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                             CPUFREQ_REMOVE_POLICY, policy);
                freq_qos_remove_request(policy->max_freq_req);
        }

        freq_qos_remove_request(policy->min_freq_req);
        kfree(policy->min_freq_req);

        cpufreq_policy_put_kobj(policy);
        free_cpumask_var(policy->real_cpus);
        free_cpumask_var(policy->related_cpus);
        free_cpumask_var(policy->cpus);
        kfree(policy);
}

static int cpufreq_online(unsigned int cpu)
{
        struct cpufreq_policy *policy;
        bool new_policy;
        unsigned long flags;
        unsigned int j;
        int ret;

        pr_debug("%s: bringing CPU%u online\n", __func__, cpu);

        /* Check if this CPU already has a policy to manage it */
        policy = per_cpu(cpufreq_cpu_data, cpu);
        if (policy) {
                WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
                if (!policy_is_inactive(policy))
                        return cpufreq_add_policy_cpu(policy, cpu);

                /* This is the only online CPU for the policy.  Start over. */
                new_policy = false;
                down_write(&policy->rwsem);
                policy->cpu = cpu;
                policy->governor = NULL;
                up_write(&policy->rwsem);
        } else {
                new_policy = true;
                policy = cpufreq_policy_alloc(cpu);
                if (!policy)
                        return -ENOMEM;
        }

        if (!new_policy && cpufreq_driver->online) {
                ret = cpufreq_driver->online(policy);
                if (ret) {
                        pr_debug("%s: %d: initialization failed\n", __func__,
                                 __LINE__);
                        goto out_exit_policy;
                }

                /* Recover policy->cpus using related_cpus */
                cpumask_copy(policy->cpus, policy->related_cpus);
        } else {
                cpumask_copy(policy->cpus, cpumask_of(cpu));

                /*
                 * Call the driver. From then on it must be able to accept all
                 * calls to ->verify and ->setpolicy for this CPU.
                 */
                ret = cpufreq_driver->init(policy);
                if (ret) {
                        pr_debug("%s: %d: initialization failed\n", __func__,
                                 __LINE__);
                        goto out_free_policy;
                }

                ret = cpufreq_table_validate_and_sort(policy);
                if (ret)
                        goto out_exit_policy;

                /* related_cpus should at least include policy->cpus. */
                cpumask_copy(policy->related_cpus, policy->cpus);
        }

        down_write(&policy->rwsem);
        /*
         * The affected CPUs must always be the ones that are online; we are
         * not managing offline CPUs here.
         */
        cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);

        if (new_policy) {
                for_each_cpu(j, policy->related_cpus) {
                        per_cpu(cpufreq_cpu_data, j) = policy;
                        add_cpu_dev_symlink(policy, j);
                }

                policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
                                               GFP_KERNEL);
                if (!policy->min_freq_req)
                        goto out_destroy_policy;

                ret = freq_qos_add_request(&policy->constraints,
                                           policy->min_freq_req, FREQ_QOS_MIN,
                                           policy->min);
                if (ret < 0) {
                        /*
                         * So we don't call freq_qos_remove_request() for an
                         * uninitialized request.
                         */
                        kfree(policy->min_freq_req);
                        policy->min_freq_req = NULL;
                        goto out_destroy_policy;
                }

                /*
                 * This must be initialized right here to avoid calling
                 * freq_qos_remove_request() on uninitialized request in case
                 * of errors.
                 */
                policy->max_freq_req = policy->min_freq_req + 1;

                ret = freq_qos_add_request(&policy->constraints,
                                           policy->max_freq_req, FREQ_QOS_MAX,
                                           policy->max);
                if (ret < 0) {
                        policy->max_freq_req = NULL;
                        goto out_destroy_policy;
                }

                blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                CPUFREQ_CREATE_POLICY, policy);
        }

        if (cpufreq_driver->get && has_target()) {
                policy->cur = cpufreq_driver->get(policy->cpu);
                if (!policy->cur) {
                        pr_err("%s: ->get() failed\n", __func__);
                        goto out_destroy_policy;
                }
        }

        /*
         * Sometimes boot loaders set the CPU frequency to a value outside of
         * the frequency table registered with the cpufreq core. The CPU might
         * be unstable if it has to run at that frequency for a long time, so
         * it is better to set it to a frequency which is specified in the
         * freq-table. Running at an unlisted frequency also makes the cpufreq
         * stats inconsistent, as cpufreq-stats would fail to register because
         * the current frequency of the CPU isn't found in the freq-table.
         *
         * Because we don't want this change to affect the boot process badly,
         * we go for the next freq which is >= policy->cur ('cur' must be set
         * by now, otherwise we will end up setting freq to the lowest entry of
         * the table, as 'cur' is initialized to zero).
         *
         * We pass "policy->cur - 1" as the target frequency, because otherwise
         * __cpufreq_driver_target() would simply fail, as policy->cur would be
         * equal to the target frequency.
         */
        if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
            && has_target()) {
                /* Are we running at unknown frequency ? */
                ret = cpufreq_frequency_table_get_index(policy, policy->cur);
                if (ret == -EINVAL) {
                        /* Warn user and fix it */
                        pr_warn("%s: CPU%d: Running at unlisted freq: %u KHz\n",
                                __func__, policy->cpu, policy->cur);
                        ret = __cpufreq_driver_target(policy, policy->cur - 1,
                                CPUFREQ_RELATION_L);

                        /*
                         * Reaching here within a few seconds of boot does not
                         * mean that the system will remain stable at the
                         * "unknown" frequency for any longer. Hence, a
                         * BUG_ON().
                         */
                        BUG_ON(ret);
                        pr_warn("%s: CPU%d: Unlisted initial frequency changed to: %u KHz\n",
                                __func__, policy->cpu, policy->cur);
                }
        }

        if (new_policy) {
                ret = cpufreq_add_dev_interface(policy);
                if (ret)
                        goto out_destroy_policy;

                cpufreq_stats_create_table(policy);

                write_lock_irqsave(&cpufreq_driver_lock, flags);
                list_add(&policy->policy_list, &cpufreq_policy_list);
                write_unlock_irqrestore(&cpufreq_driver_lock, flags);
        }

        ret = cpufreq_init_policy(policy);
        if (ret) {
                pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
                       __func__, cpu, ret);
                goto out_destroy_policy;
        }

        up_write(&policy->rwsem);

        kobject_uevent(&policy->kobj, KOBJ_ADD);

        /* Callback for the driver to do any remaining setup once the policy is ready */
1498         if (cpufreq_driver->ready)
1499                 cpufreq_driver->ready(policy);
1500
1501         if (cpufreq_thermal_control_enabled(cpufreq_driver))
1502                 policy->cdev = of_cpufreq_cooling_register(policy);
1503
1504         pr_debug("initialization complete\n");
1505
1506         return 0;
1507
1508 out_destroy_policy:
1509         for_each_cpu(j, policy->real_cpus)
1510                 remove_cpu_dev_symlink(policy, get_cpu_device(j));
1511
1512         up_write(&policy->rwsem);
1513
1514 out_exit_policy:
1515         if (cpufreq_driver->exit)
1516                 cpufreq_driver->exit(policy);
1517
1518 out_free_policy:
1519         cpufreq_policy_free(policy);
1520         return ret;
1521 }
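
/*
 * Illustrative aside (not part of the kernel source): the CPUFREQ_RELATION_L
 * lookup used above resolves to the lowest table frequency at or above the
 * target, which is why passing "policy->cur - 1" lands on the nearest listed
 * frequency instead of tripping the "already at target" early return. The
 * real lookup is done by cpufreq_frequency_table_target(); this stand-alone
 * sketch of the selection rule assumes an ascending table:
 */
static unsigned int example_relation_l(const unsigned int *table, int len,
                                       unsigned int target)
{
        int i;

        for (i = 0; i < len; i++)
                if (table[i] >= target)
                        return table[i];        /* lowest freq >= target */

        return table[len - 1];                  /* clamp to the highest */
}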
1522
1523 /**
1524  * cpufreq_add_dev - the cpufreq interface for a CPU device.
1525  * @dev: CPU device.
1526  * @sif: Subsystem interface structure pointer (not used)
1527  */
1528 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1529 {
1530         struct cpufreq_policy *policy;
1531         unsigned cpu = dev->id;
1532         int ret;
1533
1534         dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
1535
1536         if (cpu_online(cpu)) {
1537                 ret = cpufreq_online(cpu);
1538                 if (ret)
1539                         return ret;
1540         }
1541
1542         /* Create sysfs link on CPU registration */
1543         policy = per_cpu(cpufreq_cpu_data, cpu);
1544         if (policy)
1545                 add_cpu_dev_symlink(policy, cpu);
1546
1547         return 0;
1548 }
1549
1550 static int cpufreq_offline(unsigned int cpu)
1551 {
1552         struct cpufreq_policy *policy;
1553         int ret;
1554
1555         pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1556
1557         policy = cpufreq_cpu_get_raw(cpu);
1558         if (!policy) {
1559                 pr_debug("%s: No cpu_data found\n", __func__);
1560                 return 0;
1561         }
1562
1563         down_write(&policy->rwsem);
1564         if (has_target())
1565                 cpufreq_stop_governor(policy);
1566
1567         cpumask_clear_cpu(cpu, policy->cpus);
1568
1569         if (policy_is_inactive(policy)) {
1570                 if (has_target())
1571                         strncpy(policy->last_governor, policy->governor->name,
1572                                 CPUFREQ_NAME_LEN);
1573                 else
1574                         policy->last_policy = policy->policy;
1575         } else if (cpu == policy->cpu) {
1576                 /* Nominate new CPU */
1577                 policy->cpu = cpumask_any(policy->cpus);
1578         }
1579
1580         /* Start governor again for active policy */
1581         if (!policy_is_inactive(policy)) {
1582                 if (has_target()) {
1583                         ret = cpufreq_start_governor(policy);
1584                         if (ret)
1585                                 pr_err("%s: Failed to start governor\n", __func__);
1586                 }
1587
1588                 goto unlock;
1589         }
1590
1591         if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
1592                 cpufreq_cooling_unregister(policy->cdev);
1593                 policy->cdev = NULL;
1594         }
1595
1596         if (cpufreq_driver->stop_cpu)
1597                 cpufreq_driver->stop_cpu(policy);
1598
1599         if (has_target())
1600                 cpufreq_exit_governor(policy);
1601
1602         /*
1603          * Perform the ->offline() during light-weight tear-down, as
1604          * that allows fast recovery when the CPU comes back.
1605          */
1606         if (cpufreq_driver->offline) {
1607                 cpufreq_driver->offline(policy);
1608         } else if (cpufreq_driver->exit) {
1609                 cpufreq_driver->exit(policy);
1610                 policy->freq_table = NULL;
1611         }
1612
1613 unlock:
1614         up_write(&policy->rwsem);
1615         return 0;
1616 }
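
/*
 * Illustrative pairing for the light-weight teardown above (hypothetical
 * "foo_" driver, not part of this file): a driver that provides ->offline()
 * keeps its per-policy resources allocated so that a later ->online() can
 * restore them cheaply, while ->exit() remains the full teardown path.
 */
static int foo_online(struct cpufreq_policy *policy)
{
        /* re-arm the hardware state parked by foo_offline() */
        return 0;
}

static int foo_offline(struct cpufreq_policy *policy)
{
        /* quiesce the hardware but keep driver data for fast re-online */
        return 0;
}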
1617
1618 /*
1619  * cpufreq_remove_dev - remove a CPU device
1620  *
1621  * Removes the cpufreq interface for a CPU device.
1622  */
1623 static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1624 {
1625         unsigned int cpu = dev->id;
1626         struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1627
1628         if (!policy)
1629                 return;
1630
1631         if (cpu_online(cpu))
1632                 cpufreq_offline(cpu);
1633
1634         cpumask_clear_cpu(cpu, policy->real_cpus);
1635         remove_cpu_dev_symlink(policy, dev);
1636
1637         if (cpumask_empty(policy->real_cpus)) {
1638                 /* We did light-weight exit earlier, do full tear down now */
1639                 if (cpufreq_driver->offline)
1640                         cpufreq_driver->exit(policy);
1641
1642                 cpufreq_policy_free(policy);
1643         }
1644 }
1645
1646 /**
1647  *      cpufreq_out_of_sync - If the actual and the saved CPU frequencies
1648  *      differ, we're in deep trouble.
1649  *      @policy: policy managing CPUs
1650  *      @new_freq: CPU frequency the CPU actually runs at
1651  *
1652  *      We adjust to the current frequency first, and need to clean up later.
1653  *      So either call cpufreq_update_policy() or schedule handle_update().
1654  */
1655 static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1656                                 unsigned int new_freq)
1657 {
1658         struct cpufreq_freqs freqs;
1659
1660         pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1661                  policy->cur, new_freq);
1662
1663         freqs.old = policy->cur;
1664         freqs.new = new_freq;
1665
1666         cpufreq_freq_transition_begin(policy, &freqs);
1667         cpufreq_freq_transition_end(policy, &freqs, 0);
1668 }
1669
1670 static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, bool update)
1671 {
1672         unsigned int new_freq;
1673
1674         new_freq = cpufreq_driver->get(policy->cpu);
1675         if (!new_freq)
1676                 return 0;
1677
1678         /*
1679          * If fast frequency switching is used with the given policy, the check
1680          * against policy->cur is pointless, so skip it in that case.
1681          */
1682         if (policy->fast_switch_enabled || !has_target())
1683                 return new_freq;
1684
1685         if (policy->cur != new_freq) {
1686                 cpufreq_out_of_sync(policy, new_freq);
1687                 if (update)
1688                         schedule_work(&policy->update);
1689         }
1690
1691         return new_freq;
1692 }
1693
1694 /**
1695  * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1696  * @cpu: CPU number
1697  *
1698  * This is the last known freq, without actually getting it from the driver.
1699  * The return value is the same as what is shown in scaling_cur_freq in sysfs.
1700  */
1701 unsigned int cpufreq_quick_get(unsigned int cpu)
1702 {
1703         struct cpufreq_policy *policy;
1704         unsigned int ret_freq = 0;
1705         unsigned long flags;
1706
1707         read_lock_irqsave(&cpufreq_driver_lock, flags);
1708
1709         if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
1710                 ret_freq = cpufreq_driver->get(cpu);
1711                 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1712                 return ret_freq;
1713         }
1714
1715         read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1716
1717         policy = cpufreq_cpu_get(cpu);
1718         if (policy) {
1719                 ret_freq = policy->cur;
1720                 cpufreq_cpu_put(policy);
1721         }
1722
1723         return ret_freq;
1724 }
1725 EXPORT_SYMBOL(cpufreq_quick_get);
1726
1727 /**
1728  * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1729  * @cpu: CPU number
1730  *
1731  * Just return the max possible frequency for a given CPU.
1732  */
1733 unsigned int cpufreq_quick_get_max(unsigned int cpu)
1734 {
1735         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1736         unsigned int ret_freq = 0;
1737
1738         if (policy) {
1739                 ret_freq = policy->max;
1740                 cpufreq_cpu_put(policy);
1741         }
1742
1743         return ret_freq;
1744 }
1745 EXPORT_SYMBOL(cpufreq_quick_get_max);
1746
1747 /**
1748  * cpufreq_get_hw_max_freq - get the max hardware frequency of the CPU
1749  * @cpu: CPU number
1750  *
1751  * The default return value is the max_freq field of cpuinfo.
1752  */
1753 __weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
1754 {
1755         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1756         unsigned int ret_freq = 0;
1757
1758         if (policy) {
1759                 ret_freq = policy->cpuinfo.max_freq;
1760                 cpufreq_cpu_put(policy);
1761         }
1762
1763         return ret_freq;
1764 }
1765 EXPORT_SYMBOL(cpufreq_get_hw_max_freq);
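
/*
 * Because the default above is __weak, architecture code may provide a
 * stronger definition. A hypothetical override (illustrative only, guarded
 * so it is never compiled here) could report a firmware-provided ceiling
 * instead of cpuinfo.max_freq:
 */
#ifdef EXAMPLE_ARCH_OVERRIDE    /* hypothetical guard, never defined */
unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
{
        return 2800000;         /* kHz, assumed firmware boost ceiling */
}
#endif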
1766
1767 static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1768 {
1769         if (unlikely(policy_is_inactive(policy)))
1770                 return 0;
1771
1772         return cpufreq_verify_current_freq(policy, true);
1773 }
1774
1775 /**
1776  * cpufreq_get - get the current CPU frequency (in kHz)
1777  * @cpu: CPU number
1778  *
1779  * Get the current frequency of the CPU.
1780  */
1781 unsigned int cpufreq_get(unsigned int cpu)
1782 {
1783         struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1784         unsigned int ret_freq = 0;
1785
1786         if (policy) {
1787                 down_read(&policy->rwsem);
1788                 if (cpufreq_driver->get)
1789                         ret_freq = __cpufreq_get(policy);
1790                 up_read(&policy->rwsem);
1791
1792                 cpufreq_cpu_put(policy);
1793         }
1794
1795         return ret_freq;
1796 }
1797 EXPORT_SYMBOL(cpufreq_get);
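
/*
 * Usage contrast (illustrative, hypothetical helper): cpufreq_quick_get()
 * above returns the last known policy->cur, while cpufreq_get() queries the
 * driver and may trigger the out-of-sync fixup, so it takes policy->rwsem
 * and must not be used from atomic context.
 */
static void example_report_freq(unsigned int cpu)
{
        unsigned int cached = cpufreq_quick_get(cpu);   /* last known */
        unsigned int actual = cpufreq_get(cpu);         /* may block */

        pr_info("cpu%u: cached %u kHz, hardware %u kHz\n",
                cpu, cached, actual);
}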
1798
1799 static struct subsys_interface cpufreq_interface = {
1800         .name           = "cpufreq",
1801         .subsys         = &cpu_subsys,
1802         .add_dev        = cpufreq_add_dev,
1803         .remove_dev     = cpufreq_remove_dev,
1804 };
1805
1806 /*
1807  * In case the platform wants some specific frequency to be configured
1808  * during suspend.
1809  */
1810 int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1811 {
1812         int ret;
1813
1814         if (!policy->suspend_freq) {
1815                 pr_debug("%s: suspend_freq not defined\n", __func__);
1816                 return 0;
1817         }
1818
1819         pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1820                         policy->suspend_freq);
1821
1822         ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1823                         CPUFREQ_RELATION_H);
1824         if (ret)
1825                 pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1826                                 __func__, policy->suspend_freq, ret);
1827
1828         return ret;
1829 }
1830 EXPORT_SYMBOL(cpufreq_generic_suspend);
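
/*
 * Illustrative wiring for the generic helper above (hypothetical "foo_"
 * names): a platform that must enter suspend at a known-safe frequency sets
 * policy->suspend_freq from its ->init() callback and points the driver's
 * ->suspend hook at cpufreq_generic_suspend.
 */
static int foo_init_suspend_freq(struct cpufreq_policy *policy)
{
        policy->suspend_freq = 800000;  /* kHz, assumed safe entry freq */
        return 0;
}

/* in the driver definition:  .suspend = cpufreq_generic_suspend,  */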
1831
1832 /**
1833  * cpufreq_suspend() - Suspend CPUFreq governors
1834  *
1835  * Called during system-wide suspend/hibernate cycles to suspend governors,
1836  * as some platforms can't change frequency after this point in the suspend
1837  * cycle: the devices they use for changing frequency (i2c, regulators,
1838  * etc.) are themselves suspended shortly afterwards.
1839  */
1840 void cpufreq_suspend(void)
1841 {
1842         struct cpufreq_policy *policy;
1843
1844         if (!cpufreq_driver)
1845                 return;
1846
1847         if (!has_target() && !cpufreq_driver->suspend)
1848                 goto suspend;
1849
1850         pr_debug("%s: Suspending Governors\n", __func__);
1851
1852         for_each_active_policy(policy) {
1853                 if (has_target()) {
1854                         down_write(&policy->rwsem);
1855                         cpufreq_stop_governor(policy);
1856                         up_write(&policy->rwsem);
1857                 }
1858
1859                 if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
1860                         pr_err("%s: Failed to suspend driver: %s\n", __func__,
1861                                 cpufreq_driver->name);
1862         }
1863
1864 suspend:
1865         cpufreq_suspended = true;
1866 }
1867
1868 /**
1869  * cpufreq_resume() - Resume CPUFreq governors
1870  *
1871  * Called during system-wide suspend/hibernate cycles to resume governors
1872  * that were suspended by cpufreq_suspend().
1873  */
1874 void cpufreq_resume(void)
1875 {
1876         struct cpufreq_policy *policy;
1877         int ret;
1878
1879         if (!cpufreq_driver)
1880                 return;
1881
1882         if (unlikely(!cpufreq_suspended))
1883                 return;
1884
1885         cpufreq_suspended = false;
1886
1887         if (!has_target() && !cpufreq_driver->resume)
1888                 return;
1889
1890         pr_debug("%s: Resuming Governors\n", __func__);
1891
1892         for_each_active_policy(policy) {
1893                 if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
1894                         pr_err("%s: Failed to resume driver: %p\n", __func__,
1895                                 policy);
1896                 } else if (has_target()) {
1897                         down_write(&policy->rwsem);
1898                         ret = cpufreq_start_governor(policy);
1899                         up_write(&policy->rwsem);
1900
1901                         if (ret)
1902                                 pr_err("%s: Failed to start governor for policy: %p\n",
1903                                        __func__, policy);
1904                 }
1905         }
1906 }
1907
1908 /**
1909  *      cpufreq_get_current_driver - return current driver's name
1910  *
1911  *      Return the name string of the currently loaded cpufreq driver
1912  *      or NULL, if none.
1913  */
1914 const char *cpufreq_get_current_driver(void)
1915 {
1916         if (cpufreq_driver)
1917                 return cpufreq_driver->name;
1918
1919         return NULL;
1920 }
1921 EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1922
1923 /**
1924  *      cpufreq_get_driver_data - return current driver data
1925  *
1926  *      Return the private data of the currently loaded cpufreq
1927  *      driver, or NULL if no cpufreq driver is loaded.
1928  */
1929 void *cpufreq_get_driver_data(void)
1930 {
1931         if (cpufreq_driver)
1932                 return cpufreq_driver->driver_data;
1933
1934         return NULL;
1935 }
1936 EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1937
1938 /*********************************************************************
1939  *                     NOTIFIER LISTS INTERFACE                      *
1940  *********************************************************************/
1941
1942 /**
1943  *      cpufreq_register_notifier - register a driver with cpufreq
1944  *      @nb: notifier function to register
1945  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1946  *
1947  *      Add a driver to one of two lists: either a list of drivers that
1948  *      are notified about clock rate changes (once before and once after
1949  *      the transition), or a list of drivers that are notified about
1950  *      changes in cpufreq policy.
1951  *
1952  *      This function may sleep, and has the same return conditions as
1953  *      blocking_notifier_chain_register.
1954  */
1955 int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
1956 {
1957         int ret;
1958
1959         if (cpufreq_disabled())
1960                 return -EINVAL;
1961
1962         switch (list) {
1963         case CPUFREQ_TRANSITION_NOTIFIER:
1964                 mutex_lock(&cpufreq_fast_switch_lock);
1965
1966                 if (cpufreq_fast_switch_count > 0) {
1967                         mutex_unlock(&cpufreq_fast_switch_lock);
1968                         return -EBUSY;
1969                 }
1970                 ret = srcu_notifier_chain_register(
1971                                 &cpufreq_transition_notifier_list, nb);
1972                 if (!ret)
1973                         cpufreq_fast_switch_count--;
1974
1975                 mutex_unlock(&cpufreq_fast_switch_lock);
1976                 break;
1977         case CPUFREQ_POLICY_NOTIFIER:
1978                 ret = blocking_notifier_chain_register(
1979                                 &cpufreq_policy_notifier_list, nb);
1980                 break;
1981         default:
1982                 ret = -EINVAL;
1983         }
1984
1985         return ret;
1986 }
1987 EXPORT_SYMBOL(cpufreq_register_notifier);
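
/*
 * Usage sketch (illustrative, hypothetical "foo_" names): a transition
 * notifier sees every frequency change twice, once with CPUFREQ_PRECHANGE
 * and once with CPUFREQ_POSTCHANGE, carrying a struct cpufreq_freqs payload.
 */
static int foo_transition_notify(struct notifier_block *nb,
                                 unsigned long action, void *data)
{
        struct cpufreq_freqs *freqs = data;

        if (action == CPUFREQ_POSTCHANGE)
                pr_debug("freq changed: %u -> %u kHz\n",
                         freqs->old, freqs->new);

        return NOTIFY_OK;
}

static struct notifier_block foo_transition_nb = {
        .notifier_call = foo_transition_notify,
};

/* cpufreq_register_notifier(&foo_transition_nb, CPUFREQ_TRANSITION_NOTIFIER); */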
1988
1989 /**
1990  *      cpufreq_unregister_notifier - unregister a driver with cpufreq
1991  *      @nb: notifier block to be unregistered
1992  *      @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER
1993  *
1994  *      Remove a driver from the CPU frequency notifier list.
1995  *
1996  *      This function may sleep, and has the same return conditions as
1997  *      blocking_notifier_chain_unregister.
1998  */
1999 int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
2000 {
2001         int ret;
2002
2003         if (cpufreq_disabled())
2004                 return -EINVAL;
2005
2006         switch (list) {
2007         case CPUFREQ_TRANSITION_NOTIFIER:
2008                 mutex_lock(&cpufreq_fast_switch_lock);
2009
2010                 ret = srcu_notifier_chain_unregister(
2011                                 &cpufreq_transition_notifier_list, nb);
2012                 if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
2013                         cpufreq_fast_switch_count++;
2014
2015                 mutex_unlock(&cpufreq_fast_switch_lock);
2016                 break;
2017         case CPUFREQ_POLICY_NOTIFIER:
2018                 ret = blocking_notifier_chain_unregister(
2019                                 &cpufreq_policy_notifier_list, nb);
2020                 break;
2021         default:
2022                 ret = -EINVAL;
2023         }
2024
2025         return ret;
2026 }
2027 EXPORT_SYMBOL(cpufreq_unregister_notifier);
2028
2029
2030 /*********************************************************************
2031  *                              GOVERNORS                            *
2032  *********************************************************************/
2033
2034 /**
2035  * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
2036  * @policy: cpufreq policy to switch the frequency for.
2037  * @target_freq: New frequency to set (may be approximate).
2038  *
2039  * Carry out a fast frequency switch without sleeping.
2040  *
2041  * The driver's ->fast_switch() callback invoked by this function must be
2042  * suitable for being called from within RCU-sched read-side critical sections
2043  * and it is expected to select the minimum available frequency greater than or
2044  * equal to @target_freq (CPUFREQ_RELATION_L).
2045  *
2046  * This function must not be called if policy->fast_switch_enabled is unset.
2047  *
2048  * Governors calling this function must guarantee that it will never be invoked
2049  * twice in parallel for the same policy and that it will never be called in
2050  * parallel with either ->target() or ->target_index() for the same policy.
2051  *
2052  * Returns the actual frequency set for the CPU.
2053  *
2054  * If 0 is returned by the driver's ->fast_switch() callback to indicate an
2055  * error condition, the hardware configuration must be preserved.
2056  */
2057 unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
2058                                         unsigned int target_freq)
2059 {
2060         target_freq = clamp_val(target_freq, policy->min, policy->max);
2061
2062         return cpufreq_driver->fast_switch(policy, target_freq);
2063 }
2064 EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
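
/*
 * Illustrative shape of a driver's ->fast_switch() (hypothetical "foo_"
 * name): it runs in scheduler context, so it must not sleep or take sleeping
 * locks; it returns the frequency actually set, or 0 to signal that the
 * hardware configuration was left untouched.
 */
static unsigned int foo_fast_switch(struct cpufreq_policy *policy,
                                    unsigned int target_freq)
{
        /* e.g. a single MMIO/MSR write that hardware resolves on its own */
        return target_freq;
}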
2065
2066 /* Must set freqs->new to intermediate frequency */
2067 static int __target_intermediate(struct cpufreq_policy *policy,
2068                                  struct cpufreq_freqs *freqs, int index)
2069 {
2070         int ret;
2071
2072         freqs->new = cpufreq_driver->get_intermediate(policy, index);
2073
2074         /* We don't need to switch to intermediate freq */
2075         if (!freqs->new)
2076                 return 0;
2077
2078         pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
2079                  __func__, policy->cpu, freqs->old, freqs->new);
2080
2081         cpufreq_freq_transition_begin(policy, freqs);
2082         ret = cpufreq_driver->target_intermediate(policy, index);
2083         cpufreq_freq_transition_end(policy, freqs, ret);
2084
2085         if (ret)
2086                 pr_err("%s: Failed to change to intermediate frequency: %d\n",
2087                        __func__, ret);
2088
2089         return ret;
2090 }
2091
2092 static int __target_index(struct cpufreq_policy *policy, int index)
2093 {
2094         struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
2095         unsigned int intermediate_freq = 0;
2096         unsigned int newfreq = policy->freq_table[index].frequency;
2097         int retval = -EINVAL;
2098         bool notify;
2099
2100         if (newfreq == policy->cur)
2101                 return 0;
2102
2103         notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
2104         if (notify) {
2105                 /* Handle switching to intermediate frequency */
2106                 if (cpufreq_driver->get_intermediate) {
2107                         retval = __target_intermediate(policy, &freqs, index);
2108                         if (retval)
2109                                 return retval;
2110
2111                         intermediate_freq = freqs.new;
2112                         /* Set old freq to intermediate */
2113                         if (intermediate_freq)
2114                                 freqs.old = freqs.new;
2115                 }
2116
2117                 freqs.new = newfreq;
2118                 pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
2119                          __func__, policy->cpu, freqs.old, freqs.new);
2120
2121                 cpufreq_freq_transition_begin(policy, &freqs);
2122         }
2123
2124         retval = cpufreq_driver->target_index(policy, index);
2125         if (retval)
2126                 pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
2127                        retval);
2128
2129         if (notify) {
2130                 cpufreq_freq_transition_end(policy, &freqs, retval);
2131
2132                 /*
2133                  * Failed after setting to intermediate freq? The driver should
2134                  * have reverted to the initial frequency and so should we. Check
2135                  * for intermediate_freq here instead of get_intermediate, in
2136                  * case we never switched to the intermediate freq at all.
2137                  */
2138                 if (unlikely(retval && intermediate_freq)) {
2139                         freqs.old = intermediate_freq;
2140                         freqs.new = policy->restore_freq;
2141                         cpufreq_freq_transition_begin(policy, &freqs);
2142                         cpufreq_freq_transition_end(policy, &freqs, 0);
2143                 }
2144         }
2145
2146         return retval;
2147 }
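
/*
 * Illustrative callback pair for the intermediate-frequency path above
 * (hypothetical "foo_" names): platforms that must park the CPU on a stable
 * parent clock while a PLL is reprogrammed provide both hooks; returning 0
 * from ->get_intermediate() skips the extra hop, matching the early return
 * in __target_intermediate().
 */
static unsigned int foo_get_intermediate(struct cpufreq_policy *policy,
                                         unsigned int index)
{
        return 500000;          /* kHz, assumed fixed safe parent rate */
}

static int foo_target_intermediate(struct cpufreq_policy *policy,
                                   unsigned int index)
{
        /* switch the CPU mux to the safe parent before touching the PLL */
        return 0;
}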
2148
2149 int __cpufreq_driver_target(struct cpufreq_policy *policy,
2150                             unsigned int target_freq,
2151                             unsigned int relation)
2152 {
2153         unsigned int old_target_freq = target_freq;
2154         int index;
2155
2156         if (cpufreq_disabled())
2157                 return -ENODEV;
2158
2159         /* Make sure that target_freq is within supported range */
2160         target_freq = clamp_val(target_freq, policy->min, policy->max);
2161
2162         pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
2163                  policy->cpu, target_freq, relation, old_target_freq);
2164
2165         /*
2166          * This might look like a redundant check, as we test the same thing
2167          * again after finding the index. But it is kept intentionally, so
2168          * that when exactly the same frequency is requested again we save a
2169          * few function calls.
2170          */
2171         if (target_freq == policy->cur)
2172                 return 0;
2173
2174         /* Save last value to restore later on errors */
2175         policy->restore_freq = policy->cur;
2176
2177         if (cpufreq_driver->target)
2178                 return cpufreq_driver->target(policy, target_freq, relation);
2179
2180         if (!cpufreq_driver->target_index)
2181                 return -EINVAL;
2182
2183         index = cpufreq_frequency_table_target(policy, target_freq, relation);
2184
2185         return __target_index(policy, index);
2186 }
2187 EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
2188
2189 int cpufreq_driver_target(struct cpufreq_policy *policy,
2190                           unsigned int target_freq,
2191                           unsigned int relation)
2192 {
2193         int ret;
2194
2195         down_write(&policy->rwsem);
2196
2197         ret = __cpufreq_driver_target(policy, target_freq, relation);
2198
2199         up_write(&policy->rwsem);
2200
2201         return ret;
2202 }
2203 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
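
/*
 * Usage sketch (illustrative, hypothetical helper): callers outside the
 * governor fast paths go through the locked wrapper above. CPUFREQ_RELATION_L
 * requests the lowest table frequency at or above the target, and
 * CPUFREQ_RELATION_H the highest one at or below it.
 */
static int example_request_1200mhz(struct cpufreq_policy *policy)
{
        /* lowest listed frequency at or above 1.2 GHz */
        return cpufreq_driver_target(policy, 1200000, CPUFREQ_RELATION_L);
}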
2204
2205 __weak struct cpufreq_governor *cpufreq_fallback_governor(void)
2206 {
2207         return NULL;
2208 }
2209
2210 static int cpufreq_init_governor(struct cpufreq_policy *policy)
2211 {
2212         int ret;
2213
2214         /* Don't start any governor operations if we are entering suspend */
2215         if (cpufreq_suspended)
2216                 return 0;
2217         /*
2218          * The governor might not be initialized here if an ACPI _PPC change
2219          * notification happened, so check it.
2220          */
2221         if (!policy->governor)
2222                 return -EINVAL;
2223
2224         /* Platform doesn't want dynamic frequency switching? */
2225         if (policy->governor->dynamic_switching &&
2226             cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
2227                 struct cpufreq_governor *gov = cpufreq_fallback_governor();
2228
2229                 if (gov) {
2230                         pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
2231                                 policy->governor->name, gov->name);
2232                         policy->governor = gov;
2233                 } else {
2234                         return -EINVAL;
2235                 }
2236         }
2237
2238         if (!try_module_get(policy->governor->owner))
2239                 return -EINVAL;
2240
2241         pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2242
2243         if (policy->governor->init) {
2244                 ret = policy->governor->init(policy);
2245                 if (ret) {
2246                         module_put(policy->governor->owner);
2247                         return ret;
2248                 }
2249         }
2250
2251         return 0;
2252 }
2253
2254 static void cpufreq_exit_governor(struct cpufreq_policy *policy)
2255 {
2256         if (cpufreq_suspended || !policy->governor)
2257                 return;
2258
2259         pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2260
2261         if (policy->governor->exit)
2262                 policy->governor->exit(policy);
2263
2264         module_put(policy->governor->owner);
2265 }
2266
2267 int cpufreq_start_governor(struct cpufreq_policy *policy)
2268 {
2269         int ret;
2270
2271         if (cpufreq_suspended)
2272                 return 0;
2273
2274         if (!policy->governor)
2275                 return -EINVAL;
2276
2277         pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2278
2279         if (cpufreq_driver->get)
2280                 cpufreq_verify_current_freq(policy, false);
2281
2282         if (policy->governor->start) {
2283                 ret = policy->governor->start(policy);
2284                 if (ret)
2285                         return ret;
2286         }
2287
2288         if (policy->governor->limits)
2289                 policy->governor->limits(policy);
2290
2291         return 0;
2292 }
2293
2294 void cpufreq_stop_governor(struct cpufreq_policy *policy)
2295 {
2296         if (cpufreq_suspended || !policy->governor)
2297                 return;
2298
2299         pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2300
2301         if (policy->governor->stop)
2302                 policy->governor->stop(policy);
2303 }
2304
2305 static void cpufreq_governor_limits(struct cpufreq_policy *policy)
2306 {
2307         if (cpufreq_suspended || !policy->governor)
2308                 return;
2309
2310         pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2311
2312         if (policy->governor->limits)
2313                 policy->governor->limits(policy);
2314 }
2315
2316 int cpufreq_register_governor(struct cpufreq_governor *governor)
2317 {
2318         int err;
2319
2320         if (!governor)
2321                 return -EINVAL;
2322
2323         if (cpufreq_disabled())
2324                 return -ENODEV;
2325
2326         mutex_lock(&cpufreq_governor_mutex);
2327
2328         err = -EBUSY;
2329         if (!find_governor(governor->name)) {
2330                 err = 0;
2331                 list_add(&governor->governor_list, &cpufreq_governor_list);
2332         }
2333
2334         mutex_unlock(&cpufreq_governor_mutex);
2335         return err;
2336 }
2337 EXPORT_SYMBOL_GPL(cpufreq_register_governor);
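
/*
 * Minimal governor skeleton (illustrative, hypothetical "foo" names): the
 * hooks below mirror the calls made by cpufreq_init_governor(),
 * cpufreq_start_governor() and friends above; unused hooks may be left NULL.
 */
static int foo_gov_start(struct cpufreq_policy *policy)
{
        /* arm timers, kthreads or scheduler hooks here */
        return 0;
}

static void foo_gov_stop(struct cpufreq_policy *policy)
{
        /* quiesce whatever foo_gov_start() armed */
}

static void foo_gov_limits(struct cpufreq_policy *policy)
{
        /* re-evaluate the target against the new policy->min/max */
}

static struct cpufreq_governor foo_governor = {
        .name   = "foo",
        .owner  = THIS_MODULE,
        .start  = foo_gov_start,
        .stop   = foo_gov_stop,
        .limits = foo_gov_limits,
};

/* module init/exit would call cpufreq_register_governor(&foo_governor)
 * and cpufreq_unregister_governor(&foo_governor) respectively. */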
2338
2339 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2340 {
2341         struct cpufreq_policy *policy;
2342         unsigned long flags;
2343
2344         if (!governor)
2345                 return;
2346
2347         if (cpufreq_disabled())
2348                 return;
2349
2350         /* clear last_governor for all inactive policies */
2351         read_lock_irqsave(&cpufreq_driver_lock, flags);
2352         for_each_inactive_policy(policy) {
2353                 if (!strcmp(policy->last_governor, governor->name)) {
2354                         policy->governor = NULL;
2355                         strcpy(policy->last_governor, "\0");
2356                 }
2357         }
2358         read_unlock_irqrestore(&cpufreq_driver_lock, flags);
2359
2360         mutex_lock(&cpufreq_governor_mutex);
2361         list_del(&governor->governor_list);
2362         mutex_unlock(&cpufreq_governor_mutex);
2363 }
2364 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2365
2366
2367 /*********************************************************************
2368  *                          POLICY INTERFACE                         *
2369  *********************************************************************/
2370
2371 /**
2372  * cpufreq_get_policy - get the current cpufreq_policy
2373  * @policy: struct cpufreq_policy into which the current cpufreq_policy
2374  *      is written
2375  * @cpu: CPU to find the policy for
2376  *
2377  * Reads the current cpufreq policy.
2378  */
2379 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2380 {
2381         struct cpufreq_policy *cpu_policy;
2382         if (!policy)
2383                 return -EINVAL;
2384
2385         cpu_policy = cpufreq_cpu_get(cpu);
2386         if (!cpu_policy)
2387                 return -EINVAL;
2388
2389         memcpy(policy, cpu_policy, sizeof(*policy));
2390
2391         cpufreq_cpu_put(cpu_policy);
2392         return 0;
2393 }
2394 EXPORT_SYMBOL(cpufreq_get_policy);
2395
2396 /**
2397  * cpufreq_set_policy - Modify cpufreq policy parameters.
2398  * @policy: Policy object to modify.
2399  * @new_gov: Policy governor pointer.
2400  * @new_pol: Policy value (for drivers with built-in governors).
2401  *
2402  * Invoke the cpufreq driver's ->verify() callback to sanity-check the frequency
2403  * limits to be set for the policy, update @policy with the verified limits
2404  * values and either invoke the driver's ->setpolicy() callback (if present) or
2405  * carry out a governor update for @policy.  That is, run the current governor's
2406  * ->limits() callback (if @new_gov points to the same object as the one in
2407  * @policy) or replace the governor for @policy with @new_gov.
2408  *
2409  * The cpuinfo part of @policy is not updated by this function.
2410  */
2411 static int cpufreq_set_policy(struct cpufreq_policy *policy,
2412                               struct cpufreq_governor *new_gov,
2413                               unsigned int new_pol)
2414 {
2415         struct cpufreq_policy_data new_data;
2416         struct cpufreq_governor *old_gov;
2417         int ret;
2418
2419         memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2420         new_data.freq_table = policy->freq_table;
2421         new_data.cpu = policy->cpu;
2422         /*
2423          * The PM QoS framework collects all the requests from users and
2424          * provides us with the final aggregated value here.
2425          */
2426         new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
2427         new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
2428
2429         pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2430                  new_data.cpu, new_data.min, new_data.max);
2431
2432         /*
2433          * Verify that the CPU speed can be set within these limits and make sure
2434          * that min <= max.
2435          */
2436         ret = cpufreq_driver->verify(&new_data);
2437         if (ret)
2438                 return ret;
2439
2440         policy->min = new_data.min;
2441         policy->max = new_data.max;
2442         trace_cpu_frequency_limits(policy);
2443
2444         policy->cached_target_freq = UINT_MAX;
2445
2446         pr_debug("new min and max freqs are %u - %u kHz\n",
2447                  policy->min, policy->max);
2448
2449         if (cpufreq_driver->setpolicy) {
2450                 policy->policy = new_pol;
2451                 pr_debug("setting range\n");
2452                 return cpufreq_driver->setpolicy(policy);
2453         }
2454
2455         if (new_gov == policy->governor) {
2456                 pr_debug("governor limits update\n");
2457                 cpufreq_governor_limits(policy);
2458                 return 0;
2459         }
2460
2461         pr_debug("governor switch\n");
2462
2463         /* save old, working values */
2464         old_gov = policy->governor;
2465         /* end old governor */
2466         if (old_gov) {
2467                 cpufreq_stop_governor(policy);
2468                 cpufreq_exit_governor(policy);
2469         }
2470
2471         /* start new governor */
2472         policy->governor = new_gov;
2473         ret = cpufreq_init_governor(policy);
2474         if (!ret) {
2475                 ret = cpufreq_start_governor(policy);
2476                 if (!ret) {
2477                         pr_debug("governor change\n");
2478                         sched_cpufreq_governor_change(policy, old_gov);
2479                         return 0;
2480                 }
2481                 cpufreq_exit_governor(policy);
2482         }
2483
2484         /* new governor failed, so re-start old one */
2485         pr_debug("starting governor %s failed\n", policy->governor->name);
2486         if (old_gov) {
2487                 policy->governor = old_gov;
2488                 if (cpufreq_init_governor(policy))
2489                         policy->governor = NULL;
2490                 else
2491                         cpufreq_start_governor(policy);
2492         }
2493
2494         return ret;
2495 }
2496
2497 /**
2498  * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
2499  * @cpu: CPU to re-evaluate the policy for.
2500  *
2501  * Update the current frequency for the cpufreq policy of @cpu and use
2502  * cpufreq_set_policy() to re-apply the min and max limits, which triggers the
2503  * evaluation of policy notifiers and the cpufreq driver's ->verify() callback
2504  * for the policy in question, among other things.
2505  */
2506 void cpufreq_update_policy(unsigned int cpu)
2507 {
2508         struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
2509
2510         if (!policy)
2511                 return;
2512
2513         /*
2514          * BIOS might change freq behind our back
2515          * -> ask driver for current freq and notify governors about a change
2516          */
2517         if (cpufreq_driver->get && has_target() &&
2518             (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
2519                 goto unlock;
2520
2521         refresh_frequency_limits(policy);
2522
2523 unlock:
2524         cpufreq_cpu_release(policy);
2525 }
2526 EXPORT_SYMBOL(cpufreq_update_policy);
2527
2528 /**
2529  * cpufreq_update_limits - Update policy limits for a given CPU.
2530  * @cpu: CPU to update the policy limits for.
2531  *
2532  * Invoke the driver's ->update_limits callback if present or call
2533  * cpufreq_update_policy() for @cpu.
2534  */
2535 void cpufreq_update_limits(unsigned int cpu)
2536 {
2537         if (cpufreq_driver->update_limits)
2538                 cpufreq_driver->update_limits(cpu);
2539         else
2540                 cpufreq_update_policy(cpu);
2541 }
2542 EXPORT_SYMBOL_GPL(cpufreq_update_limits);
2543
2544 /*********************************************************************
2545  *               BOOST                                               *
2546  *********************************************************************/
2547 static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
2548 {
2549         int ret;
2550
2551         if (!policy->freq_table)
2552                 return -ENXIO;
2553
2554         ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
2555         if (ret) {
2556                 pr_err("%s: Policy frequency update failed\n", __func__);
2557                 return ret;
2558         }
2559
2560         ret = freq_qos_update_request(policy->max_freq_req, policy->max);
2561         if (ret < 0)
2562                 return ret;
2563
2564         return 0;
2565 }
2566
2567 int cpufreq_boost_trigger_state(int state)
2568 {
2569         struct cpufreq_policy *policy;
2570         unsigned long flags;
2571         int ret = 0;
2572
2573         if (cpufreq_driver->boost_enabled == state)
2574                 return 0;
2575
2576         write_lock_irqsave(&cpufreq_driver_lock, flags);
2577         cpufreq_driver->boost_enabled = state;
2578         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2579
2580         get_online_cpus();
2581         for_each_active_policy(policy) {
2582                 ret = cpufreq_driver->set_boost(policy, state);
2583                 if (ret)
2584                         goto err_reset_state;
2585         }
2586         put_online_cpus();
2587
2588         return 0;
2589
2590 err_reset_state:
2591         put_online_cpus();
2592
2593         write_lock_irqsave(&cpufreq_driver_lock, flags);
2594         cpufreq_driver->boost_enabled = !state;
2595         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2596
2597         pr_err("%s: Cannot %s BOOST\n",
2598                __func__, state ? "enable" : "disable");
2599
2600         return ret;
2601 }
2602
2603 static bool cpufreq_boost_supported(void)
2604 {
2605         return cpufreq_driver->set_boost;
2606 }
2607
2608 static int create_boost_sysfs_file(void)
2609 {
2610         int ret;
2611
2612         ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
2613         if (ret)
2614                 pr_err("%s: cannot register global BOOST sysfs file\n",
2615                        __func__);
2616
2617         return ret;
2618 }
2619
2620 static void remove_boost_sysfs_file(void)
2621 {
2622         if (cpufreq_boost_supported())
2623                 sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
2624 }
2625
2626 int cpufreq_enable_boost_support(void)
2627 {
2628         if (!cpufreq_driver)
2629                 return -EINVAL;
2630
2631         if (cpufreq_boost_supported())
2632                 return 0;
2633
2634         cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2635
2636         /* This will get removed on driver unregister */
2637         return create_boost_sysfs_file();
2638 }
2639 EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
2640
2641 int cpufreq_boost_enabled(void)
2642 {
2643         return cpufreq_driver->boost_enabled;
2644 }
2645 EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
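
/*
 * Illustrative use of the boost plumbing (hypothetical "foo_" name): a
 * driver whose frequency table carries CPUFREQ_BOOST_FREQ entries but has no
 * hardware boost knob enables the software fallback, which installs
 * cpufreq_boost_set_sw() as ->set_boost and creates the global sysfs file.
 */
static int foo_init_with_boost(struct cpufreq_policy *policy)
{
        int ret = cpufreq_enable_boost_support();

        if (ret)
                return ret;

        /* remaining table/latency setup would follow here */
        return 0;
}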
2646
2647 /*********************************************************************
2648  *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
2649  *********************************************************************/
2650 static enum cpuhp_state hp_online;
2651
2652 static int cpuhp_cpufreq_online(unsigned int cpu)
2653 {
2654         cpufreq_online(cpu);
2655
2656         return 0;
2657 }
2658
2659 static int cpuhp_cpufreq_offline(unsigned int cpu)
2660 {
2661         cpufreq_offline(cpu);
2662
2663         return 0;
2664 }
2665
2666 /**
2667  * cpufreq_register_driver - register a CPU Frequency driver
2668  * @driver_data: A struct cpufreq_driver containing the values
2669  * submitted by the CPU Frequency driver.
2670  *
2671  * Registers a CPU Frequency driver to this core code. This code
2672  * returns zero on success, -EEXIST when another driver got here first
2673  * (and isn't unregistered in the meantime).
2674  *
2675  */
2676 int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2677 {
2678         unsigned long flags;
2679         int ret;
2680
2681         if (cpufreq_disabled())
2682                 return -ENODEV;
2683
2684         /*
2685          * The cpufreq core depends heavily on the availability of device
2686          * structures, so make sure they are available before going further.
2687          */
2688         if (!get_cpu_device(0))
2689                 return -EPROBE_DEFER;
2690
2691         if (!driver_data || !driver_data->verify || !driver_data->init ||
2692             !(driver_data->setpolicy || driver_data->target_index ||
2693                     driver_data->target) ||
2694              (driver_data->setpolicy && (driver_data->target_index ||
2695                     driver_data->target)) ||
2696              (!driver_data->get_intermediate != !driver_data->target_intermediate) ||
2697              (!driver_data->online != !driver_data->offline))
2698                 return -EINVAL;
2699
2700         pr_debug("trying to register driver %s\n", driver_data->name);
2701
2702         /* Protect against concurrent CPU online/offline. */
2703         cpus_read_lock();
2704
2705         write_lock_irqsave(&cpufreq_driver_lock, flags);
2706         if (cpufreq_driver) {
2707                 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2708                 ret = -EEXIST;
2709                 goto out;
2710         }
2711         cpufreq_driver = driver_data;
2712         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2713
2714         if (driver_data->setpolicy)
2715                 driver_data->flags |= CPUFREQ_CONST_LOOPS;
2716
2717         if (cpufreq_boost_supported()) {
2718                 ret = create_boost_sysfs_file();
2719                 if (ret)
2720                         goto err_null_driver;
2721         }
2722
2723         ret = subsys_interface_register(&cpufreq_interface);
2724         if (ret)
2725                 goto err_boost_unreg;
2726
2727         if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
2728             list_empty(&cpufreq_policy_list)) {
2729                 /* if all ->init() calls failed, unregister */
2730                 ret = -ENODEV;
2731                 pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2732                          driver_data->name);
2733                 goto err_if_unreg;
2734         }
2735
2736         ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
2737                                                    "cpufreq:online",
2738                                                    cpuhp_cpufreq_online,
2739                                                    cpuhp_cpufreq_offline);
2740         if (ret < 0)
2741                 goto err_if_unreg;
2742         hp_online = ret;
2743         ret = 0;
2744
2745         pr_debug("driver %s up and running\n", driver_data->name);
2746         goto out;
2747
2748 err_if_unreg:
2749         subsys_interface_unregister(&cpufreq_interface);
2750 err_boost_unreg:
2751         remove_boost_sysfs_file();
2752 err_null_driver:
2753         write_lock_irqsave(&cpufreq_driver_lock, flags);
2754         cpufreq_driver = NULL;
2755         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2756 out:
2757         cpus_read_unlock();
2758         return ret;
2759 }
2760 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
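
/*
 * Minimal table-based driver skeleton (illustrative, hypothetical "foo"
 * names) satisfying the callback checks above: ->init and ->verify are
 * mandatory, plus exactly one of ->setpolicy or ->target/->target_index.
 */
static struct cpufreq_frequency_table foo_table[] = {
        { .frequency = 800000 },
        { .frequency = 1200000 },
        { .frequency = CPUFREQ_TABLE_END },
};

static int foo_init(struct cpufreq_policy *policy)
{
        policy->freq_table = foo_table;
        policy->cpuinfo.transition_latency = 300000;    /* ns, assumed */
        return 0;
}

static int foo_target_index(struct cpufreq_policy *policy,
                            unsigned int index)
{
        /* program the clock to foo_table[index].frequency here */
        return 0;
}

static struct cpufreq_driver foo_cpufreq_driver = {
        .name           = "foo-cpufreq",
        .flags          = CPUFREQ_NEED_INITIAL_FREQ_CHECK,
        .init           = foo_init,
        .verify         = cpufreq_generic_frequency_table_verify,
        .target_index   = foo_target_index,
        .attr           = cpufreq_generic_attr,
};

/* probe path:  return cpufreq_register_driver(&foo_cpufreq_driver);  */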
2761
2762 /*
2763  * cpufreq_unregister_driver - unregister the current CPUFreq driver
2764  *
2765  * Unregister the current CPUFreq driver. Only call this if you have
2766  * the right to do so, i.e. if you have succeeded in initialising before!
2767  * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2768  * currently not initialised.
2769  */
2770 int cpufreq_unregister_driver(struct cpufreq_driver *driver)
2771 {
2772         unsigned long flags;
2773
2774         if (!cpufreq_driver || (driver != cpufreq_driver))
2775                 return -EINVAL;
2776
2777         pr_debug("unregistering driver %s\n", driver->name);
2778
2779         /* Protect against concurrent cpu hotplug */
2780         cpus_read_lock();
2781         subsys_interface_unregister(&cpufreq_interface);
2782         remove_boost_sysfs_file();
2783         cpuhp_remove_state_nocalls_cpuslocked(hp_online);
2784
2785         write_lock_irqsave(&cpufreq_driver_lock, flags);
2786
2787         cpufreq_driver = NULL;
2788
2789         write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2790         cpus_read_unlock();
2791
2792         return 0;
2793 }
2794 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2795
2796 static int __init cpufreq_core_init(void)
2797 {
2798         struct cpufreq_governor *gov = cpufreq_default_governor();
2799
2800         if (cpufreq_disabled())
2801                 return -ENODEV;
2802
2803         cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
2804         BUG_ON(!cpufreq_global_kobject);
2805
2806         if (!strlen(default_governor))
2807                 strncpy(default_governor, gov->name, CPUFREQ_NAME_LEN);
2808
2809         return 0;
2810 }
2811 module_param(off, int, 0444);
2812 module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
2813 core_initcall(cpufreq_core_init);