/*
 * processor_thermal.c - Passive cooling submodule of the ACPI processor driver
 *
 *	- Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/sysdev.h>

#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#include <acpi/acpi_drivers.h>
#define PREFIX "ACPI: "

#define ACPI_PROCESSOR_CLASS		"processor"
#define _COMPONENT			ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_thermal");

#ifdef CONFIG_CPU_FREQ
/* If a passive cooling situation is detected, primarily CPUfreq is used, as it
 * offers (in most cases) voltage scaling in addition to frequency scaling, and
 * thus a cubic (instead of linear) reduction of energy. Also, we allow for
 * _any_ cpufreq driver and not only the acpi-cpufreq driver.
 */
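/*
 * Rough first-order sketch of the "cubic" claim: dynamic CMOS power is
 * commonly approximated as P ~ C * V^2 * f, and on processors that scale
 * voltage together with frequency, V is roughly proportional to f, giving
 * P ~ f^3.  Under that (idealized) model a 20% frequency reduction cuts
 * dynamic power to about 0.8^3 ~= 51%, rather than the 80% a purely linear
 * model would predict.
 */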
#define CPUFREQ_THERMAL_MIN_STEP 0
#define CPUFREQ_THERMAL_MAX_STEP 3

static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);
static unsigned int acpi_thermal_cpufreq_is_init = 0;
static int cpu_has_cpufreq(unsigned int cpu)
{
	struct cpufreq_policy policy;
	if (!acpi_thermal_cpufreq_is_init || cpufreq_get_policy(&policy, cpu))
		return 0;
	return 1;
}
static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
					 unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;
	unsigned long max_freq = 0;

	if (event != CPUFREQ_ADJUST)
		goto out;

	max_freq = (
	    policy->cpuinfo.max_freq *
	    (100 - per_cpu(cpufreq_thermal_reduction_pctg, policy->cpu) * 20)
	) / 100;

	cpufreq_verify_within_limits(policy, 0, max_freq);

      out:
	return 0;
}
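/*
 * Worked example of the limit computed above, assuming a hypothetical
 * cpuinfo.max_freq of 2000 MHz: reduction states 0..3 allow
 * (100 - state * 20)% of that maximum, i.e. 2000, 1600, 1200 and 800 MHz.
 */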
static struct notifier_block acpi_thermal_cpufreq_notifier_block = {
	.notifier_call = acpi_thermal_cpufreq_notifier,
};
static int cpufreq_get_max_state(unsigned int cpu)
{
	if (!cpu_has_cpufreq(cpu))
		return 0;

	return CPUFREQ_THERMAL_MAX_STEP;
}
static int cpufreq_get_cur_state(unsigned int cpu)
{
	if (!cpu_has_cpufreq(cpu))
		return 0;

	return per_cpu(cpufreq_thermal_reduction_pctg, cpu);
}
static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
	if (!cpu_has_cpufreq(cpu))
		return 0;

	per_cpu(cpufreq_thermal_reduction_pctg, cpu) = state;
	cpufreq_update_policy(cpu);
	return 0;
}
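/*
 * Note: cpufreq_set_cur_state() only records the new reduction step; the
 * actual clamping happens when cpufreq_update_policy() re-evaluates the
 * policy and the CPUFREQ_ADJUST notifier above lowers max_freq.
 */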
void acpi_thermal_cpufreq_init(void)
{
	int i;

	for (i = 0; i < nr_cpu_ids; i++)
		if (cpu_present(i))
			per_cpu(cpufreq_thermal_reduction_pctg, i) = 0;

	i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block,
				      CPUFREQ_POLICY_NOTIFIER);
	if (!i)
		acpi_thermal_cpufreq_is_init = 1;
}
void acpi_thermal_cpufreq_exit(void)
{
	if (acpi_thermal_cpufreq_is_init)
		cpufreq_unregister_notifier
		    (&acpi_thermal_cpufreq_notifier_block,
		     CPUFREQ_POLICY_NOTIFIER);

	acpi_thermal_cpufreq_is_init = 0;
}
#else				/* ! CONFIG_CPU_FREQ */

static int cpufreq_get_max_state(unsigned int cpu)
{
	return 0;
}

static int cpufreq_get_cur_state(unsigned int cpu)
{
	return 0;
}

static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
	return 0;
}

#endif
int acpi_processor_get_limit_info(struct acpi_processor *pr)
{
	if (!pr)
		return -EINVAL;

	if (pr->flags.throttling)
		pr->flags.limit = 1;

	return 0;
}
/* thermal cooling device callbacks */
static int acpi_processor_max_state(struct acpi_processor *pr)
{
	int max_state = 0;

	/*
	 * There are four cpufreq states according to
	 * cpufreq_thermal_reduction_pctg: 0, 1, 2, 3.
	 */
	max_state += cpufreq_get_max_state(pr->id);
	if (pr->flags.throttling)
		max_state += (pr->throttling.state_count - 1);

	return max_state;
}
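/*
 * Example of the state arithmetic above, assuming cpufreq is usable
 * (CPUFREQ_THERMAL_MAX_STEP = 3) and a processor reporting eight throttling
 * states: max_state = 3 + (8 - 1) = 10.  Cooling states 0..3 are then
 * served by cpufreq limits and 4..10 by throttling, as done in
 * processor_set_cur_state() below.
 */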
static int
processor_get_max_state(struct thermal_cooling_device *cdev,
			unsigned long *state)
{
	struct acpi_device *device = cdev->devdata;
	struct acpi_processor *pr = acpi_driver_data(device);

	if (!device || !pr)
		return -EINVAL;

	*state = acpi_processor_max_state(pr);
	return 0;
}
static int
processor_get_cur_state(struct thermal_cooling_device *cdev,
			unsigned long *cur_state)
{
	struct acpi_device *device = cdev->devdata;
	struct acpi_processor *pr = acpi_driver_data(device);

	if (!device || !pr)
		return -EINVAL;

	*cur_state = cpufreq_get_cur_state(pr->id);
	if (pr->flags.throttling)
		*cur_state += pr->throttling.state;
	return 0;
}
static int
processor_set_cur_state(struct thermal_cooling_device *cdev,
			unsigned long state)
{
	struct acpi_device *device = cdev->devdata;
	struct acpi_processor *pr = acpi_driver_data(device);
	int result = 0;
	int max_pstate;

	if (!device || !pr)
		return -EINVAL;

	max_pstate = cpufreq_get_max_state(pr->id);
	if (state > acpi_processor_max_state(pr))
		return -EINVAL;

	if (state <= max_pstate) {
		if (pr->flags.throttling && pr->throttling.state)
			result = acpi_processor_set_throttling(pr, 0, false);
		cpufreq_set_cur_state(pr->id, state);
	} else {
		cpufreq_set_cur_state(pr->id, max_pstate);
		result = acpi_processor_set_throttling(pr,
				state - max_pstate, false);
	}
	return result;
}
const struct thermal_cooling_device_ops processor_cooling_ops = {
	.get_max_state = processor_get_max_state,
	.get_cur_state = processor_get_cur_state,
	.set_cur_state = processor_set_cur_state,
};