/*
 * acpi-cpufreq.c - ACPI Processor P-States Driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <[email protected]>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <[email protected]>
 * Copyright (C) 2002 - 2004 Dominik Brodowski <[email protected]>
 * Copyright (C) 2006 Denis Sadykov <[email protected]>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>

#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>

#include <acpi/processor.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>

MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");

enum {
	UNDEFINED_CAPABLE = 0,
	SYSTEM_INTEL_MSR_CAPABLE,
	SYSTEM_AMD_MSR_CAPABLE,
	SYSTEM_IO_CAPABLE,
};

#define INTEL_MSR_RANGE		(0xffff)
#define AMD_MSR_RANGE		(0x7)

#define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)

struct acpi_cpufreq_data {
	unsigned int resume;
	unsigned int cpu_feature;
	unsigned int acpi_perf_cpu;
	cpumask_var_t freqdomain_cpus;
	void (*cpu_freq_write)(struct acpi_pct_register *reg, u32 val);
	u32 (*cpu_freq_read)(struct acpi_pct_register *reg);
};

/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;

static inline struct acpi_processor_performance *to_perf_data(struct acpi_cpufreq_data *data)
{
	return per_cpu_ptr(acpi_perf_data, data->acpi_perf_cpu);
}

static struct cpufreq_driver acpi_cpufreq_driver;

static unsigned int acpi_pstate_strict;

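/*
 * Report whether boost (Intel Turbo / AMD Core Performance Boost) is
 * currently enabled on @cpu by reading the vendor-specific MSR
 * (IA32_MISC_ENABLE turbo-disable bit or K7_HWCR CPB-disable bit).
 */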
static bool boost_state(unsigned int cpu)
{
	u32 lo, hi;
	u64 msr;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
	case X86_VENDOR_AMD:
		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_K7_HWCR_CPB_DIS);
	}
	return false;
}

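/*
 * Set or clear the boost-disable bit in the vendor-specific MSR on the
 * CPU this runs on; returns -EINVAL for unsupported vendors.
 */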
static int boost_set_msr(bool enable)
{
	u32 msr_addr;
	u64 msr_mask, val;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		msr_addr = MSR_IA32_MISC_ENABLE;
		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
		break;
	case X86_VENDOR_AMD:
		msr_addr = MSR_K7_HWCR;
		msr_mask = MSR_K7_HWCR_CPB_DIS;
		break;
	default:
		return -EINVAL;
	}

	rdmsrl(msr_addr, val);

	if (enable)
		val &= ~msr_mask;
	else
		val |= msr_mask;

	wrmsrl(msr_addr, val);
	return 0;
}

static void boost_set_msr_each(void *p_en)
{
	bool enable = (bool) p_en;

	boost_set_msr(enable);
}

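/*
 * Driver-wide boost toggle: rewrite the boost MSR on every online CPU via
 * IPI, with CPU hotplug held off while the update is in flight.
 */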
static int set_boost(int val)
{
	get_online_cpus();
	on_each_cpu(boost_set_msr_each, (void *)(long)val, 1);
	put_online_cpus();
	pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");

	return 0;
}

static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	if (unlikely(!data))
		return -ENODEV;

	return cpufreq_show_cpus(data->freqdomain_cpus, buf);
}

cpufreq_freq_attr_ro(freqdomain_cpus);

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
			 size_t count)
{
	int ret;
	unsigned int val = 0;

	if (!acpi_cpufreq_driver.set_boost)
		return -EINVAL;

	ret = kstrtouint(buf, 10, &val);
	if (ret || val > 1)
		return -EINVAL;

	set_boost(val);

	return count;
}

static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", acpi_cpufreq_driver.boost_enabled);
}

cpufreq_freq_attr_rw(cpb);
#endif

static int check_est_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_EST);
}

static int check_amd_hwpstate_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
}

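/*
 * The extract_*() helpers translate a raw P-state status value (an I/O port
 * reading or a PERF_CTL MSR value) back into a frequency from the policy's
 * frequency table.
 */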
static unsigned extract_io(struct cpufreq_policy *policy, u32 value)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf;
	int i;

	perf = to_perf_data(data);

	for (i = 0; i < perf->state_count; i++) {
		if (value == perf->states[i].status)
			return policy->freq_table[i].frequency;
	}
	return 0;
}

static unsigned extract_msr(struct cpufreq_policy *policy, u32 msr)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct cpufreq_frequency_table *pos;
	struct acpi_processor_performance *perf;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		msr &= AMD_MSR_RANGE;
	else
		msr &= INTEL_MSR_RANGE;

	perf = to_perf_data(data);

	cpufreq_for_each_entry(pos, policy->freq_table)
		if (msr == perf->states[pos->driver_data].status)
			return pos->frequency;
	return policy->freq_table[0].frequency;
}

static unsigned extract_freq(struct cpufreq_policy *policy, u32 val)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		return extract_msr(policy, val);
	case SYSTEM_IO_CAPABLE:
		return extract_io(policy, val);
	default:
		return 0;
	}
}

static u32 cpu_freq_read_intel(struct acpi_pct_register *not_used)
{
	u32 val, dummy;

	rdmsr(MSR_IA32_PERF_CTL, val, dummy);
	return val;
}

static void cpu_freq_write_intel(struct acpi_pct_register *not_used, u32 val)
{
	u32 lo, hi;

	rdmsr(MSR_IA32_PERF_CTL, lo, hi);
	lo = (lo & ~INTEL_MSR_RANGE) | (val & INTEL_MSR_RANGE);
	wrmsr(MSR_IA32_PERF_CTL, lo, hi);
}

static u32 cpu_freq_read_amd(struct acpi_pct_register *not_used)
{
	u32 val, dummy;

	rdmsr(MSR_AMD_PERF_CTL, val, dummy);
	return val;
}

static void cpu_freq_write_amd(struct acpi_pct_register *not_used, u32 val)
{
	wrmsr(MSR_AMD_PERF_CTL, val, 0);
}

static u32 cpu_freq_read_io(struct acpi_pct_register *reg)
{
	u32 val;

	acpi_os_read_port(reg->address, &val, reg->bit_width);
	return val;
}

static void cpu_freq_write_io(struct acpi_pct_register *reg, u32 val)
{
	acpi_os_write_port(reg->address, val, reg->bit_width);
}

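/*
 * A drv_cmd bundles the control register and an accessor so a read or write
 * can be shipped to the target CPU(s) via the smp_call_function_*() helpers.
 */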
struct drv_cmd {
	struct acpi_pct_register *reg;
	u32 val;
	union {
		void (*write)(struct acpi_pct_register *reg, u32 val);
		u32 (*read)(struct acpi_pct_register *reg);
	} func;
};

/* Called via smp_call_function_single(), on the target CPU */
static void do_drv_read(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;

	cmd->val = cmd->func.read(cmd->reg);
}

static u32 drv_read(struct acpi_cpufreq_data *data, const struct cpumask *mask)
{
	struct acpi_processor_performance *perf = to_perf_data(data);
	struct drv_cmd cmd = {
		.reg = &perf->control_register,
		.func.read = data->cpu_freq_read,
	};
	int err;

	err = smp_call_function_any(mask, do_drv_read, &cmd, 1);
	WARN_ON_ONCE(err);	/* smp_call_function_any() was buggy? */
	return cmd.val;
}

/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;

	cmd->func.write(cmd->reg, cmd->val);
}

static void drv_write(struct acpi_cpufreq_data *data,
		      const struct cpumask *mask, u32 val)
{
	struct acpi_processor_performance *perf = to_perf_data(data);
	struct drv_cmd cmd = {
		.reg = &perf->control_register,
		.val = val,
		.func.write = data->cpu_freq_write,
	};
	int this_cpu;

	this_cpu = get_cpu();
	if (cpumask_test_cpu(this_cpu, mask))
		do_drv_write(&cmd);

	smp_call_function_many(mask, do_drv_write, &cmd, 1);
	put_cpu();
}

static u32 get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
{
	u32 val;

	if (unlikely(cpumask_empty(mask)))
		return 0;

	val = drv_read(data, mask);

	pr_debug("get_cur_val = %u\n", val);

	return val;
}

static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
	struct acpi_cpufreq_data *data;
	struct cpufreq_policy *policy;
	unsigned int freq;
	unsigned int cached_freq;

	pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (unlikely(!policy))
		return 0;

	data = policy->driver_data;
	if (unlikely(!data || !policy->freq_table))
		return 0;

	cached_freq = policy->freq_table[to_perf_data(data)->state].frequency;
	freq = extract_freq(policy, get_cur_val(cpumask_of(cpu), data));
	if (freq != cached_freq) {
		/*
		 * The dreaded BIOS frequency change behind our back.
		 * Force set the frequency on next target call.
		 */
		data->resume = 1;
	}

	pr_debug("cur freq = %u\n", freq);

	return freq;
}

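/*
 * Poll for up to ~1ms until the CPUs in @mask report the requested
 * frequency; only used when the acpi_pstate_strict module parameter is set.
 */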
static unsigned int check_freqs(struct cpufreq_policy *policy,
				const struct cpumask *mask, unsigned int freq)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	unsigned int cur_freq;
	unsigned int i;

	for (i = 0; i < 100; i++) {
		cur_freq = extract_freq(policy, get_cur_val(mask, data));
		if (cur_freq == freq)
			return 1;
		udelay(10);
	}
	return 0;
}

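/*
 * ->target_index() callback: program the P-state selected by @index on the
 * CPUs covered by the policy, honoring the _PSD coordination type.
 */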
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
			       unsigned int index)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf;
	const struct cpumask *mask;
	unsigned int next_perf_state = 0; /* Index into perf table */
	int result = 0;

	if (unlikely(!data))
		return -ENODEV;

	perf = to_perf_data(data);
	next_perf_state = policy->freq_table[index].driver_data;
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume)) {
			pr_debug("Called after resume, resetting to P%d\n",
				next_perf_state);
			data->resume = 0;
		} else {
			pr_debug("Already at target state (P%d)\n",
				next_perf_state);
			return 0;
		}
	}

	/*
	 * The core won't allow CPUs to go away until the governor has been
	 * stopped, so we can rely on the stability of policy->cpus.
	 */
	mask = policy->shared_type == CPUFREQ_SHARED_TYPE_ANY ?
		cpumask_of(policy->cpu) : policy->cpus;

	drv_write(data, mask, perf->states[next_perf_state].control);

	if (acpi_pstate_strict) {
		if (!check_freqs(policy, mask,
				 policy->freq_table[index].frequency)) {
			pr_debug("acpi_cpufreq_target failed (%d)\n",
				policy->cpu);
			result = -EAGAIN;
		}
	}

	if (!result)
		perf->state = next_perf_state;

	return result;
}

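/*
 * ->fast_switch() callback: resolve @target_freq to a frequency-table entry
 * and write the control value directly on the local CPU, without any
 * cross-CPU calls, so it is safe to use from the fast frequency-switching
 * path.
 */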
unsigned int acpi_cpufreq_fast_switch(struct cpufreq_policy *policy,
				      unsigned int target_freq)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf;
	struct cpufreq_frequency_table *entry;
	unsigned int next_perf_state, next_freq, index;

	/*
	 * Find the closest frequency above target_freq.
	 */
	if (policy->cached_target_freq == target_freq)
		index = policy->cached_resolved_idx;
	else
		index = cpufreq_table_find_index_dl(policy, target_freq);

	entry = &policy->freq_table[index];
	next_freq = entry->frequency;
	next_perf_state = entry->driver_data;

	perf = to_perf_data(data);
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume))
			data->resume = 0;
		else
			return next_freq;
	}

	data->cpu_freq_write(&perf->control_register,
			     perf->states[next_perf_state].control);
	perf->state = next_perf_state;
	return next_freq;
}

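/*
 * Guess the current frequency when it cannot be read back (I/O port based
 * systems): pick the _PSS state closest to cpu_khz, or assume P0 if cpu_khz
 * is not available.
 */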
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
	struct acpi_processor_performance *perf;

	perf = to_perf_data(data);
	if (cpu_khz) {
		/* search the closest match to cpu_khz */
		unsigned int i;
		unsigned long freq;
		unsigned long freqn = perf->states[0].core_frequency * 1000;

		for (i = 0; i < (perf->state_count-1); i++) {
			freq = freqn;
			freqn = perf->states[i+1].core_frequency * 1000;
			if ((2 * cpu_khz) > (freqn + freq)) {
				perf->state = i;
				return freq;
			}
		}
		perf->state = perf->state_count-1;
		return freqn;
	} else {
		/* assume CPU is at P0... */
		perf->state = 0;
		return perf->states[0].core_frequency * 1000;
	}
}

static void free_acpi_perf_data(void)
{
	unsigned int i;

	/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
	for_each_possible_cpu(i)
		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
				 ->shared_cpu_map);
	free_percpu(acpi_perf_data);
}

static int cpufreq_boost_online(unsigned int cpu)
{
	/*
	 * On the CPU_UP path we simply keep the boost-disable flag
	 * in sync with the current global state.
	 */
	return boost_set_msr(acpi_cpufreq_driver.boost_enabled);
}

static int cpufreq_boost_down_prep(unsigned int cpu)
{
	/*
	 * Clear the boost-disable bit on the CPU_DOWN path so that
	 * this cpu cannot block the remaining ones from boosting.
	 */
	return boost_set_msr(1);
}

560 | /* |
561 | * acpi_cpufreq_early_init - initialize ACPI P-States library | |
562 | * | |
563 | * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c) | |
564 | * in order to determine correct frequency and voltage pairings. We can | |
565 | * do _PDC and _PSD and find out the processor dependency for the | |
566 | * actual init that will happen later... | |
567 | */ | |
50109292 | 568 | static int __init acpi_cpufreq_early_init(void) |
09b4d1ee | 569 | { |
2fdf66b4 | 570 | unsigned int i; |
2d06d8c4 | 571 | pr_debug("acpi_cpufreq_early_init\n"); |
09b4d1ee | 572 | |
50109292 FY |
573 | acpi_perf_data = alloc_percpu(struct acpi_processor_performance); |
574 | if (!acpi_perf_data) { | |
2d06d8c4 | 575 | pr_debug("Memory allocation error for acpi_perf_data.\n"); |
50109292 | 576 | return -ENOMEM; |
09b4d1ee | 577 | } |
2fdf66b4 | 578 | for_each_possible_cpu(i) { |
eaa95840 | 579 | if (!zalloc_cpumask_var_node( |
80855f73 MT |
580 | &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map, |
581 | GFP_KERNEL, cpu_to_node(i))) { | |
2fdf66b4 RR |
582 | |
583 | /* Freeing a NULL pointer is OK: alloc_percpu zeroes. */ | |
584 | free_acpi_perf_data(); | |
585 | return -ENOMEM; | |
586 | } | |
587 | } | |
09b4d1ee VP |
588 | |
589 | /* Do initialization in ACPI core */ | |
fe27cb35 VP |
590 | acpi_processor_preregister_performance(acpi_perf_data); |
591 | return 0; | |
09b4d1ee VP |
592 | } |
593 | ||
95625b8f | 594 | #ifdef CONFIG_SMP |
8adcc0c6 VP |
595 | /* |
596 | * Some BIOSes do SW_ANY coordination internally, either set it up in hw | |
597 | * or do it in BIOS firmware and won't inform about it to OS. If not | |
598 | * detected, this has a side effect of making CPU run at a different speed | |
599 | * than OS intended it to run at. Detect it and handle it cleanly. | |
600 | */ | |
601 | static int bios_with_sw_any_bug; | |
602 | ||
1855256c | 603 | static int sw_any_bug_found(const struct dmi_system_id *d) |
8adcc0c6 VP |
604 | { |
605 | bios_with_sw_any_bug = 1; | |
606 | return 0; | |
607 | } | |
608 | ||
1855256c | 609 | static const struct dmi_system_id sw_any_bug_dmi_table[] = { |
8adcc0c6 VP |
610 | { |
611 | .callback = sw_any_bug_found, | |
612 | .ident = "Supermicro Server X6DLP", | |
613 | .matches = { | |
614 | DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"), | |
615 | DMI_MATCH(DMI_BIOS_VERSION, "080010"), | |
616 | DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"), | |
617 | }, | |
618 | }, | |
619 | { } | |
620 | }; | |
1a8e42fa PB |
621 | |
622 | static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c) | |
623 | { | |
293afe44 JV |
624 | /* Intel Xeon Processor 7100 Series Specification Update |
625 | * http://www.intel.com/Assets/PDF/specupdate/314554.pdf | |
1a8e42fa PB |
626 | * AL30: A Machine Check Exception (MCE) Occurring during an |
627 | * Enhanced Intel SpeedStep Technology Ratio Change May Cause | |
293afe44 | 628 | * Both Processor Cores to Lock Up. */ |
1a8e42fa PB |
629 | if (c->x86_vendor == X86_VENDOR_INTEL) { |
630 | if ((c->x86 == 15) && | |
631 | (c->x86_model == 6) && | |
293afe44 | 632 | (c->x86_mask == 8)) { |
1c5864e2 | 633 | pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n"); |
1a8e42fa | 634 | return -ENODEV; |
293afe44 | 635 | } |
1a8e42fa PB |
636 | } |
637 | return 0; | |
638 | } | |
95625b8f | 639 | #endif |
8adcc0c6 | 640 | |
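/*
 * ->init() callback: register this CPU's _PSS data with the ACPI core, work
 * out how P-states are controlled (I/O port vs. Intel/AMD MSR), and build
 * the frequency table for the policy.
 */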
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int i;
	unsigned int valid_states = 0;
	unsigned int cpu = policy->cpu;
	struct acpi_cpufreq_data *data;
	unsigned int result = 0;
	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
	struct acpi_processor_performance *perf;
	struct cpufreq_frequency_table *freq_table;
#ifdef CONFIG_SMP
	static int blacklisted;
#endif

	pr_debug("acpi_cpufreq_cpu_init\n");

#ifdef CONFIG_SMP
	if (blacklisted)
		return blacklisted;
	blacklisted = acpi_cpufreq_blacklist(c);
	if (blacklisted)
		return blacklisted;
#endif

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&data->freqdomain_cpus, GFP_KERNEL)) {
		result = -ENOMEM;
		goto err_free;
	}

	perf = per_cpu_ptr(acpi_perf_data, cpu);
	data->acpi_perf_cpu = cpu;
	policy->driver_data = data;

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;

	result = acpi_processor_register_performance(perf, cpu);
	if (result)
		goto err_free_mask;

	policy->shared_type = perf->shared_type;

	/*
	 * Will let policy->cpus know about dependency only when software
	 * coordination is required.
	 */
	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
		cpumask_copy(policy->cpus, perf->shared_cpu_map);
	}
	cpumask_copy(data->freqdomain_cpus, perf->shared_cpu_map);

#ifdef CONFIG_SMP
	dmi_check_system(sw_any_bug_dmi_table);
	if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		cpumask_copy(policy->cpus, topology_core_cpumask(cpu));
	}

	if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
		cpumask_clear(policy->cpus);
		cpumask_set_cpu(cpu, policy->cpus);
		cpumask_copy(data->freqdomain_cpus,
			     topology_sibling_cpumask(cpu));
		policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
		pr_info_once("overriding BIOS provided _PSD data\n");
	}
#endif

	/* capability check */
	if (perf->state_count <= 1) {
		pr_debug("No P-States\n");
		result = -ENODEV;
		goto err_unreg;
	}

	if (perf->control_register.space_id != perf->status_register.space_id) {
		result = -ENODEV;
		goto err_unreg;
	}

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    boot_cpu_data.x86 == 0xf) {
			pr_debug("AMD K8 systems must use native drivers.\n");
			result = -ENODEV;
			goto err_unreg;
		}
		pr_debug("SYSTEM IO addr space\n");
		data->cpu_feature = SYSTEM_IO_CAPABLE;
		data->cpu_freq_read = cpu_freq_read_io;
		data->cpu_freq_write = cpu_freq_write_io;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		pr_debug("HARDWARE addr space\n");
		if (check_est_cpu(cpu)) {
			data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
			data->cpu_freq_read = cpu_freq_read_intel;
			data->cpu_freq_write = cpu_freq_write_intel;
			break;
		}
		if (check_amd_hwpstate_cpu(cpu)) {
			data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
			data->cpu_freq_read = cpu_freq_read_amd;
			data->cpu_freq_write = cpu_freq_write_amd;
			break;
		}
		result = -ENODEV;
		goto err_unreg;
	default:
		pr_debug("Unknown addr space %d\n",
			(u32) (perf->control_register.space_id));
		result = -ENODEV;
		goto err_unreg;
	}

	freq_table = kzalloc(sizeof(*freq_table) *
			     (perf->state_count+1), GFP_KERNEL);
	if (!freq_table) {
		result = -ENOMEM;
		goto err_unreg;
	}

	/* detect transition latency */
	policy->cpuinfo.transition_latency = 0;
	for (i = 0; i < perf->state_count; i++) {
		if ((perf->states[i].transition_latency * 1000) >
		    policy->cpuinfo.transition_latency)
			policy->cpuinfo.transition_latency =
			    perf->states[i].transition_latency * 1000;
	}

	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
	    policy->cpuinfo.transition_latency > 20 * 1000) {
		policy->cpuinfo.transition_latency = 20 * 1000;
		pr_info_once("P-state transition latency capped at 20 uS\n");
	}

	/* table init */
	for (i = 0; i < perf->state_count; i++) {
		if (i > 0 && perf->states[i].core_frequency >=
		    freq_table[valid_states-1].frequency / 1000)
			continue;

		freq_table[valid_states].driver_data = i;
		freq_table[valid_states].frequency =
		    perf->states[i].core_frequency * 1000;
		valid_states++;
	}
	freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
	perf->state = 0;

	result = cpufreq_table_validate_and_show(policy, freq_table);
	if (result)
		goto err_freqfree;

	if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
		pr_warn(FW_WARN "P-state 0 is not max freq\n");

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		/*
		 * The core will not set policy->cur, because
		 * cpufreq_driver->get is NULL, so we need to set it here.
		 * However, we have to guess it, because the current speed is
		 * unknown and not detectable via IO ports.
		 */
		policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
		break;
	default:
		break;
	}

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
	for (i = 0; i < perf->state_count; i++)
		pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
			(i == perf->state ? '*' : ' '), i,
			(u32) perf->states[i].core_frequency,
			(u32) perf->states[i].power,
			(u32) perf->states[i].transition_latency);

	/*
	 * the first call to ->target() should result in us actually
	 * writing something to the appropriate registers.
	 */
	data->resume = 1;

	policy->fast_switch_possible = !acpi_pstate_strict &&
		!(policy_is_shared(policy) && policy->shared_type != CPUFREQ_SHARED_TYPE_ANY);

	return result;

err_freqfree:
	kfree(freq_table);
err_unreg:
	acpi_processor_unregister_performance(cpu);
err_free_mask:
	free_cpumask_var(data->freqdomain_cpus);
err_free:
	kfree(data);
	policy->driver_data = NULL;

	return result;
}

static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	pr_debug("acpi_cpufreq_cpu_exit\n");

	policy->fast_switch_possible = false;
	policy->driver_data = NULL;
	acpi_processor_unregister_performance(data->acpi_perf_cpu);
	free_cpumask_var(data->freqdomain_cpus);
	kfree(policy->freq_table);
	kfree(data);

	return 0;
}

static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	pr_debug("acpi_cpufreq_resume\n");

	data->resume = 1;

	return 0;
}

static struct freq_attr *acpi_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&freqdomain_cpus,
#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	&cpb,
#endif
	NULL,
};

static struct cpufreq_driver acpi_cpufreq_driver = {
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= acpi_cpufreq_target,
	.fast_switch	= acpi_cpufreq_fast_switch,
	.bios_limit	= acpi_processor_get_bios_limit,
	.init		= acpi_cpufreq_cpu_init,
	.exit		= acpi_cpufreq_cpu_exit,
	.resume		= acpi_cpufreq_resume,
	.name		= "acpi-cpufreq",
	.attr		= acpi_cpufreq_attr,
};

static enum cpuhp_state acpi_cpufreq_online;

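/*
 * If the CPU advertises boost support (AMD CPB or Intel IDA), hook up the
 * set_boost callback and register a hotplug state so freshly onlined CPUs
 * inherit the current boost setting.
 */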
static void __init acpi_cpufreq_boost_init(void)
{
	int ret;

	if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)))
		return;

	acpi_cpufreq_driver.set_boost = set_boost;
	acpi_cpufreq_driver.boost_enabled = boost_state(0);

	/*
	 * This calls the online callback on all online cpu and forces all
	 * MSRs to the same value.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "cpufreq/acpi:online",
				cpufreq_boost_online, cpufreq_boost_down_prep);
	if (ret < 0) {
		pr_err("acpi_cpufreq: failed to register hotplug callbacks\n");
		return;
	}
	acpi_cpufreq_online = ret;
}

static void acpi_cpufreq_boost_exit(void)
{
	if (acpi_cpufreq_online > 0)
		cpuhp_remove_state_nocalls(acpi_cpufreq_online);
}

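/*
 * Module init: allocate the per-CPU performance data, hide the legacy "cpb"
 * attribute on non-AMD systems, and register the cpufreq driver.
 */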
static int __init acpi_cpufreq_init(void)
{
	int ret;

	if (acpi_disabled)
		return -ENODEV;

	/* don't keep reloading if cpufreq_driver exists */
	if (cpufreq_get_current_driver())
		return -EEXIST;

	pr_debug("acpi_cpufreq_init\n");

	ret = acpi_cpufreq_early_init();
	if (ret)
		return ret;

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	/* this is a sysfs file with a strange name and an even stranger
	 * semantic - per CPU instantiation, but system global effect.
	 * Lets enable it only on AMD CPUs for compatibility reasons and
	 * only if configured. This is considered legacy code, which
	 * will probably be removed at some point in the future.
	 */
	if (!check_amd_hwpstate_cpu(0)) {
		struct freq_attr **attr;

		pr_debug("CPB unsupported, do not expose it\n");

		for (attr = acpi_cpufreq_attr; *attr; attr++)
			if (*attr == &cpb) {
				*attr = NULL;
				break;
			}
	}
#endif
	acpi_cpufreq_boost_init();

	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
	if (ret) {
		free_acpi_perf_data();
		acpi_cpufreq_boost_exit();
	}
	return ret;
}

static void __exit acpi_cpufreq_exit(void)
{
	pr_debug("acpi_cpufreq_exit\n");

	acpi_cpufreq_boost_exit();

	cpufreq_unregister_driver(&acpi_cpufreq_driver);

	free_acpi_perf_data();
}

module_param(acpi_pstate_strict, uint, 0644);
MODULE_PARM_DESC(acpi_pstate_strict,
	"value 0 or non-zero. non-zero -> strict ACPI checks are "
	"performed during frequency changes.");

late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);

static const struct x86_cpu_id acpi_cpufreq_ids[] = {
	X86_FEATURE_MATCH(X86_FEATURE_ACPI),
	X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);

static const struct acpi_device_id processor_device_ids[] = {
	{ACPI_PROCESSOR_OBJECT_HID, },
	{ACPI_PROCESSOR_DEVICE_HID, },
	{},
};
MODULE_DEVICE_TABLE(acpi, processor_device_ids);

MODULE_ALIAS("acpi");