// SPDX-License-Identifier: GPL-2.0-only
/*
 * acpi_pad.c ACPI Processor Aggregator Driver
 *
 * Copyright (c) 2009, Intel Corporation.
 */

#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <asm/mwait.h>
#include <xen/xen.h>

#define ACPI_PROCESSOR_AGGREGATOR_CLASS	"acpi_pad"
#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
#define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
static DEFINE_MUTEX(isolated_cpus_lock);
static DEFINE_MUTEX(round_robin_lock);

static unsigned long power_saving_mwait_eax;

static unsigned char tsc_detected_unstable;
static unsigned char tsc_marked_unstable;

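/*
 * Probe CPUID for the deepest MWAIT C-state that supports breaking on
 * interrupt and cache the matching EAX hint in power_saving_mwait_eax.
 * Also note whether the TSC may stop while idling in that state, so it
 * can be marked unstable before the first forced-idle period.
 */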
static void power_saving_mwait_init(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int highest_cstate = 0;
	unsigned int highest_subcstate = 0;
	int i;

	if (!boot_cpu_has(X86_FEATURE_MWAIT))
		return;
	if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
		return;

	cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);

	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
	    !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
		return;

	edx >>= MWAIT_SUBSTATE_SIZE;
	for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
		if (edx & MWAIT_SUBSTATE_MASK) {
			highest_cstate = i;
			highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
		}
	}
	power_saving_mwait_eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
		(highest_subcstate - 1);

#if defined(CONFIG_X86)
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
	case X86_VENDOR_ZHAOXIN:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			tsc_detected_unstable = 1;
		break;
	default:
		/* TSC could halt in idle */
		tsc_detected_unstable = 1;
	}
#endif
}

static unsigned long cpu_weight[NR_CPUS];
static int tsk_in_cpu[NR_CPUS] = {[0 ... NR_CPUS-1] = -1};
static DECLARE_BITMAP(pad_busy_cpus_bits, NR_CPUS);
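/*
 * Move the calling idle-injection task to the least-used online CPU,
 * preferring CPUs that are neither occupied by another acpi_pad task nor
 * an HT sibling of one, and update the busy-CPU bitmap and per-CPU usage
 * weights accordingly.
 */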
static void round_robin_cpu(unsigned int tsk_index)
{
	struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
	cpumask_var_t tmp;
	int cpu;
	unsigned long min_weight = -1;
	unsigned long preferred_cpu;

	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
		return;

	mutex_lock(&round_robin_lock);
	cpumask_clear(tmp);
	for_each_cpu(cpu, pad_busy_cpus)
		cpumask_or(tmp, tmp, topology_sibling_cpumask(cpu));
	cpumask_andnot(tmp, cpu_online_mask, tmp);
	/* avoid HT siblings if possible */
	if (cpumask_empty(tmp))
		cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
	if (cpumask_empty(tmp)) {
		mutex_unlock(&round_robin_lock);
		free_cpumask_var(tmp);
		return;
	}
	for_each_cpu(cpu, tmp) {
		if (cpu_weight[cpu] < min_weight) {
			min_weight = cpu_weight[cpu];
			preferred_cpu = cpu;
		}
	}

	if (tsk_in_cpu[tsk_index] != -1)
		cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
	tsk_in_cpu[tsk_index] = preferred_cpu;
	cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
	cpu_weight[preferred_cpu]++;
	mutex_unlock(&round_robin_lock);

	set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));

	free_cpumask_var(tmp);
}

static void exit_round_robin(unsigned int tsk_index)
{
	struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
	cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
	tsk_in_cpu[tsk_index] = -1;
}

static unsigned int idle_pct = 5; /* percentage */
static unsigned int round_robin_time = 1; /* second */
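/*
 * Per-task idle-injection loop: run as a SCHED_RR task (priority 1),
 * rotate to a fresh CPU every round_robin_time seconds, keep that CPU in
 * the deepest MWAIT state for up to (100 - idle_pct)% of each cycle, and
 * then yield for the remaining idle_pct% so other tasks are not starved.
 */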
static int power_saving_thread(void *data)
{
	struct sched_param param = {.sched_priority = 1};
	int do_sleep;
	unsigned int tsk_index = (unsigned long)data;
	u64 last_jiffies = 0;

	sched_setscheduler(current, SCHED_RR, &param);

	while (!kthread_should_stop()) {
		unsigned long expire_time;

		/* round robin to cpus */
		expire_time = last_jiffies + round_robin_time * HZ;
		if (time_before(expire_time, jiffies)) {
			last_jiffies = jiffies;
			round_robin_cpu(tsk_index);
		}

		do_sleep = 0;

		expire_time = jiffies + HZ * (100 - idle_pct) / 100;

		while (!need_resched()) {
			if (tsc_detected_unstable && !tsc_marked_unstable) {
				/* TSC could halt in idle, so notify users */
				mark_tsc_unstable("TSC halts in idle");
				tsc_marked_unstable = 1;
			}
			local_irq_disable();
			tick_broadcast_enable();
			tick_broadcast_enter();
			stop_critical_timings();

			mwait_idle_with_hints(power_saving_mwait_eax, 1);

			start_critical_timings();
			tick_broadcast_exit();
			local_irq_enable();

			if (time_before(expire_time, jiffies)) {
				do_sleep = 1;
				break;
			}
		}

		/*
		 * sched_rt throttles RT task run time: when an RT task has
		 * used 95% of the CPU time, it is scheduled out for 5% so
		 * that other tasks are not starved.  That mechanism only
		 * works when every CPU has an RT task running; if one CPU
		 * has no RT task, RT tasks from other CPUs borrow its CPU
		 * time and end up using more than 95%.  To keep the
		 * starvation avoidance working, take a nap here.
		 */
		if (unlikely(do_sleep))
			schedule_timeout_killable(HZ * idle_pct / 100);

		/*
		 * If an external event has set the need_resched flag, then
		 * we need to deal with it, or this loop will continue to
		 * spin without calling __mwait().
		 */
		if (unlikely(need_resched()))
			schedule();
	}

	exit_round_robin(tsk_index);
	return 0;
}

static struct task_struct *ps_tsks[NR_CPUS];
static unsigned int ps_tsk_num;
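/* Spawn one more "acpi_pad/N" kthread and account for it in ps_tsk_num. */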
static int create_power_saving_task(void)
{
	int rc;

	ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
		(void *)(unsigned long)ps_tsk_num,
		"acpi_pad/%d", ps_tsk_num);

	if (IS_ERR(ps_tsks[ps_tsk_num])) {
		rc = PTR_ERR(ps_tsks[ps_tsk_num]);
		ps_tsks[ps_tsk_num] = NULL;
	} else {
		rc = 0;
		ps_tsk_num++;
	}

	return rc;
}

static void destroy_power_saving_task(void)
{
	if (ps_tsk_num > 0) {
		ps_tsk_num--;
		kthread_stop(ps_tsks[ps_tsk_num]);
		ps_tsks[ps_tsk_num] = NULL;
	}
}

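/*
 * Create or destroy idle-injection kthreads until exactly @num of them
 * are running, stopping early if a creation attempt fails.
 */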
static void set_power_saving_task_num(unsigned int num)
{
	if (num > ps_tsk_num) {
		while (ps_tsk_num < num) {
			if (create_power_saving_task())
				return;
		}
	} else if (num < ps_tsk_num) {
		while (ps_tsk_num > num)
			destroy_power_saving_task();
	}
}

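/*
 * Force the requested number of CPUs (clamped to the online CPU count)
 * into idle by resizing the kthread pool.  Called under
 * isolated_cpus_lock; CPU hotplug is held off while the pool is resized.
 */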
static void acpi_pad_idle_cpus(unsigned int num_cpus)
{
	get_online_cpus();

	num_cpus = min_t(unsigned int, num_cpus, num_online_cpus());
	set_power_saving_task_num(num_cpus);

	put_online_cpus();
}

static uint32_t acpi_pad_idle_cpus_num(void)
{
	return ps_tsk_num;
}

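/*
 * sysfs interface: "idlecpus" requests a number of forced-idle CPUs
 * (reading it returns the cpumask currently occupied by acpi_pad tasks),
 * "idlepct" sets the share of each cycle handed back to the scheduler,
 * and "rrtime" sets the round-robin period in seconds.  A usage sketch,
 * assuming the device bound somewhere under /sys/devices (the exact path
 * is firmware-dependent):
 *
 *	echo 4  > idlecpus	# force-idle 4 CPUs
 *	echo 10 > idlepct	# give 10% of each cycle back to the scheduler
 *	echo 2  > rrtime	# rotate across CPUs every 2 seconds
 */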
static ssize_t acpi_pad_rrtime_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long num;

	if (kstrtoul(buf, 0, &num))
		return -EINVAL;
	if (num < 1 || num >= 100)
		return -EINVAL;
	mutex_lock(&isolated_cpus_lock);
	round_robin_time = num;
	mutex_unlock(&isolated_cpus_lock);
	return count;
}

static ssize_t acpi_pad_rrtime_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", round_robin_time);
}
static DEVICE_ATTR(rrtime, S_IRUGO|S_IWUSR,
	acpi_pad_rrtime_show,
	acpi_pad_rrtime_store);

static ssize_t acpi_pad_idlepct_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long num;

	if (kstrtoul(buf, 0, &num))
		return -EINVAL;
	if (num < 1 || num >= 100)
		return -EINVAL;
	mutex_lock(&isolated_cpus_lock);
	idle_pct = num;
	mutex_unlock(&isolated_cpus_lock);
	return count;
}

static ssize_t acpi_pad_idlepct_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%d\n", idle_pct);
}
static DEVICE_ATTR(idlepct, S_IRUGO|S_IWUSR,
	acpi_pad_idlepct_show,
	acpi_pad_idlepct_store);

static ssize_t acpi_pad_idlecpus_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long num;

	if (kstrtoul(buf, 0, &num))
		return -EINVAL;
	mutex_lock(&isolated_cpus_lock);
	acpi_pad_idle_cpus(num);
	mutex_unlock(&isolated_cpus_lock);
	return count;
}

static ssize_t acpi_pad_idlecpus_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	return cpumap_print_to_pagebuf(false, buf,
		to_cpumask(pad_busy_cpus_bits));
}

static DEVICE_ATTR(idlecpus, S_IRUGO|S_IWUSR,
	acpi_pad_idlecpus_show,
	acpi_pad_idlecpus_store);

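/* Create the idlecpus, idlepct and rrtime attributes, rolling back on failure. */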
static int acpi_pad_add_sysfs(struct acpi_device *device)
{
	int result;

	result = device_create_file(&device->dev, &dev_attr_idlecpus);
	if (result)
		return -ENODEV;
	result = device_create_file(&device->dev, &dev_attr_idlepct);
	if (result) {
		device_remove_file(&device->dev, &dev_attr_idlecpus);
		return -ENODEV;
	}
	result = device_create_file(&device->dev, &dev_attr_rrtime);
	if (result) {
		device_remove_file(&device->dev, &dev_attr_idlecpus);
		device_remove_file(&device->dev, &dev_attr_idlepct);
		return -ENODEV;
	}
	return 0;
}

static void acpi_pad_remove_sysfs(struct acpi_device *device)
{
	device_remove_file(&device->dev, &dev_attr_idlecpus);
	device_remove_file(&device->dev, &dev_attr_idlepct);
	device_remove_file(&device->dev, &dev_attr_rrtime);
}

/*
 * Query firmware how many CPUs should be idle.
 * Return -1 on failure.
 */
static int acpi_pad_pur(acpi_handle handle)
{
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *package;
	int num = -1;

	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer)))
		return num;

	if (!buffer.length || !buffer.pointer)
		return num;

	package = buffer.pointer;

	if (package->type == ACPI_TYPE_PACKAGE &&
	    package->package.count == 2 &&
	    package->package.elements[0].integer.value == 1) /* rev 1 */
		num = package->package.elements[1].integer.value;

	kfree(buffer.pointer);
	return num;
}

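/*
 * Handle the 0x80 notification: ask the firmware via _PUR how many CPUs
 * should be idled, apply the request, and report the number of CPUs
 * actually taken back through _OST.
 */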
static void acpi_pad_handle_notify(acpi_handle handle)
{
	int num_cpus;
	uint32_t idle_cpus;
	struct acpi_buffer param = {
		.length = 4,
		.pointer = (void *)&idle_cpus,
	};

	mutex_lock(&isolated_cpus_lock);
	num_cpus = acpi_pad_pur(handle);
	if (num_cpus < 0) {
		mutex_unlock(&isolated_cpus_lock);
		return;
	}
	acpi_pad_idle_cpus(num_cpus);
	idle_cpus = acpi_pad_idle_cpus_num();
	acpi_evaluate_ost(handle, ACPI_PROCESSOR_AGGREGATOR_NOTIFY, 0, &param);
	mutex_unlock(&isolated_cpus_lock);
}

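/*
 * ACPI notify handler: process the aggregator notification and forward
 * the event to user space as a netlink event.
 */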
static void acpi_pad_notify(acpi_handle handle, u32 event,
	void *data)
{
	struct acpi_device *device = data;

	switch (event) {
	case ACPI_PROCESSOR_AGGREGATOR_NOTIFY:
		acpi_pad_handle_notify(handle);
		acpi_bus_generate_netlink_event(device->pnp.device_class,
			dev_name(&device->dev), event, 0);
		break;
	default:
		pr_warn("Unsupported event [0x%x]\n", event);
		break;
	}
}

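/* Bind to the aggregator device: create sysfs files, install the notify handler. */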
static int acpi_pad_add(struct acpi_device *device)
{
	acpi_status status;

	strcpy(acpi_device_name(device), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME);
	strcpy(acpi_device_class(device), ACPI_PROCESSOR_AGGREGATOR_CLASS);

	if (acpi_pad_add_sysfs(device))
		return -ENODEV;

	status = acpi_install_notify_handler(device->handle,
		ACPI_DEVICE_NOTIFY, acpi_pad_notify, device);
	if (ACPI_FAILURE(status)) {
		acpi_pad_remove_sysfs(device);
		return -ENODEV;
	}

	return 0;
}

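/*
 * Unbind: stop all idle injection, then remove the notify handler and
 * the sysfs files.
 */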
static int acpi_pad_remove(struct acpi_device *device)
{
	mutex_lock(&isolated_cpus_lock);
	acpi_pad_idle_cpus(0);
	mutex_unlock(&isolated_cpus_lock);

	acpi_remove_notify_handler(device->handle,
		ACPI_DEVICE_NOTIFY, acpi_pad_notify);
	acpi_pad_remove_sysfs(device);
	return 0;
}

static const struct acpi_device_id pad_device_ids[] = {
	{"ACPI000C", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, pad_device_ids);

static struct acpi_driver acpi_pad_driver = {
	.name = "processor_aggregator",
	.class = ACPI_PROCESSOR_AGGREGATOR_CLASS,
	.ids = pad_device_ids,
	.ops = {
		.add = acpi_pad_add,
		.remove = acpi_pad_remove,
	},
};

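/*
 * Module init: defer to Xen's own ACPI PAD handling when running as
 * Dom0, require a usable MWAIT hint, then register the ACPI driver.
 */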
static int __init acpi_pad_init(void)
{
	/* Xen ACPI PAD is used when running as Xen Dom0. */
	if (xen_initial_domain())
		return -ENODEV;

	power_saving_mwait_init();
	if (power_saving_mwait_eax == 0)
		return -EINVAL;

	return acpi_bus_register_driver(&acpi_pad_driver);
}

static void __exit acpi_pad_exit(void)
{
	acpi_bus_unregister_driver(&acpi_pad_driver);
}

module_init(acpi_pad_init);
module_exit(acpi_pad_exit);
MODULE_AUTHOR("Shaohua Li<[email protected]>");
MODULE_DESCRIPTION("ACPI Processor Aggregator Driver");
MODULE_LICENSE("GPL");