// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * processor_idle - idle state submodule to the ACPI processor driver
 * - Added processor hotplug support
 * - Added support for C3 on SMP
 */

#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/sort.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <linux/minmax.h>
#include <linux/perf_event.h>
#include <acpi/processor.h>
#include <linux/context_tracking.h>
/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#include <asm/cpu.h>
#endif

#define ACPI_IDLE_STATE_START	(IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX) ? 1 : 0)
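/*
 * When the architecture provides a polling idle state (installed at index
 * 0 by cpuidle_poll_state_init() below), the ACPI C-states start at index
 * 1 in the cpuidle state table; otherwise they start at index 0.
 */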
static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0400);
static bool nocst __read_mostly;
module_param(nocst, bool, 0400);
static bool bm_check_disable __read_mostly;
module_param(bm_check_disable, bool, 0400);

static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);
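/*
 * These parameters live under the "processor" module.  As a hypothetical
 * example, booting with:
 *
 *   processor.max_cstate=1 processor.nocst=1
 *
 * limits the driver to C1 and ignores _CST.  max_cstate, nocst and
 * bm_check_disable are 0400 and thus fixed after load; latency_factor
 * (0644) can also be tuned at run time, e.g. via
 * /sys/module/processor/parameters/latency_factor.
 */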
static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);

struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};

#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);
static int disabled_by_idle_boot_param(void)
{
	return boot_option_idle_override == IDLE_POLL ||
	       boot_option_idle_override == IDLE_HALT;
}
/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	pr_notice("%s detected - limiting to C%ld max_cstate. Override with \"processor.max_cstate=%d\"\n",
		  id->ident, (long)id->driver_data,
		  ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}
static const struct dmi_system_id processor_power_dmi_table[] = {
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION, "SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{ set_max_cstate, "Pavilion zv5000", {
	  DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
	  DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion zv5000 (DS502A#ABA)")},
	 (void *)1},
	{ set_max_cstate, "Asus L8400B", {
	  DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
	  DMI_MATCH(DMI_PRODUCT_NAME, "L8400B series Notebook PC")},
	 (void *)1},
	{},
};
/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void __cpuidle acpi_safe_halt(void)
{
	if (!tif_need_resched()) {
		raw_safe_halt();
		raw_local_irq_disable();
	}
}
#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
		return;

	if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E))
		type = ACPI_STATE_C1;

	/*
	 * Check if one of the previous states already marked the lapic
	 * unstable.
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}
static void __lapic_timer_propagate_broadcast(void *arg)
{
	struct acpi_processor *pr = arg;

	if (pr->power.timer_broadcast_on_state < INT_MAX)
		tick_broadcast_enable();
	else
		tick_broadcast_disable();
}
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
{
	smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
				 (void *)pr, 1);
}
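/*
 * In lapic_timer_needs_broadcast() below, the pointer difference
 * (cx - pr->power.states) is simply the index of @cx within the states[]
 * array, so the test asks whether this state is at or beyond the
 * shallowest state for which the LAPIC timer is known to stop.
 */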
/* Power(C) State timer broadcast control */
static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx)
{
	return cx - pr->power.states >= pr->power.timer_broadcast_on_state;
}
#else

static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cstate) { }
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }

static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx)
{
	return false;
}

#endif
#if defined(CONFIG_X86)
static void tsc_check_state(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
	case X86_VENDOR_HYGON:
	case X86_VENDOR_INTEL:
	case X86_VENDOR_CENTAUR:
	case X86_VENDOR_ZHAOXIN:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			return;
		fallthrough;
	default:
		/* TSC could halt in idle, so notify users */
		if (state > ACPI_STATE_C1)
			mark_tsc_unstable("TSC halts in idle");
	}
}
#else
static void tsc_check_state(int state) { return; }
#endif
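/*
 * Note for acpi_processor_get_power_info_fadt() below: per the ACPI
 * specification, the P_LVL2 and P_LVL3 registers sit at offsets 4 and 5
 * of the processor block, which is why the C2 and C3 addresses are
 * computed as pr->pblk + 4 and pr->pblk + 5.
 */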
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;

	/*
	 * FADT specified C2 latency must be less than or equal to
	 * 100 microseconds.
	 */
	if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		acpi_handle_debug(pr->handle, "C2 latency too large [%d]\n",
				  acpi_gbl_FADT.c2_latency);
		/* invalidate C2 */
		pr->power.states[ACPI_STATE_C2].address = 0;
	}

	/*
	 * FADT supplied C3 latency must be less than or equal to
	 * 1000 microseconds.
	 */
	if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		acpi_handle_debug(pr->handle, "C3 latency too large [%d]\n",
				  acpi_gbl_FADT.c3_latency);
		/* invalidate C3 */
		pr->power.states[ACPI_STATE_C3].address = 0;
	}

	acpi_handle_debug(pr->handle, "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address);

	snprintf(pr->power.states[ACPI_STATE_C2].desc,
		 ACPI_CX_DESC_LEN, "ACPI P_LVL2 IOPORT 0x%x",
		 pr->power.states[ACPI_STATE_C2].address);
	snprintf(pr->power.states[ACPI_STATE_C3].desc,
		 ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x",
		 pr->power.states[ACPI_STATE_C3].address);

	return 0;
}
static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;

		snprintf(pr->power.states[ACPI_STATE_C1].desc,
			 ACPI_CX_DESC_LEN, "ACPI HLT");
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;

	return 0;
}
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	int ret;

	if (nocst)
		return -ENODEV;

	ret = acpi_processor_evaluate_cst(pr->handle, pr->id, &pr->power);
	if (ret)
		return ret;

	if (!pr->power.count)
		return -EFAULT;

	pr->flags.has_cst = 1;

	return 0;
}
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag = -1;
	static int bm_control_flag = -1;

	if (!cx->address)
		return;

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	if (errata.piix4.fdma) {
		acpi_handle_debug(pr->handle,
				  "C3 not supported on PIIX4 with Type-F DMA\n");
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (bm_check_flag == -1) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
		bm_control_flag = pr->flags.bm_control;
	} else {
		pr->flags.bm_check = bm_check_flag;
		pr->flags.bm_control = bm_control_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				acpi_handle_debug(pr->handle,
						  "C3 support requires BM control\n");
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				acpi_handle_debug(pr->handle,
						  "C3 support without BM control\n");
			}
		}
	} else {
		/*
		 * WBINVD must be set in the FADT for the C3 state to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			acpi_handle_debug(pr->handle,
					  "Cache invalidation should work properly for C3 to be enabled on SMP systems\n");
			return;
		}
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy. Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy.
	 */
	cx->valid = 1;

	/*
	 * On older chipsets, BM_RLD needs to be set
	 * in order for Bus Master activity to wake the
	 * system from C3. Newer chipsets handle DMA
	 * during C3 automatically and BM_RLD is a NOP.
	 * In either case, the proper way to
	 * handle BM_RLD is to set it and leave it set.
	 */
	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
}
static int acpi_cst_latency_cmp(const void *a, const void *b)
{
	const struct acpi_processor_cx *x = a, *y = b;

	if (!(x->valid && y->valid))
		return 0;
	if (x->latency > y->latency)
		return 1;
	if (x->latency < y->latency)
		return -1;
	return 0;
}

static void acpi_cst_latency_swap(void *a, void *b, int n)
{
	struct acpi_processor_cx *x = a, *y = b;

	if (!(x->valid && y->valid))
		return;
	swap(x->latency, y->latency);
}
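/*
 * The swap callback above deliberately exchanges only the latency fields,
 * not whole acpi_processor_cx entries: states[] stays ordered by C-state
 * type while the latencies reported by buggy firmware are re-sorted into
 * ascending order.  For example (hypothetical values), latencies of
 * (C1: 120us, C2: 10us, C3: 200us) would become (10us, 120us, 200us).
 */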
static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;
	unsigned int last_latency = 0;
	unsigned int last_type = 0;
	bool buggy_latency = false;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;
		case ACPI_STATE_C2:
			if (!cx->address)
				break;
			cx->valid = 1;
			break;
		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			break;
		}
		if (!cx->valid)
			continue;
		if (cx->type >= last_type && cx->latency < last_latency)
			buggy_latency = true;
		last_latency = cx->latency;
		last_type = cx->type;

		lapic_timer_check_state(i, pr, cx);
		tsc_check_state(cx->type);

		working++;
	}

	if (buggy_latency) {
		pr_notice("FW issue: working around C-state latencies out of order\n");
		sort(&pr->power.states[1], max_cstate,
		     sizeof(struct acpi_processor_cx),
		     acpi_cst_latency_cmp,
		     acpi_cst_latency_swap);
	}

	lapic_timer_propagate_broadcast(pr);

	return working;
}
static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;

	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			pr->flags.power = 1;
		}
	}

	return 0;
}
/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	if (bm_check_disable)
		return 0;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}
static __cpuidle void io_idle(unsigned long addr)
{
	/* IO port based C-state */
	inb(addr);

#ifdef CONFIG_X86
	/* No delay is needed if we are in guest */
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;
	/*
	 * Modern (>=Nehalem) Intel systems use ACPI via intel_idle,
	 * not this code. Assume that any Intel systems using this
	 * are ancient and may need the dummy wait. This also assumes
	 * that the motivating chipset issue was Intel-only.
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return;
#endif
	/*
	 * Dummy wait op - must do something useless after P_LVL2 read
	 * because chipsets cannot guarantee that STPCLK# signal gets
	 * asserted in time to freeze execution properly.
	 *
	 * This workaround has been in place since the original ACPI
	 * implementation was merged, circa 2002.
	 *
	 * If a profile is pointing to this instruction, please first
	 * consider moving your system to a more modern idle
	 * mechanism.
	 */
	inl(acpi_gbl_FADT.xpm_timer_block.address);
}
/**
 * acpi_idle_do_entry - enter idle state using the appropriate method
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	perf_lopwr_cb(true);

	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		io_idle(cx->address);
	}

	perf_lopwr_cb(false);
}
/**
 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
 * @dev: the target CPU
 * @index: the index of suggested state
 */
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	ACPI_FLUSH_CPU_CACHE();

	while (1) {

		if (cx->entry_method == ACPI_CSTATE_HALT)
			safe_halt();
		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
			io_idle(cx->address);
		} else
			return -ENODEV;

#if defined(CONFIG_X86) && defined(CONFIG_HOTPLUG_CPU)
		cond_wakeup_cpu0();
#endif
	}

	/* Never reached */
	return 0;
}
static __always_inline bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
{
	return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
		!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED);
}
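/*
 * Rationale: without _CST, the FADT C2/C3 data is only trustworthy on SMP
 * when the firmware sets ACPI_FADT_C2_MP_SUPPORTED, so acpi_idle_enter()
 * uses this predicate to demote C2/C3 requests to C1 in that case.
 */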
static int c3_cpu_count;
static DEFINE_RAW_SPINLOCK(c3_lock);
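/*
 * c3_cpu_count tracks how many online CPUs are currently inside C3.  Bus
 * master arbitration (ARB_DIS) is disabled only when the last CPU enters
 * C3 and re-enabled by the first CPU to leave it; c3_lock serializes the
 * counter against the ARB_DIS register writes.
 */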
/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @drv: cpuidle driver
 * @pr: Target processor
 * @cx: Target state context
 * @index: index of target state
 */
static int __cpuidle acpi_idle_enter_bm(struct cpuidle_driver *drv,
					struct acpi_processor *pr,
					struct acpi_processor_cx *cx,
					int index)
{
	static struct acpi_processor_cx safe_cx = {
		.entry_method = ACPI_CSTATE_HALT,
	};

	/*
	 * bm_check implies we need ARB_DIS
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is not set.
	 * In that case we cannot do much, we enter C3 without doing anything.
	 */
	bool dis_bm = pr->flags.bm_control;

	instrumentation_begin();

	/* If we can skip BM, demote to a safe state. */
	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
		dis_bm = false;
		index = drv->safe_state_index;
		if (index >= 0) {
			cx = this_cpu_read(acpi_cstate[index]);
		} else {
			cx = &safe_cx;
			index = -EBUSY;
		}
	}

	if (dis_bm) {
		raw_spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		raw_spin_unlock(&c3_lock);
	}

	ct_cpuidle_enter();

	acpi_idle_do_entry(cx);

	ct_cpuidle_exit();

	/* Re-enable bus master arbitration */
	if (dis_bm) {
		raw_spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		raw_spin_unlock(&c3_lock);
	}

	instrumentation_end();

	return index;
}
static int __cpuidle acpi_idle_enter(struct cpuidle_device *dev,
				     struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
	struct acpi_processor *pr;

	pr = __this_cpu_read(processors);
	if (unlikely(!pr))
		return -EINVAL;

	if (cx->type != ACPI_STATE_C1) {
		if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check)
			return acpi_idle_enter_bm(drv, pr, cx, index);

		/* C2 to C1 demotion. */
		if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
			index = ACPI_IDLE_STATE_START;
			cx = per_cpu(acpi_cstate[index], dev->cpu);
		}
	}

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	acpi_idle_do_entry(cx);

	return index;
}
static int __cpuidle acpi_idle_enter_s2idle(struct cpuidle_device *dev,
					    struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	if (cx->type == ACPI_STATE_C3) {
		struct acpi_processor *pr = __this_cpu_read(processors);

		if (unlikely(!pr))
			return 0;

		if (pr->flags.bm_check) {
			u8 bm_sts_skip = cx->bm_sts_skip;

			/* Don't check BM_STS, do an unconditional ARB_DIS for S2IDLE */
			cx->bm_sts_skip = 1;
			acpi_idle_enter_bm(drv, pr, cx, index);
			cx->bm_sts_skip = bm_sts_skip;

			return 0;
		} else {
			ACPI_FLUSH_CPU_CACHE();
		}
	}
	acpi_idle_do_entry(cx);

	return 0;
}
static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	int i, count = ACPI_IDLE_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;

	if (max_cstate == 0)
		max_cstate = 1;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		state = &acpi_idle_driver.states[count];
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

		per_cpu(acpi_cstate[count], dev->cpu) = cx;

		if (lapic_timer_needs_broadcast(pr, cx))
			state->flags |= CPUIDLE_FLAG_TIMER_STOP;

		if (cx->type == ACPI_STATE_C3) {
			state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
			if (pr->flags.bm_check)
				state->flags |= CPUIDLE_FLAG_RCU_IDLE;
		}

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	if (!count)
		return -EINVAL;

	return 0;
}
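/*
 * Note for acpi_processor_setup_cstates() below: FADT/_CST data carries
 * no residency information, so the target residency is derived from the
 * exit latency via the latency_factor heuristic.  With the default
 * latency_factor of 2, for example, a C-state with a 100 us exit latency
 * is given a 200 us target residency.
 */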
static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
	int i, count;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (max_cstate == 0)
		max_cstate = 1;

	if (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX)) {
		cpuidle_poll_state_init(drv);
		count = 1;
	} else {
		count = 0;
	}

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

		state = &drv->states[count];
		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strscpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;
		state->enter = acpi_idle_enter;

		state->flags = 0;
		if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2 ||
		    cx->type == ACPI_STATE_C3) {
			state->enter_dead = acpi_idle_play_dead;
			if (cx->type != ACPI_STATE_C3)
				drv->safe_state_index = count;
		}
		/*
		 * Halt-induced C1 is not good for ->enter_s2idle, because it
		 * re-enables interrupts on exit. Moreover, C1 is generally not
		 * particularly interesting from the suspend-to-idle angle, so
		 * avoid C1 and the situations in which we may need to fall back
		 * to it altogether.
		 */
		if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
			state->enter_s2idle = acpi_idle_enter_s2idle;

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	drv->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}
static inline void acpi_processor_cstate_first_run_checks(void)
{
	static int first_run;

	if (first_run)
		return;
	dmi_check_system(processor_power_dmi_table);
	max_cstate = acpi_processor_cstate_check(max_cstate);
	if (max_cstate < ACPI_C_STATES_MAX)
		pr_notice("processor limited to max C-state %d\n", max_cstate);

	first_run++;

	if (nocst)
		return;

	acpi_processor_claim_cst_control();
}
#else

static inline int disabled_by_idle_boot_param(void) { return 0; }
static inline void acpi_processor_cstate_first_run_checks(void) { }
static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
	return -ENODEV;
}

static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	return -EINVAL;
}

static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
	return -EINVAL;
}

#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */
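/*
 * Low Power Idle (_LPI) support: platforms may expose idle states at each
 * level of the processor hierarchy (core, cluster, ...).  The code below
 * evaluates _LPI at every level and flattens the per-level lists into the
 * composite states that cpuidle ultimately sees.
 */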
struct acpi_lpi_states_array {
	unsigned int size;
	unsigned int composite_states_size;
	struct acpi_lpi_state *entries;
	struct acpi_lpi_state *composite_states[ACPI_PROCESSOR_MAX_POWER];
};
static int obj_get_integer(union acpi_object *obj, u32 *value)
{
	if (obj->type != ACPI_TYPE_INTEGER)
		return -EINVAL;

	*value = obj->integer.value;
	return 0;
}
static int acpi_processor_evaluate_lpi(acpi_handle handle,
				       struct acpi_lpi_states_array *info)
{
	acpi_status status;
	int ret = 0;
	int pkg_count, state_idx = 1, loop;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *lpi_data;
	struct acpi_lpi_state *lpi_state;

	status = acpi_evaluate_object(handle, "_LPI", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		acpi_handle_debug(handle, "No _LPI, giving up\n");
		return -ENODEV;
	}

	lpi_data = buffer.pointer;

	/* There must be at least 4 elements = 3 elements + 1 package */
	if (!lpi_data || lpi_data->type != ACPI_TYPE_PACKAGE ||
	    lpi_data->package.count < 4) {
		pr_debug("not enough elements in _LPI\n");
		ret = -ENODATA;
		goto end;
	}

	pkg_count = lpi_data->package.elements[2].integer.value;

	/* Validate number of power states. */
	if (pkg_count < 1 || pkg_count != lpi_data->package.count - 3) {
		pr_debug("count given by _LPI is not valid\n");
		ret = -ENODATA;
		goto end;
	}

	lpi_state = kcalloc(pkg_count, sizeof(*lpi_state), GFP_KERNEL);
	if (!lpi_state) {
		ret = -ENOMEM;
		goto end;
	}

	info->size = pkg_count;
	info->entries = lpi_state;

	/* LPI States start at index 3 */
	for (loop = 3; state_idx <= pkg_count; loop++, state_idx++, lpi_state++) {
		union acpi_object *element, *pkg_elem, *obj;

		element = &lpi_data->package.elements[loop];
		if (element->type != ACPI_TYPE_PACKAGE || element->package.count < 7)
			continue;

		pkg_elem = element->package.elements;

		obj = pkg_elem + 6;
		if (obj->type == ACPI_TYPE_BUFFER) {
			struct acpi_power_register *reg;

			reg = (struct acpi_power_register *)obj->buffer.pointer;
			if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
			    reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)
				continue;

			lpi_state->address = reg->address;
			lpi_state->entry_method =
				reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE ?
				ACPI_CSTATE_FFH : ACPI_CSTATE_SYSTEMIO;
		} else if (obj->type == ACPI_TYPE_INTEGER) {
			lpi_state->entry_method = ACPI_CSTATE_INTEGER;
			lpi_state->address = obj->integer.value;
		} else {
			continue;
		}

		/* elements[7,8] skipped for now i.e. Residency/Usage counter */

		obj = pkg_elem + 9;
		if (obj->type == ACPI_TYPE_STRING)
			strscpy(lpi_state->desc, obj->string.pointer,
				ACPI_CX_DESC_LEN);

		lpi_state->index = state_idx;
		if (obj_get_integer(pkg_elem + 0, &lpi_state->min_residency)) {
			pr_debug("No min. residency found, assuming 10 us\n");
			lpi_state->min_residency = 10;
		}

		if (obj_get_integer(pkg_elem + 1, &lpi_state->wake_latency)) {
			pr_debug("No wakeup residency found, assuming 10 us\n");
			lpi_state->wake_latency = 10;
		}

		if (obj_get_integer(pkg_elem + 2, &lpi_state->flags))
			lpi_state->flags = 0;

		if (obj_get_integer(pkg_elem + 3, &lpi_state->arch_flags))
			lpi_state->arch_flags = 0;

		if (obj_get_integer(pkg_elem + 4, &lpi_state->res_cnt_freq))
			lpi_state->res_cnt_freq = 1;

		if (obj_get_integer(pkg_elem + 5, &lpi_state->enable_parent_state))
			lpi_state->enable_parent_state = 0;
	}

	acpi_handle_debug(handle, "Found %d power states\n", state_idx);
end:
	kfree(buffer.pointer);
	return ret;
}
/*
 * flat_state_cnt - the number of composite LPI states after the process of flattening
 */
static int flat_state_cnt;
/**
 * combine_lpi_states - combine local and parent LPI states to form a composite LPI state
 *
 * @local: local LPI state
 * @parent: parent LPI state
 * @result: composite LPI state
 */
static bool combine_lpi_states(struct acpi_lpi_state *local,
			       struct acpi_lpi_state *parent,
			       struct acpi_lpi_state *result)
{
	if (parent->entry_method == ACPI_CSTATE_INTEGER) {
		if (!parent->address) /* 0 means autopromotable */
			return false;
		result->address = local->address + parent->address;
	} else {
		result->address = parent->address;
	}

	result->min_residency = max(local->min_residency, parent->min_residency);
	result->wake_latency = local->wake_latency + parent->wake_latency;
	result->enable_parent_state = parent->enable_parent_state;
	result->entry_method = local->entry_method;

	result->flags = parent->flags;
	result->arch_flags = parent->arch_flags;
	result->index = parent->index;

	strscpy(result->desc, local->desc, ACPI_CX_DESC_LEN);
	strlcat(result->desc, "+", ACPI_CX_DESC_LEN);
	strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN);

	return true;
}
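/*
 * The composite description concatenates the local and parent
 * descriptions.  For example (hypothetical names), combining a core state
 * "CORE-PWRDN" with a cluster state "CLUSTER-RET" yields a composite
 * state described as "CORE-PWRDN+CLUSTER-RET".
 */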
#define ACPI_LPI_STATE_FLAGS_ENABLED	BIT(0)

static void stash_composite_state(struct acpi_lpi_states_array *curr_level,
				  struct acpi_lpi_state *t)
{
	curr_level->composite_states[curr_level->composite_states_size++] = t;
}
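/*
 * flatten_lpi_states() is invoked once per level of the hierarchy, leaf
 * level first.  Each enabled state of the current level is combined with
 * every composite state of the previous (deeper) level that is permitted
 * to promote into it (t->index <= p->enable_parent_state), growing the
 * flat pr->power.lpi_states[] array that cpuidle consumes.
 */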
static int flatten_lpi_states(struct acpi_processor *pr,
			      struct acpi_lpi_states_array *curr_level,
			      struct acpi_lpi_states_array *prev_level)
{
	int i, j, state_count = curr_level->size;
	struct acpi_lpi_state *p, *t = curr_level->entries;

	curr_level->composite_states_size = 0;
	for (j = 0; j < state_count; j++, t++) {
		struct acpi_lpi_state *flpi;

		if (!(t->flags & ACPI_LPI_STATE_FLAGS_ENABLED))
			continue;

		if (flat_state_cnt >= ACPI_PROCESSOR_MAX_POWER) {
			pr_warn("Limiting number of LPI states to max (%d)\n",
				ACPI_PROCESSOR_MAX_POWER);
			pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}

		flpi = &pr->power.lpi_states[flat_state_cnt];

		if (!prev_level) { /* leaf/processor node */
			memcpy(flpi, t, sizeof(*t));
			stash_composite_state(curr_level, flpi);
			flat_state_cnt++;
			continue;
		}

		for (i = 0; i < prev_level->composite_states_size; i++) {
			p = prev_level->composite_states[i];
			if (t->index <= p->enable_parent_state &&
			    combine_lpi_states(p, t, flpi)) {
				stash_composite_state(curr_level, flpi);
				flat_state_cnt++;
				break;
			}
		}
	}

	kfree(curr_level->entries);
	return 0;
}
int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
{
	return -EOPNOTSUPP;
}
static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
{
	acpi_status status;
	int ret, i;
	acpi_handle handle = pr->handle, pr_ahandle;
	struct acpi_device *d = NULL;
	struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;

	/* make sure our architecture has support */
	ret = acpi_processor_ffh_lpi_probe(pr->id);
	if (ret == -EOPNOTSUPP)
		return ret;

	if (!osc_pc_lpi_support_confirmed)
		return -EOPNOTSUPP;

	if (!acpi_has_method(handle, "_LPI"))
		return -EINVAL;

	flat_state_cnt = 0;
	prev = &info[0];
	curr = &info[1];
	handle = pr->handle;
	ret = acpi_processor_evaluate_lpi(handle, prev);
	if (ret)
		return ret;
	flatten_lpi_states(pr, prev, NULL);

	status = acpi_get_parent(handle, &pr_ahandle);
	while (ACPI_SUCCESS(status)) {
		d = acpi_fetch_acpi_dev(pr_ahandle);
		if (!d)
			break;

		handle = pr_ahandle;

		if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID))
			break;

		/* can be optional ? */
		if (!acpi_has_method(handle, "_LPI"))
			break;

		ret = acpi_processor_evaluate_lpi(handle, curr);
		if (ret)
			break;

		/* flatten all the LPI states in this level of hierarchy */
		flatten_lpi_states(pr, curr, prev);

		tmp = prev, prev = curr, curr = tmp;

		status = acpi_get_parent(handle, &pr_ahandle);
	}

	pr->power.count = flat_state_cnt;
	/* reset the index after flattening */
	for (i = 0; i < pr->power.count; i++)
		pr->power.lpi_states[i].index = i;

	/* Tell driver that _LPI is supported. */
	pr->flags.has_lpi = 1;
	pr->flags.power = 1;

	return 0;
}
int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
{
	return -ENODEV;
}
/**
 * acpi_idle_lpi_enter - enters an ACPI LPI state
 * @dev: the target CPU
 * @drv: cpuidle driver containing cpuidle state info
 * @index: index of target state
 *
 * Return: 0 for success or negative value for error
 */
static int acpi_idle_lpi_enter(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct acpi_lpi_state *lpi;

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	lpi = &pr->power.lpi_states[index];
	if (lpi->entry_method == ACPI_CSTATE_FFH)
		return acpi_processor_ffh_lpi_enter(lpi);

	return -EINVAL;
}
static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
{
	int i;
	struct acpi_lpi_state *lpi;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.has_lpi)
		return -EOPNOTSUPP;

	for (i = 0; i < pr->power.count && i < CPUIDLE_STATE_MAX; i++) {
		lpi = &pr->power.lpi_states[i];

		state = &drv->states[i];
		snprintf(state->name, CPUIDLE_NAME_LEN, "LPI-%d", i);
		strscpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = lpi->wake_latency;
		state->target_residency = lpi->min_residency;
		if (lpi->arch_flags)
			state->flags |= CPUIDLE_FLAG_TIMER_STOP;
		if (i != 0 && lpi->entry_method == ACPI_CSTATE_FFH)
			state->flags |= CPUIDLE_FLAG_RCU_IDLE;
		state->enter = acpi_idle_lpi_enter;
		drv->safe_state_index = i;
	}

	drv->state_count = i;

	return 0;
}
/**
 * acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
 * global state data i.e. idle routines
 *
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
{
	int i;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.power_setup_done || !pr->flags.power)
		return -EINVAL;

	drv->safe_state_index = -1;
	for (i = ACPI_IDLE_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
		drv->states[i].name[0] = '\0';
		drv->states[i].desc[0] = '\0';
	}

	if (pr->flags.has_lpi)
		return acpi_processor_setup_lpi_states(pr);

	return acpi_processor_setup_cstates(pr);
}
/**
 * acpi_processor_setup_cpuidle_dev - prepares and configures CPUIDLE
 * device i.e. per-cpu data
 *
 * @pr: the ACPI processor
 * @dev: the cpuidle device
 */
static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr,
					    struct cpuidle_device *dev)
{
	if (!pr->flags.power_setup_done || !pr->flags.power || !dev)
		return -EINVAL;

	dev->cpu = pr->id;
	if (pr->flags.has_lpi)
		return acpi_processor_ffh_lpi_probe(pr->id);

	return acpi_processor_setup_cpuidle_cx(pr, dev);
}
static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	int ret;

	ret = acpi_processor_get_lpi_info(pr);
	if (ret)
		ret = acpi_processor_get_cstate_info(pr);

	return ret;
}
int acpi_processor_hotplug(struct acpi_processor *pr)
{
	int ret = 0;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	dev = per_cpu(acpi_cpuidle_device, pr->id);
	cpuidle_pause_and_lock();
	cpuidle_disable_device(dev);
	ret = acpi_processor_get_power_info(pr);
	if (!ret && pr->flags.power) {
		acpi_processor_setup_cpuidle_dev(pr, dev);
		ret = cpuidle_enable_device(dev);
	}
	cpuidle_resume_and_unlock();

	return ret;
}
int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
{
	int cpu;
	struct acpi_processor *_pr;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/*
	 * FIXME: Design the ACPI notification to make it once per
	 * system instead of once per-cpu. This condition is a hack
	 * to make the code that updates C-States be called once.
	 */

	if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {

		/* Protect against cpu-hotplug */
		cpus_read_lock();
		cpuidle_pause_and_lock();

		/* Disable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			dev = per_cpu(acpi_cpuidle_device, cpu);
			cpuidle_disable_device(dev);
		}

		/* Populate Updated C-state information */
		acpi_processor_get_power_info(pr);
		acpi_processor_setup_cpuidle_states(pr);

		/* Enable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			acpi_processor_get_power_info(_pr);
			if (_pr->flags.power) {
				dev = per_cpu(acpi_cpuidle_device, cpu);
				acpi_processor_setup_cpuidle_dev(_pr, dev);
				cpuidle_enable_device(dev);
			}
		}
		cpuidle_resume_and_unlock();
		cpus_read_unlock();
	}

	return 0;
}
static int acpi_processor_registered;

int acpi_processor_power_init(struct acpi_processor *pr)
{
	int retval;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	acpi_processor_cstate_first_run_checks();

	if (!acpi_processor_get_power_info(pr))
		pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if (pr->flags.power) {
		/* Register acpi_idle_driver if not already registered */
		if (!acpi_processor_registered) {
			acpi_processor_setup_cpuidle_states(pr);
			retval = cpuidle_register_driver(&acpi_idle_driver);
			if (retval)
				return retval;
			pr_debug("%s registered with cpuidle\n",
				 acpi_idle_driver.name);
		}

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev)
			return -ENOMEM;
		per_cpu(acpi_cpuidle_device, pr->id) = dev;

		acpi_processor_setup_cpuidle_dev(pr, dev);

		/*
		 * Register per-cpu cpuidle_device. The cpuidle driver
		 * must already be registered before registering the device.
		 */
		retval = cpuidle_register_device(dev);
		if (retval) {
			if (acpi_processor_registered == 0)
				cpuidle_unregister_driver(&acpi_idle_driver);
			return retval;
		}
		acpi_processor_registered++;
	}
	return 0;
}
int acpi_processor_power_exit(struct acpi_processor *pr)
{
	struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);

	if (disabled_by_idle_boot_param())
		return 0;

	if (pr->flags.power) {
		cpuidle_unregister_device(dev);
		acpi_processor_registered--;
		if (acpi_processor_registered == 0)
			cpuidle_unregister_driver(&acpi_idle_driver);

		kfree(dev);
	}

	pr->flags.power_setup_done = 0;
	return 0;
}