// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Google, Inc.
 *
 * Author:
 *	Colin Cross <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/cpu_pm.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>

/*
 * atomic_notifiers use a spinlock_t, which can block under PREEMPT_RT.
 * Notifications for cpu_pm will be issued by the idle task itself, which can
 * never block, IOW it requires using a raw_spinlock_t.
 */
static struct {
	struct raw_notifier_head chain;	/* updated under @lock; read locklessly (RCU) in cpu_pm_notify() */
	raw_spinlock_t lock;		/* serializes chain updates and robust walks */
} cpu_pm_notifier = {
	.chain = RAW_NOTIFIER_INIT(cpu_pm_notifier.chain),
	.lock = __RAW_SPIN_LOCK_UNLOCKED(cpu_pm_notifier.lock),
};

/*
 * Send @event down the notifier chain locklessly: readers rely on RCU only,
 * so this is callable from the idle task, which must never block on
 * cpu_pm_notifier.lock (see comment above cpu_pm_notifier).
 */
static int cpu_pm_notify(enum cpu_pm_event event)
{
	int ret;

	/*
	 * This introduces a RCU read critical section, which could be
	 * dysfunctional in cpu idle. Copy RCU_NONIDLE code to let RCU know
	 * this.
	 */
	ct_irq_enter_irqson();
	rcu_read_lock();
	ret = raw_notifier_call_chain(&cpu_pm_notifier.chain, event, NULL);
	rcu_read_unlock();
	ct_irq_exit_irqson();

	return notifier_to_errno(ret);
}

/*
 * As cpu_pm_notify(), but uses raw_notifier_call_chain_robust(), which takes
 * both an entry (@event_up) and a rollback (@event_down) event.  The walk is
 * done under cpu_pm_notifier.lock so the chain cannot change concurrently;
 * context tracking is entered first, matching cpu_pm_notify().
 */
static int cpu_pm_notify_robust(enum cpu_pm_event event_up, enum cpu_pm_event event_down)
{
	unsigned long flags;
	int ret;

	ct_irq_enter_irqson();
	raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
	ret = raw_notifier_call_chain_robust(&cpu_pm_notifier.chain, event_up, event_down, NULL);
	raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);
	ct_irq_exit_irqson();

	return notifier_to_errno(ret);
}
60 | ||
61 | /** | |
62 | * cpu_pm_register_notifier - register a driver with cpu_pm | |
63 | * @nb: notifier block to register | |
64 | * | |
65 | * Add a driver to a list of drivers that are notified about | |
66 | * CPU and CPU cluster low power entry and exit. | |
67 | * | |
b2f6662a | 68 | * This function has the same return conditions as raw_notifier_chain_register. |
ab10023e CC |
69 | */ |
70 | int cpu_pm_register_notifier(struct notifier_block *nb) | |
71 | { | |
b2f6662a VS |
72 | unsigned long flags; |
73 | int ret; | |
74 | ||
75 | raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags); | |
76 | ret = raw_notifier_chain_register(&cpu_pm_notifier.chain, nb); | |
77 | raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags); | |
78 | return ret; | |
ab10023e CC |
79 | } |
80 | EXPORT_SYMBOL_GPL(cpu_pm_register_notifier); | |
81 | ||
82 | /** | |
83 | * cpu_pm_unregister_notifier - unregister a driver with cpu_pm | |
84 | * @nb: notifier block to be unregistered | |
85 | * | |
86 | * Remove a driver from the CPU PM notifier list. | |
87 | * | |
b2f6662a | 88 | * This function has the same return conditions as raw_notifier_chain_unregister. |
ab10023e CC |
89 | */ |
90 | int cpu_pm_unregister_notifier(struct notifier_block *nb) | |
91 | { | |
b2f6662a VS |
92 | unsigned long flags; |
93 | int ret; | |
94 | ||
95 | raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags); | |
96 | ret = raw_notifier_chain_unregister(&cpu_pm_notifier.chain, nb); | |
97 | raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags); | |
98 | return ret; | |
ab10023e CC |
99 | } |
100 | EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier); | |

/**
 * cpu_pm_enter - CPU low power entry notifier
 *
 * Notifies listeners that a single CPU is entering a low power state that may
 * cause some blocks in the same power domain as the cpu to reset.
 *
 * Must be called on the affected CPU with interrupts disabled. Platform is
 * responsible for ensuring that cpu_pm_enter is not called twice on the same
 * CPU before cpu_pm_exit is called. Notified drivers can include VFP
 * co-processor, interrupt controller and its PM extensions, local CPU
 * timers context save/restore which shouldn't be interrupted. Hence it
 * must be called with interrupts disabled.
 *
 * Return conditions are same as __raw_notifier_call_chain. On failure,
 * CPU_PM_ENTER_FAILED is used to unwind listeners that already accepted
 * CPU_PM_ENTER (see cpu_pm_notify_robust()).
 */
int cpu_pm_enter(void)
{
	return cpu_pm_notify_robust(CPU_PM_ENTER, CPU_PM_ENTER_FAILED);
}
EXPORT_SYMBOL_GPL(cpu_pm_enter);

/**
 * cpu_pm_exit - CPU low power exit notifier
 *
 * Notifies listeners that a single CPU is exiting a low power state that may
 * have caused some blocks in the same power domain as the cpu to reset.
 *
 * Notified drivers can include VFP co-processor, interrupt controller
 * and its PM extensions, local CPU timers context save/restore which
 * shouldn't be interrupted. Hence it must be called with interrupts disabled.
 *
 * Return conditions are same as __raw_notifier_call_chain. Unlike
 * cpu_pm_enter(), the exit path uses the plain (lockless) notify — there is
 * no rollback event on exit.
 */
int cpu_pm_exit(void)
{
	return cpu_pm_notify(CPU_PM_EXIT);
}
EXPORT_SYMBOL_GPL(cpu_pm_exit);

/**
 * cpu_cluster_pm_enter - CPU cluster low power entry notifier
 *
 * Notifies listeners that all cpus in a power domain are entering a low power
 * state that may cause some blocks in the same power domain to reset.
 *
 * Must be called after cpu_pm_enter has been called on all cpus in the power
 * domain, and before cpu_pm_exit has been called on any cpu in the power
 * domain. Notified drivers can include VFP co-processor, interrupt controller
 * and its PM extensions, local CPU timers context save/restore which
 * shouldn't be interrupted. Hence it must be called with interrupts disabled.
 *
 * Return conditions are same as __raw_notifier_call_chain. On failure,
 * CPU_CLUSTER_PM_ENTER_FAILED is used to unwind listeners that already
 * accepted CPU_CLUSTER_PM_ENTER (see cpu_pm_notify_robust()).
 */
int cpu_cluster_pm_enter(void)
{
	return cpu_pm_notify_robust(CPU_CLUSTER_PM_ENTER, CPU_CLUSTER_PM_ENTER_FAILED);
}
EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);

/**
 * cpu_cluster_pm_exit - CPU cluster low power exit notifier
 *
 * Notifies listeners that all cpus in a power domain are exiting from a
 * low power state that may have caused some blocks in the same power domain
 * to reset.
 *
 * Must be called after cpu_cluster_pm_enter has been called for the power
 * domain, and before cpu_pm_exit has been called on any cpu in the power
 * domain. Notified drivers can include VFP co-processor, interrupt controller
 * and its PM extensions, local CPU timers context save/restore which
 * shouldn't be interrupted. Hence it must be called with interrupts disabled.
 *
 * Return conditions are same as __raw_notifier_call_chain.
 */
int cpu_cluster_pm_exit(void)
{
	return cpu_pm_notify(CPU_CLUSTER_PM_EXIT);
}
EXPORT_SYMBOL_GPL(cpu_cluster_pm_exit);

#ifdef CONFIG_PM
/* Issue CPU then cluster low-power entry notifications at suspend time. */
static int cpu_pm_suspend(void)
{
	int err = cpu_pm_enter();

	if (err)
		return err;

	return cpu_cluster_pm_enter();
}

/* Undo cpu_pm_suspend() in the reverse order: cluster first, then CPU. */
static void cpu_pm_resume(void)
{
	cpu_cluster_pm_exit();
	cpu_pm_exit();
}

static struct syscore_ops cpu_pm_syscore_ops = {
	.suspend = cpu_pm_suspend,
	.resume = cpu_pm_resume,
};

/* Hook the CPU PM notifications into the syscore suspend/resume path. */
static int cpu_pm_init(void)
{
	register_syscore_ops(&cpu_pm_syscore_ops);
	return 0;
}
core_initcall(cpu_pm_init);
#endif