// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Google, Inc.
 *
 * Author:
 *	Colin Cross <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/cpu_pm.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>

/*
 * atomic_notifiers use a spinlock_t, which can block under PREEMPT_RT.
 * Notifications for cpu_pm will be issued by the idle task itself, which can
 * never block, IOW it requires using a raw_spinlock_t.
 */
static struct {
	struct raw_notifier_head chain;
	raw_spinlock_t lock;
} cpu_pm_notifier = {
	.chain = RAW_NOTIFIER_INIT(cpu_pm_notifier.chain),
	.lock = __RAW_SPIN_LOCK_UNLOCKED(cpu_pm_notifier.lock),
};

static int cpu_pm_notify(enum cpu_pm_event event)
{
	int ret;

	rcu_read_lock();
	ret = raw_notifier_call_chain(&cpu_pm_notifier.chain, event, NULL);
	rcu_read_unlock();

	return notifier_to_errno(ret);
}

static int cpu_pm_notify_robust(enum cpu_pm_event event_up, enum cpu_pm_event event_down)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
	ret = raw_notifier_call_chain_robust(&cpu_pm_notifier.chain, event_up, event_down, NULL);
	raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);

	return notifier_to_errno(ret);
}

/**
 * cpu_pm_register_notifier - register a driver with cpu_pm
 * @nb: notifier block to register
 *
 * Add a driver to a list of drivers that are notified about
 * CPU and CPU cluster low power entry and exit.
 *
 * This function has the same return conditions as raw_notifier_chain_register.
 */
int cpu_pm_register_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
	ret = raw_notifier_chain_register(&cpu_pm_notifier.chain, nb);
	raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
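
/*
 * Example usage (a minimal sketch): a driver that saves and restores per-CPU
 * hardware state around low power entry. The names my_pm_notify, my_pm_nb,
 * my_save_context and my_restore_context are hypothetical; the callback
 * signature and the CPU_PM_* / NOTIFY_* constants come from
 * <linux/notifier.h> and <linux/cpu_pm.h>.
 *
 *	static int my_pm_notify(struct notifier_block *nb, unsigned long action,
 *				void *data)
 *	{
 *		switch (action) {
 *		case CPU_PM_ENTER:
 *			my_save_context();		// hypothetical helper
 *			break;
 *		case CPU_PM_ENTER_FAILED:
 *		case CPU_PM_EXIT:
 *			my_restore_context();		// hypothetical helper
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_pm_nb = {
 *		.notifier_call = my_pm_notify,
 *	};
 *
 *	// typically from the driver's probe/init path:
 *	cpu_pm_register_notifier(&my_pm_nb);
 */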

/**
 * cpu_pm_unregister_notifier - unregister a driver with cpu_pm
 * @nb: notifier block to be unregistered
 *
 * Remove a driver from the CPU PM notifier list.
 *
 * This function has the same return conditions as raw_notifier_chain_unregister.
 */
int cpu_pm_unregister_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&cpu_pm_notifier.lock, flags);
	ret = raw_notifier_chain_unregister(&cpu_pm_notifier.chain, nb);
	raw_spin_unlock_irqrestore(&cpu_pm_notifier.lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);

/**
 * cpu_pm_enter - CPU low power entry notifier
 *
 * Notifies listeners that a single CPU is entering a low power state that may
 * cause some blocks in the same power domain as the CPU to reset.
 *
 * Must be called on the affected CPU with interrupts disabled. The platform is
 * responsible for ensuring that cpu_pm_enter is not called twice on the same
 * CPU before cpu_pm_exit is called. Notified drivers can include the VFP
 * co-processor, the interrupt controller and its PM extensions, and local CPU
 * timers context save/restore, which must not be interrupted.
 *
 * Return conditions are the same as raw_notifier_call_chain().
 */
int cpu_pm_enter(void)
{
	return cpu_pm_notify_robust(CPU_PM_ENTER, CPU_PM_ENTER_FAILED);
}
EXPORT_SYMBOL_GPL(cpu_pm_enter);

/**
 * cpu_pm_exit - CPU low power exit notifier
 *
 * Notifies listeners that a single CPU is exiting a low power state that may
 * have caused some blocks in the same power domain as the CPU to reset.
 *
 * Notified drivers can include the VFP co-processor, the interrupt controller
 * and its PM extensions, and local CPU timers context save/restore, which
 * must not be interrupted. Hence it must be called with interrupts disabled.
 *
 * Return conditions are the same as raw_notifier_call_chain().
 */
int cpu_pm_exit(void)
{
	return cpu_pm_notify(CPU_PM_EXIT);
}
EXPORT_SYMBOL_GPL(cpu_pm_exit);
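
/*
 * Usage sketch: platform idle code is expected to bracket the actual low
 * power entry with cpu_pm_enter()/cpu_pm_exit(), with interrupts already
 * disabled. enter_low_power_state() below is a hypothetical stand-in for the
 * platform- or firmware-specific suspend mechanism.
 *
 *	ret = cpu_pm_enter();
 *	if (!ret) {
 *		enter_low_power_state();	// hypothetical, platform specific
 *		cpu_pm_exit();
 *	}
 *
 * If cpu_pm_enter() fails, cpu_pm_notify_robust() has already rolled back the
 * notified drivers with CPU_PM_ENTER_FAILED, so the caller simply skips the
 * low power state.
 */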

/**
 * cpu_cluster_pm_enter - CPU cluster low power entry notifier
 *
 * Notifies listeners that all cpus in a power domain are entering a low power
 * state that may cause some blocks in the same power domain to reset.
 *
 * Must be called after cpu_pm_enter has been called on all cpus in the power
 * domain, and before cpu_pm_exit has been called on any cpu in the power
 * domain. Notified drivers can include the VFP co-processor, the interrupt
 * controller and its PM extensions, and local CPU timers context save/restore,
 * which must not be interrupted. Hence it must be called with interrupts
 * disabled.
 *
 * Return conditions are the same as raw_notifier_call_chain().
 */
int cpu_cluster_pm_enter(void)
{
	return cpu_pm_notify_robust(CPU_CLUSTER_PM_ENTER, CPU_CLUSTER_PM_ENTER_FAILED);
}
EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);

/**
 * cpu_cluster_pm_exit - CPU cluster low power exit notifier
 *
 * Notifies listeners that all cpus in a power domain are exiting from a
 * low power state that may have caused some blocks in the same power domain
 * to reset.
 *
 * Must be called after cpu_cluster_pm_enter has been called for the power
 * domain, and before cpu_pm_exit has been called on any cpu in the power
 * domain. Notified drivers can include the VFP co-processor, the interrupt
 * controller and its PM extensions, and local CPU timers context save/restore,
 * which must not be interrupted. Hence it must be called with interrupts
 * disabled.
 *
 * Return conditions are the same as raw_notifier_call_chain().
 */
int cpu_cluster_pm_exit(void)
{
	return cpu_pm_notify(CPU_CLUSTER_PM_EXIT);
}
EXPORT_SYMBOL_GPL(cpu_cluster_pm_exit);
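
/*
 * Usage sketch: when the last CPU in a power domain goes idle, platform code
 * nests the cluster notifications inside the per-CPU ones, in the same order
 * as cpu_pm_suspend()/cpu_pm_resume() below. last_cpu_in_cluster and
 * enter_cluster_state() are hypothetical, and error handling is omitted for
 * brevity.
 *
 *	cpu_pm_enter();
 *	if (last_cpu_in_cluster)
 *		cpu_cluster_pm_enter();
 *
 *	enter_cluster_state();			// hypothetical, platform specific
 *
 *	if (last_cpu_in_cluster)
 *		cpu_cluster_pm_exit();
 *	cpu_pm_exit();
 */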

#ifdef CONFIG_PM
static int cpu_pm_suspend(void)
{
	int ret;

	ret = cpu_pm_enter();
	if (ret)
		return ret;

	ret = cpu_cluster_pm_enter();
	return ret;
}

static void cpu_pm_resume(void)
{
	cpu_cluster_pm_exit();
	cpu_pm_exit();
}

static struct syscore_ops cpu_pm_syscore_ops = {
	.suspend = cpu_pm_suspend,
	.resume = cpu_pm_resume,
};

static int cpu_pm_init(void)
{
	register_syscore_ops(&cpu_pm_syscore_ops);
	return 0;
}
core_initcall(cpu_pm_init);
#endif