#define pr_fmt(fmt) "%s: " fmt "\n", __func__

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/percpu-refcount.h>

/*
 * Initially, a percpu refcount is just a set of percpu counters. Initially, we
 * don't try to detect the ref hitting 0 - which means that get/put can just
 * increment or decrement the local counter. Note that the counter on a
 * particular cpu can (and will) wrap - this is fine, because when we go to
 * shut down, the percpu counters will all sum to the correct value.
 *
 * (More precisely: because modular arithmetic is commutative, the sum of all
 * the percpu_count vars will be equal to what it would have been if all the
 * gets and puts were done to a single integer, even if some of the percpu
 * integers overflow or underflow).
 *
 * The real trick to implementing percpu refcounts is shutdown. We can't detect
 * the ref hitting 0 on every put - this would require global synchronization
 * and defeat the whole purpose of using percpu refs.
 *
 * What we do is require the user to keep track of the initial refcount; we know
 * the ref can't hit 0 before the user drops the initial ref, so as long as we
 * convert to non-percpu mode before the initial ref is dropped everything
 * works.
 *
 * Converting to non-percpu mode is done with some RCUish stuff in
 * percpu_ref_kill. Additionally, we need a bias value so that the
 * atomic_long_t can't hit 0 before we've added up all the percpu refs.
 */
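
/*
 * A minimal sketch of the lifecycle implemented below. "struct foo",
 * foo_release(), foo_shutdown() and do_something() are hypothetical names
 * used only for illustration; they are not part of this file:
 *
 *	struct foo {
 *		struct percpu_ref ref;
 *		struct completion released;
 *	};
 *
 *	static void foo_release(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		complete(&foo->released);	// must not sleep
 *	}
 *
 *	// setup: one initial ref, percpu mode
 *	init_completion(&foo->released);
 *	percpu_ref_init(&foo->ref, foo_release, 0, GFP_KERNEL);
 *
 *	// hot path: cheap percpu increments/decrements
 *	if (percpu_ref_tryget_live(&foo->ref)) {
 *		do_something(foo);
 *		percpu_ref_put(&foo->ref);
 *	}
 *
 *	// shutdown: drop the initial ref; foo_release() is called once all
 *	// other refs have been dropped
 *	percpu_ref_kill(&foo->ref);
 *	wait_for_completion(&foo->released);
 *	percpu_ref_exit(&foo->ref);
 */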

#define PERCPU_COUNT_BIAS	(1LU << (BITS_PER_LONG - 1))

static DECLARE_WAIT_QUEUE_HEAD(percpu_ref_switch_waitq);

static unsigned long __percpu *percpu_count_ptr(struct percpu_ref *ref)
{
	return (unsigned long __percpu *)
		(ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC_DEAD);
}

/**
 * percpu_ref_init - initialize a percpu refcount
 * @ref: percpu_ref to initialize
 * @release: function which will be called when refcount hits 0
 * @flags: PERCPU_REF_INIT_* flags
 * @gfp: allocation mask to use
 *
 * Initializes @ref. If @flags is zero, @ref starts in percpu mode with a
 * refcount of 1; analogous to atomic_long_set(ref, 1). See the
 * definitions of PERCPU_REF_INIT_* flags for flag behaviors.
 *
 * Note that @release must not sleep - it may potentially be called from RCU
 * callback context by percpu_ref_kill().
 */
int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
		    unsigned int flags, gfp_t gfp)
{
	size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
			     __alignof__(unsigned long));
	unsigned long start_count = 0;

	ref->percpu_count_ptr = (unsigned long)
		__alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
	if (!ref->percpu_count_ptr)
		return -ENOMEM;

	ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;

	if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD))
		ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
	else
		start_count += PERCPU_COUNT_BIAS;

	if (flags & PERCPU_REF_INIT_DEAD)
		ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
	else
		start_count++;

	atomic_long_set(&ref->count, start_count);

	ref->release = release;
	return 0;
}
EXPORT_SYMBOL_GPL(percpu_ref_init);
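
/*
 * Illustrative init with flags (hypothetical caller, reusing the foo names
 * from the sketch above; not part of this file). Per the code above,
 * PERCPU_REF_INIT_DEAD starts the ref dead with no initial ref, to be
 * brought up later with percpu_ref_reinit(); PERCPU_REF_INIT_ATOMIC keeps
 * the ref in atomic mode until percpu_ref_switch_to_percpu() is called:
 *
 *	err = percpu_ref_init(&foo->ref, foo_release,
 *			      PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
 *	if (err)
 *		return err;
 */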

/**
 * percpu_ref_exit - undo percpu_ref_init()
 * @ref: percpu_ref to exit
 *
 * This function exits @ref. The caller is responsible for ensuring that
 * @ref is no longer in active use. The usual places to invoke this
 * function from are the @ref->release() callback or the init failure path
 * where percpu_ref_init() succeeded but other parts of the initialization
 * of the embedding object failed.
 */
void percpu_ref_exit(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);

	if (percpu_count) {
		free_percpu(percpu_count);
		ref->percpu_count_ptr = __PERCPU_REF_ATOMIC_DEAD;
	}
}
EXPORT_SYMBOL_GPL(percpu_ref_exit);
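
/*
 * For example (hypothetical release callback, not part of this file), an
 * embedding object may free its percpu counter and itself from @release,
 * one of the usual call sites mentioned above:
 *
 *	static void foo_release(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		percpu_ref_exit(&foo->ref);
 *		kfree(foo);
 *	}
 */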

static void percpu_ref_call_confirm_rcu(struct rcu_head *rcu)
{
	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);

	ref->confirm_switch(ref);
	ref->confirm_switch = NULL;
	wake_up_all(&percpu_ref_switch_waitq);

	/* drop ref from percpu_ref_switch_to_atomic() */
	percpu_ref_put(ref);
}

static void percpu_ref_switch_to_atomic_rcu(struct rcu_head *rcu)
{
	struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
	unsigned long count = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		count += *per_cpu_ptr(percpu_count, cpu);

	pr_debug("global %ld percpu %ld",
		 atomic_long_read(&ref->count), (long)count);

	/*
	 * It's crucial that we sum the percpu counters _before_ adding the sum
	 * to &ref->count; since gets could be happening on one cpu while puts
	 * happen on another, adding a single cpu's count could cause
	 * @ref->count to hit 0 before we've got a consistent value - but the
	 * sum of all the counts will be consistent and correct.
	 *
	 * Subtracting the bias value then has to happen _after_ adding count to
	 * &ref->count; we need the bias value to prevent &ref->count from
	 * reaching 0 before we add the percpu counts. But doing it at the same
	 * time is equivalent and saves us atomic operations:
	 */
	atomic_long_add((long)count - PERCPU_COUNT_BIAS, &ref->count);

	WARN_ONCE(atomic_long_read(&ref->count) <= 0,
		  "percpu ref (%pf) <= 0 (%ld) after switching to atomic",
		  ref->release, atomic_long_read(&ref->count));

	/* @ref is viewed as dead on all CPUs, send out switch confirmation */
	percpu_ref_call_confirm_rcu(rcu);
}
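
/*
 * A hypothetical worked example of the bias arithmetic above, assuming
 * 64-bit longs (so PERCPU_COUNT_BIAS == 2^63): in percpu mode &ref->count
 * holds BIAS + 1 for the initial ref. If CPU0 did five gets (its counter
 * reads 5) and CPU1 did five puts (its counter wrapped to
 * (unsigned long)-5), the unsigned sum of the percpu counters is 0, so the
 * atomic_long_add(0 - BIAS) above leaves &ref->count at exactly 1 - the
 * bias is gone and only the initial ref remains.
 */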

static void percpu_ref_noop_confirm_switch(struct percpu_ref *ref)
{
}

static void __percpu_ref_switch_to_atomic(struct percpu_ref *ref,
					  percpu_ref_func_t *confirm_switch)
{
	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC)) {
		/* switching from percpu to atomic */
		ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;

		/*
		 * Non-NULL ->confirm_switch is used to indicate that
		 * switching is in progress. Use noop one if unspecified.
		 */
		WARN_ON_ONCE(ref->confirm_switch);
		ref->confirm_switch =
			confirm_switch ?: percpu_ref_noop_confirm_switch;

		percpu_ref_get(ref);	/* put after confirmation */
		call_rcu_sched(&ref->rcu, percpu_ref_switch_to_atomic_rcu);
	} else if (confirm_switch) {
		/*
		 * Somebody already set ATOMIC. Switching may still be in
		 * progress. @confirm_switch must be invoked after the
		 * switching is complete and a full sched RCU grace period
		 * has passed. Wait synchronously for the previous
		 * switching and schedule @confirm_switch invocation.
		 */
		wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);
		ref->confirm_switch = confirm_switch;

		percpu_ref_get(ref);	/* put after confirmation */
		call_rcu_sched(&ref->rcu, percpu_ref_call_confirm_rcu);
	}
}

/**
 * percpu_ref_switch_to_atomic - switch a percpu_ref to atomic mode
 * @ref: percpu_ref to switch to atomic mode
 * @confirm_switch: optional confirmation callback
 *
 * There's no reason to use this function for the usual reference counting.
 * Use percpu_ref_kill[_and_confirm]().
 *
 * Schedule switching of @ref to atomic mode. All its percpu counts will
 * be collected to the main atomic counter. On completion, when all CPUs
 * are guaranteed to be in atomic mode, @confirm_switch, which may not
 * block, is invoked. This function may be invoked concurrently with all
 * the get/put operations and can safely be mixed with kill and reinit
 * operations. Note that @ref will stay in atomic mode across kill/reinit
 * cycles until percpu_ref_switch_to_percpu() is called.
 *
 * This function normally doesn't block and can be called from any context
 * but it may block if @confirm_switch is specified and @ref is already in
 * the process of switching to atomic mode. In such cases, @confirm_switch
 * will be invoked after the switching is complete.
 *
 * Due to the way percpu_ref is implemented, @confirm_switch will be called
 * after at least one full sched RCU grace period has passed but this is an
 * implementation detail and must not be depended upon.
 */
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_switch)
{
	ref->force_atomic = true;
	__percpu_ref_switch_to_atomic(ref, confirm_switch);
}
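
/*
 * A minimal sketch of explicit mode switching (hypothetical caller and
 * foo_quiesced() callback; not part of this file):
 *
 *	// force precise, centralized counting for a while
 *	percpu_ref_switch_to_atomic(&foo->ref, foo_quiesced);
 *	// ... foo_quiesced() runs once all CPUs see atomic mode ...
 *
 *	// later, return to cheap percpu operation; this waits if the
 *	// atomic switch above is still in flight
 *	percpu_ref_switch_to_percpu(&foo->ref);
 */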

static void __percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count = percpu_count_ptr(ref);
	int cpu;

	BUG_ON(!percpu_count);

	if (!(ref->percpu_count_ptr & __PERCPU_REF_ATOMIC))
		return;

	wait_event(percpu_ref_switch_waitq, !ref->confirm_switch);

	atomic_long_add(PERCPU_COUNT_BIAS, &ref->count);

	/*
	 * Restore per-cpu operation. smp_store_release() is paired with
	 * smp_read_barrier_depends() in __ref_is_percpu() and guarantees
	 * that the zeroing is visible to all percpu accesses which can see
	 * the following __PERCPU_REF_ATOMIC clearing.
	 */
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(percpu_count, cpu) = 0;

	smp_store_release(&ref->percpu_count_ptr,
			  ref->percpu_count_ptr & ~__PERCPU_REF_ATOMIC);
}

/**
 * percpu_ref_switch_to_percpu - switch a percpu_ref to percpu mode
 * @ref: percpu_ref to switch to percpu mode
 *
 * There's no reason to use this function for the usual reference counting.
 * To re-use an expired ref, use percpu_ref_reinit().
 *
 * Switch @ref to percpu mode. This function may be invoked concurrently
 * with all the get/put operations and can safely be mixed with kill and
 * reinit operations. This function reverses the sticky atomic state set
 * by PERCPU_REF_INIT_ATOMIC or percpu_ref_switch_to_atomic(). If @ref is
 * dying or dead, the actual switching takes place on the following
 * percpu_ref_reinit().
 *
 * This function normally doesn't block and can be called from any context
 * but it may block if @ref is in the process of switching to atomic mode
 * by percpu_ref_switch_to_atomic().
 */
void percpu_ref_switch_to_percpu(struct percpu_ref *ref)
{
	ref->force_atomic = false;

	/* a dying or dead ref can't be switched to percpu mode w/o reinit */
	if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD))
		__percpu_ref_switch_to_percpu(ref);
}

/**
 * percpu_ref_kill_and_confirm - drop the initial ref and schedule confirmation
 * @ref: percpu_ref to kill
 * @confirm_kill: optional confirmation callback
 *
 * Equivalent to percpu_ref_kill() but also schedules kill confirmation if
 * @confirm_kill is not NULL. @confirm_kill, which may not block, will be
 * called after @ref is seen as dead from all CPUs at which point all
 * further invocations of percpu_ref_tryget_live() will fail. See
 * percpu_ref_tryget_live() for details.
 *
 * This function normally doesn't block and can be called from any context
 * but it may block if @confirm_kill is specified and @ref is in the
 * process of switching to atomic mode by percpu_ref_switch_to_atomic().
 *
 * Due to the way percpu_ref is implemented, @confirm_kill will be called
 * after at least one full sched RCU grace period has passed but this is an
 * implementation detail and must not be depended upon.
 */
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill)
{
	WARN_ONCE(ref->percpu_count_ptr & __PERCPU_REF_DEAD,
		  "%s called more than once on %pf!", __func__, ref->release);

	ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
	__percpu_ref_switch_to_atomic(ref, confirm_kill);
	percpu_ref_put(ref);
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
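
/*
 * A usage sketch (hypothetical foo_confirm_kill() callback and teardown_work
 * member; not part of this file): start teardown only once no new
 * percpu_ref_tryget_live() can succeed.
 *
 *	static void foo_confirm_kill(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		// all CPUs now see @ref as dead; tryget_live() fails
 *		schedule_work(&foo->teardown_work);	// must not block here
 *	}
 *
 *	percpu_ref_kill_and_confirm(&foo->ref, foo_confirm_kill);
 */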

/**
 * percpu_ref_reinit - re-initialize a percpu refcount
 * @ref: percpu_ref to re-initialize
 *
 * Re-initialize @ref so that it's in the same state as when it finished
 * percpu_ref_init() ignoring %PERCPU_REF_INIT_DEAD. @ref must have been
 * initialized successfully and reached 0 but not exited.
 *
 * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
 * this function is in progress.
 */
void percpu_ref_reinit(struct percpu_ref *ref)
{
	WARN_ON_ONCE(!percpu_ref_is_zero(ref));

	ref->percpu_count_ptr &= ~__PERCPU_REF_DEAD;
	percpu_ref_get(ref);
	if (!ref->force_atomic)
		__percpu_ref_switch_to_percpu(ref);
}
EXPORT_SYMBOL_GPL(percpu_ref_reinit);
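
/*
 * Reuse sketch (hypothetical caller, not part of this file): once the ref
 * has been killed and has dropped to zero, it can be brought back:
 *
 *	percpu_ref_kill(&foo->ref);
 *	// ... wait until foo_release() has run, e.g. via a completion ...
 *	percpu_ref_reinit(&foo->ref);	// back to one ref, percpu mode
 *					// (unless force_atomic is set)
 */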