/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Percpu refcounts:
 * (C) 2012 Google, Inc.
 * Author: Kent Overstreet <koverstreet@google.com>
 *
 * This implements a refcount with similar semantics to atomic_t - atomic_inc(),
 * atomic_dec_and_test() - but percpu.
 *
 * There's one important difference between percpu refs and normal atomic_t
 * refcounts; you have to keep track of your initial refcount, and then when you
 * start shutting down you call percpu_ref_kill() _before_ dropping the initial
 * refcount.
 *
 * The refcount will have a range of 0 to ((1U << 31) - 1), i.e. one bit less
 * than an atomic_t - this is because of the way shutdown works, see
 * percpu_ref_kill()/PERCPU_COUNT_BIAS.
 *
 * Before you call percpu_ref_kill(), percpu_ref_put() does not check for the
 * refcount hitting 0 - it can't while the ref is in percpu mode. percpu_ref_kill()
 * puts the ref back in single atomic_t mode, collecting the per cpu refs and
 * issuing the appropriate barriers, and then marks the ref as shutting down so
 * that percpu_ref_put() will check for the ref hitting 0. After it returns,
 * it's safe to drop the initial ref.
 *
 * See fs/aio.c for some example usage; it's used there for struct kioctx, which
 * is created when userspace calls io_setup(), and destroyed when userspace
 * calls io_destroy() or the process exits.
 *
 * In the aio code, kill_ioctx() is called when we wish to destroy a kioctx; it
 * removes the kioctx from the process's table of kioctxs and kills the
 * percpu_ref. After that, there can't be any new users of the kioctx (from
 * lookup_ioctx()) and it's then safe to drop the initial ref with
 * percpu_ref_put().
 *
 * Note that the free path, free_ioctx(), needs to go through explicit
 * call_rcu() to synchronize with RCU protected lookup_ioctx(). percpu_ref
 * operations don't imply RCU grace periods of any kind and if a user wants to
 * combine percpu_ref with RCU protection, it must be done explicitly.
 *
 * Code that does a two stage shutdown like this often needs some kind of
 * explicit synchronization to ensure the initial refcount can only be
 * dropped once. percpu_ref_kill() itself returns nothing and does not
 * detect double kills, so the caller must guarantee - via a flag, a lock
 * or single-threaded teardown - that it is invoked exactly once.
 */
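
/*
 * A minimal lifecycle sketch (hypothetical struct foo and foo_release();
 * none of these names come from this file):
 *
 *	struct foo {
 *		struct percpu_ref ref;
 *	};
 *
 *	static void foo_release(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		kfree(foo);
 *	}
 *
 *	setup:		percpu_ref_init(&foo->ref, foo_release, 0, GFP_KERNEL);
 *	fast path:	percpu_ref_get(&foo->ref); ... percpu_ref_put(&foo->ref);
 *	shutdown:	percpu_ref_kill(&foo->ref);
 *			percpu_ref_put(&foo->ref);	(drops the initial ref)
 *
 * If lookups find @foo under RCU protection, foo_release() must defer the
 * actual free with call_rcu()/kfree_rcu(), as noted above.
 */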

#ifndef _LINUX_PERCPU_REFCOUNT_H
#define _LINUX_PERCPU_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/gfp.h>

struct percpu_ref;
typedef void (percpu_ref_func_t)(struct percpu_ref *);

/* flags set in the lower bits of percpu_ref->percpu_count_ptr */
enum {
	__PERCPU_REF_ATOMIC	= 1LU << 0,	/* operating in atomic mode */
	__PERCPU_REF_DEAD	= 1LU << 1,	/* (being) killed */
	__PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,

	__PERCPU_REF_FLAG_BITS	= 2,
};

/* @flags for percpu_ref_init() */
enum {
	/*
	 * Start w/ ref == 1 in atomic mode. Can be switched to percpu
	 * operation using percpu_ref_switch_to_percpu(). If initialized
	 * with this flag, the ref will stay in atomic mode until
	 * percpu_ref_switch_to_percpu() is invoked on it.
	 */
	PERCPU_REF_INIT_ATOMIC	= 1 << 0,

	/*
	 * Start dead w/ ref == 0 in atomic mode. Must be revived with
	 * percpu_ref_reinit() before used. Implies INIT_ATOMIC.
	 */
	PERCPU_REF_INIT_DEAD	= 1 << 1,
};
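
/*
 * A sketch of flag usage (hypothetical @foo): start out in the slower but
 * always-safe atomic mode and switch to percpu operation only once setup
 * has finished:
 *
 *	ret = percpu_ref_init(&foo->ref, foo_release,
 *			      PERCPU_REF_INIT_ATOMIC, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	...				(setup; ref stays atomic)
 *	percpu_ref_switch_to_percpu(&foo->ref);
 *
 * A ref initialized with PERCPU_REF_INIT_DEAD starts at 0 and must be
 * brought back with percpu_ref_reinit() before the first get/put.
 */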

struct percpu_ref {
	atomic_long_t		count;
	/*
	 * The low bit of the pointer indicates whether the ref is in percpu
	 * mode; if set, then get/put will manipulate the atomic_t.
	 */
	unsigned long		percpu_count_ptr;
	percpu_ref_func_t	*release;
	percpu_ref_func_t	*confirm_switch;
	bool			force_atomic:1;
	struct rcu_head		rcu;
};

int __must_check percpu_ref_init(struct percpu_ref *ref,
				 percpu_ref_func_t *release, unsigned int flags,
				 gfp_t gfp);
void percpu_ref_exit(struct percpu_ref *ref);
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_switch);
void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref);
void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
				 percpu_ref_func_t *confirm_kill);
void percpu_ref_resurrect(struct percpu_ref *ref);
void percpu_ref_reinit(struct percpu_ref *ref);

/**
 * percpu_ref_kill - drop the initial ref
 * @ref: percpu_ref to kill
 *
 * Must be used to drop the initial ref on a percpu refcount; must be called
 * precisely once before shutdown.
 *
 * Switches @ref into atomic mode before gathering up the percpu counters
 * and dropping the initial ref.
 *
 * There are no implied RCU grace periods between kill and release.
 */
static inline void percpu_ref_kill(struct percpu_ref *ref)
{
	percpu_ref_kill_and_confirm(ref, NULL);
}
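
/*
 * A sketch of a confirmed kill (hypothetical foo_confirm_kill() and a
 * struct completion @foo->confirm_done): once the confirm_kill callback
 * has run, percpu_ref_tryget_live() is guaranteed to fail:
 *
 *	static void foo_confirm_kill(struct percpu_ref *ref)
 *	{
 *		struct foo *foo = container_of(ref, struct foo, ref);
 *
 *		complete(&foo->confirm_done);
 *	}
 *
 *	percpu_ref_kill_and_confirm(&foo->ref, foo_confirm_kill);
 *	wait_for_completion(&foo->confirm_done);
 *	percpu_ref_put(&foo->ref);	(drop the initial ref)
 */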

/*
 * Internal helper. Don't use outside percpu-refcount proper. The
 * function doesn't return the pointer and let the caller test it for NULL
 * because doing so forces the compiler to generate two conditional
 * branches as it can't assume that @ref->percpu_count is not NULL.
 */
static inline bool __ref_is_percpu(struct percpu_ref *ref,
					unsigned long __percpu **percpu_countp)
{
	unsigned long percpu_ptr;

	/*
	 * The value of @ref->percpu_count_ptr is tested for
	 * !__PERCPU_REF_ATOMIC, which may be set asynchronously, and then
	 * used as a pointer. If the compiler generates a separate fetch
	 * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
	 * between contaminating the pointer value, meaning that
	 * READ_ONCE() is required when fetching it.
	 *
	 * The smp_read_barrier_depends() implied by READ_ONCE() pairs
	 * with smp_store_release() in __percpu_ref_switch_to_percpu().
	 */
	percpu_ptr = READ_ONCE(ref->percpu_count_ptr);

	/*
	 * Theoretically, the following could test just ATOMIC; however,
	 * then we'd have to mask off DEAD separately as DEAD may be
	 * visible without ATOMIC if we race with percpu_ref_kill(). DEAD
	 * implies ATOMIC anyway. Test them together.
	 */
	if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
		return false;

	*percpu_countp = (unsigned long __percpu *)percpu_ptr;
	return true;
}

/**
 * percpu_ref_get_many - increment a percpu refcount
 * @ref: percpu_ref to get
 * @nr: number of references to get
 *
 * Analogous to atomic_long_add().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
{
	unsigned long __percpu *percpu_count;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &percpu_count))
		this_cpu_add(*percpu_count, nr);
	else
		atomic_long_add(nr, &ref->count);

	rcu_read_unlock_sched();
}
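
/*
 * Sketch (hypothetical): take one ref per queued item in a single
 * operation rather than nr_items separate gets, and drop them the same
 * way with percpu_ref_put_many() once the items are consumed:
 *
 *	percpu_ref_get_many(&foo->ref, nr_items);
 *	...				(each item holds one ref)
 *	percpu_ref_put_many(&foo->ref, nr_items);
 */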

/**
 * percpu_ref_get - increment a percpu refcount
 * @ref: percpu_ref to get
 *
 * Analogous to atomic_long_inc().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_get(struct percpu_ref *ref)
{
	percpu_ref_get_many(ref, 1);
}

/**
 * percpu_ref_tryget - try to increment a percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless its count already reached zero.
 * Returns %true on success; %false on failure.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	bool ret;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &percpu_count)) {
		this_cpu_inc(*percpu_count);
		ret = true;
	} else {
		ret = atomic_long_inc_not_zero(&ref->count);
	}

	rcu_read_unlock_sched();

	return ret;
}

/**
 * percpu_ref_tryget_live - try to increment a live percpu refcount
 * @ref: percpu_ref to try-get
 *
 * Increment a percpu refcount unless it has already been killed. Returns
 * %true on success; %false on failure.
 *
 * Completion of percpu_ref_kill() in itself doesn't guarantee that this
 * function will fail. For such guarantee, percpu_ref_kill_and_confirm()
 * should be used. After the confirm_kill callback is invoked, it's
 * guaranteed that no new reference will be given out by
 * percpu_ref_tryget_live().
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;
	bool ret = false;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &percpu_count)) {
		this_cpu_inc(*percpu_count);
		ret = true;
	} else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
		ret = atomic_long_inc_not_zero(&ref->count);
	}

	rcu_read_unlock_sched();

	return ret;
}
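
/*
 * A sketch of a typical lookup path (hypothetical foo_idr; assumes
 * objects are found under RCU and freed only after an RCU grace period):
 *
 *	rcu_read_lock();
 *	foo = idr_find(&foo_idr, id);
 *	if (foo && !percpu_ref_tryget_live(&foo->ref))
 *		foo = NULL;
 *	rcu_read_unlock();
 *
 * After a confirmed kill, such lookups reliably fail and teardown can
 * proceed.
 */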

/**
 * percpu_ref_put_many - decrement a percpu refcount
 * @ref: percpu_ref to put
 * @nr: number of references to put
 *
 * Decrement the refcount, and if 0, call the release function (which was
 * passed to percpu_ref_init())
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
{
	unsigned long __percpu *percpu_count;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &percpu_count))
		this_cpu_sub(*percpu_count, nr);
	else if (unlikely(atomic_long_sub_and_test(nr, &ref->count)))
		ref->release(ref);

	rcu_read_unlock_sched();
}

/**
 * percpu_ref_put - decrement a percpu refcount
 * @ref: percpu_ref to put
 *
 * Decrement the refcount, and if 0, call the release function (which was
 * passed to percpu_ref_init())
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline void percpu_ref_put(struct percpu_ref *ref)
{
	percpu_ref_put_many(ref, 1);
}

/**
 * percpu_ref_is_dying - test whether a percpu refcount is dying or dead
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref is dying or dead.
 *
 * This function is safe to call as long as @ref is between init and exit
 * and the caller is responsible for synchronizing against state changes.
 */
static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
{
	return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
}
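
/*
 * Sketch (hypothetical @foo->lock): because the caller must synchronize
 * against state changes, a dying-check is typically done under the same
 * lock that serializes kill/reinit:
 *
 *	spin_lock(&foo->lock);
 *	if (!percpu_ref_is_dying(&foo->ref))
 *		percpu_ref_kill(&foo->ref);
 *	spin_unlock(&foo->lock);
 */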

/**
 * percpu_ref_is_zero - test whether a percpu refcount reached zero
 * @ref: percpu_ref to test
 *
 * Returns %true if @ref reached zero.
 *
 * This function is safe to call as long as @ref is between init and exit.
 */
static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
{
	unsigned long __percpu *percpu_count;

	if (__ref_is_percpu(ref, &percpu_count))
		return false;
	return !atomic_long_read(&ref->count);
}

#endif /* _LINUX_PERCPU_REFCOUNT_H */