// SPDX-License-Identifier: GPL-2.0
/*
 * Variant of atomic_t specialized for reference counts.
 *
 * The interface matches the atomic_t interface (to aid in porting) but only
 * provides the few functions one should use for reference counting.
 *
 * It differs in that the counter saturates at UINT_MAX and will not move once
 * there. This avoids wrapping the counter and causing 'spurious'
 * use-after-free issues.
 *
 * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
 * and provide only what is strictly required for refcounts.
 *
 * The increments are fully relaxed; these will not provide ordering. The
 * rationale is that whatever is used to obtain the object we're increasing the
 * reference count on will provide the ordering. For locked data structures,
 * it's the lock acquire; for RCU/lockless data structures it's the dependent
 * load.
 *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the inc; this ensures we'll never modify the object
 * if we did not in fact acquire a reference.
 *
 * The decrements provide release order, such that all the prior loads and
 * stores will be issued before the decrement; they also provide a control
 * dependency, which will order us against the subsequent free().
 *
 * The control dependency is against the load of the cmpxchg (ll/sc) that
 * succeeded. This means the stores aren't fully ordered, but this is fine
 * because the 1->0 transition indicates no concurrency.
 *
 * Note that the allocator is responsible for ordering things between free()
 * and alloc().
 *
 */

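/*
 * Illustrative sketch of the ordering argument above ('struct obj',
 * obj_lookup() and obj_free() are hypothetical, not part of this file):
 * the dependent load in the RCU lookup orders the relaxed increment, and
 * the release ordering of the final decrement, together with its control
 * dependency, orders every prior access to the object before the free():
 *
 *	rcu_read_lock();
 *	obj = obj_lookup(key);			// dependent load orders the inc
 *	if (obj && !refcount_inc_not_zero(&obj->ref))
 *		obj = NULL;			// lost the race with the last put
 *	rcu_read_unlock();
 *	...
 *	if (refcount_dec_and_test(&obj->ref))	// release + control dependency
 *		obj_free(obj);			// cannot be hoisted before the dec
 */
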
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/bug.h>

/**
 * refcount_add_not_zero_checked - add a value to a refcount unless it is 0
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 *
 * Return: false if the passed refcount is 0, true otherwise
 */
bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (!val)
			return false;

		if (unlikely(val == UINT_MAX))
			return true;

		new = val + i;
		if (new < val)
			new = UINT_MAX;

	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));

	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");

	return true;
}
EXPORT_SYMBOL(refcount_add_not_zero_checked);
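
/*
 * Example (illustrative sketch; 'struct obj' and @nr are hypothetical):
 * taking @nr references in one go for a batch of users, while refusing
 * to resurrect an object whose count has already dropped to 0:
 *
 *	if (!refcount_add_not_zero_checked(nr, &obj->ref))
 *		return -ENOENT;		// object is on its way to being freed
 */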

/**
 * refcount_add_checked - add a value to a refcount
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Similar to atomic_add(), but will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 */
void refcount_add_checked(unsigned int i, refcount_t *r)
{
	WARN_ONCE(!refcount_add_not_zero_checked(i, r), "refcount_t: addition on 0; use-after-free.\n");
}
EXPORT_SYMBOL(refcount_add_checked);

/**
 * refcount_inc_not_zero_checked - increment a refcount unless it is 0
 * @r: the refcount to increment
 *
 * Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Return: true if the increment was successful, false otherwise
 */
bool refcount_inc_not_zero_checked(refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		new = val + 1;

		if (!val)
			return false;

		if (unlikely(!new))
			return true;

	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));

	WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");

	return true;
}
EXPORT_SYMBOL(refcount_inc_not_zero_checked);
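
/*
 * Example (illustrative sketch; obj_lookup_rcu() and 'struct obj' are
 * hypothetical): the classic RCU lookup-and-get pattern, where the count
 * may legitimately be 0 when we find the object because the last put has
 * already happened and only the grace period keeps the memory stable:
 *
 *	rcu_read_lock();
 *	obj = obj_lookup_rcu(key);
 *	if (obj && !refcount_inc_not_zero_checked(&obj->ref))
 *		obj = NULL;		// lost the race against the last put
 *	rcu_read_unlock();
 */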

/**
 * refcount_inc_checked - increment a refcount
 * @r: the refcount to increment
 *
 * Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
 *
 * Provides no memory ordering; it is assumed the caller already has a
 * reference on the object.
 *
 * Will WARN if the refcount is 0, as this represents a possible use-after-free
 * condition.
 */
void refcount_inc_checked(refcount_t *r)
{
	WARN_ONCE(!refcount_inc_not_zero_checked(r), "refcount_t: increment on 0; use-after-free.\n");
}
EXPORT_SYMBOL(refcount_inc_checked);
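
/*
 * Example (illustrative sketch; obj_get() and 'struct obj' are
 * hypothetical): handing out an additional reference while the caller
 * already holds one, so the count is known to be non-zero:
 *
 *	static struct obj *obj_get(struct obj *obj)
 *	{
 *		refcount_inc_checked(&obj->ref);
 *		return obj;
 *	}
 */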

/**
 * refcount_sub_and_test_checked - subtract from a refcount and test if it is 0
 * @i: amount to subtract from the refcount
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), but it will WARN, return false and
 * ultimately leak on underflow and will fail to decrement when saturated
 * at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_dec(), or one of its variants, should instead be used to
 * decrement a reference count.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (unlikely(val == UINT_MAX))
			return false;

		new = val - i;
		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
			return false;
		}

	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));

	return !new;
}
EXPORT_SYMBOL(refcount_sub_and_test_checked);
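
/*
 * Example (illustrative sketch; obj_free() and @nr are hypothetical):
 * releasing @nr references that were taken as one refcount_add_checked()
 * batch, freeing the object if they were the last ones:
 *
 *	if (refcount_sub_and_test_checked(nr, &obj->ref))
 *		obj_free(obj);
 */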

/**
 * refcount_dec_and_test_checked - decrement a refcount and test if it is 0
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_and_test_checked(refcount_t *r)
{
	return refcount_sub_and_test_checked(1, r);
}
EXPORT_SYMBOL(refcount_dec_and_test_checked);
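
/*
 * Example (illustrative sketch; obj_put(), obj_free() and 'struct obj'
 * are hypothetical): the usual put() helper, where the release ordering
 * guarantees all prior accesses to *obj happen before the free:
 *
 *	static void obj_put(struct obj *obj)
 *	{
 *		if (refcount_dec_and_test_checked(&obj->ref))
 *			obj_free(obj);
 *	}
 */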

/**
 * refcount_dec_checked - decrement a refcount
 * @r: the refcount
 *
 * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
 * when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before.
 */
void refcount_dec_checked(refcount_t *r)
{
	WARN_ONCE(refcount_dec_and_test_checked(r), "refcount_t: decrement hit 0; leaking memory.\n");
}
EXPORT_SYMBOL(refcount_dec_checked);

/**
 * refcount_dec_if_one - decrement a refcount if it is 1
 * @r: the refcount
 *
 * No atomic_t counterpart; it attempts a 1 -> 0 transition and returns the
 * success thereof.
 *
 * Like all decrement operations, it provides release memory order and provides
 * a control dependency.
 *
 * It can be used like a try-delete operator; this explicit case is provided
 * rather than a generic cmpxchg, because the latter would allow implementing
 * unsafe operations.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
bool refcount_dec_if_one(refcount_t *r)
{
	int val = 1;

	return atomic_try_cmpxchg_release(&r->refs, &val, 0);
}
EXPORT_SYMBOL(refcount_dec_if_one);
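
/*
 * Example (illustrative sketch; the list handling and obj_free() are
 * hypothetical): a try-delete, tearing the object down only if we held
 * the last reference, and backing off otherwise:
 *
 *	if (refcount_dec_if_one(&obj->ref)) {
 *		list_del(&obj->node);	// we were the only user
 *		obj_free(obj);
 *	}
 */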

/**
 * refcount_dec_not_one - decrement a refcount if it is not 1
 * @r: the refcount
 *
 * No atomic_t counterpart; it decrements unless the value is 1, in which case
 * it will return false.
 *
 * Was often done like: atomic_add_unless(&var, -1, 1)
 *
 * Return: true if the decrement operation was successful, false otherwise
 */
bool refcount_dec_not_one(refcount_t *r)
{
	unsigned int new, val = atomic_read(&r->refs);

	do {
		if (unlikely(val == UINT_MAX))
			return true;

		if (val == 1)
			return false;

		new = val - 1;
		if (new > val) {
			WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
			return true;
		}

	} while (!atomic_try_cmpxchg_release(&r->refs, &val, new));

	return true;
}
EXPORT_SYMBOL(refcount_dec_not_one);
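
/*
 * Example (illustrative sketch; obj_lock is hypothetical): the lockless
 * fast path used by the dec-and-lock helpers below; the lock is only
 * taken when the count might actually drop to 0:
 *
 *	if (refcount_dec_not_one(&obj->ref))
 *		return;			// not the last reference, lock avoided
 *	mutex_lock(&obj_lock);		// slow path, see below
 *	...
 */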

/**
 * refcount_dec_and_mutex_lock - return holding mutex if able to decrement
 *                               refcount to 0
 * @r: the refcount
 * @lock: the mutex to be locked
 *
 * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
 * to decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold mutex if able to decrement refcount to 0, false
 * otherwise
 */
bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	mutex_lock(lock);
	if (!refcount_dec_and_test(r)) {
		mutex_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
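
/*
 * Example (illustrative sketch; obj_list_lock, obj->node and obj_free()
 * are hypothetical): dropping a reference and, if it was the last one,
 * unhashing and freeing the object with the list mutex held so no new
 * lookup can find it:
 *
 *	if (refcount_dec_and_mutex_lock(&obj->ref, &obj_list_lock)) {
 *		list_del(&obj->node);
 *		mutex_unlock(&obj_list_lock);
 *		obj_free(obj);
 *	}
 */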

/**
 * refcount_dec_and_lock - return holding spinlock if able to decrement
 *                         refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 *
 * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
 * decrement when saturated at UINT_MAX.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides a control dependency such that free() must come after.
 * See the comment on top.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 * otherwise
 */
bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock(lock);
	if (!refcount_dec_and_test(r)) {
		spin_unlock(lock);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock);

/**
 * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
 *                                 interrupts if able to decrement refcount to 0
 * @r: the refcount
 * @lock: the spinlock to be locked
 * @flags: saved IRQ-flags if the lock is acquired
 *
 * Same as refcount_dec_and_lock() above except that the spinlock is acquired
 * with disabled interrupts.
 *
 * Return: true and hold spinlock if able to decrement refcount to 0, false
 * otherwise
 */
bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
				   unsigned long *flags)
{
	if (refcount_dec_not_one(r))
		return false;

	spin_lock_irqsave(lock, *flags);
	if (!refcount_dec_and_test(r)) {
		spin_unlock_irqrestore(lock, *flags);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
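
/*
 * Example (illustrative sketch; obj_list_lock, obj->node and obj_free()
 * are hypothetical): as with refcount_dec_and_mutex_lock() above, but
 * usable when the lock is also taken from interrupt context; @flags
 * receives the saved IRQ state for the matching unlock:
 *
 *	unsigned long flags;
 *
 *	if (refcount_dec_and_lock_irqsave(&obj->ref, &obj_list_lock, &flags)) {
 *		list_del(&obj->node);
 *		spin_unlock_irqrestore(&obj_list_lock, flags);
 *		obj_free(obj);
 *	}
 */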