b2441318 | 1 | // SPDX-License-Identifier: GPL-2.0 |
29dee3c0 | 2 | /* |
fb041bb7 | 3 | * Out-of-line refcount functions. |
29dee3c0 PZ |
4 | */ |
5 | ||
75a040ff | 6 | #include <linux/mutex.h> |
29dee3c0 | 7 | #include <linux/refcount.h> |
75a040ff | 8 | #include <linux/spinlock.h> |
29dee3c0 PZ |
9 | #include <linux/bug.h> |
10 | ||
1eb085d9 WD |
11 | #define REFCOUNT_WARN(str) WARN_ONCE(1, "refcount_t: " str ".\n") |
12 | ||
13 | void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t) | |
14 | { | |
15 | refcount_set(r, REFCOUNT_SATURATED); | |
16 | ||
17 | switch (t) { | |
18 | case REFCOUNT_ADD_NOT_ZERO_OVF: | |
19 | REFCOUNT_WARN("saturated; leaking memory"); | |
20 | break; | |
21 | case REFCOUNT_ADD_OVF: | |
22 | REFCOUNT_WARN("saturated; leaking memory"); | |
23 | break; | |
24 | case REFCOUNT_ADD_UAF: | |
25 | REFCOUNT_WARN("addition on 0; use-after-free"); | |
26 | break; | |
27 | case REFCOUNT_SUB_UAF: | |
28 | REFCOUNT_WARN("underflow; use-after-free"); | |
29 | break; | |
30 | case REFCOUNT_DEC_LEAK: | |
31 | REFCOUNT_WARN("decrement hit 0; leaking memory"); | |
32 | break; | |
33 | default: | |
34 | REFCOUNT_WARN("unknown saturation event!?"); | |
35 | } | |
36 | } | |
37 | EXPORT_SYMBOL(refcount_warn_saturate); | |
38 | ||
bd174169 DW |
39 | /** |
40 | * refcount_dec_if_one - decrement a refcount if it is 1 | |
41 | * @r: the refcount | |
42 | * | |
29dee3c0 PZ |
43 | * No atomic_t counterpart, it attempts a 1 -> 0 transition and returns the |
44 | * success thereof. | |
45 | * | |
46 | * Like all decrement operations, it provides release memory order and provides | |
47 | * a control dependency. | |
48 | * | |
49 | * It can be used like a try-delete operator; this explicit case is provided | |
50 | * and not cmpxchg in generic, because that would allow implementing unsafe | |
51 | * operations. | |
bd174169 DW |
52 | * |
53 | * Return: true if the resulting refcount is 0, false otherwise | |
29dee3c0 PZ |
54 | */ |
55 | bool refcount_dec_if_one(refcount_t *r) | |
56 | { | |
b78c0d47 PZ |
57 | int val = 1; |
58 | ||
59 | return atomic_try_cmpxchg_release(&r->refs, &val, 0); | |
29dee3c0 | 60 | } |
d557d1b5 | 61 | EXPORT_SYMBOL(refcount_dec_if_one); |
29dee3c0 | 62 | |
bd174169 DW |
63 | /** |
64 | * refcount_dec_not_one - decrement a refcount if it is not 1 | |
65 | * @r: the refcount | |
66 | * | |
29dee3c0 PZ |
67 | * No atomic_t counterpart, it decrements unless the value is 1, in which case |
68 | * it will return false. | |
69 | * | |
70 | * Was often done like: atomic_add_unless(&var, -1, 1) | |
bd174169 DW |
71 | * |
72 | * Return: true if the decrement operation was successful, false otherwise | |
29dee3c0 PZ |
73 | */ |
74 | bool refcount_dec_not_one(refcount_t *r) | |
75 | { | |
b78c0d47 | 76 | unsigned int new, val = atomic_read(&r->refs); |
29dee3c0 | 77 | |
b78c0d47 | 78 | do { |
23e6b169 | 79 | if (unlikely(val == REFCOUNT_SATURATED)) |
29dee3c0 PZ |
80 | return true; |
81 | ||
82 | if (val == 1) | |
83 | return false; | |
84 | ||
85 | new = val - 1; | |
86 | if (new > val) { | |
9dcfe2c7 | 87 | WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n"); |
29dee3c0 PZ |
88 | return true; |
89 | } | |
90 | ||
b78c0d47 | 91 | } while (!atomic_try_cmpxchg_release(&r->refs, &val, new)); |
29dee3c0 PZ |
92 | |
93 | return true; | |
94 | } | |
d557d1b5 | 95 | EXPORT_SYMBOL(refcount_dec_not_one); |
29dee3c0 | 96 | |
bd174169 DW |
97 | /** |
98 | * refcount_dec_and_mutex_lock - return holding mutex if able to decrement | |
99 | * refcount to 0 | |
100 | * @r: the refcount | |
101 | * @lock: the mutex to be locked | |
102 | * | |
29dee3c0 | 103 | * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail |
23e6b169 | 104 | * to decrement when saturated at REFCOUNT_SATURATED. |
29dee3c0 PZ |
105 | * |
106 | * Provides release memory ordering, such that prior loads and stores are done | |
107 | * before, and provides a control dependency such that free() must come after. | |
108 | * See the comment on top. | |
bd174169 DW |
109 | * |
110 | * Return: true and hold mutex if able to decrement refcount to 0, false | |
111 | * otherwise | |
29dee3c0 PZ |
112 | */ |
113 | bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) | |
114 | { | |
115 | if (refcount_dec_not_one(r)) | |
116 | return false; | |
117 | ||
118 | mutex_lock(lock); | |
119 | if (!refcount_dec_and_test(r)) { | |
120 | mutex_unlock(lock); | |
121 | return false; | |
122 | } | |
123 | ||
124 | return true; | |
125 | } | |
d557d1b5 | 126 | EXPORT_SYMBOL(refcount_dec_and_mutex_lock); |
29dee3c0 | 127 | |
bd174169 DW |
128 | /** |
129 | * refcount_dec_and_lock - return holding spinlock if able to decrement | |
130 | * refcount to 0 | |
131 | * @r: the refcount | |
132 | * @lock: the spinlock to be locked | |
133 | * | |
29dee3c0 | 134 | * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to |
23e6b169 | 135 | * decrement when saturated at REFCOUNT_SATURATED. |
29dee3c0 PZ |
136 | * |
137 | * Provides release memory ordering, such that prior loads and stores are done | |
138 | * before, and provides a control dependency such that free() must come after. | |
139 | * See the comment on top. | |
bd174169 DW |
140 | * |
141 | * Return: true and hold spinlock if able to decrement refcount to 0, false | |
142 | * otherwise | |
29dee3c0 PZ |
143 | */ |
144 | bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) | |
145 | { | |
146 | if (refcount_dec_not_one(r)) | |
147 | return false; | |
148 | ||
149 | spin_lock(lock); | |
150 | if (!refcount_dec_and_test(r)) { | |
151 | spin_unlock(lock); | |
152 | return false; | |
153 | } | |
154 | ||
155 | return true; | |
156 | } | |
d557d1b5 | 157 | EXPORT_SYMBOL(refcount_dec_and_lock); |
29dee3c0 | 158 | |
7ea959c4 AMG |
159 | /** |
160 | * refcount_dec_and_lock_irqsave - return holding spinlock with disabled | |
161 | * interrupts if able to decrement refcount to 0 | |
162 | * @r: the refcount | |
163 | * @lock: the spinlock to be locked | |
164 | * @flags: saved IRQ-flags if the is acquired | |
165 | * | |
166 | * Same as refcount_dec_and_lock() above except that the spinlock is acquired | |
9dbbc3b9 | 167 | * with disabled interrupts. |
7ea959c4 AMG |
168 | * |
169 | * Return: true and hold spinlock if able to decrement refcount to 0, false | |
170 | * otherwise | |
171 | */ | |
172 | bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock, | |
173 | unsigned long *flags) | |
174 | { | |
175 | if (refcount_dec_not_one(r)) | |
176 | return false; | |
177 | ||
178 | spin_lock_irqsave(lock, *flags); | |
179 | if (!refcount_dec_and_test(r)) { | |
180 | spin_unlock_irqrestore(lock, *flags); | |
181 | return false; | |
182 | } | |
183 | ||
184 | return true; | |
185 | } | |
186 | EXPORT_SYMBOL(refcount_dec_and_lock_irqsave); |