// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/lockref.h>

#if USE_CMPXCHG_LOCKREF

/*
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = READ_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
		struct lockref new = old, prev = old;				\
		CODE								\
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
						   old.lock_count,		\
						   new.lock_count);		\
		if (likely(old.lock_count == prev.lock_count)) {		\
			SUCCESS;						\
		}								\
		cpu_relax();							\
	}									\
} while (0)

#else

#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
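
/*
 * Illustrative sketch, not part of lockref.c: roughly what
 * CMPXCHG_LOOP(new.count++;, return;) in lockref_get() below expands to,
 * written out as a plain helper to make the fast path easier to follow.
 * It works because struct lockref packs the spinlock and the count into
 * a single 64-bit word (hence the BUILD_BUG_ON above).  The helper name
 * is made up for this sketch and does not exist in the kernel.
 */
static inline int lockref_get_fastpath(struct lockref *lockref)
{
	struct lockref old;

	BUILD_BUG_ON(sizeof(old) != 8);
	old.lock_count = READ_ONCE(lockref->lock_count);
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {
		struct lockref new = old, prev = old;

		new.count++;				/* the CODE argument */
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,
						   old.lock_count,
						   new.lock_count);
		if (likely(old.lock_count == prev.lock_count))
			return 1;			/* the SUCCESS argument */
		cpu_relax();		/* cmpxchg reloaded 'old'; retry */
	}
	return 0;	/* lock is held: caller must take the spin_lock() slow path */
}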

/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);
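
/*
 * Illustrative sketch, not part of lockref.c: a hypothetical object
 * embedding a lockref.  'struct foo', 'foo_free()' and the functions in
 * the later sketches are invented for illustration only, and the free
 * helper assumes <linux/slab.h> for kfree().
 */
struct foo {
	struct lockref ref;
	/* payload fields would live here */
};

static void foo_free(struct foo *foo)
{
	kfree(foo);
}

static void foo_clone_ref(struct foo *foo)
{
	/* Valid only because the caller already holds a reference. */
	lockref_get(&foo->ref);
}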

/**
 * lockref_get_not_zero - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if the count was zero or dead
 */
int lockref_get_not_zero(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if (old.count <= 0)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count > 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);
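
/*
 * Illustrative sketch (hypothetical 'struct foo' from above): take a
 * reference opportunistically, e.g. from a lockless lookup, backing off
 * if the count has already dropped to zero or the ref has been killed.
 */
static struct foo *foo_tryget(struct foo *foo)
{
	if (!foo || !lockref_get_not_zero(&foo->ref))
		return NULL;
	return foo;
}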

/**
 * lockref_get_or_lock - Increments count unless the count is 0 or dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if the count was zero or
 * dead and we got the lock instead.
 */
int lockref_get_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
		if (old.count <= 0)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 0)
		return 0;
	lockref->count++;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_get_or_lock);
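
/*
 * Illustrative sketch (hypothetical 'struct foo'): when
 * lockref_get_or_lock() fails it returns with the spinlock held, so the
 * caller can inspect or revive the object under the lock instead of
 * simply giving up.  Whether reviving a zero count is legal is a policy
 * decision of the caller; this is just one possible shape.
 */
static bool foo_get_or_revive(struct foo *foo)
{
	if (lockref_get_or_lock(&foo->ref))
		return true;

	/* Count was zero or dead, and foo->ref.lock is now held. */
	if (foo->ref.count == 0) {
		foo->ref.count = 1;	/* revive and hand out a reference */
		spin_unlock(&foo->ref.lock);
		return true;
	}
	spin_unlock(&foo->ref.lock);	/* dead: cannot be revived */
	return false;
}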

/**
 * lockref_put_return - Decrement reference count if possible
 * @lockref: pointer to lockref structure
 *
 * Decrement the reference count and return the new value.
 * If the lockref was dead or locked, return -1.
 */
int lockref_put_return(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 0)
			return -1;
	,
		return new.count;
	);
	return -1;
}
EXPORT_SYMBOL(lockref_put_return);
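
/*
 * Illustrative sketch (hypothetical 'struct foo'): a put that uses the
 * returned count on the fast path and falls back to the spinlock when
 * lockref_put_return() refuses (dead, locked, or no cmpxchg fast path).
 */
static void foo_put(struct foo *foo)
{
	int count = lockref_put_return(&foo->ref);

	if (count > 0)
		return;			/* other references remain */
	if (count == 0) {
		foo_free(foo);		/* we dropped the last reference */
		return;
	}

	/* count < 0: drop the reference under the lock instead. */
	spin_lock(&foo->ref.lock);
	if (foo->ref.count > 0 && --foo->ref.count == 0) {
		spin_unlock(&foo->ref.lock);
		foo_free(foo);
		return;
	}
	spin_unlock(&foo->ref.lock);
}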

/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
int lockref_put_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 1)
		return 0;
	lockref->count--;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);
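
/*
 * Illustrative sketch (hypothetical 'struct foo'): lockref_put_or_lock()
 * drops the reference locklessly while other holders remain; when this
 * may be the last reference it returns 0 with the lock held and the
 * count untouched, so teardown can happen under the lock.
 */
static void foo_release(struct foo *foo)
{
	if (lockref_put_or_lock(&foo->ref))
		return;		/* count was > 1 and has been decremented */

	/*
	 * Count was <= 1, nothing was decremented, and foo->ref.lock is
	 * held.  Since we held a reference, ours was the last one.
	 */
	lockref_mark_dead(&foo->ref);
	spin_unlock(&foo->ref.lock);
	foo_free(foo);		/* real users often defer this, e.g. via RCU */
}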

/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);
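
/*
 * Illustrative sketch (hypothetical 'struct foo'): marking an object dead
 * during teardown.  The spinlock must be held, as the assert above
 * enforces; lookups using lockref_get_not_dead() below will then fail.
 */
static void foo_kill(struct foo *foo)
{
	spin_lock(&foo->ref.lock);
	lockref_mark_dead(&foo->ref);	/* count becomes -128 */
	spin_unlock(&foo->ref.lock);
	/* The object itself is typically freed later, e.g. after RCU. */
}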

/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 */
int lockref_get_not_dead(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if (old.count < 0)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count >= 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);
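
/*
 * Illustrative sketch (hypothetical 'struct foo'): the lookup-side
 * counterpart of foo_kill() above.  Unlike lockref_get(), this refuses
 * to take a reference on an object already marked dead, so lockless
 * lookups and teardown can race safely.
 */
static struct foo *foo_lookup_grab(struct foo *foo)
{
	if (!foo || !lockref_get_not_dead(&foo->ref))
		return NULL;
	return foo;
}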