#include <linux/export.h>
#include <linux/lockref.h>

#if USE_CMPXCHG_LOCKREF

/*
 * Allow weakly-ordered memory architectures to provide barrier-less
 * cmpxchg semantics for lockref updates.
 */
#ifndef cmpxchg64_relaxed
# define cmpxchg64_relaxed cmpxchg64
#endif

/*
 * Allow architectures to override the default cpu_relax() within CMPXCHG_LOOP.
 * This is useful for architectures with an expensive cpu_relax().
 */
#ifndef arch_mutex_cpu_relax
# define arch_mutex_cpu_relax() cpu_relax()
#endif

/*
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = ACCESS_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
		struct lockref new = old, prev = old;				\
		CODE								\
		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,	\
						   old.lock_count,		\
						   new.lock_count);		\
		if (likely(old.lock_count == prev.lock_count)) {		\
			SUCCESS;						\
		}								\
		arch_mutex_cpu_relax();						\
	}									\
} while (0)

#else

#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif

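/*
 * For illustration: CMPXCHG_LOOP() is the lockless fast path shared by the
 * functions below.  It snapshots the 64-bit word holding both the spinlock
 * and the count, applies CODE to a local copy while the lock appears
 * unlocked, and retries the cmpxchg until it either succeeds (running
 * SUCCESS) or sees the lock held, at which point the caller falls back to
 * taking lockref->lock.  Roughly, lockref_get() below expands to something
 * like this sketch:
 *
 *	old.lock_count = ACCESS_ONCE(lockref->lock_count);
 *	while (arch_spin_value_unlocked(old.lock.rlock.raw_lock)) {
 *		new = prev = old;
 *		new.count++;				// the CODE argument
 *		old.lock_count = cmpxchg64_relaxed(&lockref->lock_count,
 *						   old.lock_count,
 *						   new.lock_count);
 *		if (old.lock_count == prev.lock_count)
 *			return;				// the SUCCESS argument
 *		arch_mutex_cpu_relax();
 *	}
 *	// otherwise fall through to the spin_lock() slow path
 */
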
/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);

/**
 * lockref_get_not_zero - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 */
int lockref_get_not_zero(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if (!old.count)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);

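/*
 * Illustrative caller pattern (hypothetical "struct foo" embedding a struct
 * lockref named "ref"): lockref_get_not_zero() is the right call when the
 * count may already have reached zero and must not be resurrected:
 *
 *	struct foo *foo_tryget(struct foo *foo)
 *	{
 *		if (!lockref_get_not_zero(&foo->ref))
 *			return NULL;		// count was already zero
 *		return foo;
 *	}
 */
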
/**
 * lockref_get_or_lock - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 * and we got the lock instead.
 */
int lockref_get_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
		if (!old.count)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (!lockref->count)
		return 0;
	lockref->count++;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_get_or_lock);

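/*
 * Caller note (illustrative): a 0 return from lockref_get_or_lock() means the
 * count was zero and lockref->lock is now held, so the caller must either
 * revive the object or reject it, and then drop the lock itself:
 *
 *	if (!lockref_get_or_lock(&obj->ref)) {
 *		// count was 0; obj->ref.lock is held here
 *		revive_obj(obj);		// hypothetical helper
 *		obj->ref.count = 1;
 *		spin_unlock(&obj->ref.lock);
 *	}
 */
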
/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
int lockref_put_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 1)
		return 0;
	lockref->count--;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);

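/*
 * Illustrative release pattern: lockref_put_or_lock() handles the common
 * "count > 1" case without taking the lock; a 0 return means the count was
 * <= 1 and the lock is now held, leaving the final teardown to the caller:
 *
 *	if (!lockref_put_or_lock(&obj->ref)) {
 *		// count was <= 1; obj->ref.lock is held here
 *		bool last = (--obj->ref.count == 0);
 *		spin_unlock(&obj->ref.lock);
 *		if (last)
 *			free_obj(obj);		// hypothetical destructor
 *	}
 */
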
/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	lockref->count = -128;
}
EXPORT_SYMBOL(lockref_mark_dead);

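/*
 * The -128 above is a sentinel rather than a simple -1: it leaves the signed
 * count strongly negative, so the "(int)count < 0" checks in
 * lockref_get_not_dead() below still read the lockref as dead even if a few
 * stray increments or decrements race in.
 */
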
/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 */
int lockref_get_not_dead(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if ((int)old.count < 0)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if ((int) lockref->count >= 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);
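
/*
 * Illustrative lookup pattern: lockref_get_not_dead() pairs with
 * lockref_mark_dead() for lockless lookups, e.g. finding an object in an
 * RCU-protected table that may concurrently be on its way out:
 *
 *	rcu_read_lock();
 *	obj = lookup_obj(key);			// hypothetical RCU-safe lookup
 *	if (obj && !lockref_get_not_dead(&obj->ref))
 *		obj = NULL;			// object found, but already dead
 *	rcu_read_unlock();
 */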