#include <linux/export.h>
#include <linux/lockref.h>

#ifdef CONFIG_CMPXCHG_LOCKREF

/*
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = ACCESS_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
		struct lockref new = old, prev = old;				\
		CODE								\
		old.lock_count = cmpxchg64(&lockref->lock_count,		\
					   old.lock_count, new.lock_count);	\
		if (likely(old.lock_count == prev.lock_count)) {		\
			SUCCESS;						\
		}								\
		cpu_relax();							\
	}									\
} while (0)

#else

#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif

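/*
 * Illustrative expansion (hand-expanded sketch, for clarity only): with
 * CODE = "new.count++;" and SUCCESS = "return;", as used by lockref_get()
 * below, the macro is roughly equivalent to:
 *
 *	struct lockref old;
 *	old.lock_count = ACCESS_ONCE(lockref->lock_count);
 *	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {
 *		struct lockref new = old, prev = old;
 *		new.count++;
 *		old.lock_count = cmpxchg64(&lockref->lock_count,
 *					   old.lock_count, new.lock_count);
 *		if (likely(old.lock_count == prev.lock_count))
 *			return;
 *		cpu_relax();
 *	}
 *
 * i.e. the whole { lock, count } pair is updated with one 64-bit cmpxchg
 * while the embedded spinlock is observed to be unlocked.  A failed cmpxchg
 * simply retries with the freshly reloaded value; only when the lock is
 * seen to be held does control fall through to the spinlock-based slow
 * path in the caller.
 */
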
/**
 * lockref_get - Increments reference count unconditionally
 * @lockref: pointer to lockref structure
 *
 * This operation is only valid if you already hold a reference
 * to the object, so you know the count cannot be zero.
 */
void lockref_get(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
	,
		return;
	);

	spin_lock(&lockref->lock);
	lockref->count++;
	spin_unlock(&lockref->lock);
}
EXPORT_SYMBOL(lockref_get);

/**
 * lockref_get_not_zero - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 */
int lockref_get_not_zero(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if (!old.count)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if (lockref->count) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_zero);

/**
 * lockref_get_or_lock - Increments count unless the count is 0
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count was zero
 * and we got the lock instead.
 */
int lockref_get_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count++;
		if (!old.count)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (!lockref->count)
		return 0;
	lockref->count++;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_get_or_lock);

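/*
 * Usage sketch (illustration only, not part of the original file): a caller
 * that wants a reference but can also handle the count-was-zero case under
 * the lock might do the following, where "struct foo" is a hypothetical
 * structure embedding a struct lockref named "ref":
 *
 *	if (!lockref_get_or_lock(&foo->ref)) {
 *		... count was zero and foo->ref.lock is now held, so the
 *		    caller can either revive the object (foo->ref.count++)
 *		    or treat it as unavailable ...
 *		spin_unlock(&foo->ref.lock);
 *	}
 */
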
/**
 * lockref_put_or_lock - decrements count unless count <= 1 before decrement
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
 */
int lockref_put_or_lock(struct lockref *lockref)
{
	CMPXCHG_LOOP(
		new.count--;
		if (old.count <= 1)
			break;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	if (lockref->count <= 1)
		return 0;
	lockref->count--;
	spin_unlock(&lockref->lock);
	return 1;
}
EXPORT_SYMBOL(lockref_put_or_lock);
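
/*
 * Usage sketch (illustration only, hypothetical names): the "put" side
 * typically uses lockref_put_or_lock() so that only a possibly-final
 * reference has to take the spinlock:
 *
 *	void foo_put(struct foo *foo)
 *	{
 *		if (lockref_put_or_lock(&foo->ref))
 *			return;
 *		... slow path: foo->ref.lock is held and the count, still
 *		    undecremented, is <= 1; drop the final reference and
 *		    tear the object down under the lock, then unlock ...
 *	}
 *
 * The dentry code in fs/dcache.c is the original user of these helpers.
 */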

/**
 * lockref_mark_dead - mark lockref dead
 * @lockref: pointer to lockref structure
 */
void lockref_mark_dead(struct lockref *lockref)
{
	assert_spin_locked(&lockref->lock);
	lockref->count = -128;
}

/**
 * lockref_get_not_dead - Increments count unless the ref is dead
 * @lockref: pointer to lockref structure
 * Return: 1 if count updated successfully or 0 if lockref was dead
 */
int lockref_get_not_dead(struct lockref *lockref)
{
	int retval;

	CMPXCHG_LOOP(
		new.count++;
		if ((int)old.count < 0)
			return 0;
	,
		return 1;
	);

	spin_lock(&lockref->lock);
	retval = 0;
	if ((int) lockref->count >= 0) {
		lockref->count++;
		retval = 1;
	}
	spin_unlock(&lockref->lock);
	return retval;
}
EXPORT_SYMBOL(lockref_get_not_dead);
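
/*
 * Usage sketch (illustration only, hypothetical names): lockref_mark_dead()
 * and lockref_get_not_dead() pair up when lookups may run without holding
 * the lock (e.g. under RCU).  Teardown marks the object dead while holding
 * the lock, so a concurrent lockless lookup can no longer resurrect it:
 *
 *	teardown side (foo->ref.lock held):
 *		lockref_mark_dead(&foo->ref);
 *		spin_unlock(&foo->ref.lock);
 *		... free foo once no lookup can still see it ...
 *
 *	lookup side (no lock held):
 *		if (!lockref_get_not_dead(&foo->ref))
 *			... the object is being torn down, look elsewhere ...
 */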