/* Atomic operations usable in machine independent code */
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
#include <asm/atomic.h>
#include <asm/barrier.h>

/*
 * Relaxed variants of xchg, cmpxchg and some atomic operations.
 *
 * We support four variants:
 *
 * - Fully ordered: The default implementation, no suffix required.
 * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
 * - Release: Provides RELEASE semantics, _release suffix.
 * - Relaxed: No ordering guarantees, _relaxed suffix.
 *
 * For compound atomics performing both a load and a store, ACQUIRE
 * semantics apply only to the load and RELEASE semantics only to the
 * store portion of the operation. Note that a failed cmpxchg_acquire
 * does -not- imply any memory ordering constraints.
 *
 * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
 */

#ifndef atomic_read_acquire
#define atomic_read_acquire(v)		smp_load_acquire(&(v)->counter)
#endif

#ifndef atomic_set_release
#define atomic_set_release(v, i)	smp_store_release(&(v)->counter, (i))
#endif
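
/*
 * Illustrative sketch, not part of the kernel API: one common way the
 * acquire/release accessors above are paired is to publish data on one CPU
 * and consume it on another. The struct and function names below are
 * hypothetical.
 */
struct example_msg {
        int             payload;
        atomic_t        ready;
};

static inline void example_publish(struct example_msg *m, int data)
{
        m->payload = data;
        /* RELEASE orders the payload store before the flag becomes visible. */
        atomic_set_release(&m->ready, 1);
}

static inline int example_consume(struct example_msg *m, int *data)
{
        /* ACQUIRE orders the flag load before the payload read below. */
        if (!atomic_read_acquire(&m->ready))
                return 0;
        *data = m->payload;
        return 1;
}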

/*
 * The idea here is to build acquire/release variants by adding explicit
 * barriers on top of the relaxed variant. In the case where the relaxed
 * variant is already fully ordered, no additional barriers are needed.
 *
 * Besides, if an arch has a special barrier for acquire/release, it could
 * implement its own __atomic_op_* and use the same framework for building
 * the ordered variants on top of it.
 */

#ifndef __atomic_op_acquire
#define __atomic_op_acquire(op, args...)				\
({									\
        typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
        smp_mb__after_atomic();						\
        __ret;								\
})
#endif

#ifndef __atomic_op_release
#define __atomic_op_release(op, args...)				\
({									\
        smp_mb__before_atomic();					\
        op##_relaxed(args);						\
})
#endif

#ifndef __atomic_op_fence
#define __atomic_op_fence(op, args...)					\
({									\
        typeof(op##_relaxed(args)) __ret;				\
        smp_mb__before_atomic();					\
        __ret = op##_relaxed(args);					\
        smp_mb__after_atomic();						\
        __ret;								\
})
#endif
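
/*
 * Illustrative expansion, not part of the header: on an architecture that
 * provides only atomic_add_return_relaxed(), the wrappers below turn
 * atomic_add_return_acquire(i, v) into roughly
 *
 *	({
 *		int __ret = atomic_add_return_relaxed(i, v);
 *		smp_mb__after_atomic();
 *		__ret;
 *	})
 *
 * whereas a fully ordered architecture simply aliases the _acquire, _release
 * and _relaxed names to its plain atomic_add_return().
 */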

/* atomic_add_return_relaxed */
#ifndef atomic_add_return_relaxed
#define atomic_add_return_relaxed	atomic_add_return
#define atomic_add_return_acquire	atomic_add_return
#define atomic_add_return_release	atomic_add_return

#else /* atomic_add_return_relaxed */

#ifndef atomic_add_return_acquire
#define atomic_add_return_acquire(...)					\
        __atomic_op_acquire(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return_release
#define atomic_add_return_release(...)					\
        __atomic_op_release(atomic_add_return, __VA_ARGS__)
#endif

#ifndef atomic_add_return
#define atomic_add_return(...)						\
        __atomic_op_fence(atomic_add_return, __VA_ARGS__)
#endif
#endif /* atomic_add_return_relaxed */

/* atomic_inc_return_relaxed */
#ifndef atomic_inc_return_relaxed
#define atomic_inc_return_relaxed	atomic_inc_return
#define atomic_inc_return_acquire	atomic_inc_return
#define atomic_inc_return_release	atomic_inc_return

#else /* atomic_inc_return_relaxed */

#ifndef atomic_inc_return_acquire
#define atomic_inc_return_acquire(...)					\
        __atomic_op_acquire(atomic_inc_return, __VA_ARGS__)
#endif

#ifndef atomic_inc_return_release
#define atomic_inc_return_release(...)					\
        __atomic_op_release(atomic_inc_return, __VA_ARGS__)
#endif

#ifndef atomic_inc_return
#define atomic_inc_return(...)						\
        __atomic_op_fence(atomic_inc_return, __VA_ARGS__)
#endif
#endif /* atomic_inc_return_relaxed */

/* atomic_sub_return_relaxed */
#ifndef atomic_sub_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return
#define atomic_sub_return_acquire	atomic_sub_return
#define atomic_sub_return_release	atomic_sub_return

#else /* atomic_sub_return_relaxed */

#ifndef atomic_sub_return_acquire
#define atomic_sub_return_acquire(...)					\
        __atomic_op_acquire(atomic_sub_return, __VA_ARGS__)
#endif

#ifndef atomic_sub_return_release
#define atomic_sub_return_release(...)					\
        __atomic_op_release(atomic_sub_return, __VA_ARGS__)
#endif

#ifndef atomic_sub_return
#define atomic_sub_return(...)						\
        __atomic_op_fence(atomic_sub_return, __VA_ARGS__)
#endif
#endif /* atomic_sub_return_relaxed */

/* atomic_dec_return_relaxed */
#ifndef atomic_dec_return_relaxed
#define atomic_dec_return_relaxed	atomic_dec_return
#define atomic_dec_return_acquire	atomic_dec_return
#define atomic_dec_return_release	atomic_dec_return

#else /* atomic_dec_return_relaxed */

#ifndef atomic_dec_return_acquire
#define atomic_dec_return_acquire(...)					\
        __atomic_op_acquire(atomic_dec_return, __VA_ARGS__)
#endif

#ifndef atomic_dec_return_release
#define atomic_dec_return_release(...)					\
        __atomic_op_release(atomic_dec_return, __VA_ARGS__)
#endif

#ifndef atomic_dec_return
#define atomic_dec_return(...)						\
        __atomic_op_fence(atomic_dec_return, __VA_ARGS__)
#endif
#endif /* atomic_dec_return_relaxed */

/* atomic_xchg_relaxed */
#ifndef atomic_xchg_relaxed
#define atomic_xchg_relaxed		atomic_xchg
#define atomic_xchg_acquire		atomic_xchg
#define atomic_xchg_release		atomic_xchg

#else /* atomic_xchg_relaxed */

#ifndef atomic_xchg_acquire
#define atomic_xchg_acquire(...)					\
        __atomic_op_acquire(atomic_xchg, __VA_ARGS__)
#endif

#ifndef atomic_xchg_release
#define atomic_xchg_release(...)					\
        __atomic_op_release(atomic_xchg, __VA_ARGS__)
#endif

#ifndef atomic_xchg
#define atomic_xchg(...)						\
        __atomic_op_fence(atomic_xchg, __VA_ARGS__)
#endif
#endif /* atomic_xchg_relaxed */

/* atomic_cmpxchg_relaxed */
#ifndef atomic_cmpxchg_relaxed
#define atomic_cmpxchg_relaxed		atomic_cmpxchg
#define atomic_cmpxchg_acquire		atomic_cmpxchg
#define atomic_cmpxchg_release		atomic_cmpxchg

#else /* atomic_cmpxchg_relaxed */

#ifndef atomic_cmpxchg_acquire
#define atomic_cmpxchg_acquire(...)					\
        __atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic_cmpxchg_release
#define atomic_cmpxchg_release(...)					\
        __atomic_op_release(atomic_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic_cmpxchg
#define atomic_cmpxchg(...)						\
        __atomic_op_fence(atomic_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic_cmpxchg_relaxed */
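
/*
 * Illustrative sketch, not part of the kernel API: a common use of the
 * _acquire cmpxchg variant is a try-lock style operation, where ordering
 * only matters on the successful transition. The helper names below are
 * hypothetical; real code would use the existing locking primitives.
 */
static inline int example_trylock(atomic_t *lock)
{
        /* The 0 -> 1 transition succeeds with ACQUIRE ordering. */
        return atomic_cmpxchg_acquire(lock, 0, 1) == 0;
}

static inline void example_unlock(atomic_t *lock)
{
        /* Pairs with the ACQUIRE in example_trylock(). */
        atomic_set_release(lock, 0);
}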

#ifndef atomic64_read_acquire
#define atomic64_read_acquire(v)	smp_load_acquire(&(v)->counter)
#endif

#ifndef atomic64_set_release
#define atomic64_set_release(v, i)	smp_store_release(&(v)->counter, (i))
#endif

/* atomic64_add_return_relaxed */
#ifndef atomic64_add_return_relaxed
#define atomic64_add_return_relaxed	atomic64_add_return
#define atomic64_add_return_acquire	atomic64_add_return
#define atomic64_add_return_release	atomic64_add_return

#else /* atomic64_add_return_relaxed */

#ifndef atomic64_add_return_acquire
#define atomic64_add_return_acquire(...)				\
        __atomic_op_acquire(atomic64_add_return, __VA_ARGS__)
#endif

#ifndef atomic64_add_return_release
#define atomic64_add_return_release(...)				\
        __atomic_op_release(atomic64_add_return, __VA_ARGS__)
#endif

#ifndef atomic64_add_return
#define atomic64_add_return(...)					\
        __atomic_op_fence(atomic64_add_return, __VA_ARGS__)
#endif
#endif /* atomic64_add_return_relaxed */

/* atomic64_inc_return_relaxed */
#ifndef atomic64_inc_return_relaxed
#define atomic64_inc_return_relaxed	atomic64_inc_return
#define atomic64_inc_return_acquire	atomic64_inc_return
#define atomic64_inc_return_release	atomic64_inc_return

#else /* atomic64_inc_return_relaxed */

#ifndef atomic64_inc_return_acquire
#define atomic64_inc_return_acquire(...)				\
        __atomic_op_acquire(atomic64_inc_return, __VA_ARGS__)
#endif

#ifndef atomic64_inc_return_release
#define atomic64_inc_return_release(...)				\
        __atomic_op_release(atomic64_inc_return, __VA_ARGS__)
#endif

#ifndef atomic64_inc_return
#define atomic64_inc_return(...)					\
        __atomic_op_fence(atomic64_inc_return, __VA_ARGS__)
#endif
#endif /* atomic64_inc_return_relaxed */

/* atomic64_sub_return_relaxed */
#ifndef atomic64_sub_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return
#define atomic64_sub_return_acquire	atomic64_sub_return
#define atomic64_sub_return_release	atomic64_sub_return

#else /* atomic64_sub_return_relaxed */

#ifndef atomic64_sub_return_acquire
#define atomic64_sub_return_acquire(...)				\
        __atomic_op_acquire(atomic64_sub_return, __VA_ARGS__)
#endif

#ifndef atomic64_sub_return_release
#define atomic64_sub_return_release(...)				\
        __atomic_op_release(atomic64_sub_return, __VA_ARGS__)
#endif

#ifndef atomic64_sub_return
#define atomic64_sub_return(...)					\
        __atomic_op_fence(atomic64_sub_return, __VA_ARGS__)
#endif
#endif /* atomic64_sub_return_relaxed */

/* atomic64_dec_return_relaxed */
#ifndef atomic64_dec_return_relaxed
#define atomic64_dec_return_relaxed	atomic64_dec_return
#define atomic64_dec_return_acquire	atomic64_dec_return
#define atomic64_dec_return_release	atomic64_dec_return

#else /* atomic64_dec_return_relaxed */

#ifndef atomic64_dec_return_acquire
#define atomic64_dec_return_acquire(...)				\
        __atomic_op_acquire(atomic64_dec_return, __VA_ARGS__)
#endif

#ifndef atomic64_dec_return_release
#define atomic64_dec_return_release(...)				\
        __atomic_op_release(atomic64_dec_return, __VA_ARGS__)
#endif

#ifndef atomic64_dec_return
#define atomic64_dec_return(...)					\
        __atomic_op_fence(atomic64_dec_return, __VA_ARGS__)
#endif
#endif /* atomic64_dec_return_relaxed */

/* atomic64_xchg_relaxed */
#ifndef atomic64_xchg_relaxed
#define atomic64_xchg_relaxed		atomic64_xchg
#define atomic64_xchg_acquire		atomic64_xchg
#define atomic64_xchg_release		atomic64_xchg

#else /* atomic64_xchg_relaxed */

#ifndef atomic64_xchg_acquire
#define atomic64_xchg_acquire(...)					\
        __atomic_op_acquire(atomic64_xchg, __VA_ARGS__)
#endif

#ifndef atomic64_xchg_release
#define atomic64_xchg_release(...)					\
        __atomic_op_release(atomic64_xchg, __VA_ARGS__)
#endif

#ifndef atomic64_xchg
#define atomic64_xchg(...)						\
        __atomic_op_fence(atomic64_xchg, __VA_ARGS__)
#endif
#endif /* atomic64_xchg_relaxed */

/* atomic64_cmpxchg_relaxed */
#ifndef atomic64_cmpxchg_relaxed
#define atomic64_cmpxchg_relaxed	atomic64_cmpxchg
#define atomic64_cmpxchg_acquire	atomic64_cmpxchg
#define atomic64_cmpxchg_release	atomic64_cmpxchg

#else /* atomic64_cmpxchg_relaxed */

#ifndef atomic64_cmpxchg_acquire
#define atomic64_cmpxchg_acquire(...)					\
        __atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic64_cmpxchg_release
#define atomic64_cmpxchg_release(...)					\
        __atomic_op_release(atomic64_cmpxchg, __VA_ARGS__)
#endif

#ifndef atomic64_cmpxchg
#define atomic64_cmpxchg(...)						\
        __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__)
#endif
#endif /* atomic64_cmpxchg_relaxed */

/* cmpxchg_relaxed */
#ifndef cmpxchg_relaxed
#define cmpxchg_relaxed			cmpxchg
#define cmpxchg_acquire			cmpxchg
#define cmpxchg_release			cmpxchg

#else /* cmpxchg_relaxed */

#ifndef cmpxchg_acquire
#define cmpxchg_acquire(...)						\
        __atomic_op_acquire(cmpxchg, __VA_ARGS__)
#endif

#ifndef cmpxchg_release
#define cmpxchg_release(...)						\
        __atomic_op_release(cmpxchg, __VA_ARGS__)
#endif

#ifndef cmpxchg
#define cmpxchg(...)							\
        __atomic_op_fence(cmpxchg, __VA_ARGS__)
#endif
#endif /* cmpxchg_relaxed */

/* cmpxchg64_relaxed */
#ifndef cmpxchg64_relaxed
#define cmpxchg64_relaxed		cmpxchg64
#define cmpxchg64_acquire		cmpxchg64
#define cmpxchg64_release		cmpxchg64

#else /* cmpxchg64_relaxed */

#ifndef cmpxchg64_acquire
#define cmpxchg64_acquire(...)						\
        __atomic_op_acquire(cmpxchg64, __VA_ARGS__)
#endif

#ifndef cmpxchg64_release
#define cmpxchg64_release(...)						\
        __atomic_op_release(cmpxchg64, __VA_ARGS__)
#endif

#ifndef cmpxchg64
#define cmpxchg64(...)							\
        __atomic_op_fence(cmpxchg64, __VA_ARGS__)
#endif
#endif /* cmpxchg64_relaxed */

/* xchg_relaxed */
#ifndef xchg_relaxed
#define xchg_relaxed			xchg
#define xchg_acquire			xchg
#define xchg_release			xchg

#else /* xchg_relaxed */

#ifndef xchg_acquire
#define xchg_acquire(...)		__atomic_op_acquire(xchg, __VA_ARGS__)
#endif

#ifndef xchg_release
#define xchg_release(...)		__atomic_op_release(xchg, __VA_ARGS__)
#endif

#ifndef xchg
#define xchg(...)			__atomic_op_fence(xchg, __VA_ARGS__)
#endif
#endif /* xchg_relaxed */

/**
 * atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns non-zero if @v was not @u, and zero otherwise.
 */
static inline int atomic_add_unless(atomic_t *v, int a, int u)
{
        return __atomic_add_unless(v, a, u) != u;
}

/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
#ifndef atomic_inc_not_zero
#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
#endif
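
/*
 * Illustrative sketch, not part of the kernel API: atomic_inc_not_zero() is
 * the usual building block for taking a reference on an object that may be
 * freed once its count drops to zero. The struct and helper names below are
 * hypothetical.
 */
struct example_obj {
        atomic_t        refcount;
};

static inline int example_obj_get(struct example_obj *obj)
{
        /* Only succeed while the object is still live (count != 0). */
        return atomic_inc_not_zero(&obj->refcount);
}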

#ifndef atomic_andnot
static inline void atomic_andnot(int i, atomic_t *v)
{
        atomic_and(~i, v);
}
#endif

static inline __deprecated void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
        atomic_andnot(mask, v);
}

static inline __deprecated void atomic_set_mask(unsigned int mask, atomic_t *v)
{
        atomic_or(mask, v);
}

/**
 * atomic_inc_not_zero_hint - increment if not null
 * @v: pointer of type atomic_t
 * @hint: probable value of the atomic before the increment
 *
 * This version of atomic_inc_not_zero() gives a hint of the probable
 * value of the atomic. This helps the processor avoid reading the memory
 * before doing the atomic read/modify/write cycle, lowering the number
 * of bus transactions on some arches.
 *
 * Returns: 0 if increment was not done, 1 otherwise.
 */
#ifndef atomic_inc_not_zero_hint
static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
{
        int val, c = hint;

        /* sanity test, should be removed by compiler if hint is a constant */
        if (!hint)
                return atomic_inc_not_zero(v);

        do {
                val = atomic_cmpxchg(v, c, c + 1);
                if (val == c)
                        return 1;
                c = val;
        } while (c);

        return 0;
}
#endif

#ifndef atomic_inc_unless_negative
static inline int atomic_inc_unless_negative(atomic_t *p)
{
        int v, v1;
        for (v = 0; v >= 0; v = v1) {
                v1 = atomic_cmpxchg(p, v, v + 1);
                if (likely(v1 == v))
                        return 1;
        }
        return 0;
}
#endif

#ifndef atomic_dec_unless_positive
static inline int atomic_dec_unless_positive(atomic_t *p)
{
        int v, v1;
        for (v = 0; v <= 0; v = v1) {
                v1 = atomic_cmpxchg(p, v, v - 1);
                if (likely(v1 == v))
                        return 1;
        }
        return 0;
}
#endif

/*
 * atomic_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
#ifndef atomic_dec_if_positive
static inline int atomic_dec_if_positive(atomic_t *v)
{
        int c, old, dec;
        c = atomic_read(v);
        for (;;) {
                dec = c - 1;
                if (unlikely(dec < 0))
                        break;
                old = atomic_cmpxchg((v), c, dec);
                if (likely(old == c))
                        break;
                c = old;
        }
        return dec;
}
#endif
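
/*
 * Illustrative sketch, not part of the kernel API: atomic_dec_if_positive()
 * returns the old value minus one, so a negative return means no "token"
 * was available and the counter was left untouched. The helper name is
 * hypothetical.
 */
static inline int example_try_take_token(atomic_t *tokens)
{
        return atomic_dec_if_positive(tokens) >= 0;
}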

/**
 * atomic_fetch_or - perform *p |= mask and return old value of *p
 * @p: pointer to atomic_t
 * @mask: mask to OR on the atomic_t
 */
#ifndef atomic_fetch_or
static inline int atomic_fetch_or(atomic_t *p, int mask)
{
        int old, val = atomic_read(p);

        for (;;) {
                old = atomic_cmpxchg(p, val, val | mask);
                if (old == val)
                        break;
                val = old;
        }

        return old;
}
#endif

/**
 * fetch_or - perform *ptr |= mask and return old value of *ptr
 * @ptr: pointer to value
 * @mask: mask to OR on the value
 *
 * cmpxchg based fetch_or, macro so it works for different integer types
 */
#ifndef fetch_or
#define fetch_or(ptr, mask)						\
({	typeof(*(ptr)) __old, __val = *(ptr);				\
        for (;;) {							\
                __old = cmpxchg((ptr), __val, __val | (mask));		\
                if (__old == __val)					\
                        break;						\
                __val = __old;						\
        }								\
        __old;								\
})
#endif
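
/*
 * Illustrative sketch, not part of the kernel API: unlike atomic_fetch_or(),
 * fetch_or() works on plain integer types, so it can atomically set a flag
 * bit in an unsigned long and report whether that bit was already set. The
 * helper name is hypothetical.
 */
static inline int example_test_and_set_flag(unsigned long *flags, unsigned long bit)
{
        return (fetch_or(flags, bit) & bit) != 0;
}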

#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif

#ifndef atomic64_andnot
static inline void atomic64_andnot(long long i, atomic64_t *v)
{
        atomic64_and(~i, v);
}
#endif

#include <asm-generic/atomic-long.h>

#endif /* _LINUX_ATOMIC_H */