/* SPDX-License-Identifier: GPL-2.0 */
/* Atomic operations usable in machine independent code */
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
#include <linux/types.h>

#include <asm/atomic.h>
#include <asm/barrier.h>

/*
 * Relaxed variants of xchg, cmpxchg and some atomic operations.
 *
 * We support four variants:
 *
 * - Fully ordered: The default implementation, no suffix required.
 * - Acquire: Provides ACQUIRE semantics, _acquire suffix.
 * - Release: Provides RELEASE semantics, _release suffix.
 * - Relaxed: No ordering guarantees, _relaxed suffix.
 *
 * For compound atomics performing both a load and a store, ACQUIRE
 * semantics apply only to the load and RELEASE semantics only to the
 * store portion of the operation. Note that a failed cmpxchg_acquire
 * does -not- imply any memory ordering constraints.
 *
 * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions.
 */
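
/*
 * Illustrative sketch (not part of the original header): the same RMW
 * operation in all four orderings, using the generated atomic_fetch_add_*()
 * family from the headers included below. @v is a hypothetical counter.
 */
#if 0
static void ordering_sketch(atomic_t *v)
{
	atomic_fetch_add(1, v);		/* fully ordered */
	atomic_fetch_add_acquire(1, v);	/* ACQUIRE applies to the load */
	atomic_fetch_add_release(1, v);	/* RELEASE applies to the store */
	atomic_fetch_add_relaxed(1, v);	/* no ordering guarantees */
}
#endif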
27 | ||
37f8173d PZ |
28 | #define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) |
29 | #define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) | |
30 | ||
31 | #define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) | |
32 | #define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) | |
33 | ||
/*
 * The idea here is to build acquire/release variants by adding explicit
 * barriers on top of the relaxed variant. In the case where the relaxed
 * variant is already fully ordered, no additional barriers are needed.
 *
 * If an architecture overrides __atomic_acquire_fence() it will probably
 * want to define smp_mb__after_spinlock().
 */
#ifndef __atomic_acquire_fence
#define __atomic_acquire_fence smp_mb__after_atomic
#endif

#ifndef __atomic_release_fence
#define __atomic_release_fence smp_mb__before_atomic
#endif

#ifndef __atomic_pre_full_fence
#define __atomic_pre_full_fence smp_mb__before_atomic
#endif

#ifndef __atomic_post_full_fence
#define __atomic_post_full_fence smp_mb__after_atomic
#endif

#define __atomic_op_acquire(op, args...)				\
({									\
	typeof(op##_relaxed(args)) __ret = op##_relaxed(args);		\
	__atomic_acquire_fence();					\
	__ret;								\
})

#define __atomic_op_release(op, args...)				\
({									\
	__atomic_release_fence();					\
	op##_relaxed(args);						\
})

#define __atomic_op_fence(op, args...)					\
({									\
	typeof(op##_relaxed(args)) __ret;				\
	__atomic_pre_full_fence();					\
	__ret = op##_relaxed(args);					\
	__atomic_post_full_fence();					\
	__ret;								\
})

#include <linux/atomic/atomic-arch-fallback.h>
#include <linux/atomic/atomic-long.h>
#include <linux/atomic/atomic-instrumented.h>

#endif /* _LINUX_ATOMIC_H */