/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC_H
#define _ASM_X86_ATOMIC_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/alternative.h>
#include <asm/cmpxchg.h>
#include <asm/rmwcc.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

#define ATOMIC_INIT(i)	{ (i) }
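
/*
 * Illustrative use (the identifier is made up): ATOMIC_INIT() provides
 * compile-time initialisation of an atomic_t, e.g.
 *
 *	static atomic_t example_count = ATOMIC_INIT(0);
 *
 * Run-time (re)initialisation goes through arch_atomic_set() below.
 */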

/**
 * arch_atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
static __always_inline int arch_atomic_read(const atomic_t *v)
{
	/*
	 * Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here,
	 * it's a non-inlined function that increases binary size and stack usage.
	 */
	return READ_ONCE((v)->counter);
}

/**
 * arch_atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
	WRITE_ONCE(v->counter, i);
}
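
/*
 * Usage sketch (assumption: callers go through the atomic_read()/atomic_set()
 * wrappers from <linux/atomic.h>, which asm-generic/atomic-instrumented.h maps
 * onto these arch_ helpers; the function below is hypothetical):
 *
 *	int example_snapshot(atomic_t *v)
 *	{
 *		int old = atomic_read(v);	// plain load, no ordering implied
 *		atomic_set(v, 0);		// plain store, no ordering implied
 *		return old;
 *	}
 *
 * Note the read followed by the set is not one atomic step; use the xchg/
 * cmpxchg based helpers further down when the read-modify-write must be.
 */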

/**
 * arch_atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __always_inline void arch_atomic_add(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "addl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i));
}

/**
 * arch_atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static __always_inline void arch_atomic_sub(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "subl %1,%0"
		     : "+m" (v->counter)
		     : "ir" (i));
}

/**
 * arch_atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, e, "er", i);
}
#define arch_atomic_sub_and_test arch_atomic_sub_and_test

/**
 * arch_atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __always_inline void arch_atomic_inc(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "incl %0"
		     : "+m" (v->counter));
}
#define arch_atomic_inc arch_atomic_inc

/**
 * arch_atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
static __always_inline void arch_atomic_dec(atomic_t *v)
{
	asm volatile(LOCK_PREFIX "decl %0"
		     : "+m" (v->counter));
}
#define arch_atomic_dec arch_atomic_dec

/**
 * arch_atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, e);
}
#define arch_atomic_dec_and_test arch_atomic_dec_and_test
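
/*
 * Usage sketch (hypothetical type and helper, via the atomic_dec_and_test()
 * wrapper from <linux/atomic.h>): the classic "put" pattern, where the object
 * is freed by whichever caller drops the last reference.
 *
 *	void example_put(struct example_obj *p)
 *	{
 *		if (atomic_dec_and_test(&p->refs))
 *			kfree(p);
 *	}
 */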

/**
 * arch_atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
{
	return GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, e);
}
#define arch_atomic_inc_and_test arch_atomic_inc_and_test

/**
 * arch_atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
{
	return GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, s, "er", i);
}
#define arch_atomic_add_negative arch_atomic_add_negative

/**
 * arch_atomic_add_return - add integer and return
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns @i + @v
 */
static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
	return i + xadd(&v->counter, i);
}
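
/*
 * Worked example (values are illustrative): xadd() returns the value of
 * v->counter *before* the addition, so adding @i back on top yields the new
 * value.  If v->counter was 3 and i is 2, xadd() returns 3, v->counter
 * becomes 5, and arch_atomic_add_return() returns 5.
 */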

/**
 * arch_atomic_sub_return - subtract integer and return
 * @v: pointer of type atomic_t
 * @i: integer value to subtract
 *
 * Atomically subtracts @i from @v and returns @v - @i
 */
static __always_inline int arch_atomic_sub_return(int i, atomic_t *v)
{
	return arch_atomic_add_return(-i, v);
}

static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
	return xadd(&v->counter, i);
}

static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v)
{
	return xadd(&v->counter, -i);
}
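
/*
 * Usage sketch (hypothetical ticket counter, via the atomic_fetch_add()
 * wrapper): the fetch_* variants return the *old* value, while the *_return
 * variants above return the *new* one.
 *
 *	int example_take_ticket(atomic_t *next_ticket)
 *	{
 *		return atomic_fetch_add(1, next_ticket);	// my ticket number
 *	}
 */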

static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return arch_cmpxchg(&v->counter, old, new);
}

#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
	return try_cmpxchg(&v->counter, old, new);
}
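
/*
 * Usage sketch (hypothetical helper, via the atomic_try_cmpxchg() wrapper):
 * on failure try_cmpxchg() updates *old with the current counter value, so a
 * retry loop never needs to re-read @v.  Here the counter is atomically
 * incremented only while it stays below @max.
 *
 *	bool example_inc_below(atomic_t *v, int max)
 *	{
 *		int old = atomic_read(v);
 *
 *		do {
 *			if (old >= max)
 *				return false;
 *		} while (!atomic_try_cmpxchg(v, &old, old + 1));
 *
 *		return true;
 *	}
 */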

static inline int arch_atomic_xchg(atomic_t *v, int new)
{
	return arch_xchg(&v->counter, new);
}

static inline void arch_atomic_and(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "andl %1,%0"
			: "+m" (v->counter)
			: "ir" (i)
			: "memory");
}

static inline int arch_atomic_fetch_and(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val & i));

	return val;
}
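
/*
 * The empty do { } while in arch_atomic_fetch_and() above is a cmpxchg retry
 * loop: arch_atomic_read() seeds @val, and on contention
 * arch_atomic_try_cmpxchg() refreshes @val with the current counter, so the
 * masked value is recomputed on every pass.  The same pattern implements
 * arch_atomic_fetch_or() and arch_atomic_fetch_xor() below; all three return
 * the value the counter held before the operation.
 */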

static inline void arch_atomic_or(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "orl %1,%0"
			: "+m" (v->counter)
			: "ir" (i)
			: "memory");
}

static inline int arch_atomic_fetch_or(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val | i));

	return val;
}

static inline void arch_atomic_xor(int i, atomic_t *v)
{
	asm volatile(LOCK_PREFIX "xorl %1,%0"
			: "+m" (v->counter)
			: "ir" (i)
			: "memory");
}

static inline int arch_atomic_fetch_xor(int i, atomic_t *v)
{
	int val = arch_atomic_read(v);

	do { } while (!arch_atomic_try_cmpxchg(v, &val, val ^ i));

	return val;
}

#ifdef CONFIG_X86_32
# include <asm/atomic64_32.h>
#else
# include <asm/atomic64_64.h>
#endif

#include <asm-generic/atomic-instrumented.h>

#endif /* _ASM_X86_ATOMIC_H */