/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H
#include <linux/build_bug.h>
#include <linux/compiler.h>

#include <asm/barrier.h>
#include <asm/lse.h>
/*
 * We need separate acquire parameters for ll/sc and lse, since the full
 * barrier case is generated as release+dmb for the former and
 * acquire+release for the latter.
 */
#define __XCHG_CASE(w, sfx, name, sz, mb, nop_lse, acq, acq_lse, rel, cl)	\
static inline u##sz __xchg_case_##name##sz(u##sz x, volatile void *ptr)	\
{										\
	u##sz ret;								\
	unsigned long tmp;							\
										\
	asm volatile(ARM64_LSE_ATOMIC_INSN(					\
	/* LL/SC */								\
	"	prfm	pstl1strm, %2\n"					\
	"1:	ld" #acq "xr" #sfx "\t%" #w "0, %2\n"				\
	"	st" #rel "xr" #sfx "\t%w1, %" #w "3, %2\n"			\
	"	cbnz	%w1, 1b\n"						\
	"	" #mb,								\
	/* LSE atomics */							\
	"	swp" #acq_lse #rel #sfx "\t%" #w "3, %" #w "0, %2\n"		\
		__nops(3)							\
	"	" #nop_lse)							\
	: "=&r" (ret), "=&r" (tmp), "+Q" (*(u##sz *)ptr)			\
	: "r" (x)								\
	: cl);									\
										\
	return ret;								\
}
__XCHG_CASE(w, b,     ,  8,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, h,     , 16,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w,  ,     , 32,        ,    ,  ,  ,  ,         )
__XCHG_CASE( ,  ,     , 64,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, b, acq_,  8,        ,    , a, a,  , "memory")
__XCHG_CASE(w, h, acq_, 16,        ,    , a, a,  , "memory")
__XCHG_CASE(w,  , acq_, 32,        ,    , a, a,  , "memory")
__XCHG_CASE( ,  , acq_, 64,        ,    , a, a,  , "memory")
__XCHG_CASE(w, b, rel_,  8,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, h, rel_, 16,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w,  , rel_, 32,        ,    ,  ,  , l, "memory")
__XCHG_CASE( ,  , rel_, 64,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, b,  mb_,  8, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w, h,  mb_, 16, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w,  ,  mb_, 32, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE( ,  ,  mb_, 64, dmb ish, nop,  , a, l, "memory")

#undef __XCHG_CASE
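/*
 * Illustrative only: with the mb_ parameters above, the LL/SC path of
 * __xchg_case_mb_32() assembles to roughly
 *
 *	prfm	pstl1strm, [ptr]
 *	1: ldxr	w0, [ptr]
 *	stlxr	w1, w3, [ptr]
 *	cbnz	w1, 1b
 *	dmb	ish
 *
 * i.e. the release (stlxr) + trailing dmb scheme described in the
 * comment at the top, while the LSE path is a single acquire+release
 * "swpal" instruction.
 */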
#define __XCHG_GEN(sfx)							\
static __always_inline unsigned long					\
__arch_xchg##sfx(unsigned long x, volatile void *ptr, int size)		\
{									\
	switch (size) {							\
	case 1:								\
		return __xchg_case##sfx##_8(x, ptr);			\
	case 2:								\
		return __xchg_case##sfx##_16(x, ptr);			\
	case 4:								\
		return __xchg_case##sfx##_32(x, ptr);			\
	case 8:								\
		return __xchg_case##sfx##_64(x, ptr);			\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__XCHG_GEN()
__XCHG_GEN(_acq)
__XCHG_GEN(_rel)
__XCHG_GEN(_mb)

#undef __XCHG_GEN
#define __xchg_wrapper(sfx, ptr, x)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__arch_xchg##sfx((unsigned long)(x), (ptr), sizeof(*(ptr))); \
	__ret;								\
})
/* xchg */
#define arch_xchg_relaxed(...)	__xchg_wrapper(    , __VA_ARGS__)
#define arch_xchg_acquire(...)	__xchg_wrapper(_acq, __VA_ARGS__)
#define arch_xchg_release(...)	__xchg_wrapper(_rel, __VA_ARGS__)
#define arch_xchg(...)		__xchg_wrapper( _mb, __VA_ARGS__)
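/*
 * Usage sketch (illustrative, not part of this header; the names are
 * made up): atomically publish a new value and obtain the old one,
 * fully ordered:
 *
 *	old_owner = arch_xchg(&lock->owner, new_owner);
 */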
#define __CMPXCHG_CASE(name, sz)					\
static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr,	\
					      u##sz old,		\
					      u##sz new)		\
{									\
	return __lse_ll_sc_body(_cmpxchg_case_##name##sz,		\
				ptr, old, new);				\
}
__CMPXCHG_CASE(    ,  8)
__CMPXCHG_CASE(    , 16)
__CMPXCHG_CASE(    , 32)
__CMPXCHG_CASE(    , 64)
__CMPXCHG_CASE(acq_,  8)
__CMPXCHG_CASE(acq_, 16)
__CMPXCHG_CASE(acq_, 32)
__CMPXCHG_CASE(acq_, 64)
__CMPXCHG_CASE(rel_,  8)
__CMPXCHG_CASE(rel_, 16)
__CMPXCHG_CASE(rel_, 32)
__CMPXCHG_CASE(rel_, 64)
__CMPXCHG_CASE(mb_,  8)
__CMPXCHG_CASE(mb_, 16)
__CMPXCHG_CASE(mb_, 32)
__CMPXCHG_CASE(mb_, 64)

#undef __CMPXCHG_CASE
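/*
 * Each generated function is a thin dispatcher: assuming the usual
 * definition of __lse_ll_sc_body() in <asm/lse.h>,
 * __cmpxchg_case_mb_32(), for instance, calls either
 * __ll_sc__cmpxchg_case_mb_32() or __lse__cmpxchg_case_mb_32()
 * according to whether the CPU advertises LSE atomics.
 */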
#define __CMPXCHG_DBL(name)						\
static inline long __cmpxchg_double##name(unsigned long old1,		\
					  unsigned long old2,		\
					  unsigned long new1,		\
					  unsigned long new2,		\
					  volatile void *ptr)		\
{									\
	return __lse_ll_sc_body(_cmpxchg_double##name,			\
				old1, old2, new1, new2, ptr);		\
}

__CMPXCHG_DBL(   )
__CMPXCHG_DBL(_mb)

#undef __CMPXCHG_DBL
#define __CMPXCHG_GEN(sfx)						\
static __always_inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
					   unsigned long old,		\
					   unsigned long new,		\
					   int size)			\
{									\
	switch (size) {							\
	case 1:								\
		return __cmpxchg_case##sfx##_8(ptr, old, new);		\
	case 2:								\
		return __cmpxchg_case##sfx##_16(ptr, old, new);		\
	case 4:								\
		return __cmpxchg_case##sfx##_32(ptr, old, new);		\
	case 8:								\
		return __cmpxchg_case##sfx##_64(ptr, old, new);		\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__CMPXCHG_GEN()
__CMPXCHG_GEN(_acq)
__CMPXCHG_GEN(_rel)
__CMPXCHG_GEN(_mb)

#undef __CMPXCHG_GEN
#define __cmpxchg_wrapper(sfx, ptr, o, n)				\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__cmpxchg##sfx((ptr), (unsigned long)(o),		\
				(unsigned long)(n), sizeof(*(ptr)));	\
	__ret;								\
})
/* cmpxchg */
#define arch_cmpxchg_relaxed(...)	__cmpxchg_wrapper(    , __VA_ARGS__)
#define arch_cmpxchg_acquire(...)	__cmpxchg_wrapper(_acq, __VA_ARGS__)
#define arch_cmpxchg_release(...)	__cmpxchg_wrapper(_rel, __VA_ARGS__)
#define arch_cmpxchg(...)		__cmpxchg_wrapper( _mb, __VA_ARGS__)
#define arch_cmpxchg_local		arch_cmpxchg_relaxed
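/*
 * Usage sketch (illustrative, not part of this header): the canonical
 * compare-and-swap retry loop. arch_cmpxchg() returns the value it
 * found at *p, so the update succeeded iff that equals "old":
 *
 *	cur = READ_ONCE(*p);
 *	do {
 *		old = cur;
 *		cur = arch_cmpxchg(p, old, old + 1);
 *	} while (cur != old);
 */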
/* cmpxchg64 */
#define arch_cmpxchg64_relaxed		arch_cmpxchg_relaxed
#define arch_cmpxchg64_acquire		arch_cmpxchg_acquire
#define arch_cmpxchg64_release		arch_cmpxchg_release
#define arch_cmpxchg64			arch_cmpxchg
#define arch_cmpxchg64_local		arch_cmpxchg_local
/* cmpxchg_double */
#define system_has_cmpxchg_double()	1
#define __cmpxchg_double_check(ptr1, ptr2)					\
({										\
	if (sizeof(*(ptr1)) != 8)						\
		BUILD_BUG();							\
	VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1);	\
})
#define arch_cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2)				\
({										\
	int __ret;								\
	__cmpxchg_double_check(ptr1, ptr2);					\
	__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2),	\
				     (unsigned long)(n1), (unsigned long)(n2),	\
				     ptr1);					\
	__ret;									\
})
#define arch_cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2)			\
({										\
	int __ret;								\
	__cmpxchg_double_check(ptr1, ptr2);					\
	__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2),	\
				  (unsigned long)(n1), (unsigned long)(n2),	\
				  ptr1);					\
	__ret;									\
})
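/*
 * Usage sketch (illustrative; the struct and field names are made up):
 * atomically replace two adjacent, naturally aligned 64-bit words,
 * e.g. a pointer/counter pair. The result is nonzero iff both words
 * matched their expected values and were updated:
 *
 *	if (arch_cmpxchg_double(&s->list, &s->count,
 *				old_list, old_count,
 *				new_list, new_count))
 *		...
 */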
#define __CMPWAIT_CASE(w, sfx, sz)					\
static inline void __cmpwait_case_##sz(volatile void *ptr,		\
				       unsigned long val)		\
{									\
	unsigned long tmp;						\
									\
	asm volatile(							\
	"	sevl\n"							\
	"	wfe\n"							\
	"	ldxr" #sfx "\t%" #w "[tmp], %[v]\n"			\
	"	eor	%" #w "[tmp], %" #w "[tmp], %" #w "[val]\n"	\
	"	cbnz	%" #w "[tmp], 1f\n"				\
	"	wfe\n"							\
	"1:"								\
	: [tmp] "=&r" (tmp), [v] "+Q" (*(u##sz *)ptr)			\
	: [val] "r" (val));						\
}
__CMPWAIT_CASE(w, b, 8);
__CMPWAIT_CASE(w, h, 16);
__CMPWAIT_CASE(w,  , 32);
__CMPWAIT_CASE( ,  , 64);

#undef __CMPWAIT_CASE
#define __CMPWAIT_GEN(sfx)						\
static __always_inline void __cmpwait##sfx(volatile void *ptr,		\
				  unsigned long val,			\
				  int size)				\
{									\
	switch (size) {							\
	case 1:								\
		return __cmpwait_case##sfx##_8(ptr, (u8)val);		\
	case 2:								\
		return __cmpwait_case##sfx##_16(ptr, (u16)val);		\
	case 4:								\
		return __cmpwait_case##sfx##_32(ptr, val);		\
	case 8:								\
		return __cmpwait_case##sfx##_64(ptr, val);		\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__CMPWAIT_GEN();

#undef __CMPWAIT_GEN
#define __cmpwait_relaxed(ptr, val) \
	__cmpwait((ptr), (unsigned long)(val), sizeof(*(ptr)))
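/*
 * __cmpwait_relaxed() backs the smp_cond_load_*() helpers in
 * <asm/barrier.h>: it parks the CPU in WFE until *ptr is observed (via
 * the exclusive monitor) to differ from "val". Callers re-check their
 * condition in a loop, roughly:
 *
 *	cur = READ_ONCE(*p);
 *	while (!cond(cur)) {
 *		__cmpwait_relaxed(p, cur);
 *		cur = READ_ONCE(*p);
 *	}
 */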
#endif	/* __ASM_CMPXCHG_H */