arch/arm64/include/asm/cmpxchg.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/build_bug.h>
#include <linux/compiler.h>

#include <asm/barrier.h>
#include <asm/lse.h>

/*
 * We need separate acquire parameters for ll/sc and lse, since the full
 * barrier case is generated as release+dmb for the former and
 * acquire+release for the latter.
 */
#define __XCHG_CASE(w, sfx, name, sz, mb, nop_lse, acq, acq_lse, rel, cl)       \
static inline u##sz __xchg_case_##name##sz(u##sz x, volatile void *ptr)         \
{                                                                               \
        u##sz ret;                                                              \
        unsigned long tmp;                                                      \
                                                                                \
        asm volatile(ARM64_LSE_ATOMIC_INSN(                                     \
        /* LL/SC */                                                             \
        "       prfm    pstl1strm, %2\n"                                        \
        "1:     ld" #acq "xr" #sfx "\t%" #w "0, %2\n"                           \
        "       st" #rel "xr" #sfx "\t%w1, %" #w "3, %2\n"                      \
        "       cbnz    %w1, 1b\n"                                              \
        "       " #mb,                                                          \
        /* LSE atomics */                                                       \
        "       swp" #acq_lse #rel #sfx "\t%" #w "3, %" #w "0, %2\n"            \
                __nops(3)                                                       \
        "       " #nop_lse)                                                     \
        : "=&r" (ret), "=&r" (tmp), "+Q" (*(u##sz *)ptr)                        \
        : "r" (x)                                                               \
        : cl);                                                                  \
                                                                                \
        return ret;                                                             \
}

__XCHG_CASE(w, b,     ,  8,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, h,     , 16,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w,  ,     , 32,        ,    ,  ,  ,  ,         )
__XCHG_CASE( ,  ,     , 64,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, b, acq_,  8,        ,    , a, a,  , "memory")
__XCHG_CASE(w, h, acq_, 16,        ,    , a, a,  , "memory")
__XCHG_CASE(w,  , acq_, 32,        ,    , a, a,  , "memory")
__XCHG_CASE( ,  , acq_, 64,        ,    , a, a,  , "memory")
__XCHG_CASE(w, b, rel_,  8,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, h, rel_, 16,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w,  , rel_, 32,        ,    ,  ,  , l, "memory")
__XCHG_CASE( ,  , rel_, 64,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, b,  mb_,  8, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w, h,  mb_, 16, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w,  ,  mb_, 32, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE( ,  ,  mb_, 64, dmb ish, nop,  , a, l, "memory")
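
/*
 * Illustrative note (not part of the upstream header): as a rough sketch of
 * what one of the instantiations above generates, the 32-bit full-barrier
 * case __XCHG_CASE(w,  ,  mb_, 32, dmb ish, nop,  , a, l, "memory") defines
 * __xchg_case_mb_32(), whose LL/SC body expands to approximately:
 *
 *	prfm	pstl1strm, %2
 *   1:	ldxr	%w0, %2
 *	stlxr	%w1, %w3, %2
 *	cbnz	%w1, 1b
 *	dmb ish
 *
 * while the LSE alternative is a single "swpal" followed by padding nops so
 * that both sequences occupy the same number of instructions.
 */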

#undef __XCHG_CASE

#define __XCHG_GEN(sfx)                                                 \
static __always_inline unsigned long                                    \
__arch_xchg##sfx(unsigned long x, volatile void *ptr, int size)         \
{                                                                       \
        switch (size) {                                                 \
        case 1:                                                         \
                return __xchg_case##sfx##_8(x, ptr);                    \
        case 2:                                                         \
                return __xchg_case##sfx##_16(x, ptr);                   \
        case 4:                                                         \
                return __xchg_case##sfx##_32(x, ptr);                   \
        case 8:                                                         \
                return __xchg_case##sfx##_64(x, ptr);                   \
        default:                                                        \
                BUILD_BUG();                                            \
        }                                                               \
                                                                        \
        unreachable();                                                  \
}

__XCHG_GEN()
__XCHG_GEN(_acq)
__XCHG_GEN(_rel)
__XCHG_GEN(_mb)

#undef __XCHG_GEN

#define __xchg_wrapper(sfx, ptr, x)                                     \
({                                                                      \
        __typeof__(*(ptr)) __ret;                                       \
        __ret = (__typeof__(*(ptr)))                                    \
                __arch_xchg##sfx((unsigned long)(x), (ptr), sizeof(*(ptr))); \
        __ret;                                                          \
})

/* xchg */
#define arch_xchg_relaxed(...)  __xchg_wrapper(    , __VA_ARGS__)
#define arch_xchg_acquire(...)  __xchg_wrapper(_acq, __VA_ARGS__)
#define arch_xchg_release(...)  __xchg_wrapper(_rel, __VA_ARGS__)
#define arch_xchg(...)          __xchg_wrapper( _mb, __VA_ARGS__)
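
/*
 * Usage sketch (illustrative, not part of the upstream header); "state" and
 * NEW_STATE are hypothetical names:
 *
 *	unsigned long state;
 *	...
 *	unsigned long old_state = arch_xchg(&state, NEW_STATE);
 *
 * sizeof(*(&state)) == 8 routes this through __xchg_case_mb_64(), so the
 * exchange is fully ordered against surrounding memory accesses.
 */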

#define __CMPXCHG_CASE(name, sz)                        \
static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr,       \
                                              u##sz old,                \
                                              u##sz new)                \
{                                                                       \
        return __lse_ll_sc_body(_cmpxchg_case_##name##sz,               \
                                ptr, old, new);                         \
}
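
/*
 * Note (not in the upstream header): __lse_ll_sc_body() is provided by
 * <asm/lse.h>.  When LSE atomics support is built in, it selects at runtime
 * between the __lse_*() and __ll_sc_*() implementations of the named
 * operation, depending on whether the CPUs implement the LSE instructions;
 * otherwise it falls back to the LL/SC variants unconditionally.
 */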

__CMPXCHG_CASE(    ,  8)
__CMPXCHG_CASE(    , 16)
__CMPXCHG_CASE(    , 32)
__CMPXCHG_CASE(    , 64)
__CMPXCHG_CASE(acq_,  8)
__CMPXCHG_CASE(acq_, 16)
__CMPXCHG_CASE(acq_, 32)
__CMPXCHG_CASE(acq_, 64)
__CMPXCHG_CASE(rel_,  8)
__CMPXCHG_CASE(rel_, 16)
__CMPXCHG_CASE(rel_, 32)
__CMPXCHG_CASE(rel_, 64)
__CMPXCHG_CASE(mb_,  8)
__CMPXCHG_CASE(mb_, 16)
__CMPXCHG_CASE(mb_, 32)
__CMPXCHG_CASE(mb_, 64)

#undef __CMPXCHG_CASE

#define __CMPXCHG_DBL(name)                                             \
static inline long __cmpxchg_double##name(unsigned long old1,           \
                                         unsigned long old2,            \
                                         unsigned long new1,            \
                                         unsigned long new2,            \
                                         volatile void *ptr)            \
{                                                                       \
        return __lse_ll_sc_body(_cmpxchg_double##name,                  \
                                old1, old2, new1, new2, ptr);           \
}

__CMPXCHG_DBL(   )
__CMPXCHG_DBL(_mb)

#undef __CMPXCHG_DBL

#define __CMPXCHG_GEN(sfx)                                              \
static __always_inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
                                           unsigned long old,           \
                                           unsigned long new,           \
                                           int size)                    \
{                                                                       \
        switch (size) {                                                 \
        case 1:                                                         \
                return __cmpxchg_case##sfx##_8(ptr, old, new);          \
        case 2:                                                         \
                return __cmpxchg_case##sfx##_16(ptr, old, new);         \
        case 4:                                                         \
                return __cmpxchg_case##sfx##_32(ptr, old, new);         \
        case 8:                                                         \
                return __cmpxchg_case##sfx##_64(ptr, old, new);         \
        default:                                                        \
                BUILD_BUG();                                            \
        }                                                               \
                                                                        \
        unreachable();                                                  \
}

__CMPXCHG_GEN()
__CMPXCHG_GEN(_acq)
__CMPXCHG_GEN(_rel)
__CMPXCHG_GEN(_mb)

#undef __CMPXCHG_GEN

#define __cmpxchg_wrapper(sfx, ptr, o, n)                               \
({                                                                      \
        __typeof__(*(ptr)) __ret;                                       \
        __ret = (__typeof__(*(ptr)))                                    \
                __cmpxchg##sfx((ptr), (unsigned long)(o),               \
                                (unsigned long)(n), sizeof(*(ptr)));    \
        __ret;                                                          \
})

/* cmpxchg */
#define arch_cmpxchg_relaxed(...)       __cmpxchg_wrapper(    , __VA_ARGS__)
#define arch_cmpxchg_acquire(...)       __cmpxchg_wrapper(_acq, __VA_ARGS__)
#define arch_cmpxchg_release(...)       __cmpxchg_wrapper(_rel, __VA_ARGS__)
#define arch_cmpxchg(...)               __cmpxchg_wrapper( _mb, __VA_ARGS__)
#define arch_cmpxchg_local              arch_cmpxchg_relaxed
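
/*
 * Usage sketch (illustrative, not part of the upstream header); "counter" is
 * a hypothetical u64 variable updated lock-free with a retry loop:
 *
 *	u64 old, new;
 *
 *	do {
 *		old = READ_ONCE(counter);
 *		new = old + 1;
 *	} while (arch_cmpxchg(&counter, old, new) != old);
 *
 * arch_cmpxchg() returns the value observed in memory, so the loop retries
 * whenever another CPU updated the counter in between.
 */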

/* cmpxchg64 */
#define arch_cmpxchg64_relaxed          arch_cmpxchg_relaxed
#define arch_cmpxchg64_acquire          arch_cmpxchg_acquire
#define arch_cmpxchg64_release          arch_cmpxchg_release
#define arch_cmpxchg64                  arch_cmpxchg
#define arch_cmpxchg64_local            arch_cmpxchg_local

/* cmpxchg_double */
#define system_has_cmpxchg_double()     1

#define __cmpxchg_double_check(ptr1, ptr2)                                      \
({                                                                              \
        if (sizeof(*(ptr1)) != 8)                                               \
                BUILD_BUG();                                                    \
        VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1);      \
})

#define arch_cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2)                         \
({                                                                              \
        int __ret;                                                              \
        __cmpxchg_double_check(ptr1, ptr2);                                     \
        __ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2),  \
                                     (unsigned long)(n1), (unsigned long)(n2),  \
                                     ptr1);                                     \
        __ret;                                                                  \
})

#define arch_cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2)                   \
({                                                                              \
        int __ret;                                                              \
        __cmpxchg_double_check(ptr1, ptr2);                                     \
        __ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2),     \
                                  (unsigned long)(n1), (unsigned long)(n2),     \
                                  ptr1);                                        \
        __ret;                                                                  \
})
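
/*
 * Usage sketch (illustrative, not part of the upstream header); "pair" is a
 * hypothetical structure of two adjacent, naturally aligned 64-bit members:
 *
 *	struct { unsigned long ptr; unsigned long tag; } pair;
 *	...
 *	if (arch_cmpxchg_double(&pair.ptr, &pair.tag,
 *				old_ptr, old_tag, new_ptr, new_tag))
 *		return 0;	/- both words were replaced atomically -/
 *
 * The macro returns 1 on success and 0 if either word no longer held its
 * expected value; __cmpxchg_double_check() rejects non-8-byte operands at
 * build time and checks that ptr2 immediately follows ptr1.
 */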

#define __CMPWAIT_CASE(w, sfx, sz)                                      \
static inline void __cmpwait_case_##sz(volatile void *ptr,              \
                                       unsigned long val)               \
{                                                                       \
        unsigned long tmp;                                              \
                                                                        \
        asm volatile(                                                   \
        "       sevl\n"                                                 \
        "       wfe\n"                                                  \
        "       ldxr" #sfx "\t%" #w "[tmp], %[v]\n"                     \
        "       eor     %" #w "[tmp], %" #w "[tmp], %" #w "[val]\n"     \
        "       cbnz    %" #w "[tmp], 1f\n"                             \
        "       wfe\n"                                                  \
        "1:"                                                            \
        : [tmp] "=&r" (tmp), [v] "+Q" (*(u##sz *)ptr)                   \
        : [val] "r" (val));                                             \
}
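
/*
 * Note (not in the upstream header): the sequence above uses the exclusive
 * monitor as a wake-up source for WFE.  "sevl" sets the local event register
 * so that the first "wfe" falls straight through; the exclusive load then
 * arms the exclusive monitor for *ptr.  If the loaded value still equals
 * "val", the second "wfe" suspends the CPU until an event arrives, typically
 * because another CPU's store to the location cleared the monitor; otherwise
 * the "cbnz" skips the wait entirely.
 */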

__CMPWAIT_CASE(w, b, 8);
__CMPWAIT_CASE(w, h, 16);
__CMPWAIT_CASE(w,  , 32);
__CMPWAIT_CASE( ,  , 64);

#undef __CMPWAIT_CASE

#define __CMPWAIT_GEN(sfx)                                              \
static __always_inline void __cmpwait##sfx(volatile void *ptr,          \
                                  unsigned long val,                    \
                                  int size)                             \
{                                                                       \
        switch (size) {                                                 \
        case 1:                                                         \
                return __cmpwait_case##sfx##_8(ptr, (u8)val);           \
        case 2:                                                         \
                return __cmpwait_case##sfx##_16(ptr, (u16)val);         \
        case 4:                                                         \
                return __cmpwait_case##sfx##_32(ptr, val);              \
        case 8:                                                         \
                return __cmpwait_case##sfx##_64(ptr, val);              \
        default:                                                        \
                BUILD_BUG();                                            \
        }                                                               \
                                                                        \
        unreachable();                                                  \
}

__CMPWAIT_GEN()

#undef __CMPWAIT_GEN

#define __cmpwait_relaxed(ptr, val) \
        __cmpwait((ptr), (unsigned long)(val), sizeof(*(ptr)))
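
/*
 * Note (not in the upstream header): __cmpwait_relaxed() is the arm64 hook
 * used by constructs such as smp_cond_load_relaxed()/smp_cond_load_acquire()
 * in <asm/barrier.h> to wait cheaply for a location to move away from a
 * known value before the condition is re-evaluated.
 */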

#endif  /* __ASM_CMPXCHG_H */