/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Atomic operations.
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

#if __SIZEOF_LONG__ == 4
#define __LL            "ll.w   "
#define __SC            "sc.w   "
#define __AMADD         "amadd.w        "
#define __AMAND_DB      "amand_db.w     "
#define __AMOR_DB       "amor_db.w      "
#define __AMXOR_DB      "amxor_db.w     "
#elif __SIZEOF_LONG__ == 8
#define __LL            "ll.d   "
#define __SC            "sc.d   "
#define __AMADD         "amadd.d        "
#define __AMAND_DB      "amand_db.d     "
#define __AMOR_DB       "amor_db.d      "
#define __AMXOR_DB      "amxor_db.d     "
#endif
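
/*
 * Informational note (not part of the original file): ll.w/ll.d and
 * sc.w/sc.d are LoongArch's load-linked/store-conditional pair, used to
 * build retry loops for operations that lack a single-instruction form.
 * The am* opcodes are single-instruction atomic read-modify-write
 * operations; the _db-suffixed variants additionally enforce barrier
 * semantics (see the LoongArch reference manual for the exact ordering
 * guarantees).
 */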

#define ATOMIC_INIT(i)    { (i) }

/*
 * arch_atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define arch_atomic_read(v)     READ_ONCE((v)->counter)

/*
 * arch_atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define arch_atomic_set(v, i)   WRITE_ONCE((v)->counter, (i))
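
/*
 * Illustrative usage sketch (not part of the original header): callers
 * normally reach these through the generic <linux/atomic.h> wrappers
 * rather than the arch_-prefixed names, e.g.:
 *
 *	atomic_t refs = ATOMIC_INIT(1);
 *
 *	atomic_set(&refs, 2);
 *	pr_debug("refs = %d\n", atomic_read(&refs));
 *
 * "refs" is a hypothetical variable used only for illustration.
 */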

#define ATOMIC_OP(op, I, asm_op)                                        \
static inline void arch_atomic_##op(int i, atomic_t *v)                 \
{                                                                       \
        __asm__ __volatile__(                                           \
        "am"#asm_op"_db.w" " $zero, %1, %0      \n"                     \
        : "+ZB" (v->counter)                                            \
        : "r" (I)                                                       \
        : "memory");                                                    \
}
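
/*
 * Informational sketch (not part of the original file): ATOMIC_OP(add, i, add)
 * expands to roughly
 *
 *	static inline void arch_atomic_add(int i, atomic_t *v)
 *	{
 *		__asm__ __volatile__(
 *		"amadd_db.w $zero, %1, %0\n"
 *		: "+ZB" (v->counter) : "r" (i) : "memory");
 *	}
 *
 * The AM instruction writes the old memory value into its destination
 * register; using $zero as that destination discards it, so the plain op
 * variants need no result operand.
 */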

#define ATOMIC_OP_RETURN(op, I, asm_op, c_op)                           \
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{                                                                       \
        int result;                                                     \
                                                                        \
        __asm__ __volatile__(                                           \
        "am"#asm_op"_db.w" " %1, %2, %0         \n"                     \
        : "+ZB" (v->counter), "=&r" (result)                            \
        : "r" (I)                                                       \
        : "memory");                                                    \
                                                                        \
        return result c_op I;                                           \
}

#define ATOMIC_FETCH_OP(op, I, asm_op)                                  \
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v)  \
{                                                                       \
        int result;                                                     \
                                                                        \
        __asm__ __volatile__(                                           \
        "am"#asm_op"_db.w" " %1, %2, %0         \n"                     \
        : "+ZB" (v->counter), "=&r" (result)                            \
        : "r" (I)                                                       \
        : "memory");                                                    \
                                                                        \
        return result;                                                  \
}
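
/*
 * Informational note (not part of the original file): the AM instruction
 * deposits the *old* memory value in %1 (result), so ATOMIC_FETCH_OP can
 * return "result" directly, while ATOMIC_OP_RETURN derives the *new* value
 * in C as "result c_op I". Subtraction is built from addition by passing
 * -i as I (see ATOMIC_OPS(sub, -i, add, +) below). Only the _relaxed
 * variants are defined here; the acquire/release/fully-ordered forms are
 * assumed to be generated by the generic atomic fallback code on top of
 * them.
 */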

#define ATOMIC_OPS(op, I, asm_op, c_op)                                 \
        ATOMIC_OP(op, I, asm_op)                                        \
        ATOMIC_OP_RETURN(op, I, asm_op, c_op)                           \
        ATOMIC_FETCH_OP(op, I, asm_op)

ATOMIC_OPS(add, i, add, +)
ATOMIC_OPS(sub, -i, add, +)

#define arch_atomic_add_return_relaxed  arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed  arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed   arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed   arch_atomic_fetch_sub_relaxed

#undef ATOMIC_OPS

#define ATOMIC_OPS(op, I, asm_op)                                       \
        ATOMIC_OP(op, I, asm_op)                                        \
        ATOMIC_FETCH_OP(op, I, asm_op)

ATOMIC_OPS(and, i, and)
ATOMIC_OPS(or, i, or)
ATOMIC_OPS(xor, i, xor)

#define arch_atomic_fetch_and_relaxed   arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed    arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed   arch_atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
        int prev, rc;

        __asm__ __volatile__ (
                "0:     ll.w    %[p],  %[c]\n"
                "       beq     %[p],  %[u], 1f\n"
                "       add.w   %[rc], %[p], %[a]\n"
                "       sc.w    %[rc], %[c]\n"
                "       beqz    %[rc], 0b\n"
                "       b       2f\n"
                "1:\n"
                __WEAK_LLSC_MB
                "2:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc),
                  [c]"=ZB" (v->counter)
                : [a]"r" (a), [u]"r" (u)
                : "memory");

        return prev;
}
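
/*
 * Informational sketch (not part of the original file): the LL/SC loop
 * above is the atomic equivalent of
 *
 *	int old = v->counter;
 *	if (old != u)
 *		v->counter = old + a;
 *	return old;
 *
 * ll.w loads and monitors the counter, sc.w stores only if nothing else
 * touched it in the meantime (and sets %[rc] non-zero on success), and
 * beqz retries the whole sequence on failure. The old value is returned
 * whether or not the add happened.
 */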
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless

/*
 * arch_atomic_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
{
        int result;
        int temp;

        if (__builtin_constant_p(i)) {
                __asm__ __volatile__(
                "1:     ll.w    %1, %2          # atomic_sub_if_positive\n"
                "       addi.w  %0, %1, %3                              \n"
                "       move    %1, %0                                  \n"
                "       bltz    %0, 2f                                  \n"
                "       sc.w    %1, %2                                  \n"
                "       beqz    %1, 1b                                  \n"
                "2:                                                     \n"
                __WEAK_LLSC_MB
                : "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
                : "I" (-i));
        } else {
                __asm__ __volatile__(
                "1:     ll.w    %1, %2          # atomic_sub_if_positive\n"
                "       sub.w   %0, %1, %3                              \n"
                "       move    %1, %0                                  \n"
                "       bltz    %0, 2f                                  \n"
                "       sc.w    %1, %2                                  \n"
                "       beqz    %1, 1b                                  \n"
                "2:                                                     \n"
                __WEAK_LLSC_MB
                : "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
                : "r" (i));
        }

        return result;
}
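
/*
 * Informational note (not part of the original file): the two branches
 * differ only in how the subtraction is encoded. When @i is a compile-time
 * constant, the "I" constraint folds -i into an addi.w immediate;
 * otherwise sub.w takes the value from a register. The store is skipped
 * (bltz branches past sc.w) when the new value would be negative, and
 * __WEAK_LLSC_MB, defined in <asm/barrier.h>, sits after the exit label;
 * the exact ordering it provides is an assumption here rather than
 * something this file states.
 */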

#define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n)))
#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), (new)))

/*
 * arch_atomic_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic_t
 */
#define arch_atomic_dec_if_positive(v)  arch_atomic_sub_if_positive(1, v)
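
/*
 * Illustrative usage sketch (not part of the original file), through the
 * generic atomic_dec_if_positive() wrapper; "sem_count" is a hypothetical
 * atomic_t used only for illustration:
 *
 *	if (atomic_dec_if_positive(&sem_count) < 0)
 *		return -EAGAIN;
 *
 * A negative return means the counter was already zero (or negative) and
 * was left unchanged.
 */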

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i)    { (i) }

/*
 * arch_atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
#define arch_atomic64_read(v)   READ_ONCE((v)->counter)

/*
 * arch_atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define arch_atomic64_set(v, i) WRITE_ONCE((v)->counter, (i))

#define ATOMIC64_OP(op, I, asm_op)                                      \
static inline void arch_atomic64_##op(long i, atomic64_t *v)            \
{                                                                       \
        __asm__ __volatile__(                                           \
        "am"#asm_op"_db.d " " $zero, %1, %0     \n"                     \
        : "+ZB" (v->counter)                                            \
        : "r" (I)                                                       \
        : "memory");                                                    \
}

#define ATOMIC64_OP_RETURN(op, I, asm_op, c_op)                                 \
static inline long arch_atomic64_##op##_return_relaxed(long i, atomic64_t *v)   \
{                                                                               \
        long result;                                                            \
        __asm__ __volatile__(                                                   \
        "am"#asm_op"_db.d " " %1, %2, %0                \n"                     \
        : "+ZB" (v->counter), "=&r" (result)                                    \
        : "r" (I)                                                               \
        : "memory");                                                            \
                                                                                \
        return result c_op I;                                                   \
}

#define ATOMIC64_FETCH_OP(op, I, asm_op)                                        \
static inline long arch_atomic64_fetch_##op##_relaxed(long i, atomic64_t *v)    \
{                                                                               \
        long result;                                                            \
                                                                                \
        __asm__ __volatile__(                                                   \
        "am"#asm_op"_db.d " " %1, %2, %0                \n"                     \
        : "+ZB" (v->counter), "=&r" (result)                                    \
        : "r" (I)                                                               \
        : "memory");                                                            \
                                                                                \
        return result;                                                          \
}

#define ATOMIC64_OPS(op, I, asm_op, c_op)                                     \
        ATOMIC64_OP(op, I, asm_op)                                            \
        ATOMIC64_OP_RETURN(op, I, asm_op, c_op)                               \
        ATOMIC64_FETCH_OP(op, I, asm_op)

ATOMIC64_OPS(add, i, add, +)
ATOMIC64_OPS(sub, -i, add, +)

#define arch_atomic64_add_return_relaxed        arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed        arch_atomic64_sub_return_relaxed
#define arch_atomic64_fetch_add_relaxed         arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed         arch_atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS

#define ATOMIC64_OPS(op, I, asm_op)                                           \
        ATOMIC64_OP(op, I, asm_op)                                            \
        ATOMIC64_FETCH_OP(op, I, asm_op)

ATOMIC64_OPS(and, i, and)
ATOMIC64_OPS(or, i, or)
ATOMIC64_OPS(xor, i, xor)

#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_or_relaxed  arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline long arch_atomic64_fetch_add_unless(atomic64_t *v, long a, long u)
{
        long prev, rc;

        __asm__ __volatile__ (
                "0:     ll.d    %[p],  %[c]\n"
                "       beq     %[p],  %[u], 1f\n"
                "       add.d   %[rc], %[p], %[a]\n"
                "       sc.d    %[rc], %[c]\n"
                "       beqz    %[rc], 0b\n"
                "       b       2f\n"
                "1:\n"
                __WEAK_LLSC_MB
                "2:\n"
                : [p]"=&r" (prev), [rc]"=&r" (rc),
                  [c] "=ZB" (v->counter)
                : [a]"r" (a), [u]"r" (u)
                : "memory");

        return prev;
}
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless

/*
 * arch_atomic64_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic64_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i.
 */
static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
{
        long result;
        long temp;

        if (__builtin_constant_p(i)) {
                __asm__ __volatile__(
                "1:     ll.d    %1, %2  # atomic64_sub_if_positive      \n"
                "       addi.d  %0, %1, %3                              \n"
                "       move    %1, %0                                  \n"
                "       bltz    %0, 2f                                  \n"
                "       sc.d    %1, %2                                  \n"
                "       beqz    %1, 1b                                  \n"
                "2:                                                     \n"
                __WEAK_LLSC_MB
                : "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
                : "I" (-i));
        } else {
                __asm__ __volatile__(
                "1:     ll.d    %1, %2  # atomic64_sub_if_positive      \n"
                "       sub.d   %0, %1, %3                              \n"
                "       move    %1, %0                                  \n"
                "       bltz    %0, 2f                                  \n"
                "       sc.d    %1, %2                                  \n"
                "       beqz    %1, 1b                                  \n"
                "2:                                                     \n"
                __WEAK_LLSC_MB
                : "=&r" (result), "=&r" (temp), "+ZC" (v->counter)
                : "r" (i));
        }

        return result;
}

#define arch_atomic64_cmpxchg(v, o, n) \
        ((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n)))
#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), (new)))

/*
 * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 */
#define arch_atomic64_dec_if_positive(v)        arch_atomic64_sub_if_positive(1, v)

#endif /* CONFIG_64BIT */

#endif /* _ASM_ATOMIC_H */