/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#ifndef CONFIG_ARC_PLAT_EZNPS

#define atomic_read(v)	READ_ONCE((v)->counter)

#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int val;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	"	bnz     1b				\n"		\
	: [val]	"=&r"	(val) /* Early clobber to prevent reg reuse */	\
	: [ctr]	"r"	(&v->counter), /* Not "m": llock only supports reg direct addr mode */	\
	  [i]	"ir"	(i)						\
	: "cc");							\
}
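
/*
 * Illustrative only (not part of the header proper): ATOMIC_OP(add, +=, add)
 * above generates, roughly,
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned int val;
 *
 *		__asm__ __volatile__(
 *		"1:	llock   %[val], [%[ctr]]	\n"
 *		"	add     %[val], %[val], %[i]	\n"
 *		"	scond   %[val], [%[ctr]]	\n"
 *		"	bnz     1b			\n"
 *		...);
 *	}
 *
 * i.e. a load-locked/store-conditional loop that retries (bnz 1b) whenever
 * the store-conditional fails. Note there is no smp_mb() here: the plain
 * void ops are relaxed; only the _return/fetch variants add full barriers.
 */
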
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 * (see the ordering note after the #endif of this #ifdef)	\
	 */								\
	__asm__ __volatile__(						\
	"1:	llock   %[val], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[val], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	: [ctr]	"r"	(&v->counter),					\

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
	unsigned int val, orig;						\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	__asm__ __volatile__(						\
	"1:	llock   %[orig], [%[ctr]]		\n"		\
	"	" #asm_op " %[val], %[orig], %[i]	\n"		\
	"	scond   %[val], [%[ctr]]		\n"		\
	: [val]	"=&r"	(val),						\
	: [ctr]	"r"	(&v->counter),					\

#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

 /* violating atomic_xxx API locking protocol in UP for optimization's sake */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * "manually" done in software.
	 *
	 * Thus atomic_set(), despite being 1 insn (and seemingly atomic),
	 * requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	WRITE_ONCE(v->counter, i);
	atomic_ops_unlock(flags);
}

#endif

#define atomic_set_release(v, i)	atomic_set((v), (i))
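
/*
 * Illustrative only: why the locked atomic_set() above matters on SMP without
 * LLSC. With a bare store, a hypothetical interleaving can lose the set:
 *
 *	CPU0 (atomic_add, under atomic_ops_lock)	CPU1 (bare atomic_set)
 *	lock; tmp = v->counter;		// reads 5
 *							v->counter = 100;
 *	tmp += 1;
 *	v->counter = tmp; unlock;	// stores 6, CPU1's 100 is lost
 *
 * Taking atomic_ops_lock() in atomic_set() serializes it against the
 * lock-based RMW ops and prevents this.
 */
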
/*
 * Non-hardware-assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}
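
/*
 * Illustrative only: with this fallback, ATOMIC_OP(add, +=, add) generates,
 * roughly,
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long flags;
 *
 *		atomic_ops_lock(flags);
 *		v->counter += i;
 *		atomic_ops_unlock(flags);
 *	}
 *
 * The asm_op argument is unused in this variant; the plain C c_op runs under
 * the lock (irq disabling on UP, a spinlock on SMP).
 */
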
#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
	unsigned long flags;						\
	unsigned long temp;						\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	atomic_ops_unlock(flags);					\

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
	unsigned long flags;						\
	unsigned long orig;						\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	atomic_ops_unlock(flags);					\

#endif /* !CONFIG_ARC_HAS_LLSC */
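
/*
 * Ordering note (illustrative only): with either implementation above, the
 * value-returning and fetch variants are fully ordered, with smp_mb() (or the
 * equivalent from atomic_ops_lock/unlock) before and after the RMW, while the
 * plain void ops are relaxed. So in a hypothetical producer
 *
 *	ptr->data = 1;
 *	atomic_inc_return(&ptr->ready);		// acts as a full barrier
 *
 * the store to ->data is ordered before the counter update, whereas a plain
 * atomic_inc(&ptr->ready) would give no such ordering guarantee.
 */
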
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#define atomic_andnot		atomic_andnot
#define atomic_fetch_andnot	atomic_fetch_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor)
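
/*
 * Illustrative only (hypothetical flag word): atomic_andnot() clears bits in
 * one atomic RMW, mapping to the "bic" instruction on LLSC parts:
 *
 *	static atomic_t flags = ATOMIC_INIT(0xff);
 *
 *	atomic_andnot(0x0f, &flags);	// flags.counter is now 0xf0
 */
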
#else	/* CONFIG_ARC_PLAT_EZNPS */

static inline int atomic_read(const atomic_t *v)
	__asm__ __volatile__(

static inline void atomic_set(atomic_t *v, int i)
	__asm__ __volatile__(
	: "r"(i), "r"(&v->counter)

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
	__asm__ __volatile__(						\
	: "r"(i), "r"(&v->counter), "i"(asm_op)				\
	: "r2", "r3", "memory");					\

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
	unsigned int temp = i;						\
	/* Explicit full memory barrier needed before/after */		\
	__asm__ __volatile__(						\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
	unsigned int temp = i;						\
	/* Explicit full memory barrier needed before/after */		\
	__asm__ __volatile__(						\
	: "r"(&v->counter), "i"(asm_op)					\
	: "r2", "r3", "memory");					\

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, CTOP_INST_AADD_DI_R2_R2_R3)

#define atomic_sub(i, v) atomic_add(-(i), (v))
#define atomic_sub_return(i, v) atomic_add_return(-(i), (v))
#define atomic_fetch_sub(i, v) atomic_fetch_add(-(i), (v))
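
/*
 * Illustrative only: on this platform subtraction simply reuses the atomic
 * add CTOP instruction with a negated operand, e.g.
 *
 *	atomic_sub(5, &v);		is	atomic_add(-5, &v);
 *	atomic_sub_return(1, &v);	is	atomic_add_return(-1, &v);
 */
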
#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, CTOP_INST_AAND_DI_R2_R2_R3)
ATOMIC_OPS(or, |=, CTOP_INST_AOR_DI_R2_R2_R3)
ATOMIC_OPS(xor, ^=, CTOP_INST_AXOR_DI_R2_R2_R3)

#endif /* CONFIG_ARC_PLAT_EZNPS */

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#ifdef CONFIG_GENERIC_ATOMIC64

#include <asm-generic/atomic64.h>

#else	/* Kconfig ensures this is only enabled with needed h/w assist */

/*
 * ARCv2 supports 64-bit exclusive load (LLOCKD) / store (SCONDD)
 *  - The address HAS to be 64-bit aligned
 *  - There are 2 semantics involved here:
 *    = exclusive implies no interim update between load/store to same addr
 *    = both words are observed/updated together: this is guaranteed even
 *      for regular 64-bit load (LDD) / store (STD). Thus atomic64_set()
 *      is NOT required to use LLOCKD+SCONDD, STD suffices
 */

typedef struct {
	s64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(a) { (a) }
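
/*
 * Illustrative only (hypothetical struct): the __aligned(8) on the counter is
 * what satisfies the "address HAS to be 64-bit aligned" rule above, even when
 * an atomic64_t sits after a 32-bit member:
 *
 *	struct foo {
 *		u32		nr;
 *		atomic64_t	cnt;	// padded so &cnt is 8-byte aligned
 *	};
 *
 *	static struct foo f = { .cnt = ATOMIC64_INIT(0) };
 */
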
static inline s64 atomic64_read(const atomic64_t *v)
	__asm__ __volatile__(

static inline void atomic64_set(atomic64_t *v, s64 a)
{
	/*
	 * This could have been a simple assignment in "C" but would need an
	 * explicit volatile. Otherwise gcc optimizers could elide the store,
	 * which broke the atomic64 self-test.
	 * In the inline asm version, the memory clobber is needed for the
	 * exact same reason, to tell gcc about the store.
	 *
	 * This however is not needed for the sibling atomic64_add() etc since
	 * both load and store are explicitly done in inline asm. As long as
	 * the API is used for each access, gcc has no way to optimize away
	 * any load/store.
	 */
	__asm__ __volatile__(
	"	std   %0, [%1]	\n"
	:
	: "r"(a), "r"(&v->counter)
	: "memory");
}
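
/*
 * Illustrative only: the "simple assignment in C" alternative the comment
 * above alludes to would be a volatile store the compiler cannot elide, e.g.
 *
 *	WRITE_ONCE(v->counter, a);
 *
 * The inline asm form presumably also guarantees the store is the single STD
 * that the 64-bit atomicity note above relies on, rather than whatever the
 * compiler emits for a 64-bit volatile access.
 */
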
#define ATOMIC64_OP(op, op1, op2)					\
static inline void atomic64_##op(s64 a, atomic64_t *v)			\
	__asm__ __volatile__(						\
	"	llockd  %0, [%1]	\n"				\
	"	" #op1 " %L0, %L0, %L2	\n"				\
	"	" #op2 " %H0, %H0, %H2	\n"				\
	"	scondd  %0, [%1]	\n"				\
	: "r"(&v->counter), "ir"(a)					\

#define ATOMIC64_OP_RETURN(op, op1, op2)				\
static inline s64 atomic64_##op##_return(s64 a, atomic64_t *v)		\
	__asm__ __volatile__(						\
	"	llockd  %0, [%1]	\n"				\
	"	" #op1 " %L0, %L0, %L2	\n"				\
	"	" #op2 " %H0, %H0, %H2	\n"				\
	"	scondd  %0, [%1]	\n"				\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* memory clobber comes from smp_mb() */	\

#define ATOMIC64_FETCH_OP(op, op1, op2)					\
static inline s64 atomic64_fetch_##op(s64 a, atomic64_t *v)		\
	__asm__ __volatile__(						\
	"	llockd  %0, [%2]	\n"				\
	"	" #op1 " %L1, %L0, %L3	\n"				\
	"	" #op2 " %H1, %H0, %H3	\n"				\
	"	scondd  %1, [%2]	\n"				\
	: "=&r"(orig), "=&r"(val)					\
	: "r"(&v->counter), "ir"(a)					\
	: "cc");	/* memory clobber comes from smp_mb() */	\

#define ATOMIC64_OPS(op, op1, op2)					\
	ATOMIC64_OP(op, op1, op2)					\
	ATOMIC64_OP_RETURN(op, op1, op2)				\
	ATOMIC64_FETCH_OP(op, op1, op2)

#define atomic64_andnot		atomic64_andnot
#define atomic64_fetch_andnot	atomic64_fetch_andnot

ATOMIC64_OPS(add, add.f, adc)
ATOMIC64_OPS(sub, sub.f, sbc)
ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, or, or)
ATOMIC64_OPS(xor, xor, xor)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
static inline s64
atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	brne    %L0, %L2, 2f	\n"
	"	brne    %H0, %H2, 2f	\n"
	"	scondd  %3, [%1]	\n"
	: "r"(ptr), "ir"(expected), "r"(new)
	: "cc");	/* memory clobber comes from smp_mb() */

static inline s64 atomic64_xchg(atomic64_t *ptr, s64 new)
	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	scondd  %2, [%1]	\n"
	: "cc");	/* memory clobber comes from smp_mb() */
/**
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline s64 atomic64_dec_if_positive(atomic64_t *v)
	__asm__ __volatile__(
	"1:	llockd  %0, [%1]	\n"
	"	sub.f   %L0, %L0, 1	# w0 - 1, set C on borrow\n"
	"	sub.c   %H0, %H0, 1	# if C set, w1 - 1\n"
	"	brlt    %H0, 0, 2f	\n"
	"	scondd  %0, [%1]	\n"
	: "cc");	/* memory clobber comes from smp_mb() */

#define atomic64_dec_if_positive atomic64_dec_if_positive
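
/*
 * Illustrative only (hypothetical pool counter): since the return value is
 * old - 1 even when nothing was decremented, a negative result means the
 * counter was already at (or below) zero and was left untouched:
 *
 *	if (atomic64_dec_if_positive(&pool->available) < 0)
 *		return -EBUSY;		// exhausted, counter not modified
 *	// else: one slot successfully reserved
 */
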
/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, if it was not @u.
 * Returns the old value of @v.
 */
static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
	__asm__ __volatile__(
	"1:	llockd  %0, [%2]	\n"
	"	brne	%L0, %L4, 2f	# continue to add since v != u \n"
	"	breq.d	%H0, %H4, 3f	# return since v == u \n"
	"	add.f   %L1, %L0, %L3	\n"
	"	adc     %H1, %H0, %H3	\n"
	"	scondd  %1, [%2]	\n"
	: "=&r"(old), "=&r" (temp)
	: "r"(&v->counter), "r"(a), "r"(u)
	: "cc");	/* memory clobber comes from smp_mb() */

#define atomic64_fetch_add_unless atomic64_fetch_add_unless
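
/*
 * Illustrative only (hypothetical refcount): the classic use is "get a
 * reference unless the count already hit zero", since the returned old value
 * tells the caller whether the add actually happened:
 *
 *	if (atomic64_fetch_add_unless(&obj->refcnt, 1, 0) == 0)
 *		return NULL;	// already being torn down, no ref taken
 *	return obj;
 */
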
#endif	/* !CONFIG_GENERIC_ATOMIC64 */

#endif	/* !__ASSEMBLY__ */
#endif