/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc..
 *
 * And probably incredibly slow on parisc. OTOH, we don't
 * have to write any serious assembly. prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
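
/*
 * A quick worked example of the hash above, assuming a typical
 * L1_CACHE_BYTES of 64 (the real value comes from <asm/cache.h>):
 * an atomic_t at address 0x1000 and one at 0x1010 share a cacheline,
 * so both map to __atomic_hash[(0x1000/64) & 3] == __atomic_hash[0],
 * while one at 0x1040 maps to __atomic_hash[1].  With only
 * ATOMIC_HASH_SIZE == 4 locks, distinct cachelines can still collide
 * (0x1100 also hashes to __atomic_hash[0]); that costs some
 * contention, never correctness.
 */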

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)

#else
# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif
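
/*
 * Either way, the pair is meant to bracket a plain read-modify-write
 * of the counter.  A minimal sketch of the pattern, given an
 * atomic_t *v and an int i (this is what the ATOMIC_OP macros further
 * down expand to):
 *
 *	unsigned long flags;
 *
 *	_atomic_spin_lock_irqsave(v, flags);
 *	v->counter += i;
 *	_atomic_spin_unlock_irqrestore(v, flags);
 *
 * On SMP this takes the hashed spinlock with interrupts disabled; on
 * UP it degenerates to local_irq_save()/local_irq_restore() only.
 */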

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

static __inline__ void arch_atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

#define arch_atomic_set_release(v, i)	arch_atomic_set((v), (i))

static __inline__ int arch_atomic_read(const atomic_t *v)
{
	return READ_ONCE((v)->counter);
}
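
/*
 * A reader therefore never touches the hashed locks.  A small sketch,
 * using a hypothetical counter:
 *
 *	static atomic_t nr_foo = ATOMIC_INIT(0);
 *	int val;
 *
 *	arch_atomic_set(&nr_foo, 42);		- serialised via the hash lock
 *	val = arch_atomic_read(&nr_foo);	- plain READ_ONCE(), no lock
 *
 * arch_atomic_set() still takes the lock so the store cannot be lost
 * inside a concurrent locked read-modify-write of the same counter.
 */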

#define ATOMIC_OP(op, c_op)						\
static __inline__ void arch_atomic_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC_OP_RETURN(op, c_op)					\
static __inline__ int arch_atomic_##op##_return(int i, atomic_t *v)	\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static __inline__ int arch_atomic_fetch_##op(int i, atomic_t *v)	\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_OP_RETURN(op, c_op)					\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)

#define arch_atomic_add_return	arch_atomic_add_return
#define arch_atomic_sub_return	arch_atomic_sub_return
#define arch_atomic_fetch_add	arch_atomic_fetch_add
#define arch_atomic_fetch_sub	arch_atomic_fetch_sub
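
/*
 * ATOMIC_OPS(add, +=) and ATOMIC_OPS(sub, -=) above thus generate six
 * functions.  For add, roughly:
 *
 *	arch_atomic_add(i, v);			- no return value
 *	newval = arch_atomic_add_return(i, v);	- returns the updated value
 *	oldval = arch_atomic_fetch_add(i, v);	- returns the value before the add
 *
 * each performing "v->counter += i" under the hashed spinlock taken by
 * _atomic_spin_lock_irqsave().
 */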

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)

#define arch_atomic_fetch_and	arch_atomic_fetch_and
#define arch_atomic_fetch_or	arch_atomic_fetch_or
#define arch_atomic_fetch_xor	arch_atomic_fetch_xor

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }
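
/*
 * ATOMIC64_INIT() mirrors ATOMIC_INIT() from <linux/types.h> and is
 * intended for static initialisation, e.g. with a hypothetical counter:
 *
 *	static atomic64_t bytes_written = ATOMIC64_INIT(0);
 *
 * Run-time initialisation uses arch_atomic64_set() further down.
 */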

#define ATOMIC64_OP(op, c_op)						\
static __inline__ void arch_atomic64_##op(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC64_OP_RETURN(op, c_op)					\
static __inline__ s64 arch_atomic64_##op##_return(s64 i, atomic64_t *v) \
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_FETCH_OP(op, c_op)					\
static __inline__ s64 arch_atomic64_fetch_##op(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#define arch_atomic64_add_return	arch_atomic64_add_return
#define arch_atomic64_sub_return	arch_atomic64_sub_return
#define arch_atomic64_fetch_add		arch_atomic64_fetch_add
#define arch_atomic64_fetch_sub		arch_atomic64_fetch_sub

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#define arch_atomic64_fetch_and		arch_atomic64_fetch_and
#define arch_atomic64_fetch_or		arch_atomic64_fetch_or
#define arch_atomic64_fetch_xor		arch_atomic64_fetch_xor

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static __inline__ void
arch_atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

#define arch_atomic64_set_release(v, i)	arch_atomic64_set((v), (i))

static __inline__ s64
arch_atomic64_read(const atomic64_t *v)
{
	return READ_ONCE((v)->counter);
}

#endif /* !CONFIG_64BIT */


#endif /* _ASM_PARISC_ATOMIC_H_ */