#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>
#include <asm/barrier.h>

#define ATOMIC_INIT(i)		{ (i) }
#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic64_read(v)	READ_ONCE((v)->counter)

#define atomic_set(v,i)		WRITE_ONCE(((v)->counter), (i))
#define atomic64_set(v,i)	WRITE_ONCE(((v)->counter), (i))

#define ATOMIC_OP(op, c_op)						\
static __inline__ int							\
ia64_atomic_##op (int i, atomic_t *v)					\
{									\
	__s32 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return new;							\
}

ATOMIC_OP(add, +)
ATOMIC_OP(sub, -)
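
/*
 * As a sketch of what the macro generates, ATOMIC_OP(add, +) above
 * expands to roughly:
 *
 *	static __inline__ int
 *	ia64_atomic_add (int i, atomic_t *v)
 *	{
 *		__s32 old, new;
 *		CMPXCHG_BUGCHECK_DECL
 *
 *		do {
 *			CMPXCHG_BUGCHECK(v);
 *			old = atomic_read(v);
 *			new = old + i;
 *		} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old);
 *		return new;
 *	}
 *
 * i.e. a compare-and-swap retry loop: re-read the counter and retry
 * whenever another CPU modified it between the read and the cmpxchg.
 * CMPXCHG_BUGCHECK() is a debugging hook supplied by <asm/intrinsics.h>.
 */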

#define atomic_add_return(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic_add(__ia64_aar_i, v);			\
})
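
/*
 * The __builtin_constant_p() test above (mirrored in
 * atomic_sub_return() below, which negates the constant) exists
 * because the ia64 fetchadd instruction only accepts the immediates
 * -16, -8, -4, -1, 1, 4, 8 and 16.  An illustrative sketch:
 *
 *	atomic_add_return(8, v);	// single fetchadd4.acq
 *	atomic_add_return(5, v);	// ia64_atomic_add() cmpxchg loop
 */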

#define atomic_sub_return(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))		\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic_sub(__ia64_asr_i, v);			\
})

ATOMIC_OP(and, &)
ATOMIC_OP(or, |)
ATOMIC_OP(xor, ^)

#define atomic_and(i,v)	(void)ia64_atomic_and(i,v)
#define atomic_or(i,v)	(void)ia64_atomic_or(i,v)
#define atomic_xor(i,v)	(void)ia64_atomic_xor(i,v)

#undef ATOMIC_OP

#define ATOMIC64_OP(op, c_op)						\
static __inline__ long							\
ia64_atomic64_##op (__s64 i, atomic64_t *v)				\
{									\
	__s64 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic64_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return new;							\
}

ATOMIC64_OP(add, +)
ATOMIC64_OP(sub, -)

#define atomic64_add_return(i,v)					\
({									\
	long __ia64_aar_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_aar_i ==  1) || (__ia64_aar_i ==   4)		\
	     || (__ia64_aar_i ==  8) || (__ia64_aar_i ==  16)		\
	     || (__ia64_aar_i == -1) || (__ia64_aar_i ==  -4)		\
	     || (__ia64_aar_i == -8) || (__ia64_aar_i == -16)))		\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic64_add(__ia64_aar_i, v);			\
})

#define atomic64_sub_return(i,v)					\
({									\
	long __ia64_asr_i = (i);					\
	(__builtin_constant_p(i)					\
	 && (   (__ia64_asr_i ==  1) || (__ia64_asr_i ==   4)		\
	     || (__ia64_asr_i ==  8) || (__ia64_asr_i ==  16)		\
	     || (__ia64_asr_i == -1) || (__ia64_asr_i ==  -4)		\
	     || (__ia64_asr_i == -8) || (__ia64_asr_i == -16)))		\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic64_sub(__ia64_asr_i, v);			\
})

ATOMIC64_OP(and, &)
ATOMIC64_OP(or, |)
ATOMIC64_OP(xor, ^)

#define atomic64_and(i,v)	(void)ia64_atomic64_and(i,v)
#define atomic64_or(i,v)	(void)ia64_atomic64_or(i,v)
#define atomic64_xor(i,v)	(void)ia64_atomic64_xor(i,v)

#undef ATOMIC64_OP

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic64_cmpxchg(v, old, new) \
	(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
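
/*
 * atomic_cmpxchg() returns the value the counter actually held, which
 * equals 'old' exactly when the swap happened.  A sketch of the usual
 * caller pattern ('refs' is an illustrative counter, not part of this
 * API):
 *
 *	int seen = atomic_read(&refs);
 *	for (;;) {
 *		int prev = atomic_cmpxchg(&refs, seen, seen + 1);
 *		if (prev == seen)
 *			break;		// increment took effect
 *		seen = prev;		// raced: retry with current value
 *	}
 */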

static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;
	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
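
/*
 * Note that __atomic_add_unless() returns the value the counter had
 * before any addition, not a boolean.  The generic wrapper in
 * <linux/atomic.h> derives the boolean from it, roughly:
 *
 *	#define atomic_add_unless(v, a, u)				\
 *		(__atomic_add_unless((v), (a), (u)) != (u))
 *
 * whereas atomic64_add_unless() below folds that comparison in and
 * returns the boolean directly.
 */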

static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
{
	long c, old;
	c = atomic64_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic64_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c != (u);
}

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
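
/*
 * atomic64_add_unless() increments only while the counter differs
 * from 'u', so atomic64_inc_not_zero() reports whether it managed to
 * take a reference.  A sketch of the usual lookup-side idiom ('obj'
 * and 'refcnt' are illustrative names):
 *
 *	if (!atomic64_inc_not_zero(&obj->refcnt))
 *		return NULL;	// object is already being torn down
 */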

/*
 * Atomically add I to V and return TRUE if the resulting value is
 * negative.
 */
static __inline__ int
atomic_add_negative (int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}

static __inline__ long
atomic64_add_negative (__s64 i, atomic64_t *v)
{
	return atomic64_add_return(i, v) < 0;
}

#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
#define atomic64_inc_return(v)		atomic64_add_return(1, (v))

#define atomic_sub_and_test(i,v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
#define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
#define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v)	(atomic64_sub_return(1, (v)) == 0)
#define atomic64_inc_and_test(v)	(atomic64_add_return(1, (v)) == 0)
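
/*
 * The *_and_test() forms report whether the new value reached zero,
 * the usual release-side counterpart of the idiom above (again with
 * illustrative names):
 *
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		kfree(obj);
 */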

#define atomic_add(i,v)			(void)atomic_add_return((i), (v))
#define atomic_sub(i,v)			(void)atomic_sub_return((i), (v))
#define atomic_inc(v)			atomic_add(1, (v))
#define atomic_dec(v)			atomic_sub(1, (v))

#define atomic64_add(i,v)		(void)atomic64_add_return((i), (v))
#define atomic64_sub(i,v)		(void)atomic64_sub_return((i), (v))
#define atomic64_inc(v)			atomic64_add(1, (v))
#define atomic64_dec(v)			atomic64_sub(1, (v))

#endif /* _ASM_IA64_ATOMIC_H */