/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_ATOMIC64_32_H
#define _ASM_X86_ATOMIC64_32_H

#include <linux/compiler.h>
#include <linux/types.h>
//#include <asm/cmpxchg.h>

/* A 64-bit atomic type */

typedef struct {
	s64 __aligned(8) counter;
} atomic64_t;

#define ATOMIC64_INIT(val)	{ (val) }

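/*
 * Illustrative usage (hypothetical names; callers normally go through the
 * generic atomic64_*() wrappers, which resolve to the arch_atomic64_*()
 * operations defined below):
 *
 *	static atomic64_t bytes_written = ATOMIC64_INIT(0);
 *
 *	atomic64_add(len, &bytes_written);
 *	total = atomic64_read(&bytes_written);
 */
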
/*
 * Read an atomic64_t non-atomically.
 *
 * This is intended to be used in cases where a subsequent atomic operation
 * will handle the torn value, and can be used to prime the first iteration
 * of unconditional try_cmpxchg() loops, e.g.:
 *
 *	s64 val = arch_atomic64_read_nonatomic(v);
 *	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val OP i));
 *
 * This is NOT safe to use where the value is not always checked by a
 * subsequent atomic operation, such as in conditional try_cmpxchg() loops
 * that can break before the atomic operation, e.g.:
 *
 *	s64 val = arch_atomic64_read_nonatomic(v);
 *	do {
 *		if (condition(val))
 *			break;
 *	} while (!arch_atomic64_try_cmpxchg(v, &val, val OP i));
 */
static __always_inline s64 arch_atomic64_read_nonatomic(const atomic64_t *v)
{
	/* See comment in arch_atomic_read(). */
	return __READ_ONCE(v->counter);
}

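/*
 * The operations below are backed by out-of-line assembly helpers
 * (atomic64_*_cx8 and, on CPUs without CMPXCHG8B, atomic64_*_386).
 * __ATOMIC64_DECL() declares one such helper; ATOMIC64_DECL_ONE() also
 * emits an ATOMIC64_EXPORT() for it when the including file defines one.
 */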
#define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
#ifndef ATOMIC64_EXPORT
#define ATOMIC64_DECL_ONE __ATOMIC64_DECL
#else
#define ATOMIC64_DECL_ONE(sym) __ATOMIC64_DECL(sym); \
	ATOMIC64_EXPORT(atomic64_##sym)
#endif

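/*
 * With CONFIG_X86_CMPXCHG64 the CX8 helpers can be called unconditionally.
 * Otherwise alternative_call() patches the call site between the _386
 * fallback and the _cx8 helper, keyed on X86_FEATURE_CX8.
 */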
#ifdef CONFIG_X86_CMPXCHG64
#define __alternative_atomic64(f, g, out, in...) \
	asm volatile("call %c[func]" \
		     : ALT_OUTPUT_SP(out) \
		     : [func] "i" (atomic64_##g##_cx8), ## in)

#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8)
#else
#define __alternative_atomic64(f, g, out, in...) \
	alternative_call(atomic64_##f##_386, atomic64_##g##_cx8, \
			 X86_FEATURE_CX8, ASM_OUTPUT2(out), ## in)

#define ATOMIC64_DECL(sym) ATOMIC64_DECL_ONE(sym##_cx8); \
	ATOMIC64_DECL_ONE(sym##_386)

ATOMIC64_DECL_ONE(add_386);
ATOMIC64_DECL_ONE(sub_386);
ATOMIC64_DECL_ONE(inc_386);
ATOMIC64_DECL_ONE(dec_386);
#endif

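/* Common case: the _386 and _cx8 helpers for an operation share a name. */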
#define alternative_atomic64(f, out, in...) \
	__alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)

ATOMIC64_DECL(read);
ATOMIC64_DECL(set);
ATOMIC64_DECL(xchg);
ATOMIC64_DECL(add_return);
ATOMIC64_DECL(sub_return);
ATOMIC64_DECL(inc_return);
ATOMIC64_DECL(dec_return);
ATOMIC64_DECL(dec_if_positive);
ATOMIC64_DECL(inc_not_zero);
ATOMIC64_DECL(add_unless);

#undef ATOMIC64_DECL
#undef ATOMIC64_DECL_ONE
#undef __ATOMIC64_DECL
#undef ATOMIC64_EXPORT

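/*
 * cmpxchg and try_cmpxchg are built directly on arch_cmpxchg64() /
 * arch_try_cmpxchg64() rather than on the helpers declared above.
 */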
static __always_inline s64 arch_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
	return arch_cmpxchg64(&v->counter, old, new);
}
#define arch_atomic64_cmpxchg arch_atomic64_cmpxchg

static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
	return arch_try_cmpxchg64(&v->counter, old, new);
}
#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg

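/*
 * The remaining operations call the out-of-line helpers, which use a
 * register-based calling convention spelled out by the asm constraints:
 * usually the atomic64_t pointer in %esi and 64-bit values/results split
 * across register pairs.
 */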
static __always_inline s64 arch_atomic64_xchg(atomic64_t *v, s64 n)
{
	s64 o;
	unsigned high = (unsigned)(n >> 32);
	unsigned low = (unsigned)n;
	alternative_atomic64(xchg, "=&A" (o),
			     "S" (v), "b" (low), "c" (high)
			     : "memory");
	return o;
}
#define arch_atomic64_xchg arch_atomic64_xchg

static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{
	unsigned high = (unsigned)(i >> 32);
	unsigned low = (unsigned)i;
	alternative_atomic64(set, /* no output */,
			     "S" (v), "b" (low), "c" (high)
			     : "eax", "edx", "memory");
}

static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{
	s64 r;
	alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
	return r;
}

static __always_inline s64 arch_atomic64_add_return(s64 i, atomic64_t *v)
{
	alternative_atomic64(add_return,
			     ASM_OUTPUT2("+A" (i), "+c" (v)),
			     ASM_NO_INPUT_CLOBBER("memory"));
	return i;
}
#define arch_atomic64_add_return arch_atomic64_add_return

static __always_inline s64 arch_atomic64_sub_return(s64 i, atomic64_t *v)
{
	alternative_atomic64(sub_return,
			     ASM_OUTPUT2("+A" (i), "+c" (v)),
			     ASM_NO_INPUT_CLOBBER("memory"));
	return i;
}
#define arch_atomic64_sub_return arch_atomic64_sub_return

static __always_inline s64 arch_atomic64_inc_return(atomic64_t *v)
{
	s64 a;
	alternative_atomic64(inc_return, "=&A" (a),
			     "S" (v) : "memory", "ecx");
	return a;
}
#define arch_atomic64_inc_return arch_atomic64_inc_return

static __always_inline s64 arch_atomic64_dec_return(atomic64_t *v)
{
	s64 a;
	alternative_atomic64(dec_return, "=&A" (a),
			     "S" (v) : "memory", "ecx");
	return a;
}
#define arch_atomic64_dec_return arch_atomic64_dec_return

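/*
 * The non-value-returning add/sub/inc/dec have dedicated _386 helpers; on
 * CX8-capable CPUs they reuse the *_return_cx8 helpers and ignore the
 * returned value.
 */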
static __always_inline void arch_atomic64_add(s64 i, atomic64_t *v)
{
	__alternative_atomic64(add, add_return,
			       ASM_OUTPUT2("+A" (i), "+c" (v)),
			       ASM_NO_INPUT_CLOBBER("memory"));
}

static __always_inline void arch_atomic64_sub(s64 i, atomic64_t *v)
{
	__alternative_atomic64(sub, sub_return,
			       ASM_OUTPUT2("+A" (i), "+c" (v)),
			       ASM_NO_INPUT_CLOBBER("memory"));
}

static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
	__alternative_atomic64(inc, inc_return, /* no output */,
			       "S" (v) : "memory", "eax", "ecx", "edx");
}
#define arch_atomic64_inc arch_atomic64_inc

static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
	__alternative_atomic64(dec, dec_return, /* no output */,
			       "S" (v) : "memory", "eax", "ecx", "edx");
}
#define arch_atomic64_dec arch_atomic64_dec

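/*
 * Conditional operations: add_unless() adds @a to @v unless @v == @u and
 * returns non-zero if the addition was performed; inc_not_zero() and
 * dec_if_positive() follow the generic atomic64 semantics of the same name.
 */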
static __always_inline int arch_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
{
	unsigned low = (unsigned)u;
	unsigned high = (unsigned)(u >> 32);
	alternative_atomic64(add_unless,
			     ASM_OUTPUT2("+A" (a), "+c" (low), "+D" (high)),
			     "S" (v) : "memory");
	return (int)a;
}
#define arch_atomic64_add_unless arch_atomic64_add_unless

static __always_inline int arch_atomic64_inc_not_zero(atomic64_t *v)
{
	int r;
	alternative_atomic64(inc_not_zero, "=&a" (r),
			     "S" (v) : "ecx", "edx", "memory");
	return r;
}
#define arch_atomic64_inc_not_zero arch_atomic64_inc_not_zero

static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 r;
	alternative_atomic64(dec_if_positive, "=&A" (r),
			     "S" (v) : "ecx", "memory");
	return r;
}
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

#undef alternative_atomic64
#undef __alternative_atomic64

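/*
 * The bitwise and fetch_* operations below are try_cmpxchg() loops primed
 * with the non-atomic read described at the top of this file.
 */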
static __always_inline void arch_atomic64_and(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
}

static __always_inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val & i));

	return val;
}
#define arch_atomic64_fetch_and arch_atomic64_fetch_and

static __always_inline void arch_atomic64_or(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
}

static __always_inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val | i));

	return val;
}
#define arch_atomic64_fetch_or arch_atomic64_fetch_or

static __always_inline void arch_atomic64_xor(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
}

static __always_inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));

	return val;
}
#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor

static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
{
	s64 val = arch_atomic64_read_nonatomic(v);

	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val + i));

	return val;
}
#define arch_atomic64_fetch_add arch_atomic64_fetch_add

#define arch_atomic64_fetch_sub(i, v)	arch_atomic64_fetch_add(-(i), (v))

#endif /* _ASM_X86_ATOMIC64_32_H */