/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

/*
 * Note: if you use __cmpxchg64() or its variants, you need to test
 *	 for the cmpxchg8b (CX8) feature in boot_cpu_data.
 */

union __u64_halves {
	u64 full;
	struct {
		u32 low, high;
	};
};
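
/*
 * Illustrative usage sketch (not part of this header's API): a caller that
 * must also run on CPUs without cmpxchg8b is expected to check the feature
 * first, e.g. via system_has_cmpxchg64() defined at the end of this file:
 *
 *	static u64 update_val(u64 *p, u64 old, u64 new)
 *	{
 *		if (!system_has_cmpxchg64())
 *			return old;		hypothetical fallback path
 *		return __cmpxchg64(p, old, new);
 *	}
 *
 * update_val(), p, old and new are hypothetical names used only for this
 * example.
 */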

#define __arch_cmpxchg64(_ptr, _old, _new, _lock)			\
({									\
	union __u64_halves o = { .full = (_old), },			\
			   n = { .full = (_new), };			\
									\
	asm volatile(_lock "cmpxchg8b %[ptr]"				\
		     : [ptr] "+m" (*(_ptr)),				\
		       "+a" (o.low), "+d" (o.high)			\
		     : "b" (n.low), "c" (n.high)			\
		     : "memory");					\
									\
	o.full;								\
})

static __always_inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
	return __arch_cmpxchg64(ptr, old, new, LOCK_PREFIX);
}

static __always_inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
	return __arch_cmpxchg64(ptr, old, new,);
}

#define __arch_try_cmpxchg64(_ptr, _oldp, _new, _lock)			\
({									\
	union __u64_halves o = { .full = *(_oldp), },			\
			   n = { .full = (_new), };			\
	bool ret;							\
									\
	asm volatile(_lock "cmpxchg8b %[ptr]"				\
		     CC_SET(e)						\
		     : CC_OUT(e) (ret),					\
		       [ptr] "+m" (*(_ptr)),				\
		       "+a" (o.low), "+d" (o.high)			\
		     : "b" (n.low), "c" (n.high)			\
		     : "memory");					\
									\
	if (unlikely(!ret))						\
		*(_oldp) = o.full;					\
									\
	likely(ret);							\
})

static __always_inline bool __try_cmpxchg64(volatile u64 *ptr, u64 *oldp, u64 new)
{
	return __arch_try_cmpxchg64(ptr, oldp, new, LOCK_PREFIX);
}

static __always_inline bool __try_cmpxchg64_local(volatile u64 *ptr, u64 *oldp, u64 new)
{
	return __arch_try_cmpxchg64(ptr, oldp, new,);
}

#ifdef CONFIG_X86_CMPXCHG64

#define arch_cmpxchg64 __cmpxchg64

#define arch_cmpxchg64_local __cmpxchg64_local

#define arch_try_cmpxchg64 __try_cmpxchg64

#define arch_try_cmpxchg64_local __try_cmpxchg64_local

#else

/*
 * When building a kernel capable of running on the 80386 and 80486, it may
 * be necessary to emulate cmpxchg8b, since those CPUs lack the instruction.
 */
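
/*
 * Rough sketch of what the emulation fallback provides (illustrative only;
 * the real cmpxchg8b_emu helper lives in arch/x86/lib/cmpxchg8b_emu.S and
 * follows the register convention implied by the asm constraints below:
 * pointer in %esi, old value in %edx:%eax, new value in %ecx:%ebx, old
 * memory value returned in %edx:%eax):
 *
 *	u64 cmpxchg8b_emu_like(volatile u64 *ptr, u64 old, u64 new)
 *	{
 *		u64 cur = *ptr;
 *		if (cur == old)
 *			*ptr = new;
 *		return cur;
 *	}
 *
 * cmpxchg8b_emu_like() is a hypothetical name for this sketch; the interrupt
 * disabling that makes the compare-and-store atomic on these UP-only CPUs is
 * omitted here.
 */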

#define __arch_cmpxchg64_emu(_ptr, _old, _new, _lock_loc, _lock)	\
({									\
	union __u64_halves o = { .full = (_old), },			\
			   n = { .full = (_new), };			\
									\
	asm volatile(ALTERNATIVE(_lock_loc				\
				 "call cmpxchg8b_emu",			\
				 _lock "cmpxchg8b %a[ptr]", X86_FEATURE_CX8) \
		     : ALT_OUTPUT_SP("+a" (o.low), "+d" (o.high))	\
		     : "b" (n.low), "c" (n.high), [ptr] "S" (_ptr)	\
		     : "memory");					\
									\
	o.full;								\
})

static __always_inline u64 arch_cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
	return __arch_cmpxchg64_emu(ptr, old, new, LOCK_PREFIX_HERE, "lock; ");
}
#define arch_cmpxchg64 arch_cmpxchg64

static __always_inline u64 arch_cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
	return __arch_cmpxchg64_emu(ptr, old, new, ,);
}
#define arch_cmpxchg64_local arch_cmpxchg64_local

#define __arch_try_cmpxchg64_emu(_ptr, _oldp, _new, _lock_loc, _lock)	\
({									\
	union __u64_halves o = { .full = *(_oldp), },			\
			   n = { .full = (_new), };			\
	bool ret;							\
									\
	asm volatile(ALTERNATIVE(_lock_loc				\
				 "call cmpxchg8b_emu",			\
				 _lock "cmpxchg8b %a[ptr]", X86_FEATURE_CX8) \
		     CC_SET(e)						\
		     : ALT_OUTPUT_SP(CC_OUT(e) (ret),			\
				     "+a" (o.low), "+d" (o.high))	\
		     : "b" (n.low), "c" (n.high), [ptr] "S" (_ptr)	\
		     : "memory");					\
									\
	if (unlikely(!ret))						\
		*(_oldp) = o.full;					\
									\
	likely(ret);							\
})

static __always_inline bool arch_try_cmpxchg64(volatile u64 *ptr, u64 *oldp, u64 new)
{
	return __arch_try_cmpxchg64_emu(ptr, oldp, new, LOCK_PREFIX_HERE, "lock; ");
}
#define arch_try_cmpxchg64 arch_try_cmpxchg64

static __always_inline bool arch_try_cmpxchg64_local(volatile u64 *ptr, u64 *oldp, u64 new)
{
	return __arch_try_cmpxchg64_emu(ptr, oldp, new, ,);
}
#define arch_try_cmpxchg64_local arch_try_cmpxchg64_local

#endif

#define system_has_cmpxchg64()		boot_cpu_has(X86_FEATURE_CX8)

#endif /* _ASM_X86_CMPXCHG_32_H */