/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_CMPXCHG_32_H
#define _ASM_X86_CMPXCHG_32_H

/*
 * Note: if you use __cmpxchg64() or its variants,
 *       you need to test for the feature in boot_cpu_data.
 */
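
/*
 * Illustrative sketch only (not part of the original header): a caller
 * of the raw __cmpxchg64() helpers is expected to check for CMPXCHG8B
 * support first, e.g. with system_has_cmpxchg64() defined at the end of
 * this file.  fallback_update_locked() below is a hypothetical fallback
 * path, not a real kernel helper.
 *
 *	if (system_has_cmpxchg64())
 *		old = __cmpxchg64(&val, old, new);
 *	else
 *		old = fallback_update_locked(&val, old, new);
 */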
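/*
 * cmpxchg8b operates on a 64-bit memory operand via register pairs:
 * the expected old value in EDX:EAX and the new value in ECX:EBX.  This
 * union lets the helpers below view a u64 as those two 32-bit halves.
 */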
union __u64_halves {
        u64 full;
        struct {
                u32 low, high;
        };
};

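/*
 * Issue cmpxchg8b on *_ptr, optionally prefixed by _lock, and evaluate
 * to the value that was observed in memory: equal to _old on success,
 * the conflicting value otherwise.
 */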
#define __arch_cmpxchg64(_ptr, _old, _new, _lock)                       \
({                                                                      \
        union __u64_halves o = { .full = (_old), },                     \
                           n = { .full = (_new), };                     \
                                                                        \
        asm volatile(_lock "cmpxchg8b %[ptr]"                           \
                     : [ptr] "+m" (*(_ptr)),                            \
                       "+a" (o.low), "+d" (o.high)                      \
                     : "b" (n.low), "c" (n.high)                        \
                     : "memory");                                       \
                                                                        \
        o.full;                                                         \
})

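/*
 * __cmpxchg64() uses LOCK_PREFIX and is atomic across CPUs;
 * __cmpxchg64_local() passes an empty lock argument and is only safe
 * for data that no other CPU can access concurrently.
 */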
static __always_inline u64 __cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
        return __arch_cmpxchg64(ptr, old, new, LOCK_PREFIX);
}

static __always_inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
        return __arch_cmpxchg64(ptr, old, new,);
}

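/*
 * Try-variant: evaluates to true when the exchange happened.  The
 * success flag is taken straight from ZF via CC_SET()/CC_OUT(), and on
 * failure the value observed in memory is written back through _oldp
 * so the caller can retry without an extra load.
 */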
#define __arch_try_cmpxchg64(_ptr, _oldp, _new, _lock)                  \
({                                                                      \
        union __u64_halves o = { .full = *(_oldp), },                   \
                           n = { .full = (_new), };                     \
        bool ret;                                                       \
                                                                        \
        asm volatile(_lock "cmpxchg8b %[ptr]"                           \
                     CC_SET(e)                                          \
                     : CC_OUT(e) (ret),                                 \
                       [ptr] "+m" (*(_ptr)),                            \
                       "+a" (o.low), "+d" (o.high)                      \
                     : "b" (n.low), "c" (n.high)                        \
                     : "memory");                                       \
                                                                        \
        if (unlikely(!ret))                                             \
                *(_oldp) = o.full;                                      \
                                                                        \
        likely(ret);                                                    \
})

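/*
 * As above: __try_cmpxchg64() is the SMP-safe (LOCK-prefixed) form,
 * __try_cmpxchg64_local() the CPU-local one.
 */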
static __always_inline bool __try_cmpxchg64(volatile u64 *ptr, u64 *oldp, u64 new)
{
        return __arch_try_cmpxchg64(ptr, oldp, new, LOCK_PREFIX);
}

static __always_inline bool __try_cmpxchg64_local(volatile u64 *ptr, u64 *oldp, u64 new)
{
        return __arch_try_cmpxchg64(ptr, oldp, new,);
}

#ifdef CONFIG_X86_CMPXCHG64

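/*
 * CONFIG_X86_CMPXCHG64 means every supported target CPU has CMPXCHG8B,
 * so the arch_* entry points map directly to the inline helpers above.
 */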
#define arch_cmpxchg64 __cmpxchg64

#define arch_cmpxchg64_local __cmpxchg64_local

#define arch_try_cmpxchg64 __try_cmpxchg64

#define arch_try_cmpxchg64_local __try_cmpxchg64_local

#else

/*
 * The kernel may be built to also run on the 80386 and 80486, which
 * lack CMPXCHG8B, so cmpxchg8b may have to be emulated on those CPUs.
 */
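/*
 * Same contract as __arch_cmpxchg64(), but boot-time patched via
 * ALTERNATIVE(): CPUs with X86_FEATURE_CX8 get the real (optionally
 * locked) cmpxchg8b, older ones call the cmpxchg8b_emu helper from
 * arch/x86/lib/cmpxchg8b_emu.S, which takes the operand address in
 * %esi (hence the "S" constraint and the %a[ptr] addressing).
 */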
#define __arch_cmpxchg64_emu(_ptr, _old, _new, _lock_loc, _lock)        \
({                                                                      \
        union __u64_halves o = { .full = (_old), },                     \
                           n = { .full = (_new), };                     \
                                                                        \
        asm volatile(ALTERNATIVE(_lock_loc                              \
                                 "call cmpxchg8b_emu",                  \
                                 _lock "cmpxchg8b %a[ptr]", X86_FEATURE_CX8) \
                     : ALT_OUTPUT_SP("+a" (o.low), "+d" (o.high))       \
                     : "b" (n.low), "c" (n.high), [ptr] "S" (_ptr)      \
                     : "memory");                                       \
                                                                        \
        o.full;                                                         \
})

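/*
 * For the SMP-safe form the lock prefix is split in two:
 * LOCK_PREFIX_HERE records the prefix location for SMP-alternatives
 * patching, while the literal "lock; " is attached to the cmpxchg8b
 * replacement instruction.  The _local variants pass both as empty.
 */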
static __always_inline u64 arch_cmpxchg64(volatile u64 *ptr, u64 old, u64 new)
{
        return __arch_cmpxchg64_emu(ptr, old, new, LOCK_PREFIX_HERE, "lock; ");
}
#define arch_cmpxchg64 arch_cmpxchg64

static __always_inline u64 arch_cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
{
        return __arch_cmpxchg64_emu(ptr, old, new, ,);
}
#define arch_cmpxchg64_local arch_cmpxchg64_local

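/*
 * try_cmpxchg64 counterpart of the emulated form above: same
 * ALTERNATIVE() patching, plus the ZF-based success flag and the
 * write-back of the observed value on failure.  ALT_OUTPUT_SP() adds
 * the stack-pointer output dependency needed because the asm may
 * contain a call.
 */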
#define __arch_try_cmpxchg64_emu(_ptr, _oldp, _new, _lock_loc, _lock)   \
({                                                                      \
        union __u64_halves o = { .full = *(_oldp), },                   \
                           n = { .full = (_new), };                     \
        bool ret;                                                       \
                                                                        \
        asm volatile(ALTERNATIVE(_lock_loc                              \
                                 "call cmpxchg8b_emu",                  \
                                 _lock "cmpxchg8b %a[ptr]", X86_FEATURE_CX8) \
                     CC_SET(e)                                          \
                     : ALT_OUTPUT_SP(CC_OUT(e) (ret),                   \
                                     "+a" (o.low), "+d" (o.high))       \
                     : "b" (n.low), "c" (n.high), [ptr] "S" (_ptr)      \
                     : "memory");                                       \
                                                                        \
        if (unlikely(!ret))                                             \
                *(_oldp) = o.full;                                      \
                                                                        \
        likely(ret);                                                    \
})

static __always_inline bool arch_try_cmpxchg64(volatile u64 *ptr, u64 *oldp, u64 new)
{
        return __arch_try_cmpxchg64_emu(ptr, oldp, new, LOCK_PREFIX_HERE, "lock; ");
}
#define arch_try_cmpxchg64 arch_try_cmpxchg64

static __always_inline bool arch_try_cmpxchg64_local(volatile u64 *ptr, u64 *oldp, u64 new)
{
        return __arch_try_cmpxchg64_emu(ptr, oldp, new, ,);
}
#define arch_try_cmpxchg64_local arch_try_cmpxchg64_local

#endif

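/*
 * True when the boot CPU advertises CMPXCHG8B; this is the check the
 * note at the top of the file asks callers of __cmpxchg64() to make.
 */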
#define system_has_cmpxchg64()          boot_cpu_has(X86_FEATURE_CX8)

#endif /* _ASM_X86_CMPXCHG_32_H */