/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H

#include <asm/cmpxchg.h>
#include <asm/loongarch.h>

/*
 * The "address" (in fact, offset from $r21) of a per-CPU variable is close
 * to the loading address of the main kernel image, but far from where the
 * modules are loaded. Tell the compiler this fact when using explicit relocs.
 */
#if defined(MODULE) && defined(CONFIG_AS_HAS_EXPLICIT_RELOCS)
#define PER_CPU_ATTRIBUTES    __attribute__((model("extreme")))
#endif
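
/*
 * Per-CPU variables declared with the generic DEFINE_PER_CPU() family
 * pick this attribute up via __PCPU_ATTRS(). A sketch of a module-side
 * declaration (the variable name is hypothetical):
 *
 *	static DEFINE_PER_CPU(unsigned long, my_counter);
 *
 * With the "extreme" code model the compiler materializes the full
 * 64-bit offset instead of assuming the variable sits within the
 * +/-2GiB range reachable by the default code model's relocations.
 */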

/* Use $r21 for fast access */
register unsigned long __my_cpu_offset __asm__("$r21");

static inline void set_my_cpu_offset(unsigned long off)
{
        __my_cpu_offset = off;
        csr_write64(off, PERCPU_BASE_KS);
}
#define __my_cpu_offset __my_cpu_offset
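
/*
 * Intended use is during CPU bring-up (see the arch SMP code for the
 * real call sites); a sketch for illustration:
 *
 *	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
 *
 * Writing the offset to the PERCPU_BASE_KS KScratch CSR as well lets
 * the exception entry code reload $r21 without touching memory.
 */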

#define PERCPU_OP(op, asm_op, c_op)                                     \
static inline unsigned long __percpu_##op(void *ptr,                    \
                        unsigned long val, int size)                    \
{                                                                       \
        unsigned long ret;                                              \
                                                                        \
        switch (size) {                                                 \
        case 4:                                                         \
                __asm__ __volatile__(                                   \
                "am"#asm_op".w" " %[ret], %[val], %[ptr]        \n"     \
                : [ret] "=&r" (ret), [ptr] "+ZB"(*(u32 *)ptr)           \
                : [val] "r" (val));                                     \
                break;                                                  \
        case 8:                                                         \
                __asm__ __volatile__(                                   \
                "am"#asm_op".d" " %[ret], %[val], %[ptr]        \n"     \
                : [ret] "=&r" (ret), [ptr] "+ZB"(*(u64 *)ptr)           \
                : [val] "r" (val));                                     \
                break;                                                  \
        default:                                                        \
                ret = 0;                                                \
                BUILD_BUG();                                            \
        }                                                               \
                                                                        \
        return ret c_op val;                                            \
}

PERCPU_OP(add, add, +)
PERCPU_OP(and, and, &)
PERCPU_OP(or, or, |)
#undef PERCPU_OP
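
/*
 * For reference, PERCPU_OP(add, add, +) above expands (roughly) to:
 *
 *	static inline unsigned long __percpu_add(void *ptr,
 *			unsigned long val, int size)
 *	{
 *		unsigned long ret;
 *		// amadd.w/amadd.d: ret = old *ptr, *ptr += val
 *		...
 *		return ret + val;	// i.e. the *new* value
 *	}
 *
 * Each helper returns the new value, which is why _percpu_add_return()
 * below can simply alias _percpu_add().
 */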

static inline unsigned long __percpu_read(void *ptr, int size)
{
        unsigned long ret;

        switch (size) {
        case 1:
                __asm__ __volatile__ ("ldx.b %[ret], $r21, %[ptr]       \n"
                : [ret] "=&r"(ret)
                : [ptr] "r"(ptr)
                : "memory");
                break;
        case 2:
                __asm__ __volatile__ ("ldx.h %[ret], $r21, %[ptr]       \n"
                : [ret] "=&r"(ret)
                : [ptr] "r"(ptr)
                : "memory");
                break;
        case 4:
                __asm__ __volatile__ ("ldx.w %[ret], $r21, %[ptr]       \n"
                : [ret] "=&r"(ret)
                : [ptr] "r"(ptr)
                : "memory");
                break;
        case 8:
                __asm__ __volatile__ ("ldx.d %[ret], $r21, %[ptr]       \n"
                : [ret] "=&r"(ret)
                : [ptr] "r"(ptr)
                : "memory");
                break;
        default:
                ret = 0;
                BUILD_BUG();
        }

        return ret;
}

static inline void __percpu_write(void *ptr, unsigned long val, int size)
{
        switch (size) {
        case 1:
                __asm__ __volatile__("stx.b %[val], $r21, %[ptr]        \n"
                :
                : [val] "r" (val), [ptr] "r" (ptr)
                : "memory");
                break;
        case 2:
                __asm__ __volatile__("stx.h %[val], $r21, %[ptr]        \n"
                :
                : [val] "r" (val), [ptr] "r" (ptr)
                : "memory");
                break;
        case 4:
                __asm__ __volatile__("stx.w %[val], $r21, %[ptr]        \n"
                :
                : [val] "r" (val), [ptr] "r" (ptr)
                : "memory");
                break;
        case 8:
                __asm__ __volatile__("stx.d %[val], $r21, %[ptr]        \n"
                :
                : [val] "r" (val), [ptr] "r" (ptr)
                : "memory");
                break;
        default:
                BUILD_BUG();
        }
}
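
/*
 * Note on the two helpers above: "ptr" is the address of the per-CPU
 * variable as the compiler sees it, i.e. an offset into the per-CPU
 * area, and the ldx/stx instructions add the per-CPU base in $r21 at
 * access time. A minimal sketch for a 4-byte variable (the variable
 * name is hypothetical):
 *
 *	DEFINE_PER_CPU(u32, my_var);
 *	u32 v = __percpu_read(&my_var, 4);	// ldx.w v, $r21, &my_var
 *	__percpu_write(&my_var, v + 1, 4);	// stx.w v+1, $r21, &my_var
 */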

static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
                                                int size)
{
        switch (size) {
        case 1:
        case 2:
                return __xchg_small((volatile void *)ptr, val, size);

        case 4:
                return __xchg_asm("amswap.w", (volatile u32 *)ptr, (u32)val);

        case 8:
                return __xchg_asm("amswap.d", (volatile u64 *)ptr, (u64)val);

        default:
                BUILD_BUG();
        }

        return 0;
}
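
/*
 * There is no native AM* instruction for 1- and 2-byte exchanges, so
 * __percpu_xchg() above falls back to __xchg_small() from
 * <asm/cmpxchg.h>, which emulates them with a masked ll.w/sc.w loop on
 * the containing aligned word.
 */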

/* this_cpu_cmpxchg */
#define _protect_cmpxchg_local(pcp, o, n)                       \
({                                                              \
        typeof(*raw_cpu_ptr(&(pcp))) __ret;                     \
        preempt_disable_notrace();                              \
        __ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n);       \
        preempt_enable_notrace();                               \
        __ret;                                                  \
})

#define _percpu_read(pcp)                                               \
({                                                                      \
        typeof(pcp) __retval;                                           \
        __retval = (typeof(pcp))__percpu_read(&(pcp), sizeof(pcp));     \
        __retval;                                                       \
})

#define _percpu_write(pcp, val)                                         \
do {                                                                    \
        __percpu_write(&(pcp), (unsigned long)(val), sizeof(pcp));      \
} while (0)

#define _pcp_protect(operation, pcp, val)                       \
({                                                              \
        typeof(pcp) __retval;                                   \
        preempt_disable_notrace();                              \
        __retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)),  \
                                          (val), sizeof(pcp));  \
        preempt_enable_notrace();                               \
        __retval;                                               \
})
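
/*
 * Usage sketch for the wrappers below (the variable name is
 * hypothetical):
 *
 *	DEFINE_PER_CPU(unsigned long, my_count);
 *	...
 *	this_cpu_add(my_count, 2);			// single amadd.d
 *	unsigned long c = this_cpu_read(my_count);	// ldx.d via $r21
 *
 * _pcp_protect() only needs preemption disabled long enough to resolve
 * raw_cpu_ptr() and issue the AM* access: the instruction itself is
 * atomic, so an interrupt on the local CPU cannot tear the update.
 */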

#define _percpu_add(pcp, val) \
        _pcp_protect(__percpu_add, pcp, val)

#define _percpu_add_return(pcp, val) _percpu_add(pcp, val)

#define _percpu_and(pcp, val) \
        _pcp_protect(__percpu_and, pcp, val)

#define _percpu_or(pcp, val) \
        _pcp_protect(__percpu_or, pcp, val)

#define _percpu_xchg(pcp, val) ((typeof(pcp)) \
        _pcp_protect(__percpu_xchg, pcp, (unsigned long)(val)))

#define this_cpu_add_4(pcp, val) _percpu_add(pcp, val)
#define this_cpu_add_8(pcp, val) _percpu_add(pcp, val)

#define this_cpu_add_return_4(pcp, val) _percpu_add_return(pcp, val)
#define this_cpu_add_return_8(pcp, val) _percpu_add_return(pcp, val)

#define this_cpu_and_4(pcp, val) _percpu_and(pcp, val)
#define this_cpu_and_8(pcp, val) _percpu_and(pcp, val)

#define this_cpu_or_4(pcp, val) _percpu_or(pcp, val)
#define this_cpu_or_8(pcp, val) _percpu_or(pcp, val)

#define this_cpu_read_1(pcp) _percpu_read(pcp)
#define this_cpu_read_2(pcp) _percpu_read(pcp)
#define this_cpu_read_4(pcp) _percpu_read(pcp)
#define this_cpu_read_8(pcp) _percpu_read(pcp)

#define this_cpu_write_1(pcp, val) _percpu_write(pcp, val)
#define this_cpu_write_2(pcp, val) _percpu_write(pcp, val)
#define this_cpu_write_4(pcp, val) _percpu_write(pcp, val)
#define this_cpu_write_8(pcp, val) _percpu_write(pcp, val)

#define this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_8(pcp, val) _percpu_xchg(pcp, val)

#define this_cpu_cmpxchg_1(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_2(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)

#include <asm-generic/percpu.h>

#endif /* __ASM_PERCPU_H */