/*
 * Atomic helper templates
 * Included from tcg-runtime.c and cputlb.c.
 *
 * Copyright (c) 2016 Red Hat, Inc
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/plugin.h"
#include "trace/mem.h"

#if DATA_SIZE == 16
# define SUFFIX     o
# define DATA_TYPE  Int128
# define BSWAP      bswap128
# define SHIFT      4
#elif DATA_SIZE == 8
# define SUFFIX     q
# define DATA_TYPE  uint64_t
# define SDATA_TYPE int64_t
# define BSWAP      bswap64
# define SHIFT      3
#elif DATA_SIZE == 4
# define SUFFIX     l
# define DATA_TYPE  uint32_t
# define SDATA_TYPE int32_t
# define BSWAP      bswap32
# define SHIFT      2
#elif DATA_SIZE == 2
# define SUFFIX     w
# define DATA_TYPE  uint16_t
# define SDATA_TYPE int16_t
# define BSWAP      bswap16
# define SHIFT      1
#elif DATA_SIZE == 1
# define SUFFIX     b
# define DATA_TYPE  uint8_t
# define SDATA_TYPE int8_t
# define BSWAP
# define SHIFT      0
#else
# error unsupported data size
#endif

#if DATA_SIZE >= 4
# define ABI_TYPE  DATA_TYPE
#else
# define ABI_TYPE  uint32_t
#endif
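
/*
 * Sub-word types are widened to uint32_t here because the TCG helper
 * call ABI passes and returns values as (at least) 32-bit quantities;
 * a uint8_t or uint16_t operand therefore travels through the helper
 * signature as a uint32_t.
 */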

/* Define host-endian atomic operations.  Note that END is used within
   the ATOMIC_NAME macro, and redefined below.  */
#if DATA_SIZE == 1
# define END
# define MEND _be /* either le or be would be fine */
#elif defined(HOST_WORDS_BIGENDIAN)
# define END  _be
# define MEND _be
#else
# define END  _le
# define MEND _le
#endif
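
/*
 * As a concrete illustration (the exact spelling depends on how the
 * including file defines ATOMIC_NAME and EXTRA_ARGS; cputlb.c, for
 * instance, appends a "_mmu" suffix and an oi/retaddr argument pair):
 * instantiating this template with DATA_SIZE == 4 on a little-endian
 * host produces a cmpxchg helper shaped roughly like
 *
 *   uint32_t helper_atomic_cmpxchgl_le_mmu(CPUArchState *env,
 *                                          target_ulong addr,
 *                                          uint32_t cmpv, uint32_t newv,
 *                                          TCGMemOpIdx oi, uintptr_t retaddr);
 */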

ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
                              ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
    DATA_TYPE ret;
    uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, false,
                                                           ATOMIC_MMU_IDX);

    atomic_trace_rmw_pre(env, addr, info);
#if DATA_SIZE == 16
    ret = atomic16_cmpxchg(haddr, cmpv, newv);
#else
    ret = atomic_cmpxchg__nocheck(haddr, cmpv, newv);
#endif
    ATOMIC_MMU_CLEANUP;
    atomic_trace_rmw_post(env, addr, info);
    return ret;
}

#if DATA_SIZE >= 16
#if HAVE_ATOMIC128
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;
    uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, false,
                                                           ATOMIC_MMU_IDX);

    atomic_trace_ld_pre(env, addr, info);
    val = atomic16_read(haddr);
    ATOMIC_MMU_CLEANUP;
    atomic_trace_ld_post(env, addr, info);
    return val;
}

void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
                     ABI_TYPE val EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
    uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, true,
                                                           ATOMIC_MMU_IDX);

    atomic_trace_st_pre(env, addr, info);
    atomic16_set(haddr, val);
    ATOMIC_MMU_CLEANUP;
    atomic_trace_st_post(env, addr, info);
}
#endif
#else
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
                           ABI_TYPE val EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
    DATA_TYPE ret;
    uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, false,
                                                           ATOMIC_MMU_IDX);

    atomic_trace_rmw_pre(env, addr, info);
    ret = atomic_xchg__nocheck(haddr, val);
    ATOMIC_MMU_CLEANUP;
    atomic_trace_rmw_post(env, addr, info);
    return ret;
}

#define GEN_ATOMIC_HELPER(X)                                        \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                        ABI_TYPE val EXTRA_ARGS)                    \
{                                                                   \
    ATOMIC_MMU_DECLS;                                               \
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                           \
    DATA_TYPE ret;                                                  \
    uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT,   \
                                                           false,   \
                                                           ATOMIC_MMU_IDX); \
                                                                    \
    atomic_trace_rmw_pre(env, addr, info);                          \
    ret = atomic_##X(haddr, val);                                   \
    ATOMIC_MMU_CLEANUP;                                             \
    atomic_trace_rmw_post(env, addr, info);                         \
    return ret;                                                     \
}

GEN_ATOMIC_HELPER(fetch_add)
GEN_ATOMIC_HELPER(fetch_and)
GEN_ATOMIC_HELPER(fetch_or)
GEN_ATOMIC_HELPER(fetch_xor)
GEN_ATOMIC_HELPER(add_fetch)
GEN_ATOMIC_HELPER(and_fetch)
GEN_ATOMIC_HELPER(or_fetch)
GEN_ATOMIC_HELPER(xor_fetch)

#undef GEN_ATOMIC_HELPER
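
/*
 * Naming note: the fetch_<op> helpers return the value observed before
 * the operation, while the <op>_fetch helpers return the updated value,
 * mirroring the __atomic_fetch_OP vs __atomic_OP_fetch builtins in
 * GCC/Clang.
 */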

/* These helpers are, as a whole, full barriers.  Within the helper,
 * the leading barrier is explicit and the trailing barrier is within
 * the cmpxchg primitive.
 *
 * Trace this load + RMW loop as a single RMW op.  This way, regardless
 * of CF_PARALLEL's value, we'll trace just a read and a write.
 */
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET)                \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                        ABI_TYPE xval EXTRA_ARGS)                   \
{                                                                   \
    ATOMIC_MMU_DECLS;                                               \
    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                          \
    XDATA_TYPE cmp, old, new, val = xval;                           \
    uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT,   \
                                                           false,   \
                                                           ATOMIC_MMU_IDX); \
                                                                    \
    atomic_trace_rmw_pre(env, addr, info);                          \
    smp_mb();                                                       \
    cmp = atomic_read__nocheck(haddr);                              \
    do {                                                            \
        old = cmp; new = FN(old, val);                              \
        cmp = atomic_cmpxchg__nocheck(haddr, old, new);             \
    } while (cmp != old);                                           \
    ATOMIC_MMU_CLEANUP;                                             \
    atomic_trace_rmw_post(env, addr, info);                         \
    return RET;                                                     \
}

GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umin, MIN, DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umax, MAX, DATA_TYPE, old)

GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umin_fetch, MIN, DATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)

#undef GEN_ATOMIC_HELPER_FN
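
/*
 * MIN and MAX have no __atomic builtin equivalent, so these helpers are
 * built from a cmpxchg retry loop: recompute FN(old, val) and attempt to
 * install the result until no other vCPU has modified the location in
 * between.
 */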
#endif /* DATA_SIZE >= 16 */

#undef END
#undef MEND

#if DATA_SIZE > 1

/* Define reverse-host-endian atomic operations.  Note that END is used
   within the ATOMIC_NAME macro. */
#ifdef HOST_WORDS_BIGENDIAN
# define END  _le
# define MEND _le
#else
# define END  _be
# define MEND _be
#endif
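
/*
 * On a little-endian host, for example, this section generates the "_be"
 * helpers: memory holds a byte-swapped (guest big-endian) value, so
 * operands are swapped on the way in and results swapped on the way out.
 */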

ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
                              ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
    DATA_TYPE ret;
    uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, false,
                                                           ATOMIC_MMU_IDX);

    atomic_trace_rmw_pre(env, addr, info);
#if DATA_SIZE == 16
    ret = atomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv));
#else
    ret = atomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv));
#endif
    ATOMIC_MMU_CLEANUP;
    atomic_trace_rmw_post(env, addr, info);
    return BSWAP(ret);
}

#if DATA_SIZE >= 16
#if HAVE_ATOMIC128
ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;
    uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, false,
                                                           ATOMIC_MMU_IDX);

    atomic_trace_ld_pre(env, addr, info);
    val = atomic16_read(haddr);
    ATOMIC_MMU_CLEANUP;
    atomic_trace_ld_post(env, addr, info);
    return BSWAP(val);
}

void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
                     ABI_TYPE val EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
    uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, true,
                                                           ATOMIC_MMU_IDX);

    val = BSWAP(val);
    atomic_trace_st_pre(env, addr, info);
    atomic16_set(haddr, val);
    ATOMIC_MMU_CLEANUP;
    atomic_trace_st_post(env, addr, info);
}
#endif
#else
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
                           ABI_TYPE val EXTRA_ARGS)
{
    ATOMIC_MMU_DECLS;
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
    ABI_TYPE ret;
    uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT, false,
                                                           ATOMIC_MMU_IDX);

    atomic_trace_rmw_pre(env, addr, info);
    ret = atomic_xchg__nocheck(haddr, BSWAP(val));
    ATOMIC_MMU_CLEANUP;
    atomic_trace_rmw_post(env, addr, info);
    return BSWAP(ret);
}

#define GEN_ATOMIC_HELPER(X)                                        \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                        ABI_TYPE val EXTRA_ARGS)                    \
{                                                                   \
    ATOMIC_MMU_DECLS;                                               \
    DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                           \
    DATA_TYPE ret;                                                  \
    uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT,   \
                                                           false,   \
                                                           ATOMIC_MMU_IDX); \
                                                                    \
    atomic_trace_rmw_pre(env, addr, info);                          \
    ret = atomic_##X(haddr, BSWAP(val));                            \
    ATOMIC_MMU_CLEANUP;                                             \
    atomic_trace_rmw_post(env, addr, info);                         \
    return BSWAP(ret);                                              \
}

GEN_ATOMIC_HELPER(fetch_and)
GEN_ATOMIC_HELPER(fetch_or)
GEN_ATOMIC_HELPER(fetch_xor)
GEN_ATOMIC_HELPER(and_fetch)
GEN_ATOMIC_HELPER(or_fetch)
GEN_ATOMIC_HELPER(xor_fetch)

#undef GEN_ATOMIC_HELPER

/* These helpers are, as a whole, full barriers.  Within the helper,
 * the leading barrier is explicit and the trailing barrier is within
 * the cmpxchg primitive.
 *
 * Trace this load + RMW loop as a single RMW op.  This way, regardless
 * of CF_PARALLEL's value, we'll trace just a read and a write.
 */
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET)                \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
                        ABI_TYPE xval EXTRA_ARGS)                   \
{                                                                   \
    ATOMIC_MMU_DECLS;                                               \
    XDATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;                          \
    XDATA_TYPE ldo, ldn, old, new, val = xval;                      \
    uint16_t info = glue(trace_mem_build_info_no_se, MEND)(SHIFT,   \
                                                           false,   \
                                                           ATOMIC_MMU_IDX); \
                                                                    \
    atomic_trace_rmw_pre(env, addr, info);                          \
    smp_mb();                                                       \
    ldn = atomic_read__nocheck(haddr);                              \
    do {                                                            \
        ldo = ldn; old = BSWAP(ldo); new = FN(old, val);            \
        ldn = atomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new));      \
    } while (ldo != ldn);                                           \
    ATOMIC_MMU_CLEANUP;                                             \
    atomic_trace_rmw_post(env, addr, info);                         \
    return RET;                                                     \
}

GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umin, MIN, DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umax, MAX, DATA_TYPE, old)

GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umin_fetch, MIN, DATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)

/* Note that for addition, we need to use a separate cmpxchg loop instead
   of bswaps for the reverse-host-endian helpers: unlike AND/OR/XOR, which
   operate on each byte independently, addition propagates carries across
   byte boundaries, so adding a byte-swapped operand would send those
   carries in the wrong direction. */
#define ADD(X, Y)   (X + Y)
GEN_ATOMIC_HELPER_FN(fetch_add, ADD, DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new)
#undef ADD
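
/*
 * A concrete 16-bit example of why the loop is needed: suppose the guest
 * value 0x00ff is stored byte-swapped, so the host reads 0xff00, and the
 * guest adds 0x0001.  A hypothetical atomic_fetch_add(haddr, BSWAP(0x0001))
 * would compute 0xff00 + 0x0100 = 0x0000, i.e. guest value 0x0000, whereas
 * the correct result is 0x00ff + 0x0001 = 0x0100 (stored as 0x0001).  The
 * loop above instead swaps the memory value to guest order, adds, and
 * swaps the result back for the store.
 */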

#undef GEN_ATOMIC_HELPER_FN
#endif /* DATA_SIZE >= 16 */

#undef END
#undef MEND
#endif /* DATA_SIZE > 1 */

#undef BSWAP
#undef ABI_TYPE
#undef DATA_TYPE
#undef SDATA_TYPE
#undef SUFFIX
#undef DATA_SIZE
#undef SHIFT