/*
 * Simple interface for atomic operations.
 *
 * Copyright (C) 2013 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 * See docs/devel/atomics.txt for discussion about the guarantees each
 * atomic primitive is meant to provide.
 */

#ifndef QEMU_ATOMIC_H
#define QEMU_ATOMIC_H

/* Compiler barrier */
#define barrier()   ({ asm volatile("" ::: "memory"); (void)0; })

/* The variable that receives the old value of an atomically-accessed
 * variable must be non-qualified, because atomic builtins return values
 * through a pointer-type argument as in __atomic_load(&var, &old, MODEL).
 *
 * This macro has to handle types smaller than int manually, because of
 * implicit promotion.  int and larger types, as well as pointers, can be
 * converted to a non-qualified type just by applying a binary operator.
 */
#define typeof_strip_qual(expr)                                                    \
  typeof(                                                                          \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), bool) ||                          \
        __builtin_types_compatible_p(typeof(expr), const bool) ||                  \
        __builtin_types_compatible_p(typeof(expr), volatile bool) ||               \
        __builtin_types_compatible_p(typeof(expr), const volatile bool),           \
        (bool)1,                                                                   \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), signed char) ||                   \
        __builtin_types_compatible_p(typeof(expr), const signed char) ||           \
        __builtin_types_compatible_p(typeof(expr), volatile signed char) ||        \
        __builtin_types_compatible_p(typeof(expr), const volatile signed char),    \
        (signed char)1,                                                            \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), unsigned char) ||                 \
        __builtin_types_compatible_p(typeof(expr), const unsigned char) ||         \
        __builtin_types_compatible_p(typeof(expr), volatile unsigned char) ||      \
        __builtin_types_compatible_p(typeof(expr), const volatile unsigned char),  \
        (unsigned char)1,                                                          \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), signed short) ||                  \
        __builtin_types_compatible_p(typeof(expr), const signed short) ||          \
        __builtin_types_compatible_p(typeof(expr), volatile signed short) ||       \
        __builtin_types_compatible_p(typeof(expr), const volatile signed short),   \
        (signed short)1,                                                           \
    __builtin_choose_expr(                                                         \
      __builtin_types_compatible_p(typeof(expr), unsigned short) ||                \
        __builtin_types_compatible_p(typeof(expr), const unsigned short) ||        \
        __builtin_types_compatible_p(typeof(expr), volatile unsigned short) ||     \
        __builtin_types_compatible_p(typeof(expr), const volatile unsigned short), \
        (unsigned short)1,                                                         \
      (expr)+0))))))

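/* For instance (illustrative only): given "volatile unsigned short s;",
 * "typeof_strip_qual(s) old;" declares "old" as a plain unsigned short, so
 * that &old can be passed as the output argument of __atomic_load() or
 * __atomic_compare_exchange_n() without dragging the volatile qualifier along.
 */
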
#ifdef __ATOMIC_RELAXED
/* For C11 atomic ops */

/* Manual memory barriers
 *
 * __atomic_thread_fence does not include a compiler barrier; instead,
 * the barrier is part of __atomic_load/__atomic_store's "volatile-like"
 * semantics.  If smp_wmb() is a no-op, absence of the barrier means that
 * the compiler is free to reorder stores on each side of the barrier.
 * Add one here, and similarly in smp_rmb() and smp_read_barrier_depends().
 */

#define smp_mb()           ({ barrier(); __atomic_thread_fence(__ATOMIC_SEQ_CST); })
#define smp_mb_release()   ({ barrier(); __atomic_thread_fence(__ATOMIC_RELEASE); })
#define smp_mb_acquire()   ({ barrier(); __atomic_thread_fence(__ATOMIC_ACQUIRE); })

/* Most compilers currently treat consume and acquire the same, but really
 * no processors except Alpha need a barrier here.  Leave it in if
 * using Thread Sanitizer to avoid warnings, otherwise optimize it away.
 */
#if defined(__SANITIZE_THREAD__)
#define smp_read_barrier_depends()   ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); })
#elif defined(__alpha__)
#define smp_read_barrier_depends()   asm volatile("mb":::"memory")
#else
#define smp_read_barrier_depends()   barrier()
#endif

/* Sanity check that the size of an atomic operation isn't "overly large".
 * Despite the fact that e.g. i686 has 64-bit atomic operations, we do not
 * want to use them because we ought not need them, and this lets us do a
 * bit of sanity checking that other 32-bit hosts might build.
 *
 * That said, we have a problem on 64-bit ILP32 hosts in that in order to
 * sync with TCG_OVERSIZED_GUEST, this must match TCG_TARGET_REG_BITS.
 * We'd prefer not to pull in everything else TCG related, so handle
 * those few cases by hand.
 *
 * Note that x32 is fully detected with __x86_64__ + _ILP32, and that for
 * Sparc we always force the use of sparcv9 in configure.
 */
#if defined(__x86_64__) || defined(__sparc__)
# define ATOMIC_REG_SIZE  8
#else
# define ATOMIC_REG_SIZE  sizeof(void *)
#endif
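
/* For example (illustrative only): with ATOMIC_REG_SIZE == 4 on a 32-bit
 * host, applying atomic_read() (defined below) to a hypothetical
 * "uint64_t counter" fails at build time via QEMU_BUILD_BUG_ON instead of
 * silently compiling into a non-atomic pair of 32-bit loads.
 */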

/* Weak atomic operations prevent the compiler from moving other
 * loads/stores past the atomic operation load/store.  However there is
 * no explicit memory barrier for the processor.
 *
 * The C11 memory model says that variables that are accessed from
 * different threads should at least be done with __ATOMIC_RELAXED
 * primitives or the result is undefined.  Generally this has little to
 * no effect on the generated code but not using the atomic primitives
 * will get flagged by sanitizers as a violation.
 */
#define atomic_read__nocheck(ptr) \
    __atomic_load_n(ptr, __ATOMIC_RELAXED)

#define atomic_read(ptr)                                \
    ({                                                  \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);  \
    atomic_read__nocheck(ptr);                          \
    })

#define atomic_set__nocheck(ptr, i) \
    __atomic_store_n(ptr, i, __ATOMIC_RELAXED)

#define atomic_set(ptr, i)  do {                        \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);  \
    atomic_set__nocheck(ptr, i);                        \
} while(0)

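/* Example (illustrative sketch): a flag shared between an I/O thread and a
 * vCPU thread, where no ordering against other data is needed.
 * "request_quit" is a hypothetical variable, not part of this header.
 *
 *     static int request_quit;
 *
 *     // writer thread
 *     atomic_set(&request_quit, 1);
 *
 *     // reader thread
 *     if (atomic_read(&request_quit)) {
 *         ...
 *     }
 *
 * Both accesses are relaxed: they are data-race free for sanitizers but give
 * no ordering guarantees with respect to surrounding loads and stores.
 */
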
/* See above: most compilers currently treat consume and acquire the
 * same, but this slows down atomic_rcu_read unnecessarily.
 */
#ifdef __SANITIZE_THREAD__
#define atomic_rcu_read__nocheck(ptr, valptr)           \
    __atomic_load(ptr, valptr, __ATOMIC_CONSUME);
#else
#define atomic_rcu_read__nocheck(ptr, valptr)           \
    __atomic_load(ptr, valptr, __ATOMIC_RELAXED);       \
    smp_read_barrier_depends();
#endif

#define atomic_rcu_read(ptr)                            \
    ({                                                  \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);  \
    typeof_strip_qual(*ptr) _val;                       \
    atomic_rcu_read__nocheck(ptr, &_val);               \
    _val;                                               \
    })

#define atomic_rcu_set(ptr, i)  do {                    \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);  \
    __atomic_store_n(ptr, i, __ATOMIC_RELEASE);         \
} while(0)

#define atomic_load_acquire(ptr)                        \
    ({                                                  \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);  \
    typeof_strip_qual(*ptr) _val;                       \
    __atomic_load(ptr, &_val, __ATOMIC_ACQUIRE);        \
    _val;                                               \
    })

#define atomic_store_release(ptr, i)  do {              \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);  \
    __atomic_store_n(ptr, i, __ATOMIC_RELEASE);         \
} while(0)
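
/* Example (illustrative sketch): publishing data with release/acquire.
 * "data" and "ready" are hypothetical variables, not part of this header.
 *
 *     // producer
 *     data = 42;
 *     atomic_store_release(&ready, true);
 *
 *     // consumer
 *     if (atomic_load_acquire(&ready)) {
 *         use(data);     // guaranteed to observe data == 42
 *     }
 */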


/* All the remaining operations are fully sequentially consistent */

#define atomic_xchg__nocheck(ptr, i)    ({                  \
    __atomic_exchange_n(ptr, (i), __ATOMIC_SEQ_CST);        \
})

#define atomic_xchg(ptr, i)    ({                           \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);      \
    atomic_xchg__nocheck(ptr, i);                           \
})

/* Returns the eventual value, failed or not */
#define atomic_cmpxchg__nocheck(ptr, old, new)    ({            \
    typeof_strip_qual(*ptr) _old = (old);                       \
    __atomic_compare_exchange_n(ptr, &_old, new, false,         \
                              __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST); \
    _old;                                                       \
})

#define atomic_cmpxchg(ptr, old, new)    ({                     \
    QEMU_BUILD_BUG_ON(sizeof(*ptr) > ATOMIC_REG_SIZE);          \
    atomic_cmpxchg__nocheck(ptr, old, new);                     \
})

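/* Example (illustrative sketch): a bounded increment built from a CAS loop.
 * "saturating_inc" is a hypothetical helper, not part of this header.
 *
 *     static inline int saturating_inc(int *p, int max)
 *     {
 *         int old = atomic_read(p);
 *         while (old < max) {
 *             int seen = atomic_cmpxchg(p, old, old + 1);
 *             if (seen == old) {
 *                 break;      // exchange succeeded
 *             }
 *             old = seen;     // lost the race, retry with the value we saw
 *         }
 *         return old;
 *     }
 */
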
/* Provide shorter names for GCC atomic builtins, return old value */
#define atomic_fetch_inc(ptr)    __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_fetch_dec(ptr)    __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_fetch_add(ptr, n) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_sub(ptr, n) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_and(ptr, n) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_or(ptr, n)  __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_fetch_xor(ptr, n) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST)

#define atomic_inc_fetch(ptr)    __atomic_add_fetch(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_dec_fetch(ptr)    __atomic_sub_fetch(ptr, 1, __ATOMIC_SEQ_CST)
#define atomic_add_fetch(ptr, n) __atomic_add_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_sub_fetch(ptr, n) __atomic_sub_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_and_fetch(ptr, n) __atomic_and_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_or_fetch(ptr, n)  __atomic_or_fetch(ptr, n, __ATOMIC_SEQ_CST)
#define atomic_xor_fetch(ptr, n) __atomic_xor_fetch(ptr, n, __ATOMIC_SEQ_CST)

/* And even shorter names that return void. */
#define atomic_inc(ptr)    ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST))
#define atomic_dec(ptr)    ((void) __atomic_fetch_sub(ptr, 1, __ATOMIC_SEQ_CST))
#define atomic_add(ptr, n) ((void) __atomic_fetch_add(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_sub(ptr, n) ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_and(ptr, n) ((void) __atomic_fetch_and(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_or(ptr, n)  ((void) __atomic_fetch_or(ptr, n, __ATOMIC_SEQ_CST))
#define atomic_xor(ptr, n) ((void) __atomic_fetch_xor(ptr, n, __ATOMIC_SEQ_CST))

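/* Naming convention, illustrated (hypothetical snippet); with n == 5 before
 * each call:
 *
 *     atomic_fetch_inc(&n);   // returns 5, leaves n == 6  (fetch, then op)
 *     atomic_inc_fetch(&n);   // returns 6, leaves n == 6  (op, then fetch)
 *
 * The same convention applies to the __sync-based fallbacks further down.
 */
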
#else  /* __ATOMIC_RELAXED */

/*
 * We use GCC builtin if it's available, as that can use mfence on
 * 32-bit as well, e.g. if built with -march=pentium-m.  However, on
 * i386 the spec is buggy, and the implementation followed it until
 * 4.3 (http://gcc.gnu.org/bugzilla/show_bug.cgi?id=36793).
 */
#if defined(__i386__) || defined(__x86_64__)
#if !QEMU_GNUC_PREREQ(4, 4)
#if defined __x86_64__
#define smp_mb()    ({ asm volatile("mfence" ::: "memory"); (void)0; })
#else
#define smp_mb()    ({ asm volatile("lock; addl $0,0(%%esp) " ::: "memory"); (void)0; })
#endif
#endif
#endif


#ifdef __alpha__
#define smp_read_barrier_depends()   asm volatile("mb":::"memory")
#endif

#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)

/*
 * Because of the strongly ordered storage model, wmb() and rmb() are nops
 * here (a compiler barrier only).  QEMU doesn't do accesses to write-combining
 * qemu memory or non-temporal load/stores from C code.
 */
#define smp_mb_release()   barrier()
#define smp_mb_acquire()   barrier()

/*
 * __sync_lock_test_and_set() is documented to be an acquire barrier only,
 * but it is a full barrier at the hardware level.  Add a compiler barrier
 * to make it a full barrier also at the compiler level.
 */
#define atomic_xchg(ptr, i)    (barrier(), __sync_lock_test_and_set(ptr, i))

#elif defined(_ARCH_PPC)

/*
 * We use an eieio() for wmb() on powerpc.  This assumes we don't
 * need to order cacheable and non-cacheable stores with respect to
 * each other.
 *
 * smp_mb has the same problem as on x86 for not-very-new GCC
 * (http://patchwork.ozlabs.org/patch/126184/, Nov 2011).
 */
#define smp_wmb()          ({ asm volatile("eieio" ::: "memory"); (void)0; })
#if defined(__powerpc64__)
#define smp_mb_release()   ({ asm volatile("lwsync" ::: "memory"); (void)0; })
#define smp_mb_acquire()   ({ asm volatile("lwsync" ::: "memory"); (void)0; })
#else
#define smp_mb_release()   ({ asm volatile("sync" ::: "memory"); (void)0; })
#define smp_mb_acquire()   ({ asm volatile("sync" ::: "memory"); (void)0; })
#endif
#define smp_mb()           ({ asm volatile("sync" ::: "memory"); (void)0; })

#endif /* _ARCH_PPC */

/*
 * For (host) platforms we don't have explicit barrier definitions
 * for, we use the gcc __sync_synchronize() primitive to generate a
 * full barrier.  This should be safe on all platforms, though it may
 * be overkill for smp_mb_acquire() and smp_mb_release().
 */
#ifndef smp_mb
#define smp_mb()           __sync_synchronize()
#endif

#ifndef smp_mb_acquire
#define smp_mb_acquire()   __sync_synchronize()
#endif

#ifndef smp_mb_release
#define smp_mb_release()   __sync_synchronize()
#endif

#ifndef smp_read_barrier_depends
#define smp_read_barrier_depends()   barrier()
#endif

/* These will only be atomic if the processor does the fetch or store
 * in a single issue memory operation
 */
#define atomic_read__nocheck(p)   (*(__typeof__(*(p)) volatile*) (p))
#define atomic_set__nocheck(p, i) ((*(__typeof__(*(p)) volatile*) (p)) = (i))

#define atomic_read(ptr)       atomic_read__nocheck(ptr)
#define atomic_set(ptr, i)     atomic_set__nocheck(ptr,i)

/**
 * atomic_rcu_read - reads an RCU-protected pointer to a local variable
 * into an RCU read-side critical section.  The pointer can later be safely
 * dereferenced within the critical section.
 *
 * This ensures that the pointer copy is invariant throughout the whole
 * critical section.
 *
 * Inserts memory barriers on architectures that require them (currently only
 * Alpha) and documents which pointers are protected by RCU.
 *
 * atomic_rcu_read also includes a compiler barrier to ensure that
 * value-speculative optimizations (e.g. VSS: Value Speculation
 * Scheduling) do not perform the data read before the pointer read
 * by speculating the value of the pointer.
 *
 * Should match atomic_rcu_set(), atomic_xchg(), atomic_cmpxchg().
 */
#define atomic_rcu_read(ptr)    ({                \
    typeof(*ptr) _val = atomic_read(ptr);         \
    smp_read_barrier_depends();                   \
    _val;                                         \
})

/**
 * atomic_rcu_set - assigns (publicizes) a pointer to a new data structure
 * meant to be read by RCU read-side critical sections.
 *
 * Documents which pointers will be dereferenced by RCU read-side critical
 * sections and adds the required memory barriers on architectures requiring
 * them.  It also makes sure the compiler does not reorder code initializing
 * the data structure before its publication.
 *
 * Should match atomic_rcu_read().
 */
#define atomic_rcu_set(ptr, i)  do {              \
    smp_wmb();                                    \
    atomic_set(ptr, i);                           \
} while (0)

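/* Example (illustrative sketch): publishing and reading an RCU-protected
 * pointer.  "Config" and "global_config" are hypothetical, not part of this
 * header; reclamation of the old pointer (synchronize_rcu/call_rcu) is
 * omitted.
 *
 *     static Config *global_config;
 *
 *     // updater
 *     Config *new_cfg = g_new0(Config, 1);
 *     new_cfg->value = 42;
 *     atomic_rcu_set(&global_config, new_cfg);    // init cannot sink below this
 *
 *     // reader, inside rcu_read_lock()/rcu_read_unlock()
 *     Config *cfg = atomic_rcu_read(&global_config);
 *     use(cfg->value);                            // dependent read is ordered
 */
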
#define atomic_load_acquire(ptr)    ({            \
    typeof(*ptr) _val = atomic_read(ptr);         \
    smp_mb_acquire();                             \
    _val;                                         \
})

#define atomic_store_release(ptr, i)  do {        \
    smp_mb_release();                             \
    atomic_set(ptr, i);                           \
} while (0)

#ifndef atomic_xchg
#if defined(__clang__)
#define atomic_xchg(ptr, i)    __sync_swap(ptr, i)
#else
/* __sync_lock_test_and_set() is documented to be an acquire barrier only. */
#define atomic_xchg(ptr, i)    (smp_mb(), __sync_lock_test_and_set(ptr, i))
#endif
#endif
#define atomic_xchg__nocheck   atomic_xchg

/* Provide shorter names for GCC atomic builtins. */
#define atomic_fetch_inc(ptr)    __sync_fetch_and_add(ptr, 1)
#define atomic_fetch_dec(ptr)    __sync_fetch_and_add(ptr, -1)
#define atomic_fetch_add(ptr, n) __sync_fetch_and_add(ptr, n)
#define atomic_fetch_sub(ptr, n) __sync_fetch_and_sub(ptr, n)
#define atomic_fetch_and(ptr, n) __sync_fetch_and_and(ptr, n)
#define atomic_fetch_or(ptr, n)  __sync_fetch_and_or(ptr, n)
#define atomic_fetch_xor(ptr, n) __sync_fetch_and_xor(ptr, n)

#define atomic_inc_fetch(ptr)    __sync_add_and_fetch(ptr, 1)
#define atomic_dec_fetch(ptr)    __sync_add_and_fetch(ptr, -1)
#define atomic_add_fetch(ptr, n) __sync_add_and_fetch(ptr, n)
#define atomic_sub_fetch(ptr, n) __sync_sub_and_fetch(ptr, n)
#define atomic_and_fetch(ptr, n) __sync_and_and_fetch(ptr, n)
#define atomic_or_fetch(ptr, n)  __sync_or_and_fetch(ptr, n)
#define atomic_xor_fetch(ptr, n) __sync_xor_and_fetch(ptr, n)

#define atomic_cmpxchg(ptr, old, new) __sync_val_compare_and_swap(ptr, old, new)
#define atomic_cmpxchg__nocheck(ptr, old, new)  atomic_cmpxchg(ptr, old, new)

/* And even shorter names that return void. */
#define atomic_inc(ptr)        ((void) __sync_fetch_and_add(ptr, 1))
#define atomic_dec(ptr)        ((void) __sync_fetch_and_add(ptr, -1))
#define atomic_add(ptr, n)     ((void) __sync_fetch_and_add(ptr, n))
#define atomic_sub(ptr, n)     ((void) __sync_fetch_and_sub(ptr, n))
#define atomic_and(ptr, n)     ((void) __sync_fetch_and_and(ptr, n))
#define atomic_or(ptr, n)      ((void) __sync_fetch_and_or(ptr, n))
#define atomic_xor(ptr, n)     ((void) __sync_fetch_and_xor(ptr, n))

#endif /* __ATOMIC_RELAXED */

#ifndef smp_wmb
#define smp_wmb()   smp_mb_release()
#endif
#ifndef smp_rmb
#define smp_rmb()   smp_mb_acquire()
#endif

/* This is more efficient than a store plus a fence. */
#if !defined(__SANITIZE_THREAD__)
#if defined(__i386__) || defined(__x86_64__) || defined(__s390x__)
#define atomic_mb_set(ptr, i)  ((void)atomic_xchg(ptr, i))
#endif
#endif

/* atomic_mb_read/set semantics map to Java volatile variables.  They are
 * less expensive on some platforms (notably POWER) than fully
 * sequentially consistent operations.
 *
 * As long as they are used as paired operations they are safe to
 * use.  See docs/devel/atomics.txt for more discussion.
 */

#ifndef atomic_mb_read
#define atomic_mb_read(ptr)                       \
    atomic_load_acquire(ptr)
#endif

#ifndef atomic_mb_set
#define atomic_mb_set(ptr, i)  do {               \
    atomic_store_release(ptr, i);                 \
    smp_mb();                                     \
} while(0)
#endif

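/* Example (illustrative sketch): the store/load pattern that relies on the
 * full barrier in atomic_mb_set().  "flag_a" and "flag_b" are hypothetical.
 *
 *     // thread 1                          // thread 2
 *     atomic_mb_set(&flag_a, 1);           atomic_mb_set(&flag_b, 1);
 *     b = atomic_mb_read(&flag_b);         a = atomic_mb_read(&flag_a);
 *
 * After both threads have run, at most one of "a" and "b" can be 0; a plain
 * store-release/load-acquire pair would not give that guarantee.
 */
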
#define atomic_fetch_inc_nonzero(ptr) ({                                \
    typeof_strip_qual(*ptr) _oldn = atomic_read(ptr);                   \
    while (_oldn && atomic_cmpxchg(ptr, _oldn, _oldn + 1) != _oldn) {   \
        _oldn = atomic_read(ptr);                                       \
    }                                                                   \
    _oldn;                                                              \
})
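
/* Typical use (illustrative sketch): take a reference only if the object is
 * still live.  "obj->refcount" is a hypothetical field, not part of this
 * header.
 *
 *     if (atomic_fetch_inc_nonzero(&obj->refcount) == 0) {
 *         return NULL;    // object already dead, do not touch it
 *     }
 */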

#endif /* QEMU_ATOMIC_H */