/* Atomic operations.  PowerPC Common version.
   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Paul Mackerras <[email protected]>, 2003.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

#include <bits/wordsize.h>

#if __WORDSIZE == 64
/* Atomic operations.  PowerPC64 version.
   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Paul Mackerras <[email protected]>, 2003.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

/* The 32-bit exchange_bool is different on powerpc64 because the subf.
   does signed 64-bit arithmetic while the lwarx is a 32-bit unsigned
   (load word and zero the high 32 bits) load.
   On powerpc64, register values are 64 bits wide by default, including
   oldval.  The value in oldval has unknown sign extension, while lwarx
   loads the 32-bit value as unsigned.  So we explicitly clear the high
   32 bits of oldval.  */
# define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval) \
({                                                                            \
  unsigned int __tmp, __tmp2;                                                 \
  __asm__ __volatile__ ("   clrldi  %1,%1,32\n"                               \
                    "1: lwarx   %0,0,%2\n"                                    \
                    "   subf.   %0,%1,%0\n"                                   \
                    "   bne     2f\n"                                         \
                    "   stwcx.  %4,0,%2\n"                                    \
                    "   bne-    1b\n"                                         \
                    "2: " __ARCH_ACQ_INSTR                                    \
                    : "=&r" (__tmp), "=r" (__tmp2)                            \
                    : "b" (mem), "1" (oldval), "r" (newval)                   \
                    : "cr0", "memory");                                       \
  __tmp != 0;                                                                 \
})
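
/* Editorial note (illustrative, not from the original source): the clrldi
   above guards against spurious CAS failures caused by sign extension.
   With a hypothetical oldval of -1 arriving in a 64-bit GPR:

     oldval, sign-extended:        0xffffffffffffffff
     lwarx result, zero-extended:  0x00000000ffffffff
     subf. difference:             non-zero, so the CAS would wrongly fail

   Clearing the high 32 bits of oldval first makes the comparison operate
   on the same zero-extended 32-bit quantity that lwarx produces.  */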

# define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval) \
({                                                                            \
  unsigned int __tmp, __tmp2;                                                 \
  __asm__ __volatile__ (__ARCH_REL_INSTR "\n"                                 \
                    "   clrldi  %1,%1,32\n"                                   \
                    "1: lwarx   %0,0,%2\n"                                    \
                    "   subf.   %0,%1,%0\n"                                   \
                    "   bne     2f\n"                                         \
                    "   stwcx.  %4,0,%2\n"                                    \
                    "   bne-    1b\n"                                         \
                    "2: "                                                     \
                    : "=&r" (__tmp), "=r" (__tmp2)                            \
                    : "b" (mem), "1" (oldval), "r" (newval)                   \
                    : "cr0", "memory");                                       \
  __tmp != 0;                                                                 \
})

/*
 * Only powerpc64 processors support the Load doubleword and reserve
 * indexed (ldarx) and Store doubleword conditional indexed (stdcx.)
 * instructions.  So here we define the 64-bit forms.
 */
# define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
({                                                                            \
  unsigned long __tmp;                                                        \
  __asm__ __volatile__ (                                                      \
                    "1: ldarx   %0,0,%1\n"                                    \
                    "   subf.   %0,%2,%0\n"                                   \
                    "   bne     2f\n"                                         \
                    "   stdcx.  %3,0,%1\n"                                    \
                    "   bne-    1b\n"                                         \
                    "2: " __ARCH_ACQ_INSTR                                    \
                    : "=&r" (__tmp)                                           \
                    : "b" (mem), "r" (oldval), "r" (newval)                   \
                    : "cr0", "memory");                                       \
  __tmp != 0;                                                                 \
})

# define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \
({                                                                            \
  unsigned long __tmp;                                                        \
  __asm__ __volatile__ (__ARCH_REL_INSTR "\n"                                 \
                    "1: ldarx   %0,0,%1\n"                                    \
                    "   subf.   %0,%2,%0\n"                                   \
                    "   bne     2f\n"                                         \
                    "   stdcx.  %3,0,%1\n"                                    \
                    "   bne-    1b\n"                                         \
                    "2: "                                                     \
                    : "=&r" (__tmp)                                           \
                    : "b" (mem), "r" (oldval), "r" (newval)                   \
                    : "cr0", "memory");                                       \
  __tmp != 0;                                                                 \
})
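
/* Usage sketch (illustrative only; `v' and `add_one' are made up): the
   _bool_ variants yield zero on success and non-zero when the comparison
   failed, so a lock-free read-modify-write loop looks like:

     static volatile long v;

     static void add_one (void)
     {
       long old;
       do
         old = v;
       while (__arch_compare_and_exchange_bool_64_acq (&v, old + 1, old));
     }
*/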

#define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({                                                                          \
      __typeof (*(mem)) __tmp;                                                \
      __typeof (mem)  __memp = (mem);                                         \
      __asm__ __volatile__ (                                                  \
                        "1:     ldarx   %0,0,%1\n"                            \
                        "       cmpd    %0,%2\n"                              \
                        "       bne     2f\n"                                 \
                        "       stdcx.  %3,0,%1\n"                            \
                        "       bne-    1b\n"                                 \
                        "2:     " __ARCH_ACQ_INSTR                            \
                        : "=&r" (__tmp)                                       \
                        : "b" (__memp), "r" (oldval), "r" (newval)            \
                        : "cr0", "memory");                                   \
      __tmp;                                                                  \
  })

#define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \
  ({                                                                          \
      __typeof (*(mem)) __tmp;                                                \
      __typeof (mem)  __memp = (mem);                                         \
      __asm__ __volatile__ (__ARCH_REL_INSTR "\n"                             \
                        "1:     ldarx   %0,0,%1\n"                            \
                        "       cmpd    %0,%2\n"                              \
                        "       bne     2f\n"                                 \
                        "       stdcx.  %3,0,%1\n"                            \
                        "       bne-    1b\n"                                 \
                        "2:     "                                             \
                        : "=&r" (__tmp)                                       \
                        : "b" (__memp), "r" (oldval), "r" (newval)            \
                        : "cr0", "memory");                                   \
      __tmp;                                                                  \
  })

# define __arch_atomic_exchange_64_acq(mem, value) \
    ({                                                                        \
      __typeof (*mem) __val;                                                  \
      __asm__ __volatile__ (__ARCH_REL_INSTR "\n"                             \
                        "1:     ldarx   %0,0,%2\n"                            \
                        "       stdcx.  %3,0,%2\n"                            \
                        "       bne-    1b\n"                                 \
                  " " __ARCH_ACQ_INSTR                                        \
                        : "=&r" (__val), "=m" (*mem)                          \
                        : "b" (mem), "r" (value), "m" (*mem)                  \
                        : "cr0", "memory");                                   \
      __val;                                                                  \
    })

# define __arch_atomic_exchange_64_rel(mem, value) \
    ({                                                                        \
      __typeof (*mem) __val;                                                  \
      __asm__ __volatile__ (__ARCH_REL_INSTR "\n"                             \
                        "1:     ldarx   %0,0,%2\n"                            \
                        "       stdcx.  %3,0,%2\n"                            \
                        "       bne-    1b"                                   \
                        : "=&r" (__val), "=m" (*mem)                          \
                        : "b" (mem), "r" (value), "m" (*mem)                  \
                        : "cr0", "memory");                                   \
      __val;                                                                  \
    })

# define __arch_atomic_exchange_and_add_64(mem, value) \
    ({                                                                        \
      __typeof (*mem) __val, __tmp;                                           \
      __asm__ __volatile__ ("1: ldarx   %0,0,%3\n"                            \
                        "       add     %1,%0,%4\n"                           \
                        "       stdcx.  %1,0,%3\n"                            \
                        "       bne-    1b"                                   \
                        : "=&b" (__val), "=&r" (__tmp), "=m" (*mem)           \
                        : "b" (mem), "r" (value), "m" (*mem)                  \
                        : "cr0", "memory");                                   \
      __val;                                                                  \
    })

# define __arch_atomic_increment_val_64(mem) \
    ({                                                                        \
      __typeof (*(mem)) __val;                                                \
      __asm__ __volatile__ ("1: ldarx   %0,0,%2\n"                            \
                        "       addi    %0,%0,1\n"                            \
                        "       stdcx.  %0,0,%2\n"                            \
                        "       bne-    1b"                                   \
                        : "=&b" (__val), "=m" (*mem)                          \
                        : "b" (mem), "m" (*mem)                               \
                        : "cr0", "memory");                                   \
      __val;                                                                  \
    })

# define __arch_atomic_decrement_val_64(mem) \
    ({                                                                        \
      __typeof (*(mem)) __val;                                                \
      __asm__ __volatile__ ("1: ldarx   %0,0,%2\n"                            \
                        "       subi    %0,%0,1\n"                            \
                        "       stdcx.  %0,0,%2\n"                            \
                        "       bne-    1b"                                   \
                        : "=&b" (__val), "=m" (*mem)                          \
                        : "b" (mem), "m" (*mem)                               \
                        : "cr0", "memory");                                   \
      __val;                                                                  \
    })

# define __arch_atomic_decrement_if_positive_64(mem) \
  ({ int __val, __tmp;                                                        \
     __asm__ __volatile__ ("1:  ldarx   %0,0,%3\n"                            \
                       "        cmpdi   0,%0,0\n"                             \
                       "        addi    %1,%0,-1\n"                           \
                       "        ble     2f\n"                                 \
                       "        stdcx.  %1,0,%3\n"                            \
                       "        bne-    1b\n"                                 \
                       "2:      " __ARCH_ACQ_INSTR                            \
                       : "=&b" (__val), "=&r" (__tmp), "=m" (*mem)            \
                       : "b" (mem), "m" (*mem)                                \
                       : "cr0", "memory");                                    \
     __val;                                                                   \
  })

/*
 * All powerpc64 processors support the new "light weight" sync (lwsync).
 */
# define atomic_read_barrier()  __asm__ ("lwsync" ::: "memory")
/*
 * "light weight" sync can also be used for the release barrier.
 */
# ifndef UP
#  define __ARCH_REL_INSTR      "lwsync"
# endif
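
/* Usage sketch (illustrative only; `ready' and `data' are made up): the
   read barrier orders a flag load before subsequent data loads:

     extern volatile int ready, data;

     static int consume (void)
     {
       while (!ready)
         continue;
       atomic_read_barrier ();   // don't let the data load move up
       return data;
     }
*/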

#else
/* Atomic operations.  PowerPC32 version.
   Copyright (C) 2003, 2004 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Paul Mackerras <[email protected]>, 2003.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.  */

/*
 * The 32-bit exchange_bool is different on powerpc64 because the subf.
 * does signed 64-bit arithmetic while the lwarx is a 32-bit unsigned
 * (load word and zero the high 32 bits) load.  So powerpc64 has a
 * slightly different version, defined above in the __WORDSIZE == 64
 * branch of this file.
 */
# define __arch_compare_and_exchange_bool_32_acq(mem, newval, oldval)         \
({                                                                            \
  unsigned int __tmp;                                                         \
  __asm__ __volatile__ (                                                      \
                    "1: lwarx   %0,0,%1\n"                                    \
                    "   subf.   %0,%2,%0\n"                                   \
                    "   bne     2f\n"                                         \
                    "   stwcx.  %3,0,%1\n"                                    \
                    "   bne-    1b\n"                                         \
                    "2: " __ARCH_ACQ_INSTR                                    \
                    : "=&r" (__tmp)                                           \
                    : "b" (mem), "r" (oldval), "r" (newval)                   \
                    : "cr0", "memory");                                       \
  __tmp != 0;                                                                 \
})

# define __arch_compare_and_exchange_bool_32_rel(mem, newval, oldval)         \
({                                                                            \
  unsigned int __tmp;                                                         \
  __asm__ __volatile__ (__ARCH_REL_INSTR "\n"                                 \
                    "1: lwarx   %0,0,%1\n"                                    \
                    "   subf.   %0,%2,%0\n"                                   \
                    "   bne     2f\n"                                         \
                    "   stwcx.  %3,0,%1\n"                                    \
                    "   bne-    1b\n"                                         \
                    "2: "                                                     \
                    : "=&r" (__tmp)                                           \
                    : "b" (mem), "r" (oldval), "r" (newval)                   \
                    : "cr0", "memory");                                       \
  __tmp != 0;                                                                 \
})

/* Powerpc32 processors don't implement the 64-bit (doubleword) forms of
   load and reserve (ldarx) and store conditional (stdcx.) instructions.
   So for powerpc32 we stub out the 64-bit forms.  */
# define __arch_compare_and_exchange_bool_64_acq(mem, newval, oldval) \
  (abort (), 0)

# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  (abort (), (__typeof (*mem)) 0)

# define __arch_compare_and_exchange_bool_64_rel(mem, newval, oldval) \
  (abort (), 0)

# define __arch_compare_and_exchange_val_64_rel(mem, newval, oldval) \
  (abort (), (__typeof (*mem)) 0)

# define __arch_atomic_exchange_64_acq(mem, value) \
    ({ abort (); (*mem) = (value); })

# define __arch_atomic_exchange_64_rel(mem, value) \
    ({ abort (); (*mem) = (value); })

# define __arch_atomic_exchange_and_add_64(mem, value) \
    ({ abort (); (*mem) = (value); })

# define __arch_atomic_increment_val_64(mem) \
    ({ abort (); (*mem)++; })

# define __arch_atomic_decrement_val_64(mem) \
    ({ abort (); (*mem)--; })

# define __arch_atomic_decrement_if_positive_64(mem) \
    ({ abort (); (*mem)--; })

#ifdef _ARCH_PWR4
/*
 * Newer powerpc64 processors support the new "light weight" sync (lwsync).
 * So if the build is using -mcpu=[power4,power5,power5+,970] we can
 * safely use lwsync.
 */
# define atomic_read_barrier()  __asm__ ("lwsync" ::: "memory")
/*
 * "light weight" sync can also be used for the release barrier.
 */
# ifndef UP
#  define __ARCH_REL_INSTR      "lwsync"
# endif
#else

/*
 * Older powerpc32 processors don't support the new "light weight"
 * sync (lwsync).  So the only safe option is to use normal sync
 * for all powerpc32 applications.
 */
# define atomic_read_barrier()  __asm__ ("sync" ::: "memory")
#endif

#endif
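
/* Build note (an assumption for illustration, not from the original
   source): GCC predefines _ARCH_PWR4 when targeting POWER4-class or newer
   cores, which is what selects the lwsync barrier above on 32-bit builds:

     gcc -mcpu=power4 -c foo.c    # _ARCH_PWR4 defined, lwsync used
     gcc -mcpu=powerpc -c foo.c   # _ARCH_PWR4 undefined, full sync used
*/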

#include <stdint.h>

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;

/*
 * Powerpc does not have byte and halfword forms of load and reserve and
 * store conditional.  So for powerpc we stub out the 8- and 16-bit forms.
 */
#define __arch_compare_and_exchange_bool_8_acq(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_bool_16_acq(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_bool_8_rel(mem, newval, oldval) \
  (abort (), 0)

#define __arch_compare_and_exchange_bool_16_rel(mem, newval, oldval) \
  (abort (), 0)

#ifdef UP
# define __ARCH_ACQ_INSTR       ""
# define __ARCH_REL_INSTR       ""
#else
# define __ARCH_ACQ_INSTR       "isync"
# ifndef __ARCH_REL_INSTR
#  define __ARCH_REL_INSTR      "sync"
# endif
#endif

#ifndef MUTEX_HINT_ACQ
# define MUTEX_HINT_ACQ
#endif
#ifndef MUTEX_HINT_REL
# define MUTEX_HINT_REL
#endif

#define atomic_full_barrier()   __asm__ ("sync" ::: "memory")
#define atomic_write_barrier()  __asm__ ("eieio" ::: "memory")

#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval)           \
  ({                                                                          \
      __typeof (*(mem)) __tmp;                                                \
      __typeof (mem)  __memp = (mem);                                         \
      __asm__ __volatile__ (                                                  \
                        "1:     lwarx   %0,0,%1\n"                            \
                        "       cmpw    %0,%2\n"                              \
                        "       bne     2f\n"                                 \
                        "       stwcx.  %3,0,%1\n"                            \
                        "       bne-    1b\n"                                 \
                        "2:     " __ARCH_ACQ_INSTR                            \
                        : "=&r" (__tmp)                                       \
                        : "b" (__memp), "r" (oldval), "r" (newval)            \
                        : "cr0", "memory");                                   \
      __tmp;                                                                  \
  })

#define __arch_compare_and_exchange_val_32_rel(mem, newval, oldval)           \
  ({                                                                          \
      __typeof (*(mem)) __tmp;                                                \
      __typeof (mem)  __memp = (mem);                                         \
      __asm__ __volatile__ (__ARCH_REL_INSTR "\n"                             \
                        "1:     lwarx   %0,0,%1\n"                            \
                        "       cmpw    %0,%2\n"                              \
                        "       bne     2f\n"                                 \
                        "       stwcx.  %3,0,%1\n"                            \
                        "       bne-    1b\n"                                 \
                        "2:     "                                             \
                        : "=&r" (__tmp)                                       \
                        : "b" (__memp), "r" (oldval), "r" (newval)            \
                        : "cr0", "memory");                                   \
      __tmp;                                                                  \
  })

#define __arch_atomic_exchange_32_acq(mem, value)                             \
  ({                                                                          \
    __typeof (*mem) __val;                                                    \
    __asm__ __volatile__ (                                                    \
                      "1:       lwarx   %0,0,%2\n"                            \
                      "         stwcx.  %3,0,%2\n"                            \
                      "         bne-    1b\n"                                 \
                      "   " __ARCH_ACQ_INSTR                                  \
                      : "=&r" (__val), "=m" (*mem)                            \
                      : "b" (mem), "r" (value), "m" (*mem)                    \
                      : "cr0", "memory");                                     \
    __val;                                                                    \
  })

#define __arch_atomic_exchange_32_rel(mem, value) \
  ({                                                                          \
    __typeof (*mem) __val;                                                    \
    __asm__ __volatile__ (__ARCH_REL_INSTR "\n"                               \
                      "1:       lwarx   %0,0,%2\n"                            \
                      "         stwcx.  %3,0,%2\n"                            \
                      "         bne-    1b"                                   \
                      : "=&r" (__val), "=m" (*mem)                            \
                      : "b" (mem), "r" (value), "m" (*mem)                    \
                      : "cr0", "memory");                                     \
    __val;                                                                    \
  })

#define __arch_atomic_exchange_and_add_32(mem, value) \
  ({                                                                          \
    __typeof (*mem) __val, __tmp;                                             \
    __asm__ __volatile__ ("1:   lwarx   %0,0,%3\n"                            \
                      "         add     %1,%0,%4\n"                           \
                      "         stwcx.  %1,0,%3\n"                            \
                      "         bne-    1b"                                   \
                      : "=&b" (__val), "=&r" (__tmp), "=m" (*mem)             \
                      : "b" (mem), "r" (value), "m" (*mem)                    \
                      : "cr0", "memory");                                     \
    __val;                                                                    \
  })

#define __arch_atomic_increment_val_32(mem) \
  ({                                                                          \
    __typeof (*(mem)) __val;                                                  \
    __asm__ __volatile__ ("1:   lwarx   %0,0,%2\n"                            \
                      "         addi    %0,%0,1\n"                            \
                      "         stwcx.  %0,0,%2\n"                            \
                      "         bne-    1b"                                   \
                      : "=&b" (__val), "=m" (*mem)                            \
                      : "b" (mem), "m" (*mem)                                 \
                      : "cr0", "memory");                                     \
    __val;                                                                    \
  })

#define __arch_atomic_decrement_val_32(mem) \
  ({                                                                          \
    __typeof (*(mem)) __val;                                                  \
    __asm__ __volatile__ ("1:   lwarx   %0,0,%2\n"                            \
                      "         subi    %0,%0,1\n"                            \
                      "         stwcx.  %0,0,%2\n"                            \
                      "         bne-    1b"                                   \
                      : "=&b" (__val), "=m" (*mem)                            \
                      : "b" (mem), "m" (*mem)                                 \
                      : "cr0", "memory");                                     \
    __val;                                                                    \
  })

#define __arch_atomic_decrement_if_positive_32(mem) \
  ({ int __val, __tmp;                                                        \
     __asm__ __volatile__ ("1:  lwarx   %0,0,%3\n"                            \
                       "        cmpwi   0,%0,0\n"                             \
                       "        addi    %1,%0,-1\n"                           \
                       "        ble     2f\n"                                 \
                       "        stwcx.  %1,0,%3\n"                            \
                       "        bne-    1b\n"                                 \
                       "2:      " __ARCH_ACQ_INSTR                            \
                       : "=&b" (__val), "=&r" (__tmp), "=m" (*mem)            \
                       : "b" (mem), "m" (*mem)                                \
                       : "cr0", "memory");                                    \
     __val;                                                                   \
  })

#define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  ({                                                                          \
    __typeof (*(mem)) __result;                                               \
    if (sizeof (*mem) == 4)                                                   \
      __result = __arch_compare_and_exchange_val_32_acq(mem, newval, oldval); \
    else if (sizeof (*mem) == 8)                                              \
      __result = __arch_compare_and_exchange_val_64_acq(mem, newval, oldval); \
    else                                                                      \
       abort ();                                                              \
    __result;                                                                 \
  })

#define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \
  ({                                                                          \
    __typeof (*(mem)) __result;                                               \
    if (sizeof (*mem) == 4)                                                   \
      __result = __arch_compare_and_exchange_val_32_rel(mem, newval, oldval); \
    else if (sizeof (*mem) == 8)                                              \
      __result = __arch_compare_and_exchange_val_64_rel(mem, newval, oldval); \
    else                                                                      \
       abort ();                                                              \
    __result;                                                                 \
  })
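
/* Usage sketch (illustrative only; `flag' and `take_flag' are made up):
   the _val_ variants return the old value, which equals oldval exactly
   when the exchange succeeded:

     static volatile int flag;

     static void take_flag (void)
     {
       while (atomic_compare_and_exchange_val_acq (&flag, 1, 0) != 0)
         continue;   // another thread holds it; retry
     }
*/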

#define atomic_exchange_acq(mem, value) \
  ({                                                                          \
    __typeof (*(mem)) __result;                                               \
    if (sizeof (*mem) == 4)                                                   \
      __result = __arch_atomic_exchange_32_acq (mem, value);                  \
    else if (sizeof (*mem) == 8)                                              \
      __result = __arch_atomic_exchange_64_acq (mem, value);                  \
    else                                                                      \
       abort ();                                                              \
    __result;                                                                 \
  })

#define atomic_exchange_rel(mem, value) \
  ({                                                                          \
    __typeof (*(mem)) __result;                                               \
    if (sizeof (*mem) == 4)                                                   \
      __result = __arch_atomic_exchange_32_rel (mem, value);                  \
    else if (sizeof (*mem) == 8)                                              \
      __result = __arch_atomic_exchange_64_rel (mem, value);                  \
    else                                                                      \
       abort ();                                                              \
    __result;                                                                 \
  })

#define atomic_exchange_and_add(mem, value) \
  ({                                                                          \
    __typeof (*(mem)) __result;                                               \
    if (sizeof (*mem) == 4)                                                   \
      __result = __arch_atomic_exchange_and_add_32 (mem, value);              \
    else if (sizeof (*mem) == 8)                                              \
      __result = __arch_atomic_exchange_and_add_64 (mem, value);              \
    else                                                                      \
       abort ();                                                              \
    __result;                                                                 \
  })
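
/* Usage sketch (illustrative only; `counter' and `next_id' are made up):
   atomic_exchange_and_add returns the value *before* the addition:

     static volatile int counter;

     static int next_id (void)
     {
       return atomic_exchange_and_add (&counter, 1);
     }
*/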

#define atomic_increment_val(mem) \
  ({                                                                          \
    __typeof (*(mem)) __result;                                               \
    if (sizeof (*(mem)) == 4)                                                 \
      __result = __arch_atomic_increment_val_32 (mem);                        \
    else if (sizeof (*(mem)) == 8)                                            \
      __result = __arch_atomic_increment_val_64 (mem);                        \
    else                                                                      \
       abort ();                                                              \
    __result;                                                                 \
  })

#define atomic_increment(mem) ({ atomic_increment_val (mem); (void) 0; })

#define atomic_decrement_val(mem) \
  ({                                                                          \
    __typeof (*(mem)) __result;                                               \
    if (sizeof (*(mem)) == 4)                                                 \
      __result = __arch_atomic_decrement_val_32 (mem);                        \
    else if (sizeof (*(mem)) == 8)                                            \
      __result = __arch_atomic_decrement_val_64 (mem);                        \
    else                                                                      \
       abort ();                                                              \
    __result;                                                                 \
  })

#define atomic_decrement(mem) ({ atomic_decrement_val (mem); (void) 0; })


/* Decrement *MEM if it is > 0, and return the old value.  */
#define atomic_decrement_if_positive(mem) \
  ({ __typeof (*(mem)) __result;                                              \
    if (sizeof (*mem) == 4)                                                   \
      __result = __arch_atomic_decrement_if_positive_32 (mem);                \
    else if (sizeof (*mem) == 8)                                              \
      __result = __arch_atomic_decrement_if_positive_64 (mem);                \
    else                                                                      \
       abort ();                                                              \
    __result;                                                                 \
  })
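
/* Usage sketch (illustrative only; `avail' and `try_take' are made up):
   since the old value is returned, a result > 0 means the decrement
   actually happened, giving a semaphore-style trywait:

     static volatile int avail;

     static int try_take (void)
     {
       return atomic_decrement_if_positive (&avail) > 0;
     }
*/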