/* Internal macros for atomic operations for GNU C Library.
   Copyright (C) 2002-2015 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <[email protected]>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _ATOMIC_H
#define _ATOMIC_H 1

/* This header defines three types of macros:

   - atomic arithmetic and logic operations on memory.  They all
     have the prefix "atomic_".

   - conditionally atomic operations of the same kinds.  These
     always behave identically but can be faster when atomicity
     is not really needed, e.g. because only one thread has access
     to the memory location; the price is that the code is slower
     in the multi-threaded case.  The interfaces have the prefix
     "catomic_".

   - support functions like barriers.  They also have the prefix
     "atomic_".

   Architectures must provide a few low-level macros (the compare
   and exchange definitions).  All others are optional.  They
   should only be provided if the architecture has specific
   support for the operation.

   As <atomic.h> macros are usually heavily nested and often use local
   variables to make sure side effects are evaluated properly, use a
   per-macro unique prefix for macro-local variables.  This file uses
   the __atgN_ prefix, where N is different in each macro.  */

#include <stdlib.h>

#include <bits/atomic.h>

/* Wrapper macros to call pre_NN_post (mem, ...) where NN is the
   bit width of *MEM.  The calling macro puts parens around MEM
   and the following args.  */
#define __atomic_val_bysize(pre, post, mem, ...) \
  ({ \
    __typeof (*mem) __atg1_result; \
    if (sizeof (*mem) == 1) \
      __atg1_result = pre##_8_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 2) \
      __atg1_result = pre##_16_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 4) \
      __atg1_result = pre##_32_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 8) \
      __atg1_result = pre##_64_##post (mem, __VA_ARGS__); \
    else \
      abort (); \
    __atg1_result; \
  })
#define __atomic_bool_bysize(pre, post, mem, ...) \
  ({ \
    int __atg2_result; \
    if (sizeof (*mem) == 1) \
      __atg2_result = pre##_8_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 2) \
      __atg2_result = pre##_16_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 4) \
      __atg2_result = pre##_32_##post (mem, __VA_ARGS__); \
    else if (sizeof (*mem) == 8) \
      __atg2_result = pre##_64_##post (mem, __VA_ARGS__); \
    else \
      abort (); \
    __atg2_result; \
  })
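
/* Illustrative expansion (not part of the original header): for an
   "int *p" (so sizeof (*p) == 4), a call such as
     __atomic_val_bysize (__arch_compare_and_exchange_val, acq, p, 1, 0)
   selects the 32-bit branch and, via token pasting, becomes
     __arch_compare_and_exchange_val_32_acq (p, 1, 0)
   The unused branches are discarded because sizeof is a compile-time
   constant.  */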


/* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
   Return the old *MEM value.  */
#if !defined atomic_compare_and_exchange_val_acq \
    && defined __arch_compare_and_exchange_val_32_acq
# define atomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  __atomic_val_bysize (__arch_compare_and_exchange_val, acq, \
                       mem, newval, oldval)
#endif
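
/* Illustrative use (hypothetical lock word and "old" variable): with
   "int lock = 0",
     old = atomic_compare_and_exchange_val_acq (&lock, 1, 0);
   stores 1 into lock only if it still holds 0 and returns the value
   that was found there, so old == 0 means this caller won the race.  */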


#ifndef catomic_compare_and_exchange_val_acq
# ifdef __arch_c_compare_and_exchange_val_32_acq
#  define catomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  __atomic_val_bysize (__arch_c_compare_and_exchange_val, acq, \
                       mem, newval, oldval)
# else
#  define catomic_compare_and_exchange_val_acq(mem, newval, oldval) \
  atomic_compare_and_exchange_val_acq (mem, newval, oldval)
# endif
#endif


#ifndef catomic_compare_and_exchange_val_rel
# ifndef atomic_compare_and_exchange_val_rel
#  define catomic_compare_and_exchange_val_rel(mem, newval, oldval) \
  catomic_compare_and_exchange_val_acq (mem, newval, oldval)
# else
#  define catomic_compare_and_exchange_val_rel(mem, newval, oldval) \
  atomic_compare_and_exchange_val_rel (mem, newval, oldval)
# endif
#endif


#ifndef atomic_compare_and_exchange_val_rel
# define atomic_compare_and_exchange_val_rel(mem, newval, oldval) \
  atomic_compare_and_exchange_val_acq (mem, newval, oldval)
#endif


/* Atomically store NEWVAL in *MEM if *MEM is equal to OLDVAL.
   Return zero if *MEM was changed or non-zero if no exchange happened.  */
#ifndef atomic_compare_and_exchange_bool_acq
# ifdef __arch_compare_and_exchange_bool_32_acq
#  define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  __atomic_bool_bysize (__arch_compare_and_exchange_bool, acq, \
                        mem, newval, oldval)
# else
#  define atomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  ({ /* Cannot use __oldval here, because macros later in this file might \
        call this macro with __oldval argument.  */ \
     __typeof (oldval) __atg3_old = (oldval); \
     atomic_compare_and_exchange_val_acq (mem, newval, __atg3_old) \
       != __atg3_old; \
  })
# endif
#endif
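
/* Illustrative use (hypothetical lock word): a minimal spin-lock acquire
   built on the boolean CAS, retrying while the exchange fails (i.e.
   while the macro returns non-zero):
     while (atomic_compare_and_exchange_bool_acq (&lock, 1, 0))
       atomic_delay ();
   atomic_delay is the spin hint defined at the end of this file.  */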


#ifndef catomic_compare_and_exchange_bool_acq
# ifdef __arch_c_compare_and_exchange_bool_32_acq
#  define catomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  __atomic_bool_bysize (__arch_c_compare_and_exchange_bool, acq, \
                        mem, newval, oldval)
# else
#  define catomic_compare_and_exchange_bool_acq(mem, newval, oldval) \
  ({ /* Cannot use __oldval here, because macros later in this file might \
        call this macro with __oldval argument.  */ \
     __typeof (oldval) __atg4_old = (oldval); \
     catomic_compare_and_exchange_val_acq (mem, newval, __atg4_old) \
       != __atg4_old; \
  })
# endif
#endif


#ifndef catomic_compare_and_exchange_bool_rel
# ifndef atomic_compare_and_exchange_bool_rel
#  define catomic_compare_and_exchange_bool_rel(mem, newval, oldval) \
  catomic_compare_and_exchange_bool_acq (mem, newval, oldval)
# else
#  define catomic_compare_and_exchange_bool_rel(mem, newval, oldval) \
  atomic_compare_and_exchange_bool_rel (mem, newval, oldval)
# endif
#endif


#ifndef atomic_compare_and_exchange_bool_rel
# define atomic_compare_and_exchange_bool_rel(mem, newval, oldval) \
  atomic_compare_and_exchange_bool_acq (mem, newval, oldval)
#endif


/* Store NEWVALUE in *MEM and return the old value.  */
#ifndef atomic_exchange_acq
# define atomic_exchange_acq(mem, newvalue) \
  ({ __typeof (*(mem)) __atg5_oldval; \
     __typeof (mem) __atg5_memp = (mem); \
     __typeof (*(mem)) __atg5_value = (newvalue); \
 \
     do \
       __atg5_oldval = *__atg5_memp; \
     while (__builtin_expect \
            (atomic_compare_and_exchange_bool_acq (__atg5_memp, __atg5_value, \
                                                   __atg5_oldval), 0)); \
 \
     __atg5_oldval; })
#endif

#ifndef atomic_exchange_rel
# define atomic_exchange_rel(mem, newvalue) atomic_exchange_acq (mem, newvalue)
#endif
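
/* The loop above is the generic read-modify-write fallback used
   throughout this file: read the current value, then retry the CAS
   until no other thread changed *MEM in between.  For example, if *MEM
   held 5 but another thread stored 7 between the read and the CAS, the
   CAS fails and the loop rereads 7 before succeeding.  */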


/* Add VALUE to *MEM and return the old value of *MEM.  */
#ifndef atomic_exchange_and_add_acq
# ifdef atomic_exchange_and_add
#  define atomic_exchange_and_add_acq(mem, value) \
  atomic_exchange_and_add (mem, value)
# else
#  define atomic_exchange_and_add_acq(mem, value) \
  ({ __typeof (*(mem)) __atg6_oldval; \
     __typeof (mem) __atg6_memp = (mem); \
     __typeof (*(mem)) __atg6_value = (value); \
 \
     do \
       __atg6_oldval = *__atg6_memp; \
     while (__builtin_expect \
            (atomic_compare_and_exchange_bool_acq (__atg6_memp, \
                                                   __atg6_oldval \
                                                   + __atg6_value, \
                                                   __atg6_oldval), 0)); \
 \
     __atg6_oldval; })
# endif
#endif

#ifndef atomic_exchange_and_add_rel
# define atomic_exchange_and_add_rel(mem, value) \
  atomic_exchange_and_add_acq (mem, value)
#endif

#ifndef atomic_exchange_and_add
# define atomic_exchange_and_add(mem, value) \
  atomic_exchange_and_add_acq (mem, value)
#endif

#ifndef catomic_exchange_and_add
# define catomic_exchange_and_add(mem, value) \
  ({ __typeof (*(mem)) __atg7_oldv; \
     __typeof (mem) __atg7_memp = (mem); \
     __typeof (*(mem)) __atg7_value = (value); \
 \
     do \
       __atg7_oldv = *__atg7_memp; \
     while (__builtin_expect \
            (catomic_compare_and_exchange_bool_acq (__atg7_memp, \
                                                    __atg7_oldv \
                                                    + __atg7_value, \
                                                    __atg7_oldv), 0)); \
 \
     __atg7_oldv; })
#endif
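
/* Illustrative use (hypothetical counter): atomic_exchange_and_add
   returns the value *before* the addition, so with "int n = 1",
     atomic_exchange_and_add (&n, 1)
   yields 1 and leaves n == 2.  The increment/decrement helpers defined
   below adjust this to the post-operation value where needed.  */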


#ifndef atomic_max
# define atomic_max(mem, value) \
  do { \
    __typeof (*(mem)) __atg8_oldval; \
    __typeof (mem) __atg8_memp = (mem); \
    __typeof (*(mem)) __atg8_value = (value); \
    do { \
      __atg8_oldval = *__atg8_memp; \
      if (__atg8_oldval >= __atg8_value) \
        break; \
    } while (__builtin_expect \
             (atomic_compare_and_exchange_bool_acq (__atg8_memp, \
                                                    __atg8_value, \
                                                    __atg8_oldval), 0)); \
  } while (0)
#endif


#ifndef catomic_max
# define catomic_max(mem, value) \
  do { \
    __typeof (*(mem)) __atg9_oldv; \
    __typeof (mem) __atg9_memp = (mem); \
    __typeof (*(mem)) __atg9_value = (value); \
    do { \
      __atg9_oldv = *__atg9_memp; \
      if (__atg9_oldv >= __atg9_value) \
        break; \
    } while (__builtin_expect \
             (catomic_compare_and_exchange_bool_acq (__atg9_memp, \
                                                     __atg9_value, \
                                                     __atg9_oldv), 0)); \
  } while (0)
#endif


#ifndef atomic_min
# define atomic_min(mem, value) \
  do { \
    __typeof (*(mem)) __atg10_oldval; \
    __typeof (mem) __atg10_memp = (mem); \
    __typeof (*(mem)) __atg10_value = (value); \
    do { \
      __atg10_oldval = *__atg10_memp; \
      if (__atg10_oldval <= __atg10_value) \
        break; \
    } while (__builtin_expect \
             (atomic_compare_and_exchange_bool_acq (__atg10_memp, \
                                                    __atg10_value, \
                                                    __atg10_oldval), 0)); \
  } while (0)
#endif
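
/* Illustrative semantics (hypothetical variable): after
     atomic_max (&x, 10);
   x holds the larger of its previous value and 10; atomic_min is the
   mirror image.  Both are statements and return no value.  */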


#ifndef atomic_add
# define atomic_add(mem, value) (void) atomic_exchange_and_add ((mem), (value))
#endif


#ifndef catomic_add
# define catomic_add(mem, value) \
  (void) catomic_exchange_and_add ((mem), (value))
#endif


#ifndef atomic_increment
# define atomic_increment(mem) atomic_add ((mem), 1)
#endif


#ifndef catomic_increment
# define catomic_increment(mem) catomic_add ((mem), 1)
#endif


#ifndef atomic_increment_val
# define atomic_increment_val(mem) (atomic_exchange_and_add ((mem), 1) + 1)
#endif


#ifndef catomic_increment_val
# define catomic_increment_val(mem) (catomic_exchange_and_add ((mem), 1) + 1)
#endif


/* Add one to *MEM and return true iff it's now zero.  */
#ifndef atomic_increment_and_test
# define atomic_increment_and_test(mem) \
  (atomic_exchange_and_add ((mem), 1) + 1 == 0)
#endif


#ifndef atomic_decrement
# define atomic_decrement(mem) atomic_add ((mem), -1)
#endif


#ifndef catomic_decrement
# define catomic_decrement(mem) catomic_add ((mem), -1)
#endif


#ifndef atomic_decrement_val
# define atomic_decrement_val(mem) (atomic_exchange_and_add ((mem), -1) - 1)
#endif


#ifndef catomic_decrement_val
# define catomic_decrement_val(mem) (catomic_exchange_and_add ((mem), -1) - 1)
#endif


/* Subtract 1 from *MEM and return true iff it's now zero.  */
#ifndef atomic_decrement_and_test
# define atomic_decrement_and_test(mem) \
  (atomic_exchange_and_add ((mem), -1) == 1)
#endif
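
/* Illustrative use (hypothetical refcount and helper): the classic
   release pattern frees an object only when the last reference is gone:
     if (atomic_decrement_and_test (&obj->refs))
       free_object (obj);
   The old value 1 returned by the exchange-and-add means the new value
   is 0, hence "now zero".  */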


/* Decrement *MEM if it is > 0, and return the old value.  */
#ifndef atomic_decrement_if_positive
# define atomic_decrement_if_positive(mem) \
  ({ __typeof (*(mem)) __atg11_oldval; \
     __typeof (mem) __atg11_memp = (mem); \
 \
     do \
       { \
         __atg11_oldval = *__atg11_memp; \
         if (__builtin_expect (__atg11_oldval <= 0, 0)) \
           break; \
       } \
     while (__builtin_expect \
            (atomic_compare_and_exchange_bool_acq (__atg11_memp, \
                                                   __atg11_oldval - 1, \
                                                   __atg11_oldval), 0)); \
     __atg11_oldval; })
#endif


#ifndef atomic_add_negative
# define atomic_add_negative(mem, value) \
  ({ __typeof (value) __atg12_value = (value); \
     atomic_exchange_and_add (mem, __atg12_value) < -__atg12_value; })
#endif


#ifndef atomic_add_zero
# define atomic_add_zero(mem, value) \
  ({ __typeof (value) __atg13_value = (value); \
     atomic_exchange_and_add (mem, __atg13_value) == -__atg13_value; })
#endif
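
/* Illustrative semantics: atomic_add_negative (mem, v) is non-zero iff
   the sum *MEM + v is negative, and atomic_add_zero (mem, v) is non-zero
   iff the sum is zero; both compare the old value returned by
   atomic_exchange_and_add against -v instead of rereading memory.  */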


#ifndef atomic_bit_set
# define atomic_bit_set(mem, bit) \
  (void) atomic_bit_test_set (mem, bit)
#endif


#ifndef atomic_bit_test_set
# define atomic_bit_test_set(mem, bit) \
  ({ __typeof (*(mem)) __atg14_old; \
     __typeof (mem) __atg14_memp = (mem); \
     __typeof (*(mem)) __atg14_mask = ((__typeof (*(mem))) 1 << (bit)); \
 \
     do \
       __atg14_old = (*__atg14_memp); \
     while (__builtin_expect \
            (atomic_compare_and_exchange_bool_acq (__atg14_memp, \
                                                   __atg14_old | __atg14_mask, \
                                                   __atg14_old), 0)); \
 \
     __atg14_old & __atg14_mask; })
#endif
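
/* Illustrative use (hypothetical flag word): with "unsigned int flags = 0",
     atomic_bit_test_set (&flags, 3)
   atomically sets bit 3 and returns 0 because the bit was clear before;
   a second call returns the non-zero mask 1 << 3.  */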

/* Atomically *mem &= mask.  */
#ifndef atomic_and
# define atomic_and(mem, mask) \
  do { \
    __typeof (*(mem)) __atg15_old; \
    __typeof (mem) __atg15_memp = (mem); \
    __typeof (*(mem)) __atg15_mask = (mask); \
 \
    do \
      __atg15_old = (*__atg15_memp); \
    while (__builtin_expect \
           (atomic_compare_and_exchange_bool_acq (__atg15_memp, \
                                                  __atg15_old & __atg15_mask, \
                                                  __atg15_old), 0)); \
  } while (0)
#endif

#ifndef catomic_and
# define catomic_and(mem, mask) \
  do { \
    __typeof (*(mem)) __atg20_old; \
    __typeof (mem) __atg20_memp = (mem); \
    __typeof (*(mem)) __atg20_mask = (mask); \
 \
    do \
      __atg20_old = (*__atg20_memp); \
    while (__builtin_expect \
           (catomic_compare_and_exchange_bool_acq (__atg20_memp, \
                                                   __atg20_old & __atg20_mask, \
                                                   __atg20_old), 0)); \
  } while (0)
#endif

/* Atomically *mem &= mask and return the old value of *mem.  */
#ifndef atomic_and_val
# define atomic_and_val(mem, mask) \
  ({ __typeof (*(mem)) __atg16_old; \
     __typeof (mem) __atg16_memp = (mem); \
     __typeof (*(mem)) __atg16_mask = (mask); \
 \
     do \
       __atg16_old = (*__atg16_memp); \
     while (__builtin_expect \
            (atomic_compare_and_exchange_bool_acq (__atg16_memp, \
                                                   __atg16_old & __atg16_mask, \
                                                   __atg16_old), 0)); \
 \
     __atg16_old; })
#endif

/* Atomically *mem |= mask.  */
#ifndef atomic_or
# define atomic_or(mem, mask) \
  do { \
    __typeof (*(mem)) __atg17_old; \
    __typeof (mem) __atg17_memp = (mem); \
    __typeof (*(mem)) __atg17_mask = (mask); \
 \
    do \
      __atg17_old = (*__atg17_memp); \
    while (__builtin_expect \
           (atomic_compare_and_exchange_bool_acq (__atg17_memp, \
                                                  __atg17_old | __atg17_mask, \
                                                  __atg17_old), 0)); \
  } while (0)
#endif

#ifndef catomic_or
# define catomic_or(mem, mask) \
  do { \
    __typeof (*(mem)) __atg18_old; \
    __typeof (mem) __atg18_memp = (mem); \
    __typeof (*(mem)) __atg18_mask = (mask); \
 \
    do \
      __atg18_old = (*__atg18_memp); \
    while (__builtin_expect \
           (catomic_compare_and_exchange_bool_acq (__atg18_memp, \
                                                   __atg18_old | __atg18_mask, \
                                                   __atg18_old), 0)); \
  } while (0)
#endif

/* Atomically *mem |= mask and return the old value of *mem.  */
#ifndef atomic_or_val
# define atomic_or_val(mem, mask) \
  ({ __typeof (*(mem)) __atg19_old; \
     __typeof (mem) __atg19_memp = (mem); \
     __typeof (*(mem)) __atg19_mask = (mask); \
 \
     do \
       __atg19_old = (*__atg19_memp); \
     while (__builtin_expect \
            (atomic_compare_and_exchange_bool_acq (__atg19_memp, \
                                                   __atg19_old | __atg19_mask, \
                                                   __atg19_old), 0)); \
 \
     __atg19_old; })
#endif
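
/* Illustrative semantics (hypothetical word): with "unsigned int w = 0xf0",
   atomic_or_val (&w, 0x0f) returns 0xf0 and leaves w == 0xff; a subsequent
   atomic_and_val (&w, 0x0f) returns 0xff and leaves w == 0x0f.  Both
   return the value *before* the operation.  */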

#ifndef atomic_full_barrier
# define atomic_full_barrier() __asm__ ("" ::: "memory")
#endif


#ifndef atomic_read_barrier
# define atomic_read_barrier() atomic_full_barrier ()
#endif


#ifndef atomic_write_barrier
# define atomic_write_barrier() atomic_full_barrier ()
#endif


#ifndef atomic_forced_read
# define atomic_forced_read(x) \
  ({ __typeof (x) __x; __asm__ ("" : "=r" (__x) : "0" (x)); __x; })
#endif
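
/* Note that the default atomic_full_barrier above is only a compiler
   barrier (an empty asm with a "memory" clobber): it stops compiler
   reordering but emits no fence instruction, so architectures whose
   hardware reorders memory accesses must override it.  atomic_forced_read
   uses the same trick to force X to be read exactly once, preventing the
   compiler from caching or re-fetching the value.  */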

/* The following functions are a subset of the atomic operations provided by
   C11.  Usually, a function named atomic_OP_MO(args) is equivalent to C11's
   atomic_OP_explicit(args, memory_order_MO); exceptions are noted below.  */

/* Each arch can request to use compiler built-ins for C11 atomics.  If it
   does, all atomics will be based on these.  */
#if defined USE_ATOMIC_COMPILER_BUILTINS

/* We require 32-bit atomic operations; some archs also support 64-bit atomic
   operations.  */
void __atomic_link_error (void);
# if defined(__HAVE_64B_ATOMICS) && __HAVE_64B_ATOMICS
#  define __atomic_check_size(mem) \
   if ((sizeof (*mem) != 4) && (sizeof (*mem) != 8)) \
     __atomic_link_error ();
# else
#  define __atomic_check_size(mem) \
   if (sizeof (*mem) != 4) \
     __atomic_link_error ();
# endif
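
/* __atomic_check_size relies on __atomic_link_error being declared but
   deliberately left undefined: for a supported size the if-condition is
   a compile-time constant zero and the call is optimized away, while an
   unsupported size leaves a reference that fails at link time.  */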

# define atomic_thread_fence_acquire() \
  __atomic_thread_fence (__ATOMIC_ACQUIRE)
# define atomic_thread_fence_release() \
  __atomic_thread_fence (__ATOMIC_RELEASE)
# define atomic_thread_fence_seq_cst() \
  __atomic_thread_fence (__ATOMIC_SEQ_CST)

# define atomic_load_relaxed(mem) \
  ({ __atomic_check_size ((mem)); __atomic_load_n ((mem), __ATOMIC_RELAXED); })
# define atomic_load_acquire(mem) \
  ({ __atomic_check_size ((mem)); __atomic_load_n ((mem), __ATOMIC_ACQUIRE); })

# define atomic_store_relaxed(mem, val) \
  do { \
    __atomic_check_size ((mem)); \
    __atomic_store_n ((mem), (val), __ATOMIC_RELAXED); \
  } while (0)
# define atomic_store_release(mem, val) \
  do { \
    __atomic_check_size ((mem)); \
    __atomic_store_n ((mem), (val), __ATOMIC_RELEASE); \
  } while (0)

/* On failure, this CAS has memory_order_relaxed semantics.  */
# define atomic_compare_exchange_weak_relaxed(mem, expected, desired) \
  ({ __atomic_check_size ((mem)); \
     __atomic_compare_exchange_n ((mem), (expected), (desired), 1, \
                                  __ATOMIC_RELAXED, __ATOMIC_RELAXED); })
# define atomic_compare_exchange_weak_acquire(mem, expected, desired) \
  ({ __atomic_check_size ((mem)); \
     __atomic_compare_exchange_n ((mem), (expected), (desired), 1, \
                                  __ATOMIC_ACQUIRE, __ATOMIC_RELAXED); })
# define atomic_compare_exchange_weak_release(mem, expected, desired) \
  ({ __atomic_check_size ((mem)); \
     __atomic_compare_exchange_n ((mem), (expected), (desired), 1, \
                                  __ATOMIC_RELEASE, __ATOMIC_RELAXED); })

# define atomic_exchange_acquire(mem, desired) \
  ({ __atomic_check_size ((mem)); \
     __atomic_exchange_n ((mem), (desired), __ATOMIC_ACQUIRE); })
# define atomic_exchange_release(mem, desired) \
  ({ __atomic_check_size ((mem)); \
     __atomic_exchange_n ((mem), (desired), __ATOMIC_RELEASE); })

# define atomic_fetch_add_relaxed(mem, operand) \
  ({ __atomic_check_size ((mem)); \
     __atomic_fetch_add ((mem), (operand), __ATOMIC_RELAXED); })
# define atomic_fetch_add_acquire(mem, operand) \
  ({ __atomic_check_size ((mem)); \
     __atomic_fetch_add ((mem), (operand), __ATOMIC_ACQUIRE); })
# define atomic_fetch_add_release(mem, operand) \
  ({ __atomic_check_size ((mem)); \
     __atomic_fetch_add ((mem), (operand), __ATOMIC_RELEASE); })
# define atomic_fetch_add_acq_rel(mem, operand) \
  ({ __atomic_check_size ((mem)); \
     __atomic_fetch_add ((mem), (operand), __ATOMIC_ACQ_REL); })

# define atomic_fetch_and_acquire(mem, operand) \
  ({ __atomic_check_size ((mem)); \
     __atomic_fetch_and ((mem), (operand), __ATOMIC_ACQUIRE); })

# define atomic_fetch_or_relaxed(mem, operand) \
  ({ __atomic_check_size ((mem)); \
     __atomic_fetch_or ((mem), (operand), __ATOMIC_RELAXED); })
# define atomic_fetch_or_acquire(mem, operand) \
  ({ __atomic_check_size ((mem)); \
     __atomic_fetch_or ((mem), (operand), __ATOMIC_ACQUIRE); })
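
/* Illustrative use (hypothetical variables data, ready, and consume ()):
   a release/acquire publication pattern built from these macros:
     writer:  data = 42;  atomic_store_release (&ready, 1);
     reader:  if (atomic_load_acquire (&ready))  consume (data);
   If the acquire load observes the release store, the reader is
   guaranteed to also see the write to data.  */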

#else /* !USE_ATOMIC_COMPILER_BUILTINS */

/* By default, we assume that read, write, and full barriers are equivalent
   to acquire, release, and seq_cst barriers.  Archs for which this does not
   hold have to provide custom definitions of the fences.  */
# ifndef atomic_thread_fence_acquire
#  define atomic_thread_fence_acquire() atomic_read_barrier ()
# endif
# ifndef atomic_thread_fence_release
#  define atomic_thread_fence_release() atomic_write_barrier ()
# endif
# ifndef atomic_thread_fence_seq_cst
#  define atomic_thread_fence_seq_cst() atomic_full_barrier ()
# endif

# ifndef atomic_load_relaxed
#  define atomic_load_relaxed(mem) \
   ({ __typeof (*(mem)) __atg100_val; \
      __asm__ ("" : "=r" (__atg100_val) : "0" (*(mem))); \
      __atg100_val; })
# endif
# ifndef atomic_load_acquire
#  define atomic_load_acquire(mem) \
   ({ __typeof (*(mem)) __atg101_val = atomic_load_relaxed (mem); \
      atomic_thread_fence_acquire (); \
      __atg101_val; })
# endif

# ifndef atomic_store_relaxed
/* XXX Use inline asm here?  */
#  define atomic_store_relaxed(mem, val) do { *(mem) = (val); } while (0)
# endif
# ifndef atomic_store_release
#  define atomic_store_release(mem, val) \
   do { \
     atomic_thread_fence_release (); \
     atomic_store_relaxed ((mem), (val)); \
   } while (0)
# endif

/* On failure, this CAS has memory_order_relaxed semantics.  */
/* XXX This potentially has one branch more than necessary, but archs
   currently do not define a CAS that returns both the previous value and
   the success flag.  */
# ifndef atomic_compare_exchange_weak_acquire
#  define atomic_compare_exchange_weak_acquire(mem, expected, desired) \
   ({ __typeof (*(expected)) __atg102_expected = *(expected); \
      *(expected) = \
        atomic_compare_and_exchange_val_acq ((mem), (desired), *(expected)); \
      *(expected) == __atg102_expected; })
# endif
# ifndef atomic_compare_exchange_weak_relaxed
/* XXX Fall back to CAS with acquire MO because archs do not define a weaker
   CAS.  */
#  define atomic_compare_exchange_weak_relaxed(mem, expected, desired) \
   atomic_compare_exchange_weak_acquire ((mem), (expected), (desired))
# endif
# ifndef atomic_compare_exchange_weak_release
#  define atomic_compare_exchange_weak_release(mem, expected, desired) \
   ({ __typeof (*(expected)) __atg103_expected = *(expected); \
      *(expected) = \
        atomic_compare_and_exchange_val_rel ((mem), (desired), *(expected)); \
      *(expected) == __atg103_expected; })
# endif

# ifndef atomic_exchange_acquire
#  define atomic_exchange_acquire(mem, val) \
   atomic_exchange_acq ((mem), (val))
# endif
# ifndef atomic_exchange_release
#  define atomic_exchange_release(mem, val) \
   atomic_exchange_rel ((mem), (val))
# endif

# ifndef atomic_fetch_add_acquire
#  define atomic_fetch_add_acquire(mem, operand) \
   atomic_exchange_and_add_acq ((mem), (operand))
# endif
# ifndef atomic_fetch_add_relaxed
/* XXX Fall back to acquire MO because the MO semantics of
   atomic_exchange_and_add are not documented; the generic version falls back
   to atomic_exchange_and_add_acq if atomic_exchange_and_add is not defined,
   and vice versa.  */
#  define atomic_fetch_add_relaxed(mem, operand) \
   atomic_fetch_add_acquire ((mem), (operand))
# endif
# ifndef atomic_fetch_add_release
#  define atomic_fetch_add_release(mem, operand) \
   atomic_exchange_and_add_rel ((mem), (operand))
# endif
# ifndef atomic_fetch_add_acq_rel
#  define atomic_fetch_add_acq_rel(mem, operand) \
   ({ atomic_thread_fence_release (); \
      atomic_exchange_and_add_acq ((mem), (operand)); })
# endif

/* XXX The default for atomic_and_val has acquire semantics, but this is not
   documented.  */
# ifndef atomic_fetch_and_acquire
#  define atomic_fetch_and_acquire(mem, operand) \
   atomic_and_val ((mem), (operand))
# endif

/* XXX The default for atomic_or_val has acquire semantics, but this is not
   documented.  */
# ifndef atomic_fetch_or_acquire
#  define atomic_fetch_or_acquire(mem, operand) \
   atomic_or_val ((mem), (operand))
# endif
/* XXX Fall back to acquire MO because archs do not define a weaker
   atomic_or_val.  */
# ifndef atomic_fetch_or_relaxed
#  define atomic_fetch_or_relaxed(mem, operand) \
   atomic_fetch_or_acquire ((mem), (operand))
# endif

#endif /* !USE_ATOMIC_COMPILER_BUILTINS */


#ifndef atomic_delay
# define atomic_delay() do { /* nothing */ } while (0)
#endif

#endif /* atomic.h */