#ifndef __QEMU_BARRIER_H
#define __QEMU_BARRIER_H 1

/* Compiler barrier */
#define barrier() asm volatile("" ::: "memory")
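
/*
 * Illustrative sketch: barrier() constrains only the compiler, not
 * the CPU.  A typical use is a busy-wait loop, where the "memory"
 * clobber forces the flag to be re-read from memory on each pass
 * instead of being cached in a register.  spin_until_set() below is
 * a hypothetical example, not an interface of this header:
 *
 *     static void spin_until_set(int *flag)
 *     {
 *         while (!*flag)
 *             barrier();   // compiler must reload *flag next time
 *     }
 */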

#if defined(__i386__) || defined(__x86_64__)

/*
 * Because of the strongly ordered x86 storage model, wmb() is a nop
 * (a compiler barrier only) on x86, at least as long as qemu doesn't
 * do accesses to write-combining memory or non-temporal load/stores
 * from C code.
 */
#define smp_wmb() barrier()

#elif defined(_ARCH_PPC)

/*
 * We use an eieio() for a wmb() on powerpc.  This assumes we don't
 * need to order cacheable and non-cacheable stores with respect to
 * each other.
 */
#define smp_wmb() asm volatile("eieio" ::: "memory")

#else

/*
 * On host platforms for which we have no explicit barrier definition,
 * we use the gcc __sync_synchronize() primitive to generate a full
 * barrier.  This should be safe on all platforms, though it may be
 * overkill.
 */
#define smp_wmb() __sync_synchronize()

#endif
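
/*
 * Illustrative sketch: smp_wmb() orders a data store before a later
 * "publish" store, as in a lockless producer/consumer handoff.  The
 * names below are hypothetical examples, not interfaces of this
 * header:
 *
 *     struct msg shared_msg;
 *     int msg_ready;
 *
 *     static void publish(struct msg *m)
 *     {
 *         shared_msg = *m;   // write the payload first
 *         smp_wmb();         // payload stores may not pass the flag store
 *         msg_ready = 1;     // then signal the consumer
 *     }
 *
 * A consumer that observes msg_ready == 1 (with a matching read
 * barrier where the architecture needs one) then sees the payload.
 */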

#endif