1 | /* |
2 | * Atomic operations on 64-bit quantities. | |
3 | * | |
4 | * Copyright (C) 2017 Red Hat, Inc. | |
5 | * | |
6 | * Author: Paolo Bonzini <[email protected]> | |
7 | * | |
8 | * This work is licensed under the terms of the GNU GPL, version 2 or later. | |
9 | * See the COPYING file in the top-level directory. | |
10 | */ | |
11 | ||
12 | #include "qemu/osdep.h" | |
13 | #include "qemu/atomic.h" | |
14 | #include "qemu/stats64.h" | |
15 | #include "qemu/processor.h" | |
16 | ||
17 | #ifndef CONFIG_ATOMIC64 | |
/*
 * Reader side of the Stat64 lock protocol: s->lock holds a reader
 * count in the bits above bit 0, while bit 0 is the writer flag
 * (see stat64_wrtrylock).  Each reader accounts for 2.
 */
static inline void stat64_rdlock(Stat64 *s)
{
    /* Keep out incoming writers to avoid them starving us. */
    qatomic_add(&s->lock, 2);

    /* If there is a concurrent writer, wait for it. */
    while (qatomic_read(&s->lock) & 1) {
        cpu_relax();
    }
}
28 | ||
/* Drop one reader reference; pairs with the qatomic_add in stat64_rdlock. */
static inline void stat64_rdunlock(Stat64 *s)
{
    qatomic_sub(&s->lock, 2);
}
33 | ||
/*
 * Try to take the writer lock: succeeds only when there are no readers
 * and no other writer (s->lock == 0), setting the writer bit (bit 0).
 * Returns true if the lock was acquired.
 */
static inline bool stat64_wrtrylock(Stat64 *s)
{
    return qatomic_cmpxchg(&s->lock, 0, 1) == 0;
}
38 | ||
/* Release the writer lock by clearing bit 0 (lock went 0 -> 1 -> 0). */
static inline void stat64_wrunlock(Stat64 *s)
{
    qatomic_dec(&s->lock);
}
43 | ||
44 | uint64_t stat64_get(const Stat64 *s) | |
45 | { | |
46 | uint32_t high, low; | |
47 | ||
48 | stat64_rdlock((Stat64 *)s); | |
49 | ||
50 | /* 64-bit writes always take the lock, so we can read in | |
51 | * any order. | |
52 | */ | |
d73415a3 SH |
53 | high = qatomic_read(&s->high); |
54 | low = qatomic_read(&s->low); | |
ae2d489c PB |
55 | stat64_rdunlock((Stat64 *)s); |
56 | ||
57 | return ((uint64_t)high << 32) | low; | |
58 | } | |
59 | ||
/*
 * Slow path for adding to a Stat64 when the addition to s->low may need
 * a carry into s->high.  Returns false, without updating anything, if
 * the write lock could not be taken; the caller is expected to retry.
 */
bool stat64_add32_carry(Stat64 *s, uint32_t low, uint32_t high)
{
    uint32_t old;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    /* 64-bit reads always take the lock, so they don't care about the
     * order of our update.  By updating s->low first, we can check
     * whether we have to carry into s->high.
     */
    old = qatomic_fetch_add(&s->low, low);
    /* Unsigned wrap-around of old + low signals a carry out of bit 31. */
    high += (old + low) < old;
    qatomic_add(&s->high, high);
    stat64_wrunlock(s);
    return true;
}
79 | ||
80 | bool stat64_min_slow(Stat64 *s, uint64_t value) | |
81 | { | |
82 | uint32_t high, low; | |
83 | uint64_t orig; | |
84 | ||
85 | if (!stat64_wrtrylock(s)) { | |
86 | cpu_relax(); | |
87 | return false; | |
88 | } | |
89 | ||
d73415a3 SH |
90 | high = qatomic_read(&s->high); |
91 | low = qatomic_read(&s->low); | |
ae2d489c PB |
92 | |
93 | orig = ((uint64_t)high << 32) | low; | |
26a5db32 | 94 | if (value < orig) { |
ae2d489c PB |
95 | /* We have to set low before high, just like stat64_min reads |
96 | * high before low. The value may become higher temporarily, but | |
97 | * stat64_get does not notice (it takes the lock) and the only ill | |
98 | * effect on stat64_min is that the slow path may be triggered | |
99 | * unnecessarily. | |
100 | */ | |
d73415a3 | 101 | qatomic_set(&s->low, (uint32_t)value); |
ae2d489c | 102 | smp_wmb(); |
d73415a3 | 103 | qatomic_set(&s->high, value >> 32); |
ae2d489c PB |
104 | } |
105 | stat64_wrunlock(s); | |
106 | return true; | |
107 | } | |
108 | ||
109 | bool stat64_max_slow(Stat64 *s, uint64_t value) | |
110 | { | |
111 | uint32_t high, low; | |
112 | uint64_t orig; | |
113 | ||
114 | if (!stat64_wrtrylock(s)) { | |
115 | cpu_relax(); | |
116 | return false; | |
117 | } | |
118 | ||
d73415a3 SH |
119 | high = qatomic_read(&s->high); |
120 | low = qatomic_read(&s->low); | |
ae2d489c PB |
121 | |
122 | orig = ((uint64_t)high << 32) | low; | |
26a5db32 | 123 | if (value > orig) { |
ae2d489c PB |
124 | /* We have to set low before high, just like stat64_max reads |
125 | * high before low. The value may become lower temporarily, but | |
126 | * stat64_get does not notice (it takes the lock) and the only ill | |
127 | * effect on stat64_max is that the slow path may be triggered | |
128 | * unnecessarily. | |
129 | */ | |
d73415a3 | 130 | qatomic_set(&s->low, (uint32_t)value); |
ae2d489c | 131 | smp_wmb(); |
d73415a3 | 132 | qatomic_set(&s->high, value >> 32); |
ae2d489c PB |
133 | } |
134 | stat64_wrunlock(s); | |
135 | return true; | |
136 | } | |
137 | #endif |