2874c5fd | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
09d4e0ed PM |
2 | /* |
3 | * Generic implementation of 64-bit atomics using spinlocks, | |
4 | * useful on processors that don't have 64-bit atomic instructions. | |
5 | * | |
6 | * Copyright © 2009 Paul Mackerras, IBM Corp. <[email protected]> | |
09d4e0ed PM |
7 | */ |
8 | #include <linux/types.h> | |
9 | #include <linux/cache.h> | |
10 | #include <linux/spinlock.h> | |
11 | #include <linux/init.h> | |
8bc3bcc9 | 12 | #include <linux/export.h> |
60063497 | 13 | #include <linux/atomic.h> |
09d4e0ed PM |
14 | |
15 | /* | |
16 | * We use a hashed array of spinlocks to provide exclusive access | |
17 | * to each atomic64_t variable. Since this is expected to used on | |
18 | * systems with small numbers of CPUs (<= 4 or so), we use a | |
19 | * relatively small array of 16 spinlocks to avoid wasting too much | |
20 | * memory on the spinlock array. | |
21 | */ | |
22 | #define NR_LOCKS 16 | |
23 | ||
24 | /* | |
25 | * Ensure each lock is in a separate cacheline. | |
26 | */ | |
27 | static union { | |
f59ca058 | 28 | raw_spinlock_t lock; |
09d4e0ed | 29 | char pad[L1_CACHE_BYTES]; |
fcc16882 SB |
30 | } atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = { |
31 | [0 ... (NR_LOCKS - 1)] = { | |
32 | .lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock), | |
33 | }, | |
34 | }; | |
09d4e0ed | 35 | |
cb475de3 | 36 | static inline raw_spinlock_t *lock_addr(const atomic64_t *v) |
09d4e0ed PM |
37 | { |
38 | unsigned long addr = (unsigned long) v; | |
39 | ||
40 | addr >>= L1_CACHE_SHIFT; | |
41 | addr ^= (addr >> 8) ^ (addr >> 16); | |
42 | return &atomic64_lock[addr & (NR_LOCKS - 1)].lock; | |
43 | } | |
44 | ||
1bdadf46 | 45 | s64 generic_atomic64_read(const atomic64_t *v) |
09d4e0ed PM |
46 | { |
47 | unsigned long flags; | |
cb475de3 | 48 | raw_spinlock_t *lock = lock_addr(v); |
9255813d | 49 | s64 val; |
09d4e0ed | 50 | |
f59ca058 | 51 | raw_spin_lock_irqsave(lock, flags); |
09d4e0ed | 52 | val = v->counter; |
f59ca058 | 53 | raw_spin_unlock_irqrestore(lock, flags); |
09d4e0ed PM |
54 | return val; |
55 | } | |
1bdadf46 | 56 | EXPORT_SYMBOL(generic_atomic64_read); |
09d4e0ed | 57 | |
1bdadf46 | 58 | void generic_atomic64_set(atomic64_t *v, s64 i) |
09d4e0ed PM |
59 | { |
60 | unsigned long flags; | |
cb475de3 | 61 | raw_spinlock_t *lock = lock_addr(v); |
09d4e0ed | 62 | |
f59ca058 | 63 | raw_spin_lock_irqsave(lock, flags); |
09d4e0ed | 64 | v->counter = i; |
f59ca058 | 65 | raw_spin_unlock_irqrestore(lock, flags); |
09d4e0ed | 66 | } |
1bdadf46 | 67 | EXPORT_SYMBOL(generic_atomic64_set); |
09d4e0ed | 68 | |
/*
 * Generate a void read-modify-write op (e.g. generic_atomic64_add):
 * applies "counter c_op a" under the variable's hashed spinlock.
 */
#define ATOMIC64_OP(op, c_op)						\
void generic_atomic64_##op(s64 a, atomic64_t *v)			\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
}									\
EXPORT_SYMBOL(generic_atomic64_##op);
560cb12a PZ |
80 | |
/*
 * Generate an op that also returns the NEW value of the counter
 * (e.g. generic_atomic64_add_return), evaluated under the lock.
 */
#define ATOMIC64_OP_RETURN(op, c_op)					\
s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v)		\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	s64 ret;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	ret = (v->counter c_op a);					\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return ret;							\
}									\
EXPORT_SYMBOL(generic_atomic64_##op##_return);
560cb12a | 94 | |
/*
 * Generate an op that returns the OLD value of the counter
 * (e.g. generic_atomic64_fetch_add): snapshot, then modify, both
 * inside the same critical section.
 */
#define ATOMIC64_FETCH_OP(op, c_op)					\
s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v)			\
{									\
	unsigned long flags;						\
	raw_spinlock_t *lock = lock_addr(v);				\
	s64 ret;							\
									\
	raw_spin_lock_irqsave(lock, flags);				\
	ret = v->counter;						\
	v->counter c_op a;						\
	raw_spin_unlock_irqrestore(lock, flags);			\
	return ret;							\
}									\
EXPORT_SYMBOL(generic_atomic64_fetch_##op);
28aa2bda | 109 | |
560cb12a PZ |
110 | #define ATOMIC64_OPS(op, c_op) \ |
111 | ATOMIC64_OP(op, c_op) \ | |
28aa2bda PZ |
112 | ATOMIC64_OP_RETURN(op, c_op) \ |
113 | ATOMIC64_FETCH_OP(op, c_op) | |
560cb12a PZ |
114 | |
115 | ATOMIC64_OPS(add, +=) | |
116 | ATOMIC64_OPS(sub, -=) | |
117 | ||
118 | #undef ATOMIC64_OPS | |
28aa2bda PZ |
119 | #define ATOMIC64_OPS(op, c_op) \ |
120 | ATOMIC64_OP(op, c_op) \ | |
28aa2bda PZ |
121 | ATOMIC64_FETCH_OP(op, c_op) |
122 | ||
123 | ATOMIC64_OPS(and, &=) | |
124 | ATOMIC64_OPS(or, |=) | |
125 | ATOMIC64_OPS(xor, ^=) | |
126 | ||
127 | #undef ATOMIC64_OPS | |
128 | #undef ATOMIC64_FETCH_OP | |
560cb12a | 129 | #undef ATOMIC64_OP |
09d4e0ed | 130 | |
1bdadf46 | 131 | s64 generic_atomic64_dec_if_positive(atomic64_t *v) |
09d4e0ed PM |
132 | { |
133 | unsigned long flags; | |
cb475de3 | 134 | raw_spinlock_t *lock = lock_addr(v); |
9255813d | 135 | s64 val; |
09d4e0ed | 136 | |
f59ca058 | 137 | raw_spin_lock_irqsave(lock, flags); |
09d4e0ed PM |
138 | val = v->counter - 1; |
139 | if (val >= 0) | |
140 | v->counter = val; | |
f59ca058 | 141 | raw_spin_unlock_irqrestore(lock, flags); |
09d4e0ed PM |
142 | return val; |
143 | } | |
1bdadf46 | 144 | EXPORT_SYMBOL(generic_atomic64_dec_if_positive); |
09d4e0ed | 145 | |
1bdadf46 | 146 | s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n) |
09d4e0ed PM |
147 | { |
148 | unsigned long flags; | |
cb475de3 | 149 | raw_spinlock_t *lock = lock_addr(v); |
9255813d | 150 | s64 val; |
09d4e0ed | 151 | |
f59ca058 | 152 | raw_spin_lock_irqsave(lock, flags); |
09d4e0ed PM |
153 | val = v->counter; |
154 | if (val == o) | |
155 | v->counter = n; | |
f59ca058 | 156 | raw_spin_unlock_irqrestore(lock, flags); |
09d4e0ed PM |
157 | return val; |
158 | } | |
1bdadf46 | 159 | EXPORT_SYMBOL(generic_atomic64_cmpxchg); |
09d4e0ed | 160 | |
1bdadf46 | 161 | s64 generic_atomic64_xchg(atomic64_t *v, s64 new) |
09d4e0ed PM |
162 | { |
163 | unsigned long flags; | |
cb475de3 | 164 | raw_spinlock_t *lock = lock_addr(v); |
9255813d | 165 | s64 val; |
09d4e0ed | 166 | |
f59ca058 | 167 | raw_spin_lock_irqsave(lock, flags); |
09d4e0ed PM |
168 | val = v->counter; |
169 | v->counter = new; | |
f59ca058 | 170 | raw_spin_unlock_irqrestore(lock, flags); |
09d4e0ed PM |
171 | return val; |
172 | } | |
1bdadf46 | 173 | EXPORT_SYMBOL(generic_atomic64_xchg); |
09d4e0ed | 174 | |
1bdadf46 | 175 | s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) |
09d4e0ed PM |
176 | { |
177 | unsigned long flags; | |
cb475de3 | 178 | raw_spinlock_t *lock = lock_addr(v); |
9255813d | 179 | s64 val; |
09d4e0ed | 180 | |
f59ca058 | 181 | raw_spin_lock_irqsave(lock, flags); |
00b808ab MR |
182 | val = v->counter; |
183 | if (val != u) | |
09d4e0ed | 184 | v->counter += a; |
f59ca058 | 185 | raw_spin_unlock_irqrestore(lock, flags); |
00b808ab MR |
186 | |
187 | return val; | |
09d4e0ed | 188 | } |
1bdadf46 | 189 | EXPORT_SYMBOL(generic_atomic64_fetch_add_unless); |