/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <[email protected]>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/atomic.h>

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable.  Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp;
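
/*
 * Note: padding each entry of atomic64_lock[] out to L1_CACHE_BYTES
 * means no two locks share a cache line, so heavy contention on one
 * lock does not cause cache-line bouncing for CPUs that hash to a
 * different lock.
 */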

static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}
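
/*
 * Note: the "& (NR_LOCKS - 1)" mask above relies on NR_LOCKS being a
 * power of two.  Because the address is shifted right by L1_CACHE_SHIFT
 * first, atomic64_t variables that share a cache line always map to
 * the same lock.
 */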

long long atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_read);

void atomic64_set(atomic64_t *v, long long i)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_set);

void atomic64_add(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter += a;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_add);

long long atomic64_add_return(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter += a;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_add_return);

void atomic64_sub(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter -= a;
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL(atomic64_sub);

long long atomic64_sub_return(long long a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter -= a;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_sub_return);

long long atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_dec_if_positive);

long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_cmpxchg);
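
/*
 * Illustrative caller pattern (a sketch, not part of this file): users
 * of atomic64_cmpxchg() typically retry a read-modify-write loop until
 * no concurrent update is observed.  The transform() helper here is
 * purely hypothetical.
 *
 *	long long old, new;
 *
 *	do {
 *		old = atomic64_read(v);
 *		new = transform(old);
 *	} while (atomic64_cmpxchg(v, old, new) != old);
 */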

long long atomic64_xchg(atomic64_t *v, long long new)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}
EXPORT_SYMBOL(atomic64_xchg);

int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	int ret = 0;

	raw_spin_lock_irqsave(lock, flags);
	if (v->counter != u) {
		v->counter += a;
		ret = 1;
	}
	raw_spin_unlock_irqrestore(lock, flags);
	return ret;
}
EXPORT_SYMBOL(atomic64_add_unless);

static int init_atomic64_lock(void)
{
	int i;

	for (i = 0; i < NR_LOCKS; ++i)
		raw_spin_lock_init(&atomic64_lock[i].lock);
	return 0;
}

pure_initcall(init_atomic64_lock);
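
/*
 * Example usage (a minimal sketch; the counter and helper names are
 * hypothetical): callers use the same atomic64 API as on 64-bit
 * architectures, and the spinlock serialization stays hidden behind it.
 *
 *	static atomic64_t bytes_transferred = ATOMIC64_INIT(0);
 *
 *	void account_io(long long len)
 *	{
 *		atomic64_add(len, &bytes_transferred);
 *	}
 *
 *	long long total_io(void)
 *	{
 *		return atomic64_read(&bytes_transferred);
 *	}
 */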