#ifndef _ASM_GENERIC_DIV64_H
#define _ASM_GENERIC_DIV64_H
/*
 * Copyright (C) 2003 Bernardo Innocenti <[email protected]>
 * Based on former asm-ppc/div64.h and asm-m68knommu/div64.h
 *
 * Optimization for constant divisors on 32-bit machines:
 * Copyright (C) 2006-2015 Nicolas Pitre
 *
 * The semantics of do_div() are:
 *
 * u32 do_div(u64 *n, u32 base)
 * {
 *         u32 remainder = *n % base;
 *         *n = *n / base;
 *         return remainder;
 * }
 *
 * NOTE: macro parameter n is evaluated multiple times,
 * beware of side effects!
 */
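
/*
 * Illustrative usage (editor's sketch, not from the original sources):
 * do_div() divides a 64-bit lvalue in place and returns the 32-bit
 * remainder, e.g.
 *
 *         u64 ns = 1000000123;
 *         u32 rem = do_div(ns, 1000000);   => ns == 1000, rem == 123
 *
 * Note that n is passed directly as an lvalue, not by address, despite
 * the pointer in the pseudo-prototype above.
 */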

#include <linux/types.h>
#include <linux/compiler.h>

#if BITS_PER_LONG == 64

# define do_div(n,base) ({ \
        u32 __base = (base); \
        u32 __rem; \
        __rem = ((u64)(n)) % __base; \
        (n) = ((u64)(n)) / __base; \
        __rem; \
 })

#elif BITS_PER_LONG == 32

#include <linux/log2.h>

/*
 * If the divisor happens to be constant, we determine the appropriate
 * inverse at compile time to turn the division into a few inline
 * multiplications which ought to be much faster. And yet only if compiling
 * with a sufficiently recent gcc version to perform proper 64-bit constant
 * propagation.
 *
 * (It is unfortunate that gcc doesn't perform all this internally.)
 */
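
/*
 * Illustrative sketch (editor's note, not from the original sources): this
 * is the classic multiply-by-reciprocal trick. For 32-bit values, e.g.,
 *
 *         n / 10  ==  (u32)(((u64)n * 0xCCCCCCCD) >> 35)
 *
 * because 0xCCCCCCCD is 2^35 / 10 rounded up. The macro below derives the
 * analogous 64-bit constants (m, p, bias) entirely at compile time.
 */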

#ifndef __div64_const32_is_OK
#define __div64_const32_is_OK (__GNUC__ >= 4)
#endif

#define __div64_const32(n, ___b) \
({ \
        /* \
         * Multiplication by reciprocal of b: n / b = n * (p / b) / p \
         * \
         * We rely on the fact that most of this code gets optimized \
         * away at compile time due to constant propagation and only \
         * a few multiplication instructions should remain. \
         * Hence this monstrous macro (static inline doesn't always \
         * do the trick here). \
         */ \
        u64 ___res, ___x, ___t, ___m, ___n = (n); \
        u32 ___p, ___bias; \
        \
        /* determine MSB of b */ \
        ___p = 1 << ilog2(___b); \
        \
        /* compute m = ((p << 64) + b - 1) / b */ \
        ___m = (~0ULL / ___b) * ___p; \
        ___m += (((~0ULL % ___b + 1) * ___p) + ___b - 1) / ___b; \
        \
        /* one less than the dividend with highest result */ \
        ___x = ~0ULL / ___b * ___b - 1; \
        \
        /* test our ___m with res = m * x / (p << 64) */ \
        ___res = ((___m & 0xffffffff) * (___x & 0xffffffff)) >> 32; \
        ___t = ___res += (___m & 0xffffffff) * (___x >> 32); \
        ___res += (___x & 0xffffffff) * (___m >> 32); \
        ___t = (___res < ___t) ? (1ULL << 32) : 0; \
        ___res = (___res >> 32) + ___t; \
        ___res += (___m >> 32) * (___x >> 32); \
        ___res /= ___p; \
        \
        /* Now sanitize and optimize what we've got. */ \
        if (~0ULL % (___b / (___b & -___b)) == 0) { \
                /* special case, can be simplified to ... */ \
                ___n /= (___b & -___b); \
                ___m = ~0ULL / (___b / (___b & -___b)); \
                ___p = 1; \
                ___bias = 1; \
        } else if (___res != ___x / ___b) { \
                /* \
                 * We can't get away without a bias to compensate \
                 * for bit truncation errors. To avoid it we'd need an \
                 * additional bit to represent m which would overflow \
                 * a 64-bit variable. \
                 * \
                 * Instead we do m = p / b and n / b = (n * m + m) / p. \
                 */ \
                ___bias = 1; \
                /* Compute m = (p << 64) / b */ \
                ___m = (~0ULL / ___b) * ___p; \
                ___m += ((~0ULL % ___b + 1) * ___p) / ___b; \
        } else { \
                /* \
                 * Reduce m / p, and try to clear bit 31 of m when \
                 * possible, otherwise that'll need extra overflow \
                 * handling later. \
                 */ \
                u32 ___bits = -(___m & -___m); \
                ___bits |= ___m >> 32; \
                ___bits = (~___bits) << 1; \
                /* \
                 * If ___bits == 0 then setting bit 31 is unavoidable. \
                 * Simply apply the maximum possible reduction in that \
                 * case. Otherwise the MSB of ___bits indicates the \
                 * best reduction we should apply. \
                 */ \
                if (!___bits) { \
                        ___p /= (___m & -___m); \
                        ___m /= (___m & -___m); \
                } else { \
                        ___p >>= ilog2(___bits); \
                        ___m >>= ilog2(___bits); \
                } \
                /* No bias needed. */ \
                ___bias = 0; \
        } \
        \
        /* \
         * Now we have a combination of 2 conditions: \
         * \
         * 1) whether or not we need to apply a bias, and \
         * \
         * 2) whether or not there might be an overflow in the cross \
         *    product determined by (___m & ((1 << 63) | (1 << 31))). \
         * \
         * Select the best way to do (m_bias + m * n) / (1 << 64). \
         * From now on there will be actual runtime code generated. \
         */ \
        ___res = __arch_xprod_64(___m, ___n, ___bias); \
        \
        ___res /= ___p; \
})
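
/*
 * Illustrative note (editor's sketch, not from the original sources): for a
 * constant divisor such as 3, ~0ULL % 3 == 0, so the "special case" branch
 * above applies (___m = ~0ULL / 3, ___p = 1, ___bias = 1) and the macro
 * should collapse at compile time to roughly
 *
 *         ___res = __arch_xprod_64(0x5555555555555555ULL, ___n, 1);
 *
 * i.e. a single biased 64 x 64 multiply keeping the high 64 bits.
 */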

#ifndef __arch_xprod_64
/*
 * Default C implementation for __arch_xprod_64()
 *
 * Prototype: u64 __arch_xprod_64(const u64 m, u64 n, bool bias)
 * Semantic:  retval = ((bias ? m : 0) + m * n) >> 64
 *
 * The product is a 128-bit value, scaled down to 64 bits.
 * Assuming constant propagation to optimize away unused conditional code.
 * Architectures may provide their own optimized assembly implementation.
 */
static inline u64 __arch_xprod_64(const u64 m, u64 n, bool bias)
{
        u32 m_lo = m;
        u32 m_hi = m >> 32;
        u32 n_lo = n;
        u32 n_hi = n >> 32;
        u64 res, tmp;

        if (!bias) {
                res = ((u64)m_lo * n_lo) >> 32;
        } else if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
                /* there can't be any overflow here */
                res = (m + (u64)m_lo * n_lo) >> 32;
        } else {
                res = m + (u64)m_lo * n_lo;
                tmp = (res < m) ? (1ULL << 32) : 0;
                res = (res >> 32) + tmp;
        }

        if (!(m & ((1ULL << 63) | (1ULL << 31)))) {
                /* there can't be any overflow here */
                res += (u64)m_lo * n_hi;
                res += (u64)m_hi * n_lo;
                res >>= 32;
        } else {
                tmp = res += (u64)m_lo * n_hi;
                res += (u64)m_hi * n_lo;
                tmp = (res < tmp) ? (1ULL << 32) : 0;
                res = (res >> 32) + tmp;
        }

        res += (u64)m_hi * n_hi;

        return res;
}
#endif
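
/*
 * Illustrative sanity check (editor's sketch, not from the original sources),
 * matching the semantics described above:
 *
 *         __arch_xprod_64(1ULL << 63, 4, false) == 2
 *
 * since ((1ULL << 63) * 4) >> 64 == 2.
 */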

#ifndef __div64_32
extern u32 __div64_32(u64 *dividend, u32 divisor);
#endif

/* The unnecessary pointer compare is there
 * to check for type safety (n must be 64bit)
 */
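
/*
 * Illustrative note (editor's sketch, not from the original sources): if n
 * is only 32 bits wide, e.g.
 *
 *         u32 x = 100;
 *         do_div(x, 7);
 *
 * then the (typeof((n)) *)0 == (u64 *)0 comparison below mixes distinct
 * pointer types and the compiler emits a warning, flagging the misuse.
 */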
# define do_div(n,base) ({ \
        u32 __base = (base); \
        u32 __rem; \
        (void)(((typeof((n)) *)0) == ((u64 *)0)); \
        if (__builtin_constant_p(__base) && \
            is_power_of_2(__base)) { \
                __rem = (n) & (__base - 1); \
                (n) >>= ilog2(__base); \
        } else if (__div64_const32_is_OK && \
                   __builtin_constant_p(__base) && \
                   __base != 0) { \
                u32 __res_lo, __n_lo = (n); \
                (n) = __div64_const32(n, __base); \
                /* the remainder can be computed with 32-bit regs */ \
                __res_lo = (n); \
                __rem = __n_lo - __res_lo * __base; \
        } else if (likely(((n) >> 32) == 0)) { \
                __rem = (u32)(n) % __base; \
                (n) = (u32)(n) / __base; \
        } else \
                __rem = __div64_32(&(n), __base); \
        __rem; \
 })

#else /* BITS_PER_LONG == ?? */

# error do_div() does not yet support the C64

#endif /* BITS_PER_LONG */

/* Wrapper for do_div(). Doesn't modify the dividend and returns
 * the result, not the remainder.
 */
static inline u64 lldiv(u64 dividend, u32 divisor)
{
        u64 __res = dividend;
        do_div(__res, divisor);
        return __res;
}
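
/*
 * Illustrative usage (editor's sketch, not from the original sources):
 *
 *         lldiv(0x100000000ULL, 3) == 0x55555555ULL
 *
 * i.e. the quotient is returned and the dividend argument is left untouched,
 * unlike do_div() which modifies its first argument in place.
 */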

#endif /* _ASM_GENERIC_DIV64_H */