/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_GENERIC_DIV64_H
#define _ASM_GENERIC_DIV64_H
/*
 * Copyright (C) 2003 Bernardo Innocenti <[email protected]>
 * Based on former asm-ppc/div64.h and asm-m68knommu/div64.h
 *
 * Optimization for constant divisors on 32-bit machines:
 * Copyright (C) 2006-2015 Nicolas Pitre
 *
 * The semantics of do_div(), expressed in C++ notation (the name is a
 * function-like macro and the n parameter behaves like a C++
 * reference):
 *
 * uint32_t do_div(uint64_t &n, uint32_t base)
 * {
 *      uint32_t remainder = n % base;
 *      n = n / base;
 *      return remainder;
 * }
 *
 * NOTE: macro parameter n is evaluated multiple times,
 *       beware of side effects!
 */

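/*
 * Typical use (a minimal sketch): convert nanoseconds to microseconds
 * while keeping the leftover nanoseconds:
 *
 *      uint64_t t = 1234567899;        // ns
 *      uint32_t rem = do_div(t, 1000); // t is now 1234567 us, rem is 899 ns
 */
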
#include <linux/types.h>
#include <linux/compiler.h>

#if BITS_PER_LONG == 64

/**
 * do_div - divide a 64-bit dividend in place and return the remainder
 * @n: uint64_t dividend (will be updated)
 * @base: uint32_t divisor
 *
 * Summary:
 * ``uint32_t remainder = n % base;``
 * ``n = n / base;``
 *
 * Return: (uint32_t)remainder
 *
 * NOTE: macro parameter @n is evaluated multiple times,
 * beware of side effects!
 */
# define do_div(n,base) ({                                      \
        uint32_t __base = (base);                               \
        uint32_t __rem;                                         \
        __rem = ((uint64_t)(n)) % __base;                       \
        (n) = ((uint64_t)(n)) / __base;                         \
        __rem;                                                  \
 })

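/*
 * Because @n is expanded several times, an argument with side effects
 * is a bug. A sketch of the pitfall (hypothetical variables):
 *
 *      do_div(buf[i++], 10);           // WRONG: i++ may run more than once
 *
 *      uint64_t tmp = buf[i++];        // OK: evaluate the side effect once
 *      do_div(tmp, 10);
 */
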
#elif BITS_PER_LONG == 32

#include <linux/log2.h>

/*
 * If the divisor happens to be constant, we determine the appropriate
 * inverse at compile time to turn the division into a few inline
 * multiplications which ought to be much faster.
 *
 * (It is unfortunate that gcc doesn't perform all this internally.)
 */

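/*
 * Worked example (informal): for b = 1000 the code below picks
 * p = 1 << ilog2(1000) = 512 and m = ((p << 64) + b - 1) / b, i.e.
 * ceil(2^73 / 1000), so that n / 1000 == ((m * n) >> 64) / p whenever
 * the self-test further down passes; divisors that fail it switch to
 * the biased variant m = (p << 64) / b.
 */
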
#define __div64_const32(n, ___b)                                        \
({                                                                      \
        /*                                                              \
         * Multiplication by reciprocal of b: n / b = n * (p / b) / p   \
         *                                                              \
         * We rely on the fact that most of this code gets optimized    \
         * away at compile time due to constant propagation and only    \
         * a few multiplication instructions should remain.             \
         * Hence this monstrous macro (static inline doesn't always     \
         * do the trick here).                                          \
         */                                                             \
        uint64_t ___res, ___x, ___t, ___m, ___n = (n);                  \
        uint32_t ___p;                                                  \
        bool ___bias = false;                                           \
                                                                        \
        /* determine MSB of b */                                        \
        ___p = 1 << ilog2(___b);                                        \
                                                                        \
        /* compute m = ((p << 64) + b - 1) / b */                       \
        ___m = (~0ULL / ___b) * ___p;                                   \
        ___m += (((~0ULL % ___b + 1) * ___p) + ___b - 1) / ___b;        \
                                                                        \
        /* one less than the dividend with highest result */            \
        ___x = ~0ULL / ___b * ___b - 1;                                 \
                                                                        \
        /* test our ___m with res = m * x / (p << 64) */                \
        ___res = (___m & 0xffffffff) * (___x & 0xffffffff);             \
        ___t = (___m & 0xffffffff) * (___x >> 32) + (___res >> 32);     \
        ___res = (___m >> 32) * (___x >> 32) + (___t >> 32);            \
        ___t = (___m >> 32) * (___x & 0xffffffff) + (___t & 0xffffffff);\
        ___res = (___res + (___t >> 32)) / ___p;                        \
                                                                        \
        /* Now validate what we've got. */                              \
        if (___res != ___x / ___b) {                                    \
                /*                                                      \
                 * We can't get away without a bias to compensate       \
                 * for bit truncation errors.  To avoid it we'd need an \
                 * additional bit to represent m which would overflow   \
                 * a 64-bit variable.                                   \
                 *                                                      \
                 * Instead we do m = p / b and n / b = (n * m + m) / p. \
                 */                                                     \
                ___bias = true;                                         \
                /* Compute m = (p << 64) / b */                         \
                ___m = (~0ULL / ___b) * ___p;                           \
                ___m += ((~0ULL % ___b + 1) * ___p) / ___b;             \
        }                                                               \
                                                                        \
        /* Reduce m / p to help avoid overflow handling later. */       \
        ___p /= (___m & -___m);                                         \
        ___m /= (___m & -___m);                                         \
                                                                        \
        /*                                                              \
         * Perform (m_bias + m * n) / (1 << 64).                        \
         * From now on there will be actual runtime code generated.     \
         */                                                             \
        ___res = __arch_xprod_64(___m, ___n, ___bias);                  \
                                                                        \
        ___res /= ___p;                                                 \
})

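/*
 * Host-side sketch (not part of the kernel build) mirroring the math
 * above for b = 1000, using unsigned __int128 so the 128-bit steps are
 * explicit. The function name is made up for illustration:
 *
 *      #include <stdint.h>
 *
 *      static uint64_t div_by_1000(uint64_t n)
 *      {
 *              const uint32_t b = 1000, p = 512;  // p = 1 << ilog2(b)
 *              // 1000 fails the self-test above, so the biased form
 *              // m = (p << 64) / b is the one the macro ends up with.
 *              uint64_t m = (uint64_t)(((unsigned __int128)p << 64) / b);
 *              // n / b = ((m * n + m) >> 64) / p
 *              return (uint64_t)(((unsigned __int128)m * n + m) >> 64) / p;
 *      }
 */
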
#ifndef __arch_xprod_64
/*
 * Default C implementation for __arch_xprod_64()
 *
 * Prototype: uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
 * Semantic:  retval = ((bias ? m : 0) + m * n) >> 64
 *
 * The product is a 128-bit value, scaled down to 64 bits.
 * Hoping for compile-time optimization of conditional code.
 * Architectures may provide their own optimized assembly implementation.
 */
#ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE
static __always_inline
#else
static inline
#endif
uint64_t __arch_xprod_64(const uint64_t m, uint64_t n, bool bias)
{
        uint32_t m_lo = m;
        uint32_t m_hi = m >> 32;
        uint32_t n_lo = n;
        uint32_t n_hi = n >> 32;
        uint64_t x, y;

        /* Determine if overflow handling can be dispensed with. */
        bool no_ovf = __builtin_constant_p(m) &&
                      ((m >> 32) + (m & 0xffffffff) < 0x100000000);

        if (no_ovf) {
                /*
                 * m_hi + m_lo < 2^32, so the running 64-bit sums below
                 * cannot wrap: just accumulate the four 32x32 partial
                 * products from least to most significant.
                 */
                x = (uint64_t)m_lo * n_lo + (bias ? m : 0);
                x >>= 32;
                x += (uint64_t)m_lo * n_hi;
                x += (uint64_t)m_hi * n_lo;
                x >>= 32;
                x += (uint64_t)m_hi * n_hi;
        } else {
                /*
                 * Generic path: split the 128-bit product into 32-bit
                 * limbs and propagate the carries explicitly.
                 */
                x = (uint64_t)m_lo * n_lo + (bias ? m_lo : 0);
                y = (uint64_t)m_lo * n_hi + (uint32_t)(x >> 32) + (bias ? m_hi : 0);
                x = (uint64_t)m_hi * n_hi + (uint32_t)(y >> 32);
                y = (uint64_t)m_hi * n_lo + (uint32_t)y;
                x += (uint32_t)(y >> 32);
        }

        return x;
}
#endif

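/*
 * A reference check for the identity implemented above (a sketch for a
 * 64-bit host with a compiler providing unsigned __int128; the helper
 * name is hypothetical):
 *
 *      #include <stdint.h>
 *
 *      static uint64_t xprod_64_ref(uint64_t m, uint64_t n, _Bool bias)
 *      {
 *              unsigned __int128 p = (unsigned __int128)m * n;
 *              return (uint64_t)((p + (bias ? m : 0)) >> 64);
 *      }
 *
 * For any m, n and bias, xprod_64_ref() should agree with the limb
 * arithmetic in __arch_xprod_64() above.
 */
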
#ifndef __div64_32
extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
#endif

/*
 * The otherwise unnecessary pointer compare below is only there
 * to enforce type safety at compile time (n must be 64-bit).
 */
# define do_div(n,base) ({                              \
        uint32_t __base = (base);                       \
        uint32_t __rem;                                 \
        (void)(((typeof((n)) *)0) == ((uint64_t *)0));  \
        if (__builtin_constant_p(__base) &&             \
            is_power_of_2(__base)) {                    \
                __rem = (n) & (__base - 1);             \
                (n) >>= ilog2(__base);                  \
        } else if (__builtin_constant_p(__base) &&      \
                   __base != 0) {                       \
                uint32_t __res_lo, __n_lo = (n);        \
                (n) = __div64_const32(n, __base);       \
                /* the remainder can be computed with 32-bit regs */ \
                __res_lo = (n);                         \
                __rem = __n_lo - __res_lo * __base;     \
        } else if (likely(((n) >> 32) == 0)) {          \
                __rem = (uint32_t)(n) % __base;         \
                (n) = (uint32_t)(n) / __base;           \
        } else {                                        \
                __rem = __div64_32(&(n), __base);       \
        }                                               \
        __rem;                                          \
 })

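/*
 * Branch selection sketch (illustrative): a constant power-of-2
 * divisor reduces to a mask and a shift, e.g.
 *
 *      uint64_t x = 12345;
 *      uint32_t r = do_div(x, 16);     // r = 9, x = 771
 *
 * A constant non-power-of-2 divisor takes the __div64_const32() path,
 * and a divisor known only at run time uses a single 32-bit division
 * when the dividend fits in 32 bits, or falls back to __div64_32().
 */
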
#else /* BITS_PER_LONG == ?? */

# error do_div() does not yet support the C64

#endif /* BITS_PER_LONG */

#endif /* _ASM_GENERIC_DIV64_H */