/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __VDSO_MATH64_H
#define __VDSO_MATH64_H
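
/*
 * Iterative u64/u32 division by repeated subtraction: returns the
 * quotient and stores the remainder through @remainder. Only sensible
 * when the quotient is known to be small, since each unit of quotient
 * costs one loop iteration.
 */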
static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 ret = 0;

	while (dividend >= divisor) {
		/* The following asm() prevents the compiler from
		   optimising this loop into a modulo operation.  */
		asm("" : "+rm"(dividend));

		dividend -= divisor;
		ret++;
	}

	*remainder = dividend;

	return ret;
}
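
/*
 * Usage sketch (illustrative only; NSEC_PER_SEC is assumed to be in
 * scope at the call site):
 *
 *	u64 rem;
 *	u32 secs = __iter_div_u64_rem(ns, NSEC_PER_SEC, &rem);
 *
 * splits a nanosecond count into whole seconds plus leftover
 * nanoseconds, the typical case where the dividend only slightly
 * exceeds the divisor.
 */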

#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
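
/*
 * mul_u64_u32_add_u64_shr() computes (a * mul + b) >> shift. With native
 * 128-bit support the whole expression fits in an unsigned __int128, so
 * a single widening multiply and add suffice.
 */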
#ifndef mul_u64_u32_add_u64_shr
static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
{
	return (u64)((((unsigned __int128)a * mul) + b) >> shift);
}
#endif /* mul_u64_u32_add_u64_shr */

#else
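
/*
 * Without 128-bit arithmetic, split a into 32-bit halves so that
 *
 *	a * mul + b = ((ah * mul) << 32) + al * mul + b
 *
 * The low product plus b is shifted right directly; a carry out of that
 * 64-bit addition contributes 1 << (64 - shift). The high product is
 * shifted left by (32 - shift), which assumes shift <= 32 whenever the
 * high half of a is non-zero.
 */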
#ifndef mul_u64_u32_add_u64_shr
#ifndef mul_u32_u32
static inline u64 mul_u32_u32(u32 a, u32 b)
{
	return (u64)a * b;
}
#define mul_u32_u32 mul_u32_u32
#endif

static __always_inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
{
	u32 ah = a >> 32, al = a;
	bool ovf;
	u64 ret;

	ovf = __builtin_add_overflow(mul_u32_u32(al, mul), b, &ret);
	ret >>= shift;
	if (ovf && shift)
		ret += 1ULL << (64 - shift);
	if (ah)
		ret += mul_u32_u32(ah, mul) << (32 - shift);

	return ret;
}
#endif /* mul_u64_u32_add_u64_shr */

#endif
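
/*
 * Typical use (names are illustrative, not defined in this header):
 * converting a cycle delta to nanoseconds with a fixed-point multiplier,
 *
 *	ns = mul_u64_u32_add_u64_shr(cycles, mult, base_ns, shift);
 *
 * i.e. the (cycles * mult) >> shift pattern with an additive term folded
 * into the same operation.
 */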

#endif /* __VDSO_MATH64_H */