be5e610c0f
Introduce mul_u64_u32_shr() as proposed by Andy a while back; it allows
using 64x64->128 muls on 64bit archs and recent GCC which defines
__SIZEOF_INT128__ and __int128. (This new method will be used by the
scheduler.)

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: fweisbec@gmail.com
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/n/tip-hxjoeuzmrcaumR0uZwjpe2pv@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
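As a worked illustration of what the 128bit (or split 32x32) multiply buys,
using only the helper introduced here: with a = 1ULL << 40 and mul = 1U << 30
the full product is 2^70, which no u64 can hold, yet

	u64 res = mul_u64_u32_shr(1ULL << 40, 1U << 30, 32);	/* == 1ULL << 38 */

still returns the exact 2^38, either through the unsigned __int128 path or
through the two partial products in the 32bit fallback further down the file.
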
#ifndef _LINUX_MATH64_H
#define _LINUX_MATH64_H

#include <linux/types.h>
#include <asm/div64.h>

#if BITS_PER_LONG == 64

#define div64_long(x, y) div64_s64((x), (y))
#define div64_ul(x, y) div64_u64((x), (y))

/**
 * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
 *
 * This is commonly provided by 32bit archs to provide an optimized 64bit
 * divide.
 */
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

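/*
 * Example (illustrative): splitting a nanosecond count into whole
 * seconds and leftover nanoseconds; 'ns' is a hypothetical caller-side
 * u64 and NSEC_PER_SEC comes from <linux/time.h>:
 *
 *	u32 rem;
 *	u64 secs = div_u64_rem(ns, NSEC_PER_SEC, &rem);
 */
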
/**
 * div_s64_rem - signed 64bit divide with 32bit divisor with remainder
 */
static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64_rem - unsigned 64bit divide with 64bit divisor and remainder
 */
static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
{
	*remainder = dividend % divisor;
	return dividend / divisor;
}

/**
 * div64_u64 - unsigned 64bit divide with 64bit divisor
 */
static inline u64 div64_u64(u64 dividend, u64 divisor)
{
	return dividend / divisor;
}

/**
 * div64_s64 - signed 64bit divide with 64bit divisor
 */
static inline s64 div64_s64(s64 dividend, s64 divisor)
{
	return dividend / divisor;
}

#elif BITS_PER_LONG == 32

#define div64_long(x, y) div_s64((x), (y))
#define div64_ul(x, y) div_u64((x), (y))

#ifndef div_u64_rem
static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
{
	*remainder = do_div(dividend, divisor);
	return dividend;
}
#endif

#ifndef div_s64_rem
extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
#endif

#ifndef div64_u64_rem
extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
#endif

#ifndef div64_u64
extern u64 div64_u64(u64 dividend, u64 divisor);
#endif

#ifndef div64_s64
extern s64 div64_s64(s64 dividend, s64 divisor);
#endif

#endif /* BITS_PER_LONG */

/**
 * div_u64 - unsigned 64bit divide with 32bit divisor
 *
 * This is the most common 64bit divide and should be used if possible,
 * as many 32bit archs can optimize this variant better than a full 64bit
 * divide.
 */
#ifndef div_u64
static inline u64 div_u64(u64 dividend, u32 divisor)
{
	u32 remainder;
	return div_u64_rem(dividend, divisor, &remainder);
}
#endif

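/*
 * Example (illustrative): converting a hypothetical nanosecond count
 * 'ns' into microseconds with a 32bit divisor:
 *
 *	u64 us = div_u64(ns, 1000);
 *
 * On 32bit architectures this avoids the generic 64-by-64 library
 * divide that a plain 'ns / 1000' could otherwise fall back to.
 */
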
/**
 * div_s64 - signed 64bit divide with 32bit divisor
 */
#ifndef div_s64
static inline s64 div_s64(s64 dividend, s32 divisor)
{
	s32 remainder;
	return div_s64_rem(dividend, divisor, &remainder);
}
#endif

u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);

static __always_inline u32
__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
{
	u32 ret = 0;

	while (dividend >= divisor) {
		/* The following asm() prevents the compiler from
		   optimising this loop into a modulo operation. */
		asm("" : "+rm"(dividend));

		dividend -= divisor;
		ret++;
	}

	*remainder = dividend;

	return ret;
}

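/*
 * Example (illustrative): the subtract-and-count loop above is only a
 * win when the quotient is known to be very small, e.g. when folding a
 * nanosecond overshoot of at most a few seconds back into a second
 * count; 'sec' and 'ns' are hypothetical caller-side values:
 *
 *	u64 ns_left;
 *	sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns_left);
 *	ns = ns_left;
 */
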
#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	return (u64)(((unsigned __int128)a * mul) >> shift);
}
#endif /* mul_u64_u32_shr */

#else

#ifndef mul_u64_u32_shr
static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
{
	u32 ah, al;
	u64 ret;

	al = a;
	ah = a >> 32;

	ret = ((u64)al * mul) >> shift;
	if (ah)
		ret += ((u64)ah * mul) << (32 - shift);

	return ret;
}
#endif /* mul_u64_u32_shr */

#endif

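/*
 * Example (illustrative): treating 'scale_q32' as a hypothetical Q32
 * fixed-point factor, the call below computes delta * scale_q32 / 2^32
 * without overflowing, even though the raw 64x32 product can be up to
 * 96 bits wide:
 *
 *	u64 scaled = mul_u64_u32_shr(delta, scale_q32, 32);
 */
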
#endif /* _LINUX_MATH64_H */