mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-11-30 23:36:44 +07:00
65c1055355
Because hash_64() is called from get_kprobe() inside the int3 handler, the kernel recurses on int3 and crashes if a kprobes user puts a probe on it. Usually hash_64() is inlined into its caller, but in some cases gcc emits out-of-line instances of it via interprocedural constant propagation. This patch uses __always_inline instead of inline to prevent gcc from doing that.

Reported-by: Timo Juhani Lindfors <timo.lindfors@iki.fi>
Signed-off-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Acked-by: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Nadia Yvette Chambers <nyc@holomorphy.com>
Cc: yrl.pp-manager.tt@hitachi.com
Cc: David S. Miller <davem@davemloft.net>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/20130314115230.19690.39387.stgit@mhiramat-M0-7522
Signed-off-by: Ingo Molnar <mingo@kernel.org>
82 lines
2.0 KiB
C
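For context on the fix: to my understanding, the kernel's __always_inline (from <linux/compiler.h>) expands to inline plus gcc's always_inline attribute, which forces inlining at every call site, so no out-of-line copy (such as a .constprop clone produced by interprocedural constant propagation) exists for a kprobe to land on. Below is a minimal userspace sketch of that idea; the macro definition mirrors the kernel's, but hash64_demo and caller are illustrative names, not kernel code.

/* Sketch: why always_inline prevents probe-able out-of-line copies.
 * Build with: gcc -O2 -c demo.c; then "nm demo.o" should show no
 * standalone hash64_demo symbol, only caller.
 */
#include <stdint.h>

/* Mirrors the kernel's definition in <linux/compiler.h>. */
#define __always_inline inline __attribute__((always_inline))

static __always_inline uint64_t hash64_demo(uint64_t val, unsigned int bits)
{
	/* Illustrative body: a plain multiplicative hash. */
	return (val * 0x9e37fffffffc0001ULL) >> (64 - bits);
}

uint64_t caller(uint64_t v)
{
	/* Must be inlined here; nothing remains for a probe to hit. */
	return hash64_demo(v, 6);
}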
#ifndef _LINUX_HASH_H
#define _LINUX_HASH_H
/* Fast hashing routine for ints, longs and pointers.
   (C) 2002 Nadia Yvette Chambers, IBM */

/*
 * Knuth recommends primes in approximately golden ratio to the maximum
 * integer representable by a machine word for multiplicative hashing.
 * Chuck Lever verified the effectiveness of this technique:
 * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
 *
 * These primes are chosen to be bit-sparse, that is operations on
 * them can use shifts and additions instead of multiplications for
 * machines where multiplications are slow.
 */

#include <asm/types.h>
#include <linux/compiler.h>

/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001UL
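A side note on these constants: 2^32 divided by the golden ratio is approximately 0x9e3779b9, which is why both primes begin with 0x9e37; the exact values are then chosen to be bit-sparse as the comments describe. The two decompositions above can be machine-checked; here is a standalone C11 snippet (file name and assertion messages are illustrative):

/* Verify the bit-sparse decompositions documented above.
 * Build: gcc -std=c11 -c golden_check.c
 */
#include <stdint.h>
#include <assert.h>

/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 == 0x9e370001 */
static_assert((1UL << 31) + (1UL << 29) - (1UL << 25) + (1UL << 22)
	      - (1UL << 19) - (1UL << 16) + 1 == 0x9e370001UL,
	      "GOLDEN_RATIO_PRIME_32 decomposition");

/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 == 0x9e37fffffffc0001 */
static_assert((1ULL << 63) + (1ULL << 61) - (1ULL << 57) + (1ULL << 54)
	      - (1ULL << 51) - (1ULL << 18) + 1 == 0x9e37fffffffc0001ULL,
	      "GOLDEN_RATIO_PRIME_64 decomposition");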
#if BITS_PER_LONG == 32
#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_32
#define hash_long(val, bits) hash_32(val, bits)
#elif BITS_PER_LONG == 64
#define hash_long(val, bits) hash_64(val, bits)
#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_PRIME_64
#else
#error Wordsize not 32 or 64
#endif

static __always_inline u64 hash_64(u64 val, unsigned int bits)
{
	u64 hash = val;

	/* Sigh, gcc can't optimise this alone like it does for 32 bits. */
	u64 n = hash;
	n <<= 18;
	hash -= n;
	n <<= 33;
	hash -= n;
	n <<= 3;
	hash += n;
	n <<= 3;
	hash -= n;
	n <<= 4;
	hash += n;
	n <<= 2;
	hash += n;

	/* High bits are more random, so use them. */
	return hash >> (64 - bits);
}
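The shift/add chain in hash_64() is a hand-strength-reduced multiply by GOLDEN_RATIO_PRIME_64: the cumulative shift counts come to 18, 51, 54, 57, 61 and 63, matching the exponents in the prime's decomposition, and the initial hash = val supplies the final + 1 term. A small userspace check (illustrative names, not kernel code) confirms the equivalence:

/* Check that hash_64()'s shift/add sequence equals a plain multiply
 * by 0x9e37fffffffc0001.  Build: gcc -O2 shift_check.c && ./a.out
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t by_shifts(uint64_t val)
{
	uint64_t hash = val, n = val;	/* hash starts as val: the +2^0 term */

	n <<= 18; hash -= n;		/* - 2^18 */
	n <<= 33; hash -= n;		/* - 2^51 */
	n <<= 3;  hash += n;		/* + 2^54 */
	n <<= 3;  hash -= n;		/* - 2^57 */
	n <<= 4;  hash += n;		/* + 2^61 */
	n <<= 2;  hash += n;		/* + 2^63 */
	return hash;
}

int main(void)
{
	uint64_t v = 0x123456789abcdef0ULL;

	/* Prints 1: both sides agree modulo 2^64. */
	printf("%d\n", by_shifts(v) == v * 0x9e37fffffffc0001ULL);
	return 0;
}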
static inline u32 hash_32(u32 val, unsigned int bits)
{
	/* On some cpus multiply is faster, on others gcc will do shifts */
	u32 hash = val * GOLDEN_RATIO_PRIME_32;

	/* High bits are more random, so use them. */
	return hash >> (32 - bits);
}

static inline unsigned long hash_ptr(const void *ptr, unsigned int bits)
{
	return hash_long((unsigned long)ptr, bits);
}

static inline u32 hash32_ptr(const void *ptr)
{
	unsigned long val = (unsigned long)ptr;

#if BITS_PER_LONG == 64
	val ^= (val >> 32);
#endif
	return (u32)val;
}

#endif /* _LINUX_HASH_H */
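Typical usage of this header: pick bits as the base-2 log of a power-of-two table size, and the hash's high bits select a bucket. A sketch of a hypothetical caller follows; the table name, size, and helper are illustrative, not from this file.

/* Hypothetical in-kernel caller: a 1024-bucket table indexed by pointer. */
#include <linux/hash.h>
#include <linux/types.h>

#define MY_HASH_BITS	10	/* illustrative: 1 << 10 == 1024 buckets */

static unsigned int my_counts[1 << MY_HASH_BITS];

static void my_count_ptr(const void *key)
{
	/* hash_ptr() returns a value in [0, 1 << MY_HASH_BITS). */
	my_counts[hash_ptr(key, MY_HASH_BITS)]++;
}

Because the hash keeps the high bits of the product, pointers that differ only in their low bits still spread across buckets via carry propagation, which is the practical payoff of the multiplicative scheme.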