d14edb1648
With this config:

  http://busybox.net/~vda/kernel_config_OPTIMIZE_INLINING_and_Os

gcc-4.7.2 generates many copies of these tiny functions:

__arch_hweight32 (35 copies):

  55                      push   %rbp
  e8 66 9b 4a 00          callq  __sw_hweight32
  48 89 e5                mov    %rsp,%rbp
  5d                      pop    %rbp
  c3                      retq

__arch_hweight64 (8 copies):

  55                      push   %rbp
  e8 5e c2 8a 00          callq  __sw_hweight64
  48 89 e5                mov    %rsp,%rbp
  5d                      pop    %rbp
  c3                      retq

See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66122

This patch fixes this via s/inline/__always_inline/.

To avoid touching the 32-bit case, where such a change was not tested to be a win, reformat __arch_hweight64() to have completely disjoint 64-bit and 32-bit implementations. IOW: select the 32-bit and 64-bit function bodies with a top-level #ifdef instead of having #ifdef / #else / #endif inside a single function body. Only the 64-bit __arch_hweight64() is __always_inline'd.

      text     data      bss       dec  filename
  86971120 17195912 36659200 140826232  vmlinux.before
  86970954 17195912 36659200 140826066  vmlinux

Signed-off-by: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Thomas Graf <tgraf@suug.ch>
Cc: linux-kernel@vger.kernel.org
Link: http://lkml.kernel.org/r/1438697716-28121-2-git-send-email-dvlasenk@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
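As background before the header itself, here is a minimal standalone sketch of the mechanism the patch relies on: a plain "inline" is only a hint under CONFIG_OPTIMIZE_INLINING and -Os, so gcc may emit a small out-of-line wrapper for every user, while the always_inline attribute (which the kernel spells __always_inline) forces the body to be inlined at each call site. The macro and function names below (force_inline, sw_hweight32, hint_hweight32, forced_hweight32) are illustrative and are not taken from the kernel tree.

/*
 * Illustrative userspace sketch, not kernel code: shows the difference
 * between the "inline" hint and a forced always_inline function.
 */
#include <stdio.h>

/* Standalone spelling of what the kernel's __always_inline expands to. */
#define force_inline inline __attribute__((__always_inline__))

/* Portable bit-counting fallback, standing in for __sw_hweight32(). */
static unsigned int sw_hweight32(unsigned int w)
{
        unsigned int res = 0;

        while (w) {
                res += w & 1;
                w >>= 1;
        }
        return res;
}

/* Under -Os gcc may keep this out of line, so each object file that uses
 * it can end up with a push/call/pop wrapper like the copies quoted above. */
static inline unsigned int hint_hweight32(unsigned int w)
{
        return sw_hweight32(w);
}

/* Must be inlined: no separate wrapper body is emitted for this function. */
static force_inline unsigned int forced_hweight32(unsigned int w)
{
        return sw_hweight32(w);
}

int main(void)
{
        printf("%u %u\n", hint_hweight32(0xffffu), forced_hweight32(0xffu));
        return 0;
}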
#ifndef _ASM_X86_HWEIGHT_H
#define _ASM_X86_HWEIGHT_H

#ifdef CONFIG_64BIT
/* popcnt %edi, %eax -- redundant REX prefix for alignment */
#define POPCNT32 ".byte 0xf3,0x40,0x0f,0xb8,0xc7"
/* popcnt %rdi, %rax */
#define POPCNT64 ".byte 0xf3,0x48,0x0f,0xb8,0xc7"
#define REG_IN "D"
#define REG_OUT "a"
#else
/* popcnt %eax, %eax */
#define POPCNT32 ".byte 0xf3,0x0f,0xb8,0xc0"
#define REG_IN "a"
#define REG_OUT "a"
#endif

/*
 * __sw_hweightXX are called from within the alternatives below
 * and callee-clobbered registers need to be taken care of. See
 * ARCH_HWEIGHT_CFLAGS in <arch/x86/Kconfig> for the respective
 * compiler switches.
 */
static __always_inline unsigned int __arch_hweight32(unsigned int w)
{
        unsigned int res = 0;

        asm (ALTERNATIVE("call __sw_hweight32", POPCNT32, X86_FEATURE_POPCNT)
                     : "="REG_OUT (res)
                     : REG_IN (w));

        return res;
}

static inline unsigned int __arch_hweight16(unsigned int w)
{
        return __arch_hweight32(w & 0xffff);
}

static inline unsigned int __arch_hweight8(unsigned int w)
{
        return __arch_hweight32(w & 0xff);
}

#ifdef CONFIG_X86_32
static inline unsigned long __arch_hweight64(__u64 w)
{
        return __arch_hweight32((u32)w) +
               __arch_hweight32((u32)(w >> 32));
}
#else
static __always_inline unsigned long __arch_hweight64(__u64 w)
{
        unsigned long res = 0;

        asm (ALTERNATIVE("call __sw_hweight64", POPCNT64, X86_FEATURE_POPCNT)
                     : "="REG_OUT (res)
                     : REG_IN (w));

        return res;
}
#endif /* CONFIG_X86_32 */

#endif
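For context on how these helpers are reached: kernel code does not normally call __arch_hweight32()/__arch_hweight64() directly, but uses the generic hweight32()/hweight64() wrappers from <linux/bitops.h>. The following is a hedged sketch of a typical caller, assuming the usual dispatch in <asm-generic/bitops/const_hweight.h> (which falls back to the __arch_* routines for non-constant arguments); the function and parameter names are hypothetical.

/* Hypothetical caller; kernel build context assumed. */
#include <linux/bitops.h>
#include <linux/types.h>

unsigned int count_enabled_lanes(u32 lane_mask, u64 feature_bits)
{
        /* For non-constant arguments these expand to the POPCNT /
         * "call __sw_hweight*" alternatives defined in this header. */
        return hweight32(lane_mask) + hweight64(feature_bits);
}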