mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
5bdcd510c2 ("x86/jump-labels: Macrofy inline assembly code to work around GCC inlining bugs")

As described in:

  77b0bf55bc ("kbuild/Makefile: Prepare for using macros in inline assembly code to work around asm() related GCC inlining bugs")

GCC's inlining heuristics are broken with common asm() patterns used in kernel code, resulting in the effective disabling of inlining.

The workaround is to set an assembly macro and call it from the inline assembly block - which is also a minor cleanup for the jump-label code.

As a result the code size is slightly increased, but inlining decisions are better:

      text     data     bss      dec     hex filename
  18163528 10226300 2957312 31347140 1de51c4 ./vmlinux before
  18163608 10227348 2957312 31348268 1de562c ./vmlinux after (+1128)

And functions such as intel_pstate_adjust_policy_max(), kvm_cpu_accept_dm_intr() and kvm_register_readl() are inlined.

Tested-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Nadav Amit <namit@vmware.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Kate Stewart <kstewart@linuxfoundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Philippe Ombredanne <pombredanne@nexb.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/20181005202718.229565-4-namit@vmware.com
Link: https://lore.kernel.org/lkml/20181003213100.189959-11-namit@vmware.com/T/#u
Signed-off-by: Ingo Molnar <mingo@kernel.org>
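As a standalone illustration of the workaround (hypothetical names; the kernel's actual plumbing assembles the macros separately, via the kbuild change referenced above, rather than relying on top-level asm() ordering), the pattern looks like this:

/* Define the assembler macro once; GAS expands it wherever it is named. */
asm(
"	.macro HYPOTHETICAL_NOP5\n"
"	.byte 0x0f, 0x1f, 0x44, 0x00, 0x00\n"	/* 5-byte atomic NOP */
"	.endm\n"
);

static inline void looks_tiny_to_gcc(void)
{
	/* From the inliner's point of view this asm() is one short line,
	 * so callers of looks_tiny_to_gcc() remain cheap to inline. */
	asm volatile("HYPOTHETICAL_NOP5");
}

GCC's inline-cost heuristic estimates the size of an asm() from the length of its string, so moving the multi-line expansion behind an assembler macro keeps the estimate, and thus the inlining decisions, sane.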
70 lines · 1.5 KiB · C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_JUMP_LABEL_H
#define _ASM_X86_JUMP_LABEL_H

/* Both the 5-byte NOP and the "jmp rel32" it is patched to are this size. */
#define JUMP_LABEL_NOP_SIZE 5

/* 5-byte NOPs that are a single instruction, so they can be patched atomically. */
#ifdef CONFIG_X86_64
# define STATIC_KEY_INIT_NOP P6_NOP5_ATOMIC
#else
# define STATIC_KEY_INIT_NOP GENERIC_NOP5_ATOMIC
#endif

#include <asm/asm.h>
#include <asm/nops.h>

#ifndef __ASSEMBLY__

#include <linux/stringify.h>
#include <linux/types.h>
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
	/* The asm body is one short macro invocation, which keeps GCC's
	 * inline-cost estimate low; the assembler does the expansion. */
	asm_volatile_goto("STATIC_BRANCH_NOP l_yes=\"%l[l_yes]\" key=\"%c0\" "
			  "branch=\"%c1\""
			: : "i" (key), "i" (branch) : : l_yes);
	return false;
l_yes:
	return true;
}
static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
	asm_volatile_goto("STATIC_BRANCH_JMP l_yes=\"%l[l_yes]\" key=\"%c0\" "
			  "branch=\"%c1\""
			: : "i" (key), "i" (branch) : : l_yes);

	return false;
l_yes:
	return true;
}
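For illustration, here is a self-contained userspace sketch of what the asm-goto pattern above boils down to (hypothetical names; x86 GCC or Clang with asm-goto support assumed; the kernel's runtime code patching is not shown):

#include <stdbool.h>
#include <stdio.h>

/* Emit a 5-byte NOP at the branch site and expose l_yes to the compiler.
 * In the kernel, jump-label patching later rewrites the NOP into
 * "jmp l_yes" when the static key is toggled; here the NOP stays a NOP,
 * so the function always returns false. */
static inline bool sketch_static_branch(void)
{
	asm goto("	.byte 0x0f, 0x1f, 0x44, 0x00, 0x00\n"	/* 5-byte NOP */
		 : : : : l_yes);
	return false;	/* fall-through path while the site is a NOP */
l_yes:
	return true;	/* reachable only once the NOP is patched to a jmp */
}

int main(void)
{
	printf("branch taken: %d\n", sketch_static_branch());	/* prints 0 */
	return 0;
}

In kernel code these primitives are reached through the static-key API (e.g. DEFINE_STATIC_KEY_FALSE() and static_branch_unlikely()), so a disabled key costs a single straight-line NOP on the fast path.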
#else /* __ASSEMBLY__ */
/*
 * Emit a 5-byte atomic NOP at the branch site and record it in
 * __jump_table so the runtime can patch it to a jmp later.
 */
.macro STATIC_BRANCH_NOP l_yes:req key:req branch:req
.Lstatic_branch_nop_\@:
	.byte STATIC_KEY_INIT_NOP
.Lstatic_branch_no_after_\@:
	.pushsection __jump_table, "aw"
	_ASM_ALIGN
	.long		.Lstatic_branch_nop_\@ - ., \l_yes - .
	_ASM_PTR	\key + \branch - .
	.popsection
.endm
/*
 * Emit an unconditional 5-byte jmp to \l_yes and record the site in
 * __jump_table so the runtime can patch it back to a NOP.
 */
.macro STATIC_BRANCH_JMP l_yes:req key:req branch:req
.Lstatic_branch_jmp_\@:
	.byte 0xe9		/* opcode of "jmp rel32" */
	.long \l_yes - .Lstatic_branch_jmp_after_\@
.Lstatic_branch_jmp_after_\@:
	.pushsection __jump_table, "aw"
	_ASM_ALIGN
	.long		.Lstatic_branch_jmp_\@ - ., \l_yes - .
	_ASM_PTR	\key + \branch - .
	.popsection
.endm
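For reference, each of these macros pushes one record into the __jump_table section; since the relative-references rework, the core jump-label code reads such records through struct jump_entry, which this era's include/linux/jump_label.h lays out roughly as sketched below (field comments are mine):

#include <stdint.h>

/* Sketch of one __jump_table record; all fields are self-relative, so
 * the table needs no relocation processing under KASLR. */
struct jump_entry_sketch {
	int32_t  code;		/* patch site:  .Lstatic_branch_*_\@ - . */
	int32_t  target;	/* jump target: \l_yes - .               */
	intptr_t key;		/* &static_key - .; bit 0 holds \branch  */
};

Because struct static_key is more than 1-byte aligned, bit 0 of the key field is free; "\key + \branch" stashes the branch default there, and the core code masks it off when recovering the key pointer.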
#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_JUMP_LABEL_H */