Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
Synced 2024-12-21 20:07:13 +07:00
c3d6324f84
In preparation for static_call and variable size jump_label support, teach
text_poke_bp() to emulate instructions, namely:

  JMP32, JMP8, CALL, NOP2, NOP_ATOMIC5, INT3

The current text_poke_bp() takes a @handler argument which is used as a
jump target when the temporary INT3 is hit by a different CPU. When
patching CALL instructions, this doesn't work because we'd miss the PUSH
of the return address. Instead, teach poke_int3_handler() to emulate an
instruction, typically the instruction we're patching in.

This fits almost all text_poke_bp() users, except arch_unoptimize_kprobe()
which restores random text, and for that site we have to build an explicit
emulate instruction.

Tested-by: Alexei Starovoitov <ast@kernel.org>
Tested-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Masami Hiramatsu <mhiramat@kernel.org>
Reviewed-by: Daniel Bristot de Oliveira <bristot@redhat.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20191111132457.529086974@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
(cherry picked from commit 8c7eebc10687af45ac8e40ad1bac0cf7893dba9f)
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
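For context, a minimal sketch of the two calling styles this change enables, inferred from the calls visible in this file rather than quoted from the header; addr, new_insn, insn_len and emulate_insn are illustrative placeholders:

    /* NULL @emulate: poke_int3_handler() emulates the instruction being written */
    text_poke_bp(addr, new_insn, JUMP_LABEL_NOP_SIZE, NULL);

    /* explicit @emulate: for sites like arch_unoptimize_kprobe() that restore random text */
    text_poke_bp(addr, new_insn, insn_len, emulate_insn);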
198 lines
5.2 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * jump label x86 support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 *
 */
#include <linux/jump_label.h>
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/cpu.h>
#include <asm/kprobes.h>
#include <asm/alternative.h>
#include <asm/text-patching.h>

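/*
 * A jump-label site is JUMP_LABEL_NOP_SIZE (5) bytes of text: either a
 * 5-byte NOP or a JMP32 (opcode 0xe9 followed by a 32-bit relative
 * offset). The union overlays the raw bytes with that opcode/offset
 * layout; packing keeps the struct at exactly 5 bytes.
 */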
union jump_code_union {
	char code[JUMP_LABEL_NOP_SIZE];
	struct {
		char jump;
		int offset;
	} __attribute__((packed));
};

static void bug_at(unsigned char *ip, int line)
{
	/*
	 * The location is not an op that we were expecting.
	 * Something went wrong. Crash the box, as something could be
	 * corrupting the kernel.
	 */
	pr_crit("jump_label: Fatal kernel bug, unexpected op at %pS [%p] (%5ph) %d\n", ip, ip, ip, line);
	BUG();
}

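/*
 * Build the JMP32 for @entry in @code, verify that the bytes currently at
 * the site match what this transition expects to find (crashing via
 * bug_at() otherwise), and finally substitute the ideal NOP when the
 * requested type is JUMP_LABEL_NOP.
 */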
static void __jump_label_set_jump_code(struct jump_entry *entry,
				       enum jump_label_type type,
				       union jump_code_union *code,
				       int init)
{
	const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
	const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
	const void *expect;
	int line;

	code->jump = 0xe9;
	code->offset = jump_entry_target(entry) -
		       (jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE);

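	/*
	 * Pick the 5 bytes that must currently be at the site: the
	 * compile-time default NOP (init), the ideal NOP (nop -> jump),
	 * or the JMP32 just built above (jump -> nop). __LINE__ is
	 * recorded so bug_at() can report which expectation failed.
	 */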
	if (init) {
		expect = default_nop; line = __LINE__;
	} else if (type == JUMP_LABEL_JMP) {
		expect = ideal_nop; line = __LINE__;
	} else {
		expect = code->code; line = __LINE__;
	}

	if (memcmp((void *)jump_entry_code(entry), expect, JUMP_LABEL_NOP_SIZE))
		bug_at((void *)jump_entry_code(entry), line);

	if (type == JUMP_LABEL_NOP)
		memcpy(code, ideal_nop, JUMP_LABEL_NOP_SIZE);
}

static void __ref __jump_label_transform(struct jump_entry *entry,
					 enum jump_label_type type,
					 int init)
{
	union jump_code_union code;

	__jump_label_set_jump_code(entry, type, &code, init);

	/*
	 * As long as only a single processor is running and the code is still
	 * not marked read-only, text_poke_early() can be used; checking that
	 * system_state is SYSTEM_BOOTING guarantees it. system_state is set to
	 * SYSTEM_SCHEDULING before other cores are woken and before the
	 * code is write-protected.
	 *
	 * While the change is being applied, simply ignore whether we are
	 * doing a nop -> jump or a jump -> nop transition, and assume the
	 * nop is always the 'currently valid' instruction.
	 */
	if (init || system_state == SYSTEM_BOOTING) {
		text_poke_early((void *)jump_entry_code(entry), &code,
				JUMP_LABEL_NOP_SIZE);
		return;
	}

	text_poke_bp((void *)jump_entry_code(entry), &code, JUMP_LABEL_NOP_SIZE, NULL);
}

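/*
 * Single-site transform: patches one jump entry under text_mutex. The
 * batching variants below amortize the text_poke_bp() INT3/sync dance
 * across many entries.
 */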
void arch_jump_label_transform(struct jump_entry *entry,
			       enum jump_label_type type)
{
	mutex_lock(&text_mutex);
	__jump_label_transform(entry, type, 0);
	mutex_unlock(&text_mutex);
}

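/*
 * Batching queue: up to one page worth of text_poke_loc entries is
 * staged in tp_vec and flushed with a single text_poke_bp_batch() call.
 */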
#define TP_VEC_MAX (PAGE_SIZE / sizeof(struct text_poke_loc))
static struct text_poke_loc tp_vec[TP_VEC_MAX];
static int tp_vec_nr;

bool arch_jump_label_transform_queue(struct jump_entry *entry,
				     enum jump_label_type type)
{
	struct text_poke_loc *tp;
	void *entry_code;

	if (system_state == SYSTEM_BOOTING) {
		/*
		 * Fall back to the non-batching mode.
		 */
		arch_jump_label_transform(entry, type);
		return true;
	}

	/*
	 * No more space in the vector, tell the upper layer to apply
	 * the queue before continuing.
	 */
	if (tp_vec_nr == TP_VEC_MAX)
		return false;

	tp = &tp_vec[tp_vec_nr];

	entry_code = (void *)jump_entry_code(entry);

	/*
	 * The INT3 handler will do a bsearch in the queue, so we need entries
	 * to be sorted. We can survive an unsorted list by rejecting the
	 * entry, forcing the generic jump_label code to apply the queue. Warn
	 * once to draw attention to the unsorted-entry case, which had better
	 * not happen: in the worst case we merely perform as we would without
	 * batching, with some extra overhead.
	 */
	if (tp_vec_nr > 0) {
		int prev = tp_vec_nr - 1;
		struct text_poke_loc *prev_tp = &tp_vec[prev];

		if (WARN_ON_ONCE(prev_tp->addr > entry_code))
			return false;
	}

	__jump_label_set_jump_code(entry, type,
				   (union jump_code_union *)&tp->text, 0);

	text_poke_loc_init(tp, entry_code, NULL, JUMP_LABEL_NOP_SIZE, NULL);

	tp_vec_nr++;

	return true;
}

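/*
 * Sketch of the expected caller protocol (how the generic jump_label
 * code is assumed to drive the queue; see kernel/jump_label.c for the
 * authoritative loop):
 *
 *	if (!arch_jump_label_transform_queue(entry, type)) {
 *		arch_jump_label_transform_apply();	// flush the full vector
 *		arch_jump_label_transform_queue(entry, type); // retry, fits now
 *	}
 */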
void arch_jump_label_transform_apply(void)
{
	if (!tp_vec_nr)
		return;

	mutex_lock(&text_mutex);
	text_poke_bp_batch(tp_vec, tp_vec_nr);
	mutex_unlock(&text_mutex);

	tp_vec_nr = 0;
}

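/*
 * Tri-state, decided once: whether the compile-time default NOP differs
 * from the runtime ideal NOP and therefore needs rewriting at each site.
 */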
static enum {
	JL_STATE_START,
	JL_STATE_NO_UPDATE,
	JL_STATE_UPDATE,
} jlstate __initdata_or_module = JL_STATE_START;

__init_or_module void arch_jump_label_transform_static(struct jump_entry *entry,
						       enum jump_label_type type)
{
	/*
	 * This function is called at boot up and when modules are
	 * first loaded. Check if the default nop, the one that is
	 * inserted at compile time, is the ideal nop. If it is, then
	 * we do not need to update the nop, and we can leave it as is.
	 * If it is not, then we need to update the nop to the ideal nop.
	 */
	if (jlstate == JL_STATE_START) {
		const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
		const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];

		if (memcmp(ideal_nop, default_nop, 5) != 0)
			jlstate = JL_STATE_UPDATE;
		else
			jlstate = JL_STATE_NO_UPDATE;
	}
	if (jlstate == JL_STATE_UPDATE)
		__jump_label_transform(entry, type, 1);
}