commit 442f04c34a
This adds a host tool named objtool which has a "check" subcommand which
analyzes .o files to ensure the validity of stack metadata.  It enforces
a set of rules on asm code and C inline assembly code so that stack
traces can be reliable.

For each function, it recursively follows all possible code paths and
validates the correct frame pointer state at each instruction.  It also
follows code paths involving kernel special sections, like
.altinstructions, __jump_table, and __ex_table, which can add
alternative execution paths to a given instruction (or set of
instructions).  Similarly, it knows how to follow switch statements, for
which gcc sometimes uses jump tables.

Here are some of the benefits of validating stack metadata:

a) More reliable stack traces for frame pointer enabled kernels

   Frame pointers are used for debugging purposes.  They allow runtime
   code and debug tools to walk the stack to determine the chain of
   function call sites that led to the currently executing code.

   For some architectures, frame pointers are enabled by
   CONFIG_FRAME_POINTER.  For some other architectures they may be
   required by the ABI (sometimes referred to as "backchain pointers").

   For C code, gcc automatically generates instructions for setting up
   frame pointers when the -fno-omit-frame-pointer option is used.  But
   for asm code, the frame setup instructions have to be written by
   hand, which most people don't do.  So the end result is that
   CONFIG_FRAME_POINTER is honored for C code but not for most asm code.

   For stack traces based on frame pointers to be reliable, all
   functions which call other functions must first create a stack frame
   and update the frame pointer.  If a first function doesn't properly
   create a stack frame before calling a second function, the *caller*
   of the first function will be skipped on the stack trace.

   For example, consider the following backtrace with frame pointers
   enabled:

     [<ffffffff81812584>] dump_stack+0x4b/0x63
     [<ffffffff812d6dc2>] cmdline_proc_show+0x12/0x30
     [<ffffffff8127f568>] seq_read+0x108/0x3e0
     [<ffffffff812cce62>] proc_reg_read+0x42/0x70
     [<ffffffff81256197>] __vfs_read+0x37/0x100
     [<ffffffff81256b16>] vfs_read+0x86/0x130
     [<ffffffff81257898>] SyS_read+0x58/0xd0
     [<ffffffff8181c1f2>] entry_SYSCALL_64_fastpath+0x12/0x76

   It correctly shows that the caller of cmdline_proc_show() is
   seq_read().

   If we remove the frame pointer logic from cmdline_proc_show() by
   replacing the frame pointer related instructions with nops, here's
   what it looks like instead:

     [<ffffffff81812584>] dump_stack+0x4b/0x63
     [<ffffffff812d6dc2>] cmdline_proc_show+0x12/0x30
     [<ffffffff812cce62>] proc_reg_read+0x42/0x70
     [<ffffffff81256197>] __vfs_read+0x37/0x100
     [<ffffffff81256b16>] vfs_read+0x86/0x130
     [<ffffffff81257898>] SyS_read+0x58/0xd0
     [<ffffffff8181c1f2>] entry_SYSCALL_64_fastpath+0x12/0x76

   Notice that cmdline_proc_show()'s caller, seq_read(), has been
   skipped.  Instead the stack trace seems to show that
   cmdline_proc_show() was called by proc_reg_read().

   The benefit of "objtool check" here is that because it ensures that
   *all* functions honor CONFIG_FRAME_POINTER, no functions will ever[*]
   be skipped on a stack trace.

   [*] unless an interrupt or exception has occurred at the very
       beginning of a function before the stack frame has been created,
       or at the very end of the function after the stack frame has been
       destroyed.  This is an inherent limitation of frame pointers.
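   To make the failure mode concrete, here is an illustrative sketch of
   how a frame pointer based unwinder walks the stack on x86_64.  It is
   not objtool code and the function names are hypothetical, but the
   two-word record layout mirrors the kernel's struct stack_frame:

     #include <stdio.h>

     /*
      * Each saved frame pointer (rbp) points at a two-word record: the
      * previous frame pointer and the owning function's return address.
      */
     struct stack_frame {
             struct stack_frame *next_frame;
             unsigned long return_address;
     };

     static void walk_stack(struct stack_frame *frame)
     {
             while (frame) {
                     printf("[<%016lx>]\n", frame->return_address);
                     /*
                      * A function which skips the frame pointer prologue
                      * never creates its record, so the return address
                      * into *its* caller (seq_read() above) is never
                      * printed: the caller is skipped.
                      */
                     frame = frame->next_frame;
             }
     }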
b) 100% reliable stack traces for DWARF enabled kernels

   This is not yet implemented.  For more details about what is
   planned, see tools/objtool/Documentation/stack-validation.txt.

c) Higher live patching compatibility rate

   This is not yet implemented.  For more details about what is
   planned, see tools/objtool/Documentation/stack-validation.txt.

To achieve the validation, "objtool check" enforces the following rules:

1. Each callable function must be annotated as such with the ELF
   function type.  In asm code, this is typically done using the
   ENTRY/ENDPROC macros.  If objtool finds a return instruction outside
   of a function, it flags an error since that usually indicates
   callable code which should be annotated accordingly.

   This rule is needed so that objtool can properly identify each
   callable function in order to analyze its stack metadata.

2. Conversely, each section of code which is *not* callable should *not*
   be annotated as an ELF function.  The ENDPROC macro shouldn't be used
   in this case.

   This rule is needed so that objtool can ignore non-callable code.
   Such code doesn't have to follow any of the other rules.

3. Each callable function which calls another function must have the
   correct frame pointer logic, if required by CONFIG_FRAME_POINTER or
   the architecture's back chain rules.  This can be done in asm code
   with the FRAME_BEGIN/FRAME_END macros.

   This rule ensures that frame pointer based stack traces will work as
   designed.  If function A doesn't create a stack frame before calling
   function B, the _caller_ of function A will be skipped on the stack
   trace.

4. Dynamic jumps and jumps to undefined symbols are only allowed if:

   a) the jump is part of a switch statement; or

   b) the jump matches sibling call semantics and the frame pointer has
      the same value it had on function entry.

   This rule is needed so that objtool can reliably analyze all of a
   function's code paths.  If a function jumps to code in another file,
   and it's not a sibling call, objtool has no way to follow the jump
   because it only analyzes a single file at a time.

5. A callable function may not execute kernel entry/exit instructions.
   The only code which needs such instructions is kernel entry code,
   which shouldn't be in callable functions anyway.

   This rule is just a sanity check to ensure that callable functions
   return normally.

It currently only supports x86_64.  I tried to make the code generic so
that support for other architectures can hopefully be plugged in
relatively easily.

On my Lenovo laptop with an i7-4810MQ 4-core/8-thread CPU, building the
kernel with objtool checking every .o file adds about three seconds of
total build time.  It hasn't been optimized for performance yet, so
there are probably some opportunities for better build performance.

Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Bernd Petrovitsch <bernd@petrovitsch.priv.at>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Chris J Arges <chris.j.arges@canonical.com>
Cc: Jiri Slaby <jslaby@suse.cz>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michal Marek <mmarek@suse.cz>
Cc: Namhyung Kim <namhyung@gmail.com>
Cc: Pedro Alves <palves@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: live-patching@vger.kernel.org
Link: http://lkml.kernel.org/r/f3efb173de43bd067b060de73f856567c0fa1174.1456719558.git.jpoimboe@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
tools/objtool/special.c (194 lines, 4.6 KiB, C)
/*
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * This file reads all the special sections which have alternate instructions
 * which can be patched in or redirected to at runtime.
 */

#include <stdlib.h>
#include <string.h>

#include "special.h"
#include "warn.h"

#define EX_ENTRY_SIZE		12
#define EX_ORIG_OFFSET		0
#define EX_NEW_OFFSET		4

#define JUMP_ENTRY_SIZE		24
#define JUMP_ORIG_OFFSET	0
#define JUMP_NEW_OFFSET		8

#define ALT_ENTRY_SIZE		13
#define ALT_ORIG_OFFSET		0
#define ALT_NEW_OFFSET		4
#define ALT_FEATURE_OFFSET	8
#define ALT_ORIG_LEN_OFFSET	10
#define ALT_NEW_LEN_OFFSET	11

#define X86_FEATURE_POPCNT (4*32+23)
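
/*
 * Note: these sizes and offsets are hard-coded to match the on-disk layout
 * of the corresponding x86 kernel table entries at the time of writing --
 * roughly struct exception_table_entry (__ex_table), struct jump_entry
 * (__jump_table) and struct alt_instr (.altinstructions).  If those
 * structures change, the constants above must be kept in sync by hand.
 */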

struct special_entry {
	const char *sec;
	bool group, jump_or_nop;
	unsigned char size, orig, new;
	unsigned char orig_len, new_len; /* group only */
	unsigned char feature; /* ALTERNATIVE macro CPU feature */
};
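
/*
 * One descriptor per supported special section: each gives the record size
 * and the byte offsets of the fields objtool needs within a single record.
 */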
struct special_entry entries[] = {
	{
		.sec = ".altinstructions",
		.group = true,
		.size = ALT_ENTRY_SIZE,
		.orig = ALT_ORIG_OFFSET,
		.orig_len = ALT_ORIG_LEN_OFFSET,
		.new = ALT_NEW_OFFSET,
		.new_len = ALT_NEW_LEN_OFFSET,
		.feature = ALT_FEATURE_OFFSET,
	},
	{
		.sec = "__jump_table",
		.jump_or_nop = true,
		.size = JUMP_ENTRY_SIZE,
		.orig = JUMP_ORIG_OFFSET,
		.new = JUMP_NEW_OFFSET,
	},
	{
		.sec = "__ex_table",
		.size = EX_ENTRY_SIZE,
		.orig = EX_ORIG_OFFSET,
		.new = EX_NEW_OFFSET,
	},
	{},
};
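
/*
 * Parse a single record from a special section: read the group lengths and
 * CPU feature field where present, then resolve the relocations which point
 * at the original and (if any) replacement instructions.
 */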
static int get_alt_entry(struct elf *elf, struct special_entry *entry,
			 struct section *sec, int idx,
			 struct special_alt *alt)
{
	struct rela *orig_rela, *new_rela;
	unsigned long offset;

	offset = idx * entry->size;

	alt->group = entry->group;
	alt->jump_or_nop = entry->jump_or_nop;

	if (alt->group) {
		alt->orig_len = *(unsigned char *)(sec->data + offset +
						   entry->orig_len);
		alt->new_len = *(unsigned char *)(sec->data + offset +
						  entry->new_len);
	}

	if (entry->feature) {
		unsigned short feature;

		feature = *(unsigned short *)(sec->data + offset +
					      entry->feature);

		/*
		 * It has been requested that we don't validate the !POPCNT
		 * feature path which is a "very very small percentage of
		 * machines".
		 */
		if (feature == X86_FEATURE_POPCNT)
			alt->skip_orig = true;
	}

	orig_rela = find_rela_by_dest(sec, offset + entry->orig);
	if (!orig_rela) {
		WARN_FUNC("can't find orig rela", sec, offset + entry->orig);
		return -1;
	}
	if (orig_rela->sym->type != STT_SECTION) {
		WARN_FUNC("don't know how to handle non-section rela symbol %s",
			  sec, offset + entry->orig, orig_rela->sym->name);
		return -1;
	}

	alt->orig_sec = orig_rela->sym->sec;
	alt->orig_off = orig_rela->addend;

	if (!entry->group || alt->new_len) {
		new_rela = find_rela_by_dest(sec, offset + entry->new);
		if (!new_rela) {
			WARN_FUNC("can't find new rela",
				  sec, offset + entry->new);
			return -1;
		}

		alt->new_sec = new_rela->sym->sec;
		alt->new_off = (unsigned int)new_rela->addend;

		/* _ASM_EXTABLE_EX hack */
		if (alt->new_off >= 0x7ffffff0)
			alt->new_off -= 0x7ffffff0;
	}

	return 0;
}

/*
 * Read all the special sections and create a list of special_alt structs which
 * describe all the alternate instructions which can be patched in or
 * redirected to at runtime.
 */
int special_get_alts(struct elf *elf, struct list_head *alts)
{
	struct special_entry *entry;
	struct section *sec;
	unsigned int nr_entries;
	struct special_alt *alt;
	int idx, ret;

	INIT_LIST_HEAD(alts);

	for (entry = entries; entry->sec; entry++) {
		sec = find_section_by_name(elf, entry->sec);
		if (!sec)
			continue;

		if (sec->len % entry->size != 0) {
			WARN("%s size not a multiple of %d",
			     sec->name, entry->size);
			return -1;
		}

		nr_entries = sec->len / entry->size;

		for (idx = 0; idx < nr_entries; idx++) {
			alt = malloc(sizeof(*alt));
			if (!alt) {
				WARN("malloc failed");
				return -1;
			}
			memset(alt, 0, sizeof(*alt));

			ret = get_alt_entry(elf, entry, sec, idx, alt);
			if (ret)
				return ret;

			list_add_tail(&alt->list, alts);
		}
	}

	return 0;
}
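
For context, here is a minimal sketch of how a caller might consume the
resulting list.  It assumes a kernel-style list_for_each_entry macro is
available (objtool carries its own copy of the list helpers) and an
already-loaded struct elf; the dump_alts() wrapper is hypothetical, but
special_get_alts() and the special_alt fields are the ones defined above.

	#include <stdio.h>
	#include "special.h"

	/* Hypothetical consumer: print every alternative found in the file. */
	static void dump_alts(struct elf *elf)
	{
		struct list_head alts;
		struct special_alt *alt;

		if (special_get_alts(elf, &alts))
			return;

		list_for_each_entry(alt, &alts, list)
			printf("orig: %s+0x%lx%s\n", alt->orig_sec->name,
			       (unsigned long)alt->orig_off,
			       alt->group ? " (group)" : "");
	}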