linux_dsm_epyc7002/arch/parisc/kernel/vmlinux.lds.S
Sven Schnelle 6ca6366220 parisc: add dynamic ftrace
This patch implements dynamic ftrace for PA-RISC. The required mcount
call sequences can get pretty long, so instead of patching the whole
call sequence out of the functions, we use gcc's
-fpatchable-function-entry option. This puts a configurable number of
NOPs before/at the start of the function. Taking do_sys_open() as an
example, it looks like this when the call is patched out:

1036b248:       08 00 02 40     nop
1036b24c:       08 00 02 40     nop
1036b250:       08 00 02 40     nop
1036b254:       08 00 02 40     nop

1036b258 <do_sys_open>:
1036b258:       08 00 02 40     nop
1036b25c:       08 03 02 41     copy r3,r1
1036b260:       6b c2 3f d9     stw rp,-14(sp)
1036b264:       08 1e 02 43     copy sp,r3
1036b268:       6f c1 01 00     stw,ma r1,80(sp)
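
Purely as an illustration of the gcc option, not kernel code: compiling a
toy function with a matching flag value produces the same padding layout.
The file name and exact flag value below are assumptions for the example,
not taken from the kernel build.

/*
 * gcc -O2 -fpatchable-function-entry=5,4 -c pad.c
 *
 * asks gcc for 5 NOPs per function, 4 of them before the function label
 * and 1 right at the entry point -- the same layout as the do_sys_open()
 * listing above.  The counts are configurable so that the patched-in
 * mcount call sequence fits.
 */
long pad_me(long x)
{
	return x + 1;
}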

When ftrace is enabled for this function, the kernel patches these
NOPs to:

1036b248:       10 19 57 20     <address of ftrace>
1036b24c:       6f c1 00 80     stw,ma r1,40(sp)
1036b250:       48 21 3f d1     ldw -18(r1),r1
1036b254:       e8 20 c0 02     bv,n r0(r1)

1036b258 <do_sys_open>:
1036b258:       e8 3f 1f df     b,l,n .-c,r1
1036b25c:       08 03 02 41     copy r3,r1
1036b260:       6b c2 3f d9     stw rp,-14(sp)
1036b264:       08 1e 02 43     copy sp,r3
1036b268:       6f c1 01 00     stw,ma r1,80(sp)

So the first NOP in do_sys_open() is patched to jump backwards into
some minimal trampoline code which pushes a stack frame, saves r1
(which holds the return address), loads the address of the real ftrace
function, and branches to that location. For 64-bit, things get a bit
more complicated (and longer), because we must make sure that the
address of the ftrace location is 8-byte aligned, and that the offset
passed to ldd for fetching that address is 8-byte aligned as well.
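
To make the mechanics concrete, here is a rough sketch of the 32-bit
patching step. This is not the code from arch/parisc/kernel/ftrace.c;
the helper name ftrace_patch_site() is invented for the example, the
instruction words are taken from the listing above, and a real kernel
would have to go through its text-patching machinery rather than write
to the (read-only) kernel text directly.

#include <linux/types.h>
#include <asm/cacheflush.h>

#define INSN_BL_BACK_0C		0xe83f1fdfU	/* b,l,n .-c,r1     */
#define INSN_STW_MA_R1		0x6fc10080U	/* stw,ma r1,40(sp) */
#define INSN_LDW_M18_R1		0x48213fd1U	/* ldw -18(r1),r1   */
#define INSN_BV_N_R1		0xe820c002U	/* bv,n r0(r1)      */

static void ftrace_patch_site(u32 *entry, unsigned long ftrace_caller)
{
	u32 *pad = entry - 4;		/* the four NOPs before the label  */

	pad[0] = (u32)ftrace_caller;	/* literal read back by the ldw    */
	pad[1] = INSN_STW_MA_R1;	/* push frame, save r1 (ret addr)  */
	pad[2] = INSN_LDW_M18_R1;	/* fetch the ftrace_caller literal */
	pad[3] = INSN_BV_N_R1;		/* and branch to it                */

	/* Flip the entry NOP last, so the site only becomes live once the
	 * trampoline words above are in place. */
	entry[0] = INSN_BL_BACK_0C;

	flush_icache_range((unsigned long)pad, (unsigned long)(entry + 1));
}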

Note that gcc has a bug that misplaces the function label; it needs a
patch to make dynamic ftrace work. See
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90751 for details.

Signed-off-by: Sven Schnelle <svens@stackframe.org>
Signed-off-by: Helge Deller <deller@gmx.de>
2019-06-08 12:56:29 +02:00

/* SPDX-License-Identifier: GPL-2.0 */
/* Kernel link layout for various "sections"
 *
 * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
 * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
 * Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
 * Copyright (C) 2000 Michael Ang <mang with subcarrier.org>
 * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
 * Copyright (C) 2003 James Bottomley <jejb with parisc-linux.org>
 * Copyright (C) 2006-2013 Helge Deller <deller@gmx.de>
 */
/*
 * Put page table entries (swapper_pg_dir) as the first thing in .bss. This
 * will ensure that it has .bss alignment (PAGE_SIZE).
 */
#define BSS_FIRST_SECTIONS	*(.data..vm0.pmd) \
				*(.data..vm0.pgd) \
				*(.data..vm0.pte)
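/* Tell asm-generic/vmlinux.lds.h that the compiler emits
 * __patchable_function_entries records (from -fpatchable-function-entry),
 * so they get collected into the __mcount_loc table for dynamic ftrace.
 */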
#define CC_USING_PATCHABLE_FUNCTION_ENTRY
#include <asm-generic/vmlinux.lds.h>
/* needed for the processor specific cache alignment size */
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
/* ld script to make hppa Linux kernel */
#ifndef CONFIG_64BIT
OUTPUT_FORMAT("elf32-hppa-linux")
OUTPUT_ARCH(hppa)
#else
OUTPUT_FORMAT("elf64-hppa-linux")
OUTPUT_ARCH(hppa:hppa2.0w)
#endif
#define EXIT_TEXT_SECTIONS() .exit.text : { EXIT_TEXT }
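/* MLONGCALL_DISCARD() places init/exit text in the init area, where it is
 * freed after boot; MLONGCALL_KEEP() keeps it next to the regular kernel
 * text instead, which is needed for 64-bit kernels built without
 * -mlongcalls.
 */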
#if !defined(CONFIG_64BIT) || defined(CONFIG_MLONGCALLS)
#define MLONGCALL_KEEP(x)
#define MLONGCALL_DISCARD(x) x
#else
#define MLONGCALL_KEEP(x) x
#define MLONGCALL_DISCARD(x)
#endif
ENTRY(parisc_kernel_start)
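/* parisc is big-endian, so on 32-bit the 32-bit jiffies symbol must alias
 * the low word of jiffies_64, which lives 4 bytes into the 64-bit value.
 */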
#ifndef CONFIG_64BIT
jiffies = jiffies_64 + 4;
#else
jiffies = jiffies_64;
#endif
SECTIONS
{
	. = KERNEL_BINARY_TEXT_START;
	__init_begin = .;
	HEAD_TEXT_SECTION
	MLONGCALL_DISCARD(INIT_TEXT_SECTION(8))
	. = ALIGN(PAGE_SIZE);
	INIT_DATA_SECTION(PAGE_SIZE)
	MLONGCALL_DISCARD(EXIT_TEXT_SECTIONS())
	.exit.data :
	{
		EXIT_DATA
	}
	PERCPU_SECTION(8)
	. = ALIGN(4);
	.altinstructions : {
		__alt_instructions = .;
		*(.altinstructions)
		__alt_instructions_end = .;
	}
	. = ALIGN(HUGEPAGE_SIZE);
	__init_end = .;
	/* freed after init ends here */
	_text = .;		/* Text and read-only data */
	_stext = .;
	MLONGCALL_KEEP(INIT_TEXT_SECTION(8))
	.text ALIGN(PAGE_SIZE) : {
		TEXT_TEXT
		LOCK_TEXT
		SCHED_TEXT
		CPUIDLE_TEXT
		KPROBES_TEXT
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		*(.text.do_softirq)
		*(.text.sys_exit)
		*(.text.do_sigaltstack)
		*(.text.do_fork)
		*(.text.div)
		*($$*)			/* millicode routines */
		*(.text.*)
		*(.fixup)
		*(.lock.text)		/* out-of-line lock text */
		*(.gnu.warning)
	}
	MLONGCALL_KEEP(EXIT_TEXT_SECTIONS())
	. = ALIGN(PAGE_SIZE);
	_etext = .;
	/* End of text section */
	/* Start of data section */
	_sdata = .;
	/* Architecturally we need to keep __gp below 0x1000000 and thus
	 * in front of RO_DATA_SECTION() which stores lots of tracepoint
	 * and ftrace symbols. */
#ifdef CONFIG_64BIT
	. = ALIGN(16);
	/* Linkage tables */
	.opd : {
		__start_opd = .;
		*(.opd)
		__end_opd = .;
	} PROVIDE (__gp = .);
	.plt : {
		*(.plt)
	}
	.dlt : {
		*(.dlt)
	}
#endif
	RO_DATA_SECTION(8)
	/* RO because of BUILDTIME_EXTABLE_SORT */
	EXCEPTION_TABLE(8)
	NOTES
	/* unwind info */
	.PARISC.unwind : {
		__start___unwind = .;
		*(.PARISC.unwind)
		__stop___unwind = .;
	}
	/* writeable */
	/* Make sure this is page aligned so
	 * that we can properly leave these
	 * as writable
	 */
	. = ALIGN(HUGEPAGE_SIZE);
	data_start = .;
	/* Data */
	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, PAGE_SIZE)
	/* PA-RISC locks require 16-byte alignment */
	. = ALIGN(16);
	.data..lock_aligned : {
		*(.data..lock_aligned)
	}
	/* End of data section */
	_edata = .;
	/* BSS */
	BSS_SECTION(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE)
	. = ALIGN(HUGEPAGE_SIZE);
	_end = . ;
	STABS_DEBUG
	.note 0 : { *(.note) }
	/* Sections to be discarded */
	DISCARDS
	/DISCARD/ : {
#ifdef CONFIG_64BIT
		/* temporary hack until binutils is fixed to not emit these
		 * for static binaries
		 */
		*(.interp)
		*(.dynsym)
		*(.dynstr)
		*(.dynamic)
		*(.hash)
		*(.gnu.hash)
#endif
	}
}