/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_MODULE_H
#define _ASM_POWERPC_MODULE_H
#ifdef __KERNEL__

#include <linux/list.h>
#include <asm/bug.h>
#include <asm-generic/module.h>

#ifdef CONFIG_MPROFILE_KERNEL
#define MODULE_ARCH_VERMAGIC_FTRACE "mprofile-kernel "
#else
#define MODULE_ARCH_VERMAGIC_FTRACE ""
#endif

#ifdef CONFIG_RELOCATABLE
#define MODULE_ARCH_VERMAGIC_RELOCATABLE "relocatable "
#else
#define MODULE_ARCH_VERMAGIC_RELOCATABLE ""
#endif

#define MODULE_ARCH_VERMAGIC MODULE_ARCH_VERMAGIC_FTRACE MODULE_ARCH_VERMAGIC_RELOCATABLE
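
/*
 * For illustration (not part of the original header): with both
 * CONFIG_MPROFILE_KERNEL and CONFIG_RELOCATABLE enabled, the string
 * concatenation above makes MODULE_ARCH_VERMAGIC expand to
 * "mprofile-kernel relocatable ", so a module built with a different
 * combination of those options fails the vermagic check at load time.
 */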

#ifndef __powerpc64__
/*
 * Thanks to Paul M for explaining this.
 *
 * PPC can only do relative jumps within +/- 32MB, and often the kernel
 * and other modules are further away than this. So, we jump to a table
 * of trampolines attached to the module (the Procedure Linkage Table)
 * whenever that happens.
 */

struct ppc_plt_entry {
	/* 16 byte jump instruction sequence (4 instructions) */
	unsigned int jump[4];
};
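
/*
 * Purely illustrative, not from this header: the 32-bit module loader
 * (arch/powerpc/kernel/module_32.c) fills a ppc_plt_entry with a
 * sequence along these lines, reaching an arbitrary target via ctr:
 *
 *	lis   r11, target@ha
 *	addi  r11, r11, target@l
 *	mtctr r11
 *	bctr
 */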
#endif /* __powerpc64__ */
struct mod_arch_specific {
#ifdef __powerpc64__
	unsigned int stubs_section;	/* Index of stubs section in module */
	unsigned int toc_section;	/* What section is the TOC? */
	bool toc_fixed;			/* Have we fixed up .TOC.? */

	/* For module function descriptor dereference */
	unsigned long start_opd;
	unsigned long end_opd;
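
	/*
	 * Illustrative note, not in the original header: on ELFv1 these
	 * bounds let dereference_module_function_descriptor() recognise
	 * an address inside this module's .opd section and return the
	 * function's real entry point.
	 */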
#else /* powerpc64 */
	/* Indices of PLT sections within module. */
	unsigned int core_plt_section;
	unsigned int init_plt_section;
#endif /* powerpc64 */

#ifdef CONFIG_DYNAMIC_FTRACE
	unsigned long tramp;		/* stub used to reach ftrace_caller() */
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	unsigned long tramp_regs;	/* stub used to reach ftrace_regs_caller() */
#endif
#endif

	/* List of BUG addresses, source line numbers and filenames */
	struct list_head bug_list;
	struct bug_entry *bug_table;
	unsigned int num_bugs;
};

/*
 * ELF header/section types are selected by <asm-generic/module.h>,
 * included above.
 * Make empty sections for module_frob_arch_sections() to expand.
 */

#ifdef __powerpc64__
# ifdef MODULE
asm(".section .stubs,\"ax\",@nobits; .align 3; .previous");
# endif
#else
# ifdef MODULE
asm(".section .plt,\"ax\",@nobits; .align 3; .previous");
asm(".section .init.plt,\"ax\",@nobits; .align 3; .previous");
# endif /* MODULE */
#endif
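
/*
 * Illustrative note, not in the original header: the @nobits sections
 * declared above occupy no space in the module file; at load time
 * module_frob_arch_sections() only has to grow sh_size on the matching
 * section headers so the loader allocates room for the trampolines
 * that will be written there.
 */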

#ifdef CONFIG_DYNAMIC_FTRACE
# ifdef MODULE
asm(".section .ftrace.tramp,\"ax\",@nobits; .align 3; .previous");
# endif /* MODULE */
#endif

/*
 * Module stubs are marked with a magic value so that, when ftrace
 * patches calls to _mcount()/ftrace_caller(), it can confirm that a
 * branch really targets one of this module's trampolines without
 * decoding the stub's exact instruction sequence. Although this is
 * less rigorous than inspecting the instructions, it is sufficient
 * in practice.
 */
int module_trampoline_target(struct module *mod, unsigned long trampoline,
			     unsigned long *target);
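
/*
 * A minimal usage sketch under stated assumptions (stub_addr is a
 * placeholder, not an identifier from this header): ftrace's patching
 * code can validate a branch target roughly like
 *
 *	unsigned long addr;
 *
 *	if (module_trampoline_target(mod, stub_addr, &addr))
 *		return -EFAULT;
 *	if (addr != (unsigned long)ftrace_caller)
 *		return -EINVAL;
 */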

#ifdef CONFIG_DYNAMIC_FTRACE
int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs);
#else
static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs)
{
	return 0;
}
#endif
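
/*
 * Assumption for context, not from this header: module_finalize_ftrace()
 * is called from this architecture's module_finalize(), and the static
 * inline stub above keeps that call site unconditional when dynamic
 * ftrace is disabled.
 */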
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MODULE_H */