/* Kernel dynamically loadable module help for PARISC.
 *
 * The best reference for this stuff is probably the Processor-
 * Specific ELF Supplement for PA-RISC:
 *     http://ftp.parisc-linux.org/docs/arch/elf-pa-hp.pdf
 *
 * Linux/PA-RISC Project (http://www.parisc-linux.org/)
 * Copyright (C) 2003 Randolph Chung <tausq at debian . org>
 * Copyright (C) 2008 Helge Deller <deller@gmx.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 * Notes:
 * - PLT stub handling
 *   On 32bit (and sometimes 64bit) and with big kernel modules like xfs or
 *   ipv6 the relocation types R_PARISC_PCREL17F and R_PARISC_PCREL22F may
 *   fail to reach their PLT stub if we only create one big stub array for
 *   all sections at the beginning of the core or init section.
 *   Instead we now insert individual PLT stub entries directly in front
 *   of the code sections where the stubs are actually called.
 *   This reduces the distance between the PCREL location and the stub entry
 *   so that the relocations can be fulfilled.
 *   While calculating the final layout of the kernel module in memory, the
 *   kernel module loader calls arch_mod_section_prepend() to request the
 *   amount of memory to be reserved in front of each individual section.
 *
 * - SEGREL32 handling
 *   We are not doing SEGREL32 handling correctly. According to the ABI, we
 *   should do a value offset, like this:
 *			if (in_init(me, (void *)val))
 *				val -= (uint32_t)me->init_layout.base;
 *			else
 *				val -= (uint32_t)me->core_layout.base;
 *   However, SEGREL32 is used only for PARISC unwind entries, and we want
 *   those entries to have an absolute address, and not just an offset.
 *
 *   The unwind table mechanism has the ability to specify an offset for
 *   the unwind table; however, because we split off the init functions into
 *   a different piece of memory, it is not possible to do this using a
 *   single offset. Instead, we use the above hack for now.
 */

#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include <asm/pgtable.h>
#include <asm/unwind.h>
#include <asm/sections.h>

#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt...)
#endif

#define RELOC_REACHABLE(val, bits) \
	(( ( !((val) & (1<<((bits)-1))) && ((val)>>(bits)) != 0 )  ||	\
	     ( ((val) & (1<<((bits)-1))) && ((val)>>(bits)) != (((__typeof__(val))(~0))>>((bits)+2)))) ?	\
	0 : 1)

#define CHECK_RELOC(val, bits) \
	if (!RELOC_REACHABLE(val, bits)) { \
		printk(KERN_ERR "module %s relocation of symbol %s is out of range (0x%lx in %d bits)\n", \
		me->name, strtab + sym->st_name, (unsigned long)val, bits); \
		return -ENOEXEC;			\
	}
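
/* Worked example (illustrative, not from the original sources): a
 * R_PARISC_PCREL17F branch encodes a signed 17-bit *word* displacement,
 * so a direct call reaches roughly +/- 2^16 words = +/- 256 KiB around
 * the branch; R_PARISC_PCREL22F widens that to roughly +/- 8 MiB.
 * CHECK_RELOC fires when even the PLT stub placed in front of the target
 * section is farther away than that.
 */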

/* Maximum number of GOT entries. We use a long displacement ldd from
 * the bottom of the table, which has a maximum signed displacement of
 * 0x3fff; however, since we're only going forward, this becomes
 * 0x1fff, and thus, since each GOT entry is 8 bytes long we can have
 * at most 1023 entries.
 * To overcome this 14bit displacement with some kernel modules, we'll
 * use instead the unusual 16bit displacement method (see reassemble_16a)
 * which gives us a maximum positive displacement of 0x7fff, and as such
 * allows us to allocate up to 4095 GOT entries. */
#define MAX_GOTS	4095
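
/* Sanity check on the limit above (illustrative arithmetic only): with a
 * maximum positive 16-bit displacement of 0x7fff bytes and 8-byte GOT
 * entries, 0x7fff / sizeof(struct got_entry) = 0x7fff / 8 = 4095, which is
 * where MAX_GOTS comes from.
 */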

/* three functions to determine where in the module core
 * or init pieces the location is */
static inline int in_init(struct module *me, void *loc)
{
	return (loc >= me->init_layout.base &&
		loc <= (me->init_layout.base + me->init_layout.size));
}

static inline int in_core(struct module *me, void *loc)
{
	return (loc >= me->core_layout.base &&
		loc <= (me->core_layout.base + me->core_layout.size));
}

static inline int in_local(struct module *me, void *loc)
{
	return in_init(me, loc) || in_core(me, loc);
}

#ifndef CONFIG_64BIT
struct got_entry {
	Elf32_Addr addr;
};

struct stub_entry {
	Elf32_Word insns[2]; /* each stub entry has two insns */
};
#else
struct got_entry {
	Elf64_Addr addr;
};

struct stub_entry {
	Elf64_Word insns[4]; /* each stub entry has four insns */
};
#endif

/* Field selection types defined by hppa */
#define rnd(x)			(((x)+0x1000)&~0x1fff)
/* fsel: full 32 bits */
#define fsel(v,a)		((v)+(a))
/* lsel: select left 21 bits */
#define lsel(v,a)		(((v)+(a))>>11)
/* rsel: select right 11 bits */
#define rsel(v,a)		(((v)+(a))&0x7ff)
/* lrsel with rounding of addend to nearest 8k */
#define lrsel(v,a)		(((v)+rnd(a))>>11)
/* rrsel with rounding of addend to nearest 8k */
#define rrsel(v,a)	((((v)+rnd(a))&0x7ff)+((a)-rnd(a)))

#define mask(x,sz)		((x) & ~((1<<(sz))-1))
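
/* Illustrative example (assumed values, not from the original code): for an
 * address such as 0x40211234 with a zero addend, lsel() yields the upper 21
 * bits (0x40211234 >> 11) for a ldil-style instruction and rsel() yields the
 * low 11 bits (0x234) for the matching ldo/be.  lrsel()/rrsel() do the same
 * split but first round the addend to the nearest 8k so that the two halves
 * stay consistent with each other.
 */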

/* The reassemble_* functions prepare an immediate value for
   insertion into an opcode. pa-risc uses all sorts of weird bitfields
   in the instruction to hold the value.  */
static inline int sign_unext(int x, int len)
{
	int len_ones;

	len_ones = (1 << len) - 1;
	return x & len_ones;
}

static inline int low_sign_unext(int x, int len)
{
	int sign, temp;

	sign = (x >> (len-1)) & 1;
	temp = sign_unext(x, len-1);
	return (temp << 1) | sign;
}

static inline int reassemble_14(int as14)
{
	return (((as14 & 0x1fff) << 1) |
		((as14 & 0x2000) >> 13));
}

static inline int reassemble_16a(int as16)
{
	int s, t;

	/* Unusual 16-bit encoding, for wide mode only.  */
	t = (as16 << 1) & 0xffff;
	s = (as16 & 0x8000);
	return (t ^ s ^ (s >> 1)) | (s >> 15);
}

static inline int reassemble_17(int as17)
{
	return (((as17 & 0x10000) >> 16) |
		((as17 & 0x0f800) << 5) |
		((as17 & 0x00400) >> 8) |
		((as17 & 0x003ff) << 3));
}

static inline int reassemble_21(int as21)
{
	return (((as21 & 0x100000) >> 20) |
		((as21 & 0x0ffe00) >> 8) |
		((as21 & 0x000180) << 7) |
		((as21 & 0x00007c) << 14) |
		((as21 & 0x000003) << 12));
}

static inline int reassemble_22(int as22)
{
	return (((as22 & 0x200000) >> 21) |
		((as22 & 0x1f0000) << 5) |
		((as22 & 0x00f800) << 5) |
		((as22 & 0x000400) >> 8) |
		((as22 & 0x0003ff) << 3));
}

void *module_alloc(unsigned long size)
{
	/* using RWX means less protection for modules, but it's
	 * easier than trying to map the text, data, init_text and
	 * init_data correctly */
	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
				    GFP_KERNEL,
				    PAGE_KERNEL_RWX, 0, NUMA_NO_NODE,
				    __builtin_return_address(0));
}

#ifndef CONFIG_64BIT
static inline unsigned long count_gots(const Elf_Rela *rela, unsigned long n)
{
	return 0;
}

static inline unsigned long count_fdescs(const Elf_Rela *rela, unsigned long n)
{
	return 0;
}

static inline unsigned long count_stubs(const Elf_Rela *rela, unsigned long n)
{
	unsigned long cnt = 0;

	for (; n > 0; n--, rela++)
	{
		switch (ELF32_R_TYPE(rela->r_info)) {
			case R_PARISC_PCREL17F:
			case R_PARISC_PCREL22F:
				cnt++;
		}
	}

	return cnt;
}
#else
static inline unsigned long count_gots(const Elf_Rela *rela, unsigned long n)
{
	unsigned long cnt = 0;

	for (; n > 0; n--, rela++)
	{
		switch (ELF64_R_TYPE(rela->r_info)) {
			case R_PARISC_LTOFF21L:
			case R_PARISC_LTOFF14R:
			case R_PARISC_PCREL22F:
				cnt++;
		}
	}

	return cnt;
}

static inline unsigned long count_fdescs(const Elf_Rela *rela, unsigned long n)
{
	unsigned long cnt = 0;

	for (; n > 0; n--, rela++)
	{
		switch (ELF64_R_TYPE(rela->r_info)) {
			case R_PARISC_FPTR64:
				cnt++;
		}
	}

	return cnt;
}

static inline unsigned long count_stubs(const Elf_Rela *rela, unsigned long n)
{
	unsigned long cnt = 0;

	for (; n > 0; n--, rela++)
	{
		switch (ELF64_R_TYPE(rela->r_info)) {
			case R_PARISC_PCREL22F:
				cnt++;
		}
	}

	return cnt;
}
#endif

void module_arch_freeing_init(struct module *mod)
{
	kfree(mod->arch.section);
	mod->arch.section = NULL;
}

/* Additional bytes needed in front of individual sections */
unsigned int arch_mod_section_prepend(struct module *mod,
				      unsigned int section)
{
	/* size needed for all stubs of this section (including
	 * one additional for correct alignment of the stubs) */
	return (mod->arch.section[section].stub_entries + 1)
		* sizeof(struct stub_entry);
}
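
/* Rough worked example (illustrative figures): if a 64-bit module section
 * needs 10 PLT stubs, the loader is asked to reserve (10 + 1) *
 * sizeof(struct stub_entry) = 11 * 16 = 176 bytes in front of that section;
 * the extra entry is slack so the stub array can be aligned to a stub-entry
 * boundary in get_stub().
 */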

#define CONST
int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
			      CONST Elf_Shdr *sechdrs,
			      CONST char *secstrings,
			      struct module *me)
{
	unsigned long gots = 0, fdescs = 0, len;
	unsigned int i;

	len = hdr->e_shnum * sizeof(me->arch.section[0]);
	me->arch.section = kzalloc(len, GFP_KERNEL);
	if (!me->arch.section)
		return -ENOMEM;

	for (i = 1; i < hdr->e_shnum; i++) {
		const Elf_Rela *rels = (void *)sechdrs[i].sh_addr;
		unsigned long nrels = sechdrs[i].sh_size / sizeof(*rels);
		unsigned int count, s;

		if (strncmp(secstrings + sechdrs[i].sh_name,
			    ".PARISC.unwind", 14) == 0)
			me->arch.unwind_section = i;

		if (sechdrs[i].sh_type != SHT_RELA)
			continue;

		/* some of these are not relevant for 32-bit/64-bit
		 * we leave them here to make the code common. the
		 * compiler will do its thing and optimize out the
		 * stuff we don't need
		 */
		gots += count_gots(rels, nrels);
		fdescs += count_fdescs(rels, nrels);

		/* XXX: By sorting the relocs and finding duplicate entries
		 * we could reduce the number of necessary stubs and save
		 * some memory. */
		count = count_stubs(rels, nrels);
		if (!count)
			continue;

		/* so we need relocation stubs. reserve necessary memory. */
		/* sh_info gives the section for which we need to add stubs. */
		s = sechdrs[i].sh_info;

		/* each code section should only have one relocation section */
		WARN_ON(me->arch.section[s].stub_entries);

		/* store number of stubs we need for this section */
		me->arch.section[s].stub_entries += count;
	}

	/* align things a bit */
	me->core_layout.size = ALIGN(me->core_layout.size, 16);
	me->arch.got_offset = me->core_layout.size;
	me->core_layout.size += gots * sizeof(struct got_entry);

	me->core_layout.size = ALIGN(me->core_layout.size, 16);
	me->arch.fdesc_offset = me->core_layout.size;
	me->core_layout.size += fdescs * sizeof(Elf_Fdesc);

	me->arch.got_max = gots;
	me->arch.fdesc_max = fdescs;

	return 0;
}
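
/* Bookkeeping sketch (derived from the code above): got_offset/fdesc_offset
 * record where inside core_layout the 16-byte aligned GOT and the fdesc
 * table were reserved, while got_max/fdesc_max record how many entries were
 * counted, so that get_got()/get_fdesc() can BUG_ON() if relocation
 * processing ever needs more entries than were reserved here.
 */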

#ifdef CONFIG_64BIT
static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
{
	unsigned int i;
	struct got_entry *got;

	value += addend;

	BUG_ON(value == 0);

	got = me->core_layout.base + me->arch.got_offset;
	for (i = 0; got[i].addr; i++)
		if (got[i].addr == value)
			goto out;

	BUG_ON(++me->arch.got_count > me->arch.got_max);

	got[i].addr = value;
 out:
	DEBUGP("GOT ENTRY %d[%x] val %lx\n", i, i*sizeof(struct got_entry),
	       value);
	return i * sizeof(struct got_entry);
}
#endif /* CONFIG_64BIT */

#ifdef CONFIG_64BIT
static Elf_Addr get_fdesc(struct module *me, unsigned long value)
{
	Elf_Fdesc *fdesc = me->core_layout.base + me->arch.fdesc_offset;

	if (!value) {
		printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
		return 0;
	}

	/* Look for existing fdesc entry. */
	while (fdesc->addr) {
		if (fdesc->addr == value)
			return (Elf_Addr)fdesc;
		fdesc++;
	}

	BUG_ON(++me->arch.fdesc_count > me->arch.fdesc_max);

	/* Create new one */
	fdesc->addr = value;
	fdesc->gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset;
	return (Elf_Addr)fdesc;
}
#endif /* CONFIG_64BIT */

enum elf_stub_type {
	ELF_STUB_GOT,
	ELF_STUB_MILLI,
	ELF_STUB_DIRECT,
};
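
/* Quick summary of how the relocation code below picks a stub type
 * (paraphrased from that code, not an authoritative ABI statement):
 * ELF_STUB_GOT is the long-call stub for symbols outside this module and
 * goes through a GOT entry, ELF_STUB_MILLI is used for the "$$" millicode
 * helper symbols, and ELF_STUB_DIRECT is a plain long branch for local
 * targets that are out of range of a PCREL17F/PCREL22F branch.
 */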

static Elf_Addr get_stub(struct module *me, unsigned long value, long addend,
	enum elf_stub_type stub_type, Elf_Addr loc0, unsigned int targetsec)
{
	struct stub_entry *stub;
	int __maybe_unused d;

	/* initialize stub_offset to point in front of the section */
	if (!me->arch.section[targetsec].stub_offset) {
		loc0 -= (me->arch.section[targetsec].stub_entries + 1) *
				sizeof(struct stub_entry);
		/* get correct alignment for the stubs */
		loc0 = ALIGN(loc0, sizeof(struct stub_entry));
		me->arch.section[targetsec].stub_offset = loc0;
	}

	/* get address of stub entry */
	stub = (void *) me->arch.section[targetsec].stub_offset;
	me->arch.section[targetsec].stub_offset += sizeof(struct stub_entry);

	/* do not write outside available stub area */
	BUG_ON(0 == me->arch.section[targetsec].stub_entries--);


#ifndef CONFIG_64BIT
/* for 32-bit the stub looks like this:
 * 	ldil L'XXX,%r1
 * 	be,n R'XXX(%sr4,%r1)
 */
	//value = *(unsigned long *)((value + addend) & ~3); /* why? */

	stub->insns[0] = 0x20200000;	/* ldil L'XXX,%r1	*/
	stub->insns[1] = 0xe0202002;	/* be,n R'XXX(%sr4,%r1)	*/

	stub->insns[0] |= reassemble_21(lrsel(value, addend));
	stub->insns[1] |= reassemble_17(rrsel(value, addend) / 4);

#else
/* for 64-bit we have three kinds of stubs:
 * for normal function calls:
 * 	ldd 0(%dp),%dp
 * 	ldd 10(%dp), %r1
 * 	bve (%r1)
 * 	ldd 18(%dp), %dp
 *
 * for millicode:
 * 	ldil 0, %r1
 * 	ldo 0(%r1), %r1
 * 	ldd 10(%r1), %r1
 * 	bve,n (%r1)
 *
 * for direct branches (jumps between different sections of the
 * same module):
 *	ldil 0, %r1
 *	ldo 0(%r1), %r1
 *	bve,n (%r1)
 */
	switch (stub_type) {
	case ELF_STUB_GOT:
		d = get_got(me, value, addend);
		if (d <= 15) {
			/* Format 5 */
			stub->insns[0] = 0x0f6010db; /* ldd 0(%dp),%dp */
			stub->insns[0] |= low_sign_unext(d, 5) << 16;
		} else {
			/* Format 3 */
			stub->insns[0] = 0x537b0000; /* ldd 0(%dp),%dp */
			stub->insns[0] |= reassemble_16a(d);
		}
		stub->insns[1] = 0x53610020;	/* ldd 10(%dp),%r1	*/
		stub->insns[2] = 0xe820d000;	/* bve (%r1)		*/
		stub->insns[3] = 0x537b0030;	/* ldd 18(%dp),%dp	*/
		break;
	case ELF_STUB_MILLI:
		stub->insns[0] = 0x20200000;	/* ldil 0,%r1		*/
		stub->insns[1] = 0x34210000;	/* ldo 0(%r1), %r1	*/
		stub->insns[2] = 0x50210020;	/* ldd 10(%r1),%r1	*/
		stub->insns[3] = 0xe820d002;	/* bve,n (%r1)		*/

		stub->insns[0] |= reassemble_21(lrsel(value, addend));
		stub->insns[1] |= reassemble_14(rrsel(value, addend));
		break;
	case ELF_STUB_DIRECT:
		stub->insns[0] = 0x20200000;	/* ldil 0,%r1		*/
		stub->insns[1] = 0x34210000;	/* ldo 0(%r1), %r1	*/
		stub->insns[2] = 0xe820d002;	/* bve,n (%r1)		*/

		stub->insns[0] |= reassemble_21(lrsel(value, addend));
		stub->insns[1] |= reassemble_14(rrsel(value, addend));
		break;
	}
#endif

	return (Elf_Addr)stub;
}

#ifndef CONFIG_64BIT
int apply_relocate_add(Elf_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	int i;
	Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	Elf32_Word *loc;
	Elf32_Addr val;
	Elf32_Sword addend;
	Elf32_Addr dot;
	Elf_Addr loc0;
	unsigned int targetsec = sechdrs[relsec].sh_info;
	//unsigned long dp = (unsigned long)$global$;
	register unsigned long dp asm ("r27");

	DEBUGP("Applying relocate section %u to %u\n", relsec,
	       targetsec);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		loc = (void *)sechdrs[targetsec].sh_addr
		      + rel[i].r_offset;
		/* This is the start of the target section */
		loc0 = sechdrs[targetsec].sh_addr;
		/* This is the symbol it is referring to */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rel[i].r_info);
		if (!sym->st_value) {
			printk(KERN_WARNING "%s: Unknown symbol %s\n",
			       me->name, strtab + sym->st_name);
			return -ENOENT;
		}
		//dot = (sechdrs[relsec].sh_addr + rel->r_offset) & ~0x03;
		dot = (Elf32_Addr)loc & ~0x03;

		val = sym->st_value;
		addend = rel[i].r_addend;

#if 0
#define r(t) ELF32_R_TYPE(rel[i].r_info)==t ? #t :
		DEBUGP("Symbol %s loc 0x%x val 0x%x addend 0x%x: %s\n",
			strtab + sym->st_name,
			(uint32_t)loc, val, addend,
			r(R_PARISC_PLABEL32)
			r(R_PARISC_DIR32)
			r(R_PARISC_DIR21L)
			r(R_PARISC_DIR14R)
			r(R_PARISC_SEGREL32)
			r(R_PARISC_DPREL21L)
			r(R_PARISC_DPREL14R)
			r(R_PARISC_PCREL17F)
			r(R_PARISC_PCREL22F)
			"UNKNOWN");
#undef r
#endif

		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_PARISC_PLABEL32:
			/* 32-bit function address */
			/* no function descriptors... */
			*loc = fsel(val, addend);
			break;
		case R_PARISC_DIR32:
			/* direct 32-bit ref */
			*loc = fsel(val, addend);
			break;
		case R_PARISC_DIR21L:
			/* left 21 bits of effective address */
			val = lrsel(val, addend);
			*loc = mask(*loc, 21) | reassemble_21(val);
			break;
		case R_PARISC_DIR14R:
			/* right 14 bits of effective address */
			val = rrsel(val, addend);
			*loc = mask(*loc, 14) | reassemble_14(val);
			break;
		case R_PARISC_SEGREL32:
			/* 32-bit segment relative address */
			/* See note about special handling of SEGREL32 at
			 * the beginning of this file.
			 */
			*loc = fsel(val, addend);
			break;
		case R_PARISC_SECREL32:
			/* 32-bit section relative address. */
			*loc = fsel(val, addend);
			break;
		case R_PARISC_DPREL21L:
			/* left 21 bit of relative address */
			val = lrsel(val - dp, addend);
			*loc = mask(*loc, 21) | reassemble_21(val);
			break;
		case R_PARISC_DPREL14R:
			/* right 14 bit of relative address */
			val = rrsel(val - dp, addend);
			*loc = mask(*loc, 14) | reassemble_14(val);
			break;
		case R_PARISC_PCREL17F:
			/* 17-bit PC relative address */
			/* calculate direct call offset */
			val += addend;
			val = (val - dot - 8)/4;
			if (!RELOC_REACHABLE(val, 17)) {
				/* direct distance too far, create
				 * stub entry instead */
				val = get_stub(me, sym->st_value, addend,
					ELF_STUB_DIRECT, loc0, targetsec);
				val = (val - dot - 8)/4;
				CHECK_RELOC(val, 17);
			}
			*loc = (*loc & ~0x1f1ffd) | reassemble_17(val);
			break;
		case R_PARISC_PCREL22F:
			/* 22-bit PC relative address; only defined for pa20 */
			/* calculate direct call offset */
			val += addend;
			val = (val - dot - 8)/4;
			if (!RELOC_REACHABLE(val, 22)) {
				/* direct distance too far, create
				 * stub entry instead */
				val = get_stub(me, sym->st_value, addend,
					ELF_STUB_DIRECT, loc0, targetsec);
				val = (val - dot - 8)/4;
				CHECK_RELOC(val, 22);
			}
			*loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
			break;
		case R_PARISC_PCREL32:
			/* 32-bit PC relative address */
			*loc = val - dot - 8 + addend;
			break;

		default:
			printk(KERN_ERR "module %s: Unknown relocation: %u\n",
			       me->name, ELF32_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}

	return 0;
}
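
/* Note on the offset math above (explanatory, mirrors the PA-RISC branch
 * encoding): "val - dot - 8" is the byte distance measured from 8 bytes past
 * the branch (the architected PC is two instructions ahead), and the
 * division by 4 converts it to the word displacement that
 * reassemble_17()/reassemble_22() expect.
 */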

#else
int apply_relocate_add(Elf_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	int i;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf64_Sym *sym;
	Elf64_Word *loc;
	Elf64_Xword *loc64;
	Elf64_Addr val;
	Elf64_Sxword addend;
	Elf64_Addr dot;
	Elf_Addr loc0;
	unsigned int targetsec = sechdrs[relsec].sh_info;

	DEBUGP("Applying relocate section %u to %u\n", relsec,
	       targetsec);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		loc = (void *)sechdrs[targetsec].sh_addr
		      + rel[i].r_offset;
		/* This is the start of the target section */
		loc0 = sechdrs[targetsec].sh_addr;
		/* This is the symbol it is referring to */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);
		if (!sym->st_value) {
			printk(KERN_WARNING "%s: Unknown symbol %s\n",
			       me->name, strtab + sym->st_name);
			return -ENOENT;
		}
		//dot = (sechdrs[relsec].sh_addr + rel->r_offset) & ~0x03;
		dot = (Elf64_Addr)loc & ~0x03;
		loc64 = (Elf64_Xword *)loc;

		val = sym->st_value;
		addend = rel[i].r_addend;

#if 0
#define r(t) ELF64_R_TYPE(rel[i].r_info)==t ? #t :
		printk("Symbol %s loc %p val 0x%Lx addend 0x%Lx: %s\n",
			strtab + sym->st_name,
			loc, val, addend,
			r(R_PARISC_LTOFF14R)
			r(R_PARISC_LTOFF21L)
			r(R_PARISC_PCREL22F)
			r(R_PARISC_DIR64)
			r(R_PARISC_SEGREL32)
			r(R_PARISC_FPTR64)
			"UNKNOWN");
#undef r
#endif

		switch (ELF64_R_TYPE(rel[i].r_info)) {
		case R_PARISC_LTOFF21L:
			/* LT-relative; left 21 bits */
			val = get_got(me, val, addend);
			DEBUGP("LTOFF21L Symbol %s loc %p val %lx\n",
			       strtab + sym->st_name,
			       loc, val);
			val = lrsel(val, 0);
			*loc = mask(*loc, 21) | reassemble_21(val);
			break;
		case R_PARISC_LTOFF14R:
			/* L(ltoff(val+addend)) */
			/* LT-relative; right 14 bits */
			val = get_got(me, val, addend);
			val = rrsel(val, 0);
			DEBUGP("LTOFF14R Symbol %s loc %p val %lx\n",
			       strtab + sym->st_name,
			       loc, val);
			*loc = mask(*loc, 14) | reassemble_14(val);
			break;
		case R_PARISC_PCREL22F:
			/* PC-relative; 22 bits */
			DEBUGP("PCREL22F Symbol %s loc %p val %lx\n",
			       strtab + sym->st_name,
			       loc, val);
			val += addend;
			/* can we reach it locally? */
			if (in_local(me, (void *)val)) {
				/* this is the case where the symbol is local
				 * to the module, but in a different section,
				 * so stub the jump in case it's more than 22
				 * bits away */
				val = (val - dot - 8)/4;
				if (!RELOC_REACHABLE(val, 22)) {
					/* direct distance too far, create
					 * stub entry instead */
					val = get_stub(me, sym->st_value,
						addend, ELF_STUB_DIRECT,
						loc0, targetsec);
				} else {
					/* Ok, we can reach it directly. */
					val = sym->st_value;
					val += addend;
				}
			} else {
				val = sym->st_value;
				if (strncmp(strtab + sym->st_name, "$$", 2)
				    == 0)
					val = get_stub(me, val, addend, ELF_STUB_MILLI,
						       loc0, targetsec);
				else
					val = get_stub(me, val, addend, ELF_STUB_GOT,
						       loc0, targetsec);
			}
			DEBUGP("STUB FOR %s loc %lx, val %lx+%lx at %lx\n",
			       strtab + sym->st_name, loc, sym->st_value,
			       addend, val);
			val = (val - dot - 8)/4;
			CHECK_RELOC(val, 22);
			*loc = (*loc & ~0x3ff1ffd) | reassemble_22(val);
			break;
		case R_PARISC_PCREL32:
			/* 32-bit PC relative address */
			*loc = val - dot - 8 + addend;
			break;
		case R_PARISC_DIR64:
			/* 64-bit effective address */
			*loc64 = val + addend;
			break;
		case R_PARISC_SEGREL32:
			/* 32-bit segment relative address */
			/* See note about special handling of SEGREL32 at
			 * the beginning of this file.
			 */
			*loc = fsel(val, addend);
			break;
		case R_PARISC_SECREL32:
			/* 32-bit section relative address. */
			*loc = fsel(val, addend);
			break;
		case R_PARISC_FPTR64:
			/* 64-bit function address */
			if (in_local(me, (void *)(val + addend))) {
				*loc64 = get_fdesc(me, val+addend);
				DEBUGP("FDESC for %s at %p points to %lx\n",
				       strtab + sym->st_name, *loc64,
				       ((Elf_Fdesc *)*loc64)->addr);
			} else {
				/* if the symbol is not local to this
				 * module then val+addend is a pointer
				 * to the function descriptor */
				DEBUGP("Non local FPTR64 Symbol %s loc %p val %lx\n",
				       strtab + sym->st_name,
				       loc, val);
				*loc64 = val + addend;
			}
			break;

		default:
			printk(KERN_ERR "module %s: Unknown relocation: %Lu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}
#endif

static void
register_unwind_table(struct module *me,
		      const Elf_Shdr *sechdrs)
{
	unsigned char *table, *end;
	unsigned long gp;

	if (!me->arch.unwind_section)
		return;

	table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
	end = table + sechdrs[me->arch.unwind_section].sh_size;
	gp = (Elf_Addr)me->core_layout.base + me->arch.got_offset;

	DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
	       me->arch.unwind_section, table, end, gp);
	me->arch.unwind = unwind_table_add(me->name, 0, gp, table, end);
}

static void
deregister_unwind_table(struct module *me)
{
	if (me->arch.unwind)
		unwind_table_remove(me->arch.unwind);
}

int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	int i;
	unsigned long nsyms;
	const char *strtab = NULL;
	const Elf_Shdr *s;
	char *secstrings;
	Elf_Sym *newptr, *oldptr;
	Elf_Shdr *symhdr = NULL;
#ifdef DEBUG
	Elf_Fdesc *entry;
	u32 *addr;

	entry = (Elf_Fdesc *)me->init;
	printk("FINALIZE, ->init FPTR is %p, GP %lx ADDR %lx\n", entry,
	       entry->gp, entry->addr);
	addr = (u32 *)entry->addr;
	printk("INSNS: %x %x %x %x\n",
	       addr[0], addr[1], addr[2], addr[3]);
	printk("got entries used %ld, gots max %ld\n"
	       "fdescs used %ld, fdescs max %ld\n",
	       me->arch.got_count, me->arch.got_max,
	       me->arch.fdesc_count, me->arch.fdesc_max);
#endif

	register_unwind_table(me, sechdrs);

	/* haven't filled in me->symtab yet, so have to find it
	 * ourselves */
	for (i = 1; i < hdr->e_shnum; i++) {
		if (sechdrs[i].sh_type == SHT_SYMTAB
		    && (sechdrs[i].sh_flags & SHF_ALLOC)) {
			int strindex = sechdrs[i].sh_link;
			/* FIXME: AWFUL HACK
			 * The cast is to drop the const from
			 * the sechdrs pointer */
			symhdr = (Elf_Shdr *)&sechdrs[i];
			strtab = (char *)sechdrs[strindex].sh_addr;
			break;
		}
	}

	DEBUGP("module %s: strtab %p, symhdr %p\n",
	       me->name, strtab, symhdr);

	if (me->arch.got_count > MAX_GOTS) {
		printk(KERN_ERR "%s: Global Offset Table overflow (used %ld, allowed %d)\n",
		       me->name, me->arch.got_count, MAX_GOTS);
		return -EINVAL;
	}

	kfree(me->arch.section);
	me->arch.section = NULL;

	/* no symbol table */
	if (symhdr == NULL)
		return 0;

	oldptr = (void *)symhdr->sh_addr;
	newptr = oldptr + 1;	/* we start counting at 1 */
	nsyms = symhdr->sh_size / sizeof(Elf_Sym);
	DEBUGP("OLD num_symtab %lu\n", nsyms);

	for (i = 1; i < nsyms; i++) {
		oldptr++;	/* note, count starts at 1 so preincrement */
		if (strncmp(strtab + oldptr->st_name,
			    ".L", 2) == 0)
			continue;

		if (newptr != oldptr)
			*newptr++ = *oldptr;
		else
			newptr++;
	}
	nsyms = newptr - (Elf_Sym *)symhdr->sh_addr;
	DEBUGP("NEW num_symtab %lu\n", nsyms);
	symhdr->sh_size = nsyms * sizeof(Elf_Sym);

	/* find .altinstructions section */
	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
		void *aseg = (void *) s->sh_addr;
		char *secname = secstrings + s->sh_name;

		if (!strcmp(".altinstructions", secname))
			/* patch .altinstructions */
			apply_alternatives(aseg, aseg + s->sh_size, me->name);
	}

	return 0;
}
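
/* Note on the symbol-table pass above (explanatory): names starting with
 * ".L" are compiler-generated local labels, so they are squeezed out of the
 * symtab here and sh_size is shrunk to match, keeping the module's symbol
 * table smaller.
 */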

void module_arch_cleanup(struct module *mod)
{
	deregister_unwind_table(mod);
}

#ifdef CONFIG_64BIT
void *dereference_module_function_descriptor(struct module *mod, void *ptr)
{
	unsigned long start_opd = (Elf64_Addr)mod->core_layout.base +
				  mod->arch.fdesc_offset;
	unsigned long end_opd = start_opd +
				mod->arch.fdesc_count * sizeof(Elf64_Fdesc);

	if (ptr < (void *)start_opd || ptr >= (void *)end_opd)
		return ptr;

	return dereference_function_descriptor(ptr);
}
#endif