2012-03-05 18:49:33 +07:00
|
|
|
/*
|
|
|
|
* Copyright (C) 2012 ARM Ltd.
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License version 2 as
|
|
|
|
* published by the Free Software Foundation.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
* GNU General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License
|
|
|
|
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
*/
|
|
|
|
#ifndef __ASM_MODULE_H
|
|
|
|
#define __ASM_MODULE_H
|
|
|
|
|
|
|
|
#include <asm-generic/module.h>
|
|
|
|
|
|
|
|
#define MODULE_ARCH_VERMAGIC "aarch64"
|
|
|
|
|
2015-11-24 18:37:35 +07:00
|
|
|
#ifdef CONFIG_ARM64_MODULE_PLTS
|
arm64: module: split core and init PLT sections
The arm64 module PLT code allocates all PLT entries in a single core
section, since the overhead of having a separate init PLT section is
not justified by the small number of PLT entries usually required for
init code.
However, the core and init module regions are allocated independently,
and there is a corner case where the core region may be allocated from
the VMALLOC region if the dedicated module region is exhausted, but the
init region, being much smaller, can still be allocated from the module
region. This leads to relocation failures if the distance between those
regions exceeds 128 MB. (In fact, this corner case is highly unlikely to
occur on arm64, but the issue has been observed on ARM, whose module
region is much smaller).
So split the core and init PLT regions, and name the latter ".init.plt"
so it gets allocated along with (and sufficiently close to) the .init
sections that it serves. Also, given that init PLT entries may need to
be emitted for branches that target the core module, modify the logic
that disregards defined symbols to only disregard symbols that are
defined in the same section as the relocated branch instruction.
Since there may now be two PLT entries associated with each entry in
the symbol table, we can no longer hijack the symbol::st_size fields
to record the addresses of PLT entries as we emit them for zero-addend
relocations. So instead, perform an explicit comparison to check for
duplicate entries.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
2017-02-22 05:12:57 +07:00
|
|
|
/*
 * Per-section PLT bookkeeping.  There is one of these for the module core
 * and one for the .init sections, so init-only veneers can be placed (and
 * discarded) with the .init code they serve.
 */
struct mod_plt_sec {
	struct elf64_shdr *plt;		/* ELF section header of this PLT */
	int plt_num_entries;		/* entries emitted so far */
	int plt_max_entries;		/* capacity sized at section layout time */
};
|
arm64: module: split core and init PLT sections
The arm64 module PLT code allocates all PLT entries in a single core
section, since the overhead of having a separate init PLT section is
not justified by the small number of PLT entries usually required for
init code.
However, the core and init module regions are allocated independently,
and there is a corner case where the core region may be allocated from
the VMALLOC region if the dedicated module region is exhausted, but the
init region, being much smaller, can still be allocated from the module
region. This leads to relocation failures if the distance between those
regions exceeds 128 MB. (In fact, this corner case is highly unlikely to
occur on arm64, but the issue has been observed on ARM, whose module
region is much smaller).
So split the core and init PLT regions, and name the latter ".init.plt"
so it gets allocated along with (and sufficiently close to) the .init
sections that it serves. Also, given that init PLT entries may need to
be emitted for branches that target the core module, modify the logic
that disregards defined symbols to only disregard symbols that are
defined in the same section as the relocated branch instruction.
Since there may now be two PLT entries associated with each entry in
the symbol table, we can no longer hijack the symbol::st_size fields
to record the addresses of PLT entries as we emit them for zero-addend
relocations. So instead, perform an explicit comparison to check for
duplicate entries.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
2017-02-22 05:12:57 +07:00
|
|
|
|
|
|
|
struct mod_arch_specific {
	struct mod_plt_sec core;	/* PLT for branches from the module core */
	struct mod_plt_sec init;	/* PLT allocated alongside the .init sections */

	/* for CONFIG_DYNAMIC_FTRACE */
	struct plt_entry *ftrace_trampoline;	/* veneer to reach the ftrace entry point */
};
|
2015-11-24 18:37:35 +07:00
|
|
|
#endif
|
|
|
|
|
arm64: module: split core and init PLT sections
The arm64 module PLT code allocates all PLT entries in a single core
section, since the overhead of having a separate init PLT section is
not justified by the small number of PLT entries usually required for
init code.
However, the core and init module regions are allocated independently,
and there is a corner case where the core region may be allocated from
the VMALLOC region if the dedicated module region is exhausted, but the
init region, being much smaller, can still be allocated from the module
region. This leads to relocation failures if the distance between those
regions exceeds 128 MB. (In fact, this corner case is highly unlikely to
occur on arm64, but the issue has been observed on ARM, whose module
region is much smaller).
So split the core and init PLT regions, and name the latter ".init.plt"
so it gets allocated along with (and sufficiently close to) the .init
sections that it serves. Also, given that init PLT entries may need to
be emitted for branches that target the core module, modify the logic
that disregards defined symbols to only disregard symbols that are
defined in the same section as the relocated branch instruction.
Since there may now be two PLT entries associated with each entry in
the symbol table, we can no longer hijack the symbol::st_size fields
to record the addresses of PLT entries as we emit them for zero-addend
relocations. So instead, perform an explicit comparison to check for
duplicate entries.
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
2017-02-22 05:12:57 +07:00
|
|
|
/*
 * Emit (or reuse a matching) PLT veneer for the branch relocation @rela,
 * applied at @loc inside @mod, and return the veneer's address so the
 * branch can be pointed at it.  Defined out of line (presumably in
 * module-plt.c) — NOTE(review): exact dedup/placement semantics live there.
 */
u64 module_emit_plt_entry(struct module *mod, void *loc, const Elf64_Rela *rela,
			  Elf64_Sym *sym);
|
|
|
|
|
arm64: add support for kernel ASLR
This adds support for KASLR, implemented based on entropy provided by
the bootloader in the /chosen/kaslr-seed DT property. Depending on the size
of the address space (VA_BITS) and the page size, the entropy in the
virtual displacement is up to 13 bits (16k/2 levels) and up to 25 bits (all
4 levels), with the sidenote that displacements that result in the kernel
image straddling a 1GB/32MB/512MB alignment boundary (for 4KB/16KB/64KB
granule kernels, respectively) are not allowed, and will be rounded up to
an acceptable value.
If CONFIG_RANDOMIZE_MODULE_REGION_FULL is enabled, the module region is
randomized independently from the core kernel. This makes it less likely
that the location of core kernel data structures can be determined by an
adversary, but causes all function calls from modules into the core kernel
to be resolved via entries in the module PLTs.
If CONFIG_RANDOMIZE_MODULE_REGION_FULL is not enabled, the module region is
randomized by choosing a page aligned 128 MB region inside the interval
[_etext - 128 MB, _stext + 128 MB). This gives between 10 and 14 bits of
entropy (depending on page size), independently of the kernel randomization,
but still guarantees that modules are within the range of relative branch
and jump instructions (with the caveat that, since the module region is
shared with other uses of the vmalloc area, modules may need to be loaded
further away if the module region is exhausted)
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
2016-01-26 20:12:01 +07:00
|
|
|
#ifdef CONFIG_RANDOMIZE_BASE
/* With KASLR the module region base is chosen at boot; see kaslr code. */
extern u64 module_alloc_base;
#else
/* Fixed region of MODULES_VSIZE bytes ending at _etext. */
#define module_alloc_base ((u64)_etext - MODULES_VSIZE)
#endif
|
|
|
|
|
2017-11-21 00:41:29 +07:00
|
|
|
/*
 * A single PLT veneer: four fixed AArch64 instructions that load a 48-bit
 * target into x16 and branch to it.  The layout below is exactly what
 * get_plt_entry() emits — keep the fields in this order.
 */
struct plt_entry {
	/*
	 * A program that conforms to the AArch64 Procedure Call Standard
	 * (AAPCS64) must assume that a veneer that alters IP0 (x16) and/or
	 * IP1 (x17) may be inserted at any branch instruction that is
	 * exposed to a relocation that supports long branches. Since that
	 * is exactly what we are dealing with here, we are free to use x16
	 * as a scratch register in the PLT veneers.
	 */
	__le32 mov0;	/* movn	x16, #0x....			*/
	__le32 mov1;	/* movk	x16, #0x...., lsl #16		*/
	__le32 mov2;	/* movk	x16, #0x...., lsl #32		*/
	__le32 br;	/* br	x16				*/
};
|
|
|
|
|
|
|
|
/*
 * Build the instruction sequence of a PLT veneer that jumps to @val.
 * Only bits [47:0] of the target are materialized (movn + two movk).
 */
static inline struct plt_entry get_plt_entry(u64 val)
{
	/*
	 * MOVK/MOVN/MOVZ opcode:
	 * +--------+------------+--------+-----------+-------------+---------+
	 * | sf[31] | opc[30:29] | 100101 | hw[22:21] | imm16[20:5] | Rd[4:0] |
	 * +--------+------------+--------+-----------+-------------+---------+
	 *
	 * Rd  := 0x10 (x16)
	 * hw  := 0b00 (no shift), 0b01 (lsl #16), 0b10 (lsl #32)
	 * opc := 0b11 (MOVK), 0b00 (MOVN), 0b10 (MOVZ)
	 * sf  := 1 (64-bit variant)
	 *
	 * The first instruction is a MOVN of the inverted low 16 bits, so
	 * the subsequent MOVKs can patch in the remaining bits verbatim.
	 */
	return (struct plt_entry){
		.mov0 = cpu_to_le32(0x92800010 | ((~val & 0xffff) << 5)),
		.mov1 = cpu_to_le32(0xf2a00010 | (((val >> 16) & 0xffff) << 5)),
		.mov2 = cpu_to_le32(0xf2c00010 | (((val >> 32) & 0xffff) << 5)),
		.br   = cpu_to_le32(0xd61f0200),
	};
}
|
|
|
|
|
|
|
|
static inline bool plt_entries_equal(const struct plt_entry *a,
|
|
|
|
const struct plt_entry *b)
|
|
|
|
{
|
|
|
|
return a->mov0 == b->mov0 &&
|
|
|
|
a->mov1 == b->mov1 &&
|
|
|
|
a->mov2 == b->mov2;
|
|
|
|
}
|
|
|
|
|
2012-03-05 18:49:33 +07:00
|
|
|
#endif /* __ASM_MODULE_H */
|