linux_dsm_epyc7002/arch/x86/kernel/module.c


/* Kernel module help for x86.
   Copyright (C) 2001 Rusty Russell.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/jump_label.h>
#include <linux/random.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/setup.h>

#if 0
#define DEBUGP(fmt, ...)				\
	printk(KERN_DEBUG fmt, ##__VA_ARGS__)
#else
#define DEBUGP(fmt, ...)				\
do {							\
	if (0)						\
		printk(KERN_DEBUG fmt, ##__VA_ARGS__);	\
} while (0)
#endif
#ifdef CONFIG_RANDOMIZE_BASE
static unsigned long module_load_offset;
/* Mutex protects the module_load_offset. */
static DEFINE_MUTEX(module_kaslr_mutex);
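
/*
 * All modules share a single randomized base: the first call below picks an
 * offset in the range [1, 1024] * PAGE_SIZE (roughly 10 bits of entropy) and
 * every later call returns the same value, so module_alloc() starts its
 * search at MODULES_VADDR + offset rather than at MODULES_VADDR itself.
 */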
static unsigned long int get_module_load_offset(void)
{
	if (kaslr_enabled()) {
		mutex_lock(&module_kaslr_mutex);
		/*
		 * Calculate the module_load_offset the first time this
		 * code is called. Once calculated it stays the same until
		 * reboot.
		 */
		if (module_load_offset == 0)
			module_load_offset =
				(get_random_int() % 1024 + 1) * PAGE_SIZE;
		mutex_unlock(&module_kaslr_mutex);
	}
	return module_load_offset;
}
#else
static unsigned long int get_module_load_offset(void)
{
	return 0;
}
#endif
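
/*
 * Allocate executable mapping space for a module somewhere in
 * [MODULES_VADDR + load offset, MODULES_END).  With KASAN enabled the
 * matching shadow memory is set up here as well; if that fails, the
 * freshly allocated area is released and NULL is returned.
 */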
void *module_alloc(unsigned long size)
{
	void *p;

	if (PAGE_ALIGN(size) > MODULES_LEN)
		return NULL;

	p = __vmalloc_node_range(size, MODULE_ALIGN,
				 MODULES_VADDR + get_module_load_offset(),
				 MODULES_END, GFP_KERNEL | __GFP_HIGHMEM,
				 PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
				 __builtin_return_address(0));
	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}
#ifdef CONFIG_X86_32
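/*
 * 32-bit modules use REL entries, so the addend is whatever already sits at
 * the target location: R_386_32 turns that word into S + A (symbol value
 * plus in-place addend), while R_386_PC32 makes it S + A - P, i.e. relative
 * to the address being patched.
 */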
int apply_relocate(Elf32_Shdr *sechdrs,
		   const char *strtab,
		   unsigned int symindex,
		   unsigned int relsec,
		   struct module *me)
{
	unsigned int i;
	Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	uint32_t *location;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rel[i].r_info);

		switch (ELF32_R_TYPE(rel[i].r_info)) {
		case R_386_32:
			/* We add the value into the location given */
			*location += sym->st_value;
			break;
		case R_386_PC32:
			/* Add the value, subtract its position */
			*location += sym->st_value - (uint32_t)location;
			break;
		default:
			pr_err("%s: Unknown relocation: %u\n",
			       me->name, ELF32_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}
#else /*X86_64*/
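
/*
 * 64-bit modules use RELA entries, so val = S + A comes from the explicit
 * addend: R_X86_64_64 stores the full 64-bit value, R_X86_64_32/32S store it
 * truncated to 32 bits (zero- and sign-extended respectively) and bail out
 * via the overflow path if truncation loses information, and R_X86_64_PC32
 * stores val - P, relative to the patched location.
 */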
int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf64_Sym *sym;
	void *loc;
	u64 val;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* This is the symbol it is referring to.  Note that all
		   undefined symbols have been resolved.  */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		DEBUGP("type %d st_value %Lx r_addend %Lx loc %Lx\n",
		       (int)ELF64_R_TYPE(rel[i].r_info),
		       sym->st_value, rel[i].r_addend, (u64)loc);

		val = sym->st_value + rel[i].r_addend;

		switch (ELF64_R_TYPE(rel[i].r_info)) {
		case R_X86_64_NONE:
			break;
		case R_X86_64_64:
			*(u64 *)loc = val;
			break;
		case R_X86_64_32:
			*(u32 *)loc = val;
			if (val != *(u32 *)loc)
				goto overflow;
			break;
		case R_X86_64_32S:
			*(s32 *)loc = val;
			if ((s64)val != *(s32 *)loc)
				goto overflow;
			break;
		case R_X86_64_PC32:
			val -= (u64)loc;
			*(u32 *)loc = val;
#if 0
			if ((s64)val != *(s32 *)loc)
				goto overflow;
#endif
			break;
		default:
			pr_err("%s: Unknown rela relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;

overflow:
	pr_err("overflow in relocation type %d val %Lx\n",
	       (int)ELF64_R_TYPE(rel[i].r_info), val);
	pr_err("`%s' likely not compiled with -mcmodel=kernel\n",
	       me->name);
	return -ENOEXEC;
}
#endif
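
/*
 * Post-relocation fixups: once the module's sections sit at their final
 * addresses, patch CPU-specific alternatives, register the .smp_locks range
 * so lock prefixes can be switched for SMP/UP, apply paravirt patches, and
 * turn jump label entries into nops.
 */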
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s, *text = NULL, *alt = NULL, *locks = NULL,
		*para = NULL;
	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
		if (!strcmp(".text", secstrings + s->sh_name))
			text = s;
		if (!strcmp(".altinstructions", secstrings + s->sh_name))
			alt = s;
		if (!strcmp(".smp_locks", secstrings + s->sh_name))
			locks = s;
		if (!strcmp(".parainstructions", secstrings + s->sh_name))
			para = s;
	}

	if (alt) {
		/* patch .altinstructions */
		void *aseg = (void *)alt->sh_addr;
		apply_alternatives(aseg, aseg + alt->sh_size);
	}
	if (locks && text) {
		void *lseg = (void *)locks->sh_addr;
		void *tseg = (void *)text->sh_addr;
		alternatives_smp_module_add(me, me->name,
					    lseg, lseg + locks->sh_size,
					    tseg, tseg + text->sh_size);
	}
	if (para) {
		void *pseg = (void *)para->sh_addr;
		apply_paravirt(pseg, pseg + para->sh_size);
	}

	/* make jump label nops */
	jump_label_apply_nops(me);

	return 0;
}

void module_arch_cleanup(struct module *mod)
{
	alternatives_smp_module_del(mod);
}