#ifndef BOOT_COMPRESSED_MISC_H
#define BOOT_COMPRESSED_MISC_H

/*
 * Special hack: we have to be careful, because no indirections are allowed here,
 * and paravirt_ops is a kind of one. As it will only run in baremetal anyway,
 * we just keep it from happening. (This list needs to be extended when new
 * paravirt and debugging variants are added.)
 */
#undef CONFIG_PARAVIRT
#undef CONFIG_PARAVIRT_SPINLOCKS
#undef CONFIG_KASAN
#include <linux/linkage.h>
#include <linux/screen_info.h>
#include <linux/elf.h>
#include <linux/io.h>
#include <asm/page.h>
#include <asm/boot.h>
#include <asm/bootparam.h>
#include <asm/bootparam_utils.h>

/*
 * NOTE(review): defining BOOT_BOOT_H before including ../ctype.h appears
 * intended to suppress the real-mode boot.h contents if it gets pulled in —
 * confirm against arch/x86/boot/boot.h's include guard.
 */
#define BOOT_BOOT_H
#include "../ctype.h"
/*
 * memptr: pointer-sized scalar type used for the decompressor heap bounds
 * (see free_mem_ptr / free_mem_end_ptr in misc.c, declared below).
 */
#ifdef CONFIG_X86_64
#define memptr long
#else
#define memptr unsigned
#endif
2010-08-03 06:21:22 +07:00
|
|
|
/* misc.c */
|
2013-10-11 07:18:16 +07:00
|
|
|
extern memptr free_mem_ptr;
|
|
|
|
extern memptr free_mem_end_ptr;
|
2016-04-18 23:42:12 +07:00
|
|
|
extern struct boot_params *boot_params;
|
2012-07-20 08:04:39 +07:00
|
|
|
void __putstr(const char *s);
|
2015-07-07 06:06:20 +07:00
|
|
|
void __puthex(unsigned long value);
|
2012-07-20 08:04:39 +07:00
|
|
|
#define error_putstr(__x) __putstr(__x)
|
2015-07-07 06:06:20 +07:00
|
|
|
#define error_puthex(__x) __puthex(__x)
|
2012-07-20 08:04:39 +07:00
|
|
|
|
|
|
|
#ifdef CONFIG_X86_VERBOSE_BOOTUP
/* Verbose boot: debug output goes to the same sinks as error output. */
#define debug_putstr(__x)	__putstr(__x)
#define debug_puthex(__x)	__puthex(__x)
/* do/while(0) so this multi-statement macro is safe in if/else bodies. */
#define debug_putaddr(__x) do { \
	debug_putstr(#__x ": 0x"); \
	debug_puthex((unsigned long)(__x)); \
	debug_putstr("\n"); \
} while (0)

#else
/* Quiet boot: compile the debug helpers away. */
static inline void debug_putstr(const char *s)
{ }
/*
 * Takes unsigned long, matching __puthex() and the (unsigned long) cast the
 * verbose-path macros apply — previously this stub took "const char *".
 */
static inline void debug_puthex(unsigned long value)
{ }
#define debug_putaddr(x) /* */

#endif
/* Use defined() so the test is well-formed however the config symbols expand. */
#if defined(CONFIG_EARLY_PRINTK) || defined(CONFIG_RANDOMIZE_BASE)
/* cmdline.c */
int cmdline_find_option(const char *option, char *buffer, int bufsize);
int cmdline_find_option_bool(const char *option);
#endif
/* #ifdef (not "#if"), consistent with every other config test in this file. */
#ifdef CONFIG_RANDOMIZE_BASE
/* kaslr.c */
void choose_random_location(unsigned long input,
			    unsigned long input_size,
			    unsigned long *output,
			    unsigned long output_size,
			    unsigned long *virt_addr);
/* cpuflags.c */
bool has_cpuflag(int flag);
#else
static inline void choose_random_location(unsigned long input,
					  unsigned long input_size,
					  unsigned long *output,
					  unsigned long output_size,
					  unsigned long *virt_addr)
{
	/* No change from existing output location. */
	*virt_addr = *output;
}
#endif
#ifdef CONFIG_X86_64
/* Identity mappings are built on demand for 64-bit kernels. */
void initialize_identity_maps(void);
void add_identity_map(unsigned long start, unsigned long size);
void finalize_identity_maps(void);
/* Page-table pages come from the compressed kernel's _pgtable section. */
extern unsigned char _pgtable[];
#else
/* 32-bit kernels already have the needed identity mapping: no-ops. */
static inline void initialize_identity_maps(void)
{ }
static inline void add_identity_map(unsigned long start, unsigned long size)
{ }
static inline void finalize_identity_maps(void)
{ }
#endif
#ifdef CONFIG_EARLY_PRINTK
/* early_serial_console.c */
extern int early_serial_base;
void console_init(void);
#else
/* No early printk: base stays 0 and initialization is a no-op. */
static const int early_serial_base;
static inline void console_init(void)
{ }
#endif
#endif /* BOOT_COMPRESSED_MISC_H */