Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-12-13 02:56:44 +07:00
3a94707d7a
Currently KASLR only supports relocation within a small physical range (from 16M to 1G), due to using the initial kernel page table identity mapping. To support ranges above this, we need to have an identity mapping for the desired memory range before we can decompress (and later run) the kernel.

32-bit kernels already have the needed identity mapping. This patch adds identity mappings for the needed memory ranges on 64-bit kernels. This happens in two possible boot paths:

If loaded via startup_32(), we need to set up the needed identity map.

If loaded from a 64-bit bootloader, the bootloader will have already set up an identity mapping, and we'll start via the compressed kernel's startup_64(). In this case, the bootloader's page tables need to be avoided while selecting the new uncompressed kernel location. If not, the decompressor could overwrite them during decompression.

To accomplish this, we could walk the page tables and find every page that is used, and add them to mem_avoid, but this needs extra code and would require increasing the size of the mem_avoid array.

Instead, we create a new set of page tables for our own identity mapping. The pages for the new page tables come from the _pgtable section of the compressed kernel, which means they are already contained in the mem_avoid array. To do this, we reuse the code from the uncompressed kernel's identity mapping routines.

The _pgtable section will be shared by both the 32-bit and 64-bit paths to reduce init_size, since the compressed kernel's _rodata-to-_end region now contributes to init_size.

To handle the possible mappings, we need to increase the existing page table buffer size:

When booting via startup_64(), we need to cover the old VO, params, cmdline and the uncompressed kernel. In an extreme case they could all lie beyond the 512G boundary, which needs (2+2)*4 pages with 2M mappings. We also need 2 pages for the first 2M (VGA RAM), and one more page for the level-4 table. This gets us to 19 pages total.

When booting via startup_32(), KASLR could move the uncompressed kernel above 4G, so we need to create extra identity mappings, which should need at most (2+2) pages when the kernel lands beyond the 512G boundary. So 19 pages is sufficient for this case as well.

The resulting BOOT_*PGT_SIZE defines use the "_SIZE" suffix on their names to maintain consistency with the existing BOOT_HEAP_SIZE and BOOT_STACK_SIZE defines.

This patch is based on earlier patches from Yinghai Lu and Baoquan He.

Signed-off-by: Kees Cook <keescook@chromium.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Baoquan He <bhe@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Borislav Petkov <bp@suse.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Young <dyoung@redhat.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: kernel-hardening@lists.openwall.com
Cc: lasse.collin@tukaani.org
Link: http://lkml.kernel.org/r/1462572095-11754-4-git-send-email-keescook@chromium.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
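To illustrate the mechanism described above, here is a minimal, self-contained sketch (not the actual boot code) of how page-table pages can be carved out of a fixed buffer such as the compressed kernel's _pgtable section: a simple bump allocator hands out one zeroed page at a time, and because every page lives inside the compressed kernel image it is already covered by the mem_avoid ranges. In the real kernel such an allocator is wired as a callback into the uncompressed kernel's identity-mapping code (kernel_ident_mapping_init()); the names and types below are illustrative only.

#include <stddef.h>

#define PAGE_SIZE	4096UL
#define BOOT_PGT_SIZE	(19 * 4096)

/* Fixed backing store, standing in for the _pgtable section. */
static unsigned char pgt_buf[BOOT_PGT_SIZE] __attribute__((aligned(4096)));
static unsigned long pgt_buf_offset;

/*
 * Hand out one zeroed page-table page at a time. Returning NULL on
 * exhaustion lets the caller detect that BOOT_PGT_SIZE was too small.
 */
static void *alloc_pgt_page(void)
{
	unsigned char *page;
	unsigned long i;

	if (pgt_buf_offset + PAGE_SIZE > sizeof(pgt_buf))
		return NULL;

	page = pgt_buf + pgt_buf_offset;
	pgt_buf_offset += PAGE_SIZE;

	for (i = 0; i < PAGE_SIZE; i++)	/* fresh tables must start out empty */
		page[i] = 0;

	return page;
}

The worst-case arithmetic that sizes this buffer (the 19-page figure behind BOOT_PGT_SIZE) is worked through after the file listing below.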
58 lines · 1.5 KiB · C
#ifndef _ASM_X86_BOOT_H
#define _ASM_X86_BOOT_H


#include <asm/pgtable_types.h>
#include <uapi/asm/boot.h>

/* Physical address where kernel should be loaded. */
#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
				+ (CONFIG_PHYSICAL_ALIGN - 1)) \
				& ~(CONFIG_PHYSICAL_ALIGN - 1))

/* Minimum kernel alignment, as a power of two */
#ifdef CONFIG_X86_64
# define MIN_KERNEL_ALIGN_LG2	PMD_SHIFT
#else
# define MIN_KERNEL_ALIGN_LG2	(PAGE_SHIFT + THREAD_SIZE_ORDER)
#endif
#define MIN_KERNEL_ALIGN	(_AC(1, UL) << MIN_KERNEL_ALIGN_LG2)

#if (CONFIG_PHYSICAL_ALIGN & (CONFIG_PHYSICAL_ALIGN-1)) || \
	(CONFIG_PHYSICAL_ALIGN < MIN_KERNEL_ALIGN)
# error "Invalid value for CONFIG_PHYSICAL_ALIGN"
#endif

#ifdef CONFIG_KERNEL_BZIP2
# define BOOT_HEAP_SIZE		0x400000
#else /* !CONFIG_KERNEL_BZIP2 */
# define BOOT_HEAP_SIZE		0x10000
#endif

#ifdef CONFIG_X86_64
# define BOOT_STACK_SIZE	0x4000

# define BOOT_INIT_PGT_SIZE	(6*4096)
# ifdef CONFIG_RANDOMIZE_BASE
/*
 * Assuming all cross the 512GB boundary:
 * 1 page for level4
 * (2+2)*4 pages for kernel, param, cmd_line, and randomized kernel
 * 2 pages for first 2M (video RAM: CONFIG_X86_VERBOSE_BOOTUP).
 * Total is 19 pages.
 */
#  ifdef CONFIG_X86_VERBOSE_BOOTUP
#   define BOOT_PGT_SIZE	(19*4096)
#  else /* !CONFIG_X86_VERBOSE_BOOTUP */
#   define BOOT_PGT_SIZE	(17*4096)
#  endif
# else /* !CONFIG_RANDOMIZE_BASE */
#  define BOOT_PGT_SIZE		BOOT_INIT_PGT_SIZE
# endif

#else /* !CONFIG_X86_64 */
# define BOOT_STACK_SIZE	0x1000
#endif

#endif /* _ASM_X86_BOOT_H */
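As a cross-check of the sizing comment in the header above, the short worked example below reproduces the 19-page worst case that BOOT_PGT_SIZE provisions for. Reading each "(2+2)" as 2 PUD-level plus 2 PMD-level table pages for a region that straddles a 512GB boundary is an assumption, as are the variable names; the totals themselves come straight from the comment and the commit message.

#include <stdio.h>

int main(void)
{
	/*
	 * With 2M mappings, a region that straddles a 512GB boundary needs
	 * (presumably) 2 PUD-level pages plus 2 PMD-level pages.
	 */
	int pages_per_region = 2 + 2;
	int regions          = 4;	/* old VO, params, cmdline, randomized kernel */
	int first_2m_pages   = 2;	/* VGA RAM mapping: one PUD plus one PMD page */
	int level4_pages     = 1;	/* single shared top-level table */

	int total = pages_per_region * regions + first_2m_pages + level4_pages;

	/* Prints 19, matching BOOT_PGT_SIZE = 19*4096 with verbose bootup enabled. */
	printf("worst-case page-table pages: %d\n", total);
	return 0;
}

Without CONFIG_X86_VERBOSE_BOOTUP the first-2M VGA mapping is not needed, which is why BOOT_PGT_SIZE drops from 19*4096 to 17*4096.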