x86/kaslr: Initialize mem_limit to the real maximum address
On 64-bit, the kernel must be placed below MAXMEM (64TiB with 4-level paging or 4PiB with 5-level paging). This is currently not enforced by KASLR, which thus implicitly relies on physical memory being limited to less than 64TiB.

On 32-bit, the limit is KERNEL_IMAGE_SIZE (512MiB). This is enforced by special checks in __process_mem_region().

Initialize mem_limit to the maximum (depending on architecture) instead of ULLONG_MAX, and make sure the command-line arguments can only decrease it. This makes the enforcement explicit on 64-bit, and eliminates the 32-bit specific checks to keep the kernel below 512M.

Check upfront to make sure the minimum address is below the limit before doing any work.

Signed-off-by: Arvind Sankar <nivedita@alum.mit.edu>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20200727230801.3468620-5-nivedita@alum.mit.edu
This commit is contained in:
parent 0870536556
commit 451286940d
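To make the intent of the change easier to follow before reading the diff, here is a minimal, self-contained C sketch (not kernel code) of the mem_limit handling the patch introduces: the limit starts at the architectural maximum, "mem="/"memmap=" style options may only lower it, and placement bails out early when even the minimum address plus the image size would exceed it. ARCH_MAX_ADDR and the sizes below are illustrative stand-ins, not the kernel's real MAXMEM/KERNEL_IMAGE_SIZE definitions; the actual patch defers the initialization to choose_random_location() because the maximum depends on the architecture being built.

#include <stdio.h>

/* Stand-in for the arch maximum (MAXMEM on 64-bit, KERNEL_IMAGE_SIZE on 32-bit). */
#define ARCH_MAX_ADDR (64ULL << 40)     /* 64 TiB, as with 4-level paging */

static unsigned long long mem_limit = ARCH_MAX_ADDR;

/* A "mem=nn[KMG]" style option may only decrease the limit, never raise it. */
static void apply_mem_option(unsigned long long mem_size)
{
        if (mem_size > 0 && mem_size < mem_limit)
                mem_limit = mem_size;
}

/* Early bail-out, mirroring the new check in find_random_phys_addr(). */
static int placement_possible(unsigned long long minimum, unsigned long long image_size)
{
        return minimum + image_size <= mem_limit;
}

int main(void)
{
        apply_mem_option(1ULL << 30);   /* mem=1G lowers the limit to 1 GiB */
        apply_mem_option(1ULL << 40);   /* a larger value is ignored        */

        printf("mem_limit  = %llu\n", mem_limit);
        printf("16M+32M ok = %d\n", placement_possible(16ULL << 20, 32ULL << 20));
        printf("2G+32M ok  = %d\n", placement_possible(2ULL << 30, 32ULL << 20));
        return 0;
}

Built with any C compiler, this prints a limit of 1 GiB and shows that a placement starting at 2 GiB is rejected, which is the same "check upfront" behaviour the patch adds to find_random_phys_addr().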
@@ -94,8 +94,11 @@ static unsigned long get_boot_seed(void)
 static bool memmap_too_large;
 
 
-/* Store memory limit specified by "mem=nn[KMG]" or "memmap=nn[KMG]" */
-static unsigned long long mem_limit = ULLONG_MAX;
+/*
+ * Store memory limit: MAXMEM on 64-bit and KERNEL_IMAGE_SIZE on 32-bit.
+ * It may be reduced by "mem=nn[KMG]" or "memmap=nn[KMG]" command line options.
+ */
+static unsigned long long mem_limit;
 
 /* Number of immovable memory regions */
 static int num_immovable_mem;
@@ -221,7 +224,7 @@ static void mem_avoid_memmap(enum parse_mode mode, char *str)
 
                 if (start == 0) {
                         /* Store the specified memory limit if size > 0 */
-                        if (size > 0)
+                        if (size > 0 && size < mem_limit)
                                 mem_limit = size;
 
                         continue;
@@ -311,7 +314,8 @@ static void handle_mem_options(void)
                         if (mem_size == 0)
                                 break;
 
-                        mem_limit = mem_size;
+                        if (mem_size < mem_limit)
+                                mem_limit = mem_size;
                 } else if (!strcmp(param, "efi_fake_mem")) {
                         mem_avoid_memmap(PARSE_EFI, val);
                 }
@@ -322,7 +326,9 @@ static void handle_mem_options(void)
 }
 
 /*
- * In theory, KASLR can put the kernel anywhere in the range of [16M, 64T).
+ * In theory, KASLR can put the kernel anywhere in the range of [16M, MAXMEM)
+ * on 64-bit, and [16M, KERNEL_IMAGE_SIZE) on 32-bit.
+ *
  * The mem_avoid array is used to store the ranges that need to be avoided
  * when KASLR searches for an appropriate random address. We must avoid any
  * regions that are unsafe to overlap with during decompression, and other
@@ -620,10 +626,6 @@ static void __process_mem_region(struct mem_vector *entry,
         unsigned long start_orig, end;
         struct mem_vector cur_entry;
 
-        /* On 32-bit, ignore entries entirely above our maximum. */
-        if (IS_ENABLED(CONFIG_X86_32) && entry->start >= KERNEL_IMAGE_SIZE)
-                return;
-
         /* Ignore entries entirely below our minimum. */
         if (entry->start + entry->size < minimum)
                 return;
@@ -656,11 +658,6 @@ static void __process_mem_region(struct mem_vector *entry,
                 /* Reduce size by any delta from the original address. */
                 region.size -= region.start - start_orig;
 
-                /* On 32-bit, reduce region size to fit within max size. */
-                if (IS_ENABLED(CONFIG_X86_32) &&
-                    region.start + region.size > KERNEL_IMAGE_SIZE)
-                        region.size = KERNEL_IMAGE_SIZE - region.start;
-
                 /* Return if region can't contain decompressed kernel */
                 if (region.size < image_size)
                         return;
@@ -845,15 +842,16 @@ static void process_e820_entries(unsigned long minimum,
 static unsigned long find_random_phys_addr(unsigned long minimum,
                                            unsigned long image_size)
 {
+        /* Bail out early if it's impossible to succeed. */
+        if (minimum + image_size > mem_limit)
+                return 0;
+
         /* Check if we had too many memmaps. */
         if (memmap_too_large) {
                 debug_putstr("Aborted memory entries scan (more than 4 memmap= args)!\n");
                 return 0;
         }
 
-        /* Make sure minimum is aligned. */
-        minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);
-
         if (process_efi_entries(minimum, image_size))
                 return slots_fetch_random();
 
@@ -866,8 +864,6 @@ static unsigned long find_random_virt_addr(unsigned long minimum,
 {
         unsigned long slots, random_addr;
 
-        /* Make sure minimum is aligned. */
-        minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);
-
         /* Align image_size for easy slot calculations. */
         image_size = ALIGN(image_size, CONFIG_PHYSICAL_ALIGN);
 
@@ -914,6 +910,11 @@ void choose_random_location(unsigned long input,
         /* Prepare to add new identity pagetables on demand. */
         initialize_identity_maps();
 
+        if (IS_ENABLED(CONFIG_X86_32))
+                mem_limit = KERNEL_IMAGE_SIZE;
+        else
+                mem_limit = MAXMEM;
+
         /* Record the various known unsafe memory ranges. */
         mem_avoid_init(input, input_size, *output);
 
@@ -923,6 +924,8 @@ void choose_random_location(unsigned long input,
          * location:
          */
         min_addr = min(*output, 512UL << 20);
+        /* Make sure minimum is aligned. */
+        min_addr = ALIGN(min_addr, CONFIG_PHYSICAL_ALIGN);
 
         /* Walk available memory entries to find a random address. */
         random_addr = find_random_phys_addr(min_addr, output_size);
Loading…
Reference in New Issue
Block a user