2019-06-03 12:44:50 +07:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2012-03-05 18:49:27 +07:00
|
|
|
/*
|
|
|
|
* Based on arch/arm/mm/mmap.c
|
|
|
|
*
|
|
|
|
* Copyright (C) 2012 ARM Ltd.
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/elf.h>
|
|
|
|
#include <linux/fs.h>
|
2017-05-19 22:42:00 +07:00
|
|
|
#include <linux/memblock.h>
|
2012-03-05 18:49:27 +07:00
|
|
|
#include <linux/mm.h>
|
|
|
|
#include <linux/mman.h>
|
|
|
|
#include <linux/export.h>
|
|
|
|
#include <linux/shm.h>
|
2017-02-09 00:51:30 +07:00
|
|
|
#include <linux/sched/signal.h>
|
2017-02-09 00:51:31 +07:00
|
|
|
#include <linux/sched/mm.h>
|
2012-03-05 18:49:27 +07:00
|
|
|
#include <linux/io.h>
|
|
|
|
#include <linux/personality.h>
|
|
|
|
#include <linux/random.h>
|
|
|
|
|
|
|
|
#include <asm/cputype.h>
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Leave enough space between the mmap area and the stack to honour ulimit in
|
|
|
|
* the face of randomisation.
|
|
|
|
*/
|
2017-07-13 04:36:36 +07:00
|
|
|
#define MIN_GAP (SZ_128M)
|
2012-03-05 18:49:27 +07:00
|
|
|
#define MAX_GAP (STACK_TOP/6*5)
|
|
|
|
|
2018-04-11 06:34:53 +07:00
|
|
|
static int mmap_is_legacy(struct rlimit *rlim_stack)
|
2012-03-05 18:49:27 +07:00
|
|
|
{
|
|
|
|
if (current->personality & ADDR_COMPAT_LAYOUT)
|
|
|
|
return 1;
|
|
|
|
|
2018-04-11 06:34:53 +07:00
|
|
|
if (rlim_stack->rlim_cur == RLIM_INFINITY)
|
2012-03-05 18:49:27 +07:00
|
|
|
return 1;
|
|
|
|
|
|
|
|
return sysctl_legacy_va_layout;
|
|
|
|
}
|
|
|
|
|
2015-04-15 05:48:00 +07:00
|
|
|
unsigned long arch_mmap_rnd(void)
|
2012-03-05 18:49:27 +07:00
|
|
|
{
|
2015-04-15 05:47:48 +07:00
|
|
|
unsigned long rnd;
|
2012-03-05 18:49:27 +07:00
|
|
|
|
2016-01-15 06:20:01 +07:00
|
|
|
#ifdef CONFIG_COMPAT
|
|
|
|
if (test_thread_flag(TIF_32BIT))
|
2016-02-27 06:19:37 +07:00
|
|
|
rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
|
2016-01-15 06:20:01 +07:00
|
|
|
else
|
|
|
|
#endif
|
2016-02-27 06:19:37 +07:00
|
|
|
rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
|
2014-11-18 06:02:19 +07:00
|
|
|
return rnd << PAGE_SHIFT;
|
2012-03-05 18:49:27 +07:00
|
|
|
}
|
|
|
|
|
2018-04-11 06:34:53 +07:00
|
|
|
static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
|
2012-03-05 18:49:27 +07:00
|
|
|
{
|
2018-04-11 06:34:53 +07:00
|
|
|
unsigned long gap = rlim_stack->rlim_cur;
|
2017-07-13 04:36:36 +07:00
|
|
|
unsigned long pad = (STACK_RND_MASK << PAGE_SHIFT) + stack_guard_gap;
|
|
|
|
|
|
|
|
/* Values close to RLIM_INFINITY can overflow. */
|
|
|
|
if (gap + pad > gap)
|
|
|
|
gap += pad;
|
2012-03-05 18:49:27 +07:00
|
|
|
|
|
|
|
if (gap < MIN_GAP)
|
|
|
|
gap = MIN_GAP;
|
|
|
|
else if (gap > MAX_GAP)
|
|
|
|
gap = MAX_GAP;
|
|
|
|
|
2015-04-15 05:47:48 +07:00
|
|
|
return PAGE_ALIGN(STACK_TOP - gap - rnd);
|
2012-03-05 18:49:27 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This function, called very early during the creation of a new process VM
|
|
|
|
* image, sets up which VM layout function to use:
|
|
|
|
*/
|
2018-04-11 06:34:53 +07:00
|
|
|
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
|
2012-03-05 18:49:27 +07:00
|
|
|
{
|
2015-04-15 05:47:48 +07:00
|
|
|
unsigned long random_factor = 0UL;
|
|
|
|
|
|
|
|
if (current->flags & PF_RANDOMIZE)
|
2015-04-15 05:48:00 +07:00
|
|
|
random_factor = arch_mmap_rnd();
|
2015-04-15 05:47:48 +07:00
|
|
|
|
2012-03-05 18:49:27 +07:00
|
|
|
/*
|
|
|
|
* Fall back to the standard layout if the personality bit is set, or
|
|
|
|
* if the expected stack growth is unlimited:
|
|
|
|
*/
|
2018-04-11 06:34:53 +07:00
|
|
|
if (mmap_is_legacy(rlim_stack)) {
|
2015-04-15 05:47:48 +07:00
|
|
|
mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
|
2012-03-05 18:49:27 +07:00
|
|
|
mm->get_unmapped_area = arch_get_unmapped_area;
|
|
|
|
} else {
|
2018-04-11 06:34:53 +07:00
|
|
|
mm->mmap_base = mmap_base(random_factor, rlim_stack);
|
2012-03-05 18:49:27 +07:00
|
|
|
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* You really shouldn't be using read() or write() on /dev/mem. This might go
|
|
|
|
* away in the future.
|
|
|
|
*/
|
2014-10-02 21:56:59 +07:00
|
|
|
/*
 * /dev/mem read()/write() validity check: the range must lie within a
 * single memblock memory region that is not marked MEMBLOCK_NOMAP.
 * Adjacent regions that differ only in other attributes could in theory
 * yield a false negative, but few such attributes exist and straddling
 * them from /dev/mem is of debatable value anyway.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	if (!memblock_is_region_memory(addr, size))
		return 0;

	return memblock_is_map_memory(addr);
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Do not allow /dev/mem mappings beyond the supported physical range.
|
|
|
|
*/
|
|
|
|
/*
 * Reject /dev/mem mappings that extend beyond the physical address
 * range this CPU supports (anything above PHYS_MASK).
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	unsigned long end = (pfn << PAGE_SHIFT) + size;

	return (end & ~PHYS_MASK) == 0;
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_STRICT_DEVMEM
|
|
|
|
|
|
|
|
#include <linux/ioport.h>
|
|
|
|
|
|
|
|
/*
|
|
|
|
* devmem_is_allowed() checks to see if /dev/mem access to a certain address
|
|
|
|
* is valid. The argument is a physical page number. We mimic x86 here by
|
|
|
|
* disallowing access to system RAM as well as device-exclusive MMIO regions.
|
|
|
|
* This effectively disable read()/write() on /dev/mem.
|
|
|
|
*/
|
|
|
|
int devmem_is_allowed(unsigned long pfn)
|
|
|
|
{
|
|
|
|
if (iomem_is_exclusive(pfn << PAGE_SHIFT))
|
|
|
|
return 0;
|
|
|
|
if (!page_is_ram(pfn))
|
|
|
|
return 1;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif
|