mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-23 20:03:05 +07:00
42e4089c78
For L1TF PROT_NONE mappings are protected by inverting the PFN in the page table entry. This sets the high bits in the CPU's address space, thus making sure to point to not point an unmapped entry to valid cached memory. Some server system BIOSes put the MMIO mappings high up in the physical address space. If such an high mapping was mapped to unprivileged users they could attack low memory by setting such a mapping to PROT_NONE. This could happen through a special device driver which is not access protected. Normal /dev/mem is of course access protected. To avoid this forbid PROT_NONE mappings or mprotect for high MMIO mappings. Valid page mappings are allowed because the system is then unsafe anyways. It's not expected that users commonly use PROT_NONE on MMIO. But to minimize any impact this is only enforced if the mapping actually refers to a high MMIO address (defined as the MAX_PA-1 bit being set), and also skip the check for root. For mmaps this is straight forward and can be handled in vm_insert_pfn and in remap_pfn_range(). For mprotect it's a bit trickier. At the point where the actual PTEs are accessed a lot of state has been changed and it would be difficult to undo on an error. Since this is a uncommon case use a separate early page talk walk pass for MMIO PROT_NONE mappings that checks for this condition early. For non MMIO and non PROT_NONE there are no changes. Signed-off-by: Andi Kleen <ak@linux.intel.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com> Acked-by: Dave Hansen <dave.hansen@intel.com>
264 lines
7.6 KiB
C
264 lines
7.6 KiB
C
/*
 * Flexible mmap layout support
 *
 * Based on code by Ingo Molnar and Andi Kleen, copyrighted
 * as follows:
 *
 * Copyright 2003-2009 Red Hat Inc.
 * All Rights Reserved.
 * Copyright 2005 Andi Kleen, SUSE Labs.
 * Copyright 2007 Jiri Kosina, SUSE Labs.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
|
|
|
|
#include <linux/personality.h>
|
|
#include <linux/mm.h>
|
|
#include <linux/random.h>
|
|
#include <linux/limits.h>
|
|
#include <linux/sched/signal.h>
|
|
#include <linux/sched/mm.h>
|
|
#include <linux/compat.h>
|
|
#include <asm/elf.h>
|
|
|
|
#include "physaddr.h"
|
|
|
|
/*
 * Address-alignment policy for mmap().  .flags = -1 marks the structure
 * as "not yet configured"; presumably it is filled in during CPU feature
 * setup elsewhere in the tree — not visible in this file, confirm there.
 */
struct va_alignment __read_mostly va_align = {
	.flags = -1,
};
|
|
|
|
/* Upper limit of the user address space for 32-bit (compat) tasks. */
unsigned long task_size_32bit(void)
{
	return IA32_PAGE_OFFSET;
}
|
|
|
|
unsigned long task_size_64bit(int full_addr_space)
|
|
{
|
|
return full_addr_space ? TASK_SIZE_MAX : DEFAULT_MAP_WINDOW;
|
|
}
|
|
|
|
static unsigned long stack_maxrandom_size(unsigned long task_size)
|
|
{
|
|
unsigned long max = 0;
|
|
if (current->flags & PF_RANDOMIZE) {
|
|
max = (-1UL) & __STACK_RND_MASK(task_size == task_size_32bit());
|
|
max <<= PAGE_SHIFT;
|
|
}
|
|
|
|
return max;
|
|
}
|
|
|
|
/*
 * Randomization-entropy selection: with CONFIG_COMPAT, 32-bit mappings use
 * the (smaller) compat entropy; without it there is only one knob.
 */
#ifdef CONFIG_COMPAT
# define mmap32_rnd_bits  mmap_rnd_compat_bits
# define mmap64_rnd_bits  mmap_rnd_bits
#else
# define mmap32_rnd_bits  mmap_rnd_bits
# define mmap64_rnd_bits  mmap_rnd_bits
#endif

/* Minimum gap kept between the mmap area and the stack. */
#define SIZE_128M    (128 * 1024 * 1024UL)
|
|
|
|
static int mmap_is_legacy(void)
|
|
{
|
|
if (current->personality & ADDR_COMPAT_LAYOUT)
|
|
return 1;
|
|
|
|
return sysctl_legacy_va_layout;
|
|
}
|
|
|
|
static unsigned long arch_rnd(unsigned int rndbits)
|
|
{
|
|
if (!(current->flags & PF_RANDOMIZE))
|
|
return 0;
|
|
return (get_random_long() & ((1UL << rndbits) - 1)) << PAGE_SHIFT;
|
|
}
|
|
|
|
unsigned long arch_mmap_rnd(void)
|
|
{
|
|
return arch_rnd(mmap_is_ia32() ? mmap32_rnd_bits : mmap64_rnd_bits);
|
|
}
|
|
|
|
static unsigned long mmap_base(unsigned long rnd, unsigned long task_size,
|
|
struct rlimit *rlim_stack)
|
|
{
|
|
unsigned long gap = rlim_stack->rlim_cur;
|
|
unsigned long pad = stack_maxrandom_size(task_size) + stack_guard_gap;
|
|
unsigned long gap_min, gap_max;
|
|
|
|
/* Values close to RLIM_INFINITY can overflow. */
|
|
if (gap + pad > gap)
|
|
gap += pad;
|
|
|
|
/*
|
|
* Top of mmap area (just below the process stack).
|
|
* Leave an at least ~128 MB hole with possible stack randomization.
|
|
*/
|
|
gap_min = SIZE_128M;
|
|
gap_max = (task_size / 6) * 5;
|
|
|
|
if (gap < gap_min)
|
|
gap = gap_min;
|
|
else if (gap > gap_max)
|
|
gap = gap_max;
|
|
|
|
return PAGE_ALIGN(task_size - gap - rnd);
|
|
}
|
|
|
|
/* Bottom-up mmap base: the architectural unmapped base plus random offset. */
static unsigned long mmap_legacy_base(unsigned long rnd,
				      unsigned long task_size)
{
	unsigned long base = __TASK_UNMAPPED_BASE(task_size);

	return base + rnd;
}
|
|
|
|
/*
 * Called very early during creation of a new process VM image: compute
 * both the legacy (bottom-up) base and the base actually used, which is
 * either the legacy base or the top-down base depending on policy.
 */
static void arch_pick_mmap_base(unsigned long *base, unsigned long *legacy_base,
		unsigned long random_factor, unsigned long task_size,
		struct rlimit *rlim_stack)
{
	*legacy_base = mmap_legacy_base(random_factor, task_size);

	*base = mmap_is_legacy() ? *legacy_base :
		mmap_base(random_factor, task_size, rlim_stack);
}
|
|
|
|
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
|
|
{
|
|
if (mmap_is_legacy())
|
|
mm->get_unmapped_area = arch_get_unmapped_area;
|
|
else
|
|
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
|
|
|
|
arch_pick_mmap_base(&mm->mmap_base, &mm->mmap_legacy_base,
|
|
arch_rnd(mmap64_rnd_bits), task_size_64bit(0),
|
|
rlim_stack);
|
|
|
|
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
|
|
/*
|
|
* The mmap syscall mapping base decision depends solely on the
|
|
* syscall type (64-bit or compat). This applies for 64bit
|
|
* applications and 32bit applications. The 64bit syscall uses
|
|
* mmap_base, the compat syscall uses mmap_compat_base.
|
|
*/
|
|
arch_pick_mmap_base(&mm->mmap_compat_base, &mm->mmap_compat_legacy_base,
|
|
arch_rnd(mmap32_rnd_bits), task_size_32bit(),
|
|
rlim_stack);
|
|
#endif
|
|
}
|
|
|
|
unsigned long get_mmap_base(int is_legacy)
|
|
{
|
|
struct mm_struct *mm = current->mm;
|
|
|
|
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
|
|
if (in_compat_syscall()) {
|
|
return is_legacy ? mm->mmap_compat_legacy_base
|
|
: mm->mmap_compat_base;
|
|
}
|
|
#endif
|
|
return is_legacy ? mm->mmap_legacy_base : mm->mmap_base;
|
|
}
|
|
|
|
const char *arch_vma_name(struct vm_area_struct *vma)
|
|
{
|
|
if (vma->vm_flags & VM_MPX)
|
|
return "[mpx]";
|
|
return NULL;
|
|
}
|
|
|
|
/**
|
|
* mmap_address_hint_valid - Validate the address hint of mmap
|
|
* @addr: Address hint
|
|
* @len: Mapping length
|
|
*
|
|
* Check whether @addr and @addr + @len result in a valid mapping.
|
|
*
|
|
* On 32bit this only checks whether @addr + @len is <= TASK_SIZE.
|
|
*
|
|
* On 64bit with 5-level page tables another sanity check is required
|
|
* because mappings requested by mmap(@addr, 0) which cross the 47-bit
|
|
* virtual address boundary can cause the following theoretical issue:
|
|
*
|
|
* An application calls mmap(addr, 0), i.e. without MAP_FIXED, where @addr
|
|
* is below the border of the 47-bit address space and @addr + @len is
|
|
* above the border.
|
|
*
|
|
* With 4-level paging this request succeeds, but the resulting mapping
|
|
* address will always be within the 47-bit virtual address space, because
|
|
* the hint address does not result in a valid mapping and is
|
|
* ignored. Hence applications which are not prepared to handle virtual
|
|
* addresses above 47-bit work correctly.
|
|
*
|
|
* With 5-level paging this request would be granted and result in a
|
|
* mapping which crosses the border of the 47-bit virtual address
|
|
* space. If the application cannot handle addresses above 47-bit this
|
|
* will lead to misbehaviour and hard to diagnose failures.
|
|
*
|
|
* Therefore ignore address hints which would result in a mapping crossing
|
|
* the 47-bit virtual address boundary.
|
|
*
|
|
* Note, that in the same scenario with MAP_FIXED the behaviour is
|
|
* different. The request with @addr < 47-bit and @addr + @len > 47-bit
|
|
* fails on a 4-level paging machine but succeeds on a 5-level paging
|
|
* machine. It is reasonable to expect that an application does not rely on
|
|
* the failure of such a fixed mapping request, so the restriction is not
|
|
* applied.
|
|
*/
|
|
bool mmap_address_hint_valid(unsigned long addr, unsigned long len)
|
|
{
|
|
if (TASK_SIZE - len < addr)
|
|
return false;
|
|
|
|
return (addr > DEFAULT_MAP_WINDOW) == (addr + len > DEFAULT_MAP_WINDOW);
|
|
}
|
|
|
|
/* Can we access it for direct reading/writing? Must be RAM: */
|
|
int valid_phys_addr_range(phys_addr_t addr, size_t count)
|
|
{
|
|
return addr + count <= __pa(high_memory);
|
|
}
|
|
|
|
/* Can we access it through mmap? Must be a valid physical address: */
|
|
int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
|
|
{
|
|
phys_addr_t addr = (phys_addr_t)pfn << PAGE_SHIFT;
|
|
|
|
return phys_addr_valid(addr + count - 1);
|
|
}
|
|
|
|
/*
|
|
* Only allow root to set high MMIO mappings to PROT_NONE.
|
|
* This prevents an unpriv. user to set them to PROT_NONE and invert
|
|
* them, then pointing to valid memory for L1TF speculation.
|
|
*
|
|
* Note: for locked down kernels may want to disable the root override.
|
|
*/
|
|
bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
|
|
{
|
|
if (!boot_cpu_has_bug(X86_BUG_L1TF))
|
|
return true;
|
|
if (!__pte_needs_invert(pgprot_val(prot)))
|
|
return true;
|
|
/* If it's real memory always allow */
|
|
if (pfn_valid(pfn))
|
|
return true;
|
|
if (pfn > l1tf_pfn_limit() && !capable(CAP_SYS_ADMIN))
|
|
return false;
|
|
return true;
|
|
}
|