/*
 * This implements the various checks for CONFIG_HARDENED_USERCOPY*,
 * which are designed to protect kernel memory from needless exposure
 * and overwrite under many unintended conditions. This code is based
 * on PAX_USERCOPY, which is:
 *
 * Copyright (C) 2001-2016 PaX Team, Bradley Spengler, Open Source
 * Security Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/thread_info.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include <asm/sections.h>

/*
 * Checks if a given pointer and length are contained by the current
 * stack frame (if possible).
 *
 * Returns:
 *	NOT_STACK: not at all on the stack
 *	GOOD_FRAME: fully within a valid stack frame
 *	GOOD_STACK: fully on the stack (when can't do frame-checking)
 *	BAD_STACK: error condition (invalid stack position or bad stack frame)
 */
static noinline int check_stack_object(const void *obj, unsigned long len)
{
	const void * const stack = task_stack_page(current);
	const void * const stackend = stack + THREAD_SIZE;
	int ret;

	/* Object is not on the stack at all. */
	if (obj + len <= stack || stackend <= obj)
		return NOT_STACK;

	/*
	 * Reject: object partially overlaps the stack (passing the
	 * check above means at least one end is within the stack,
	 * so if this check fails, the other end is outside the stack).
	 */
	if (obj < stack || stackend < obj + len)
		return BAD_STACK;

	/* Check if object is safely within a valid frame. */
	ret = arch_within_stack_frames(stack, stackend, obj, len);
	if (ret)
		return ret;

	return GOOD_STACK;
}
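
/*
 * Illustrative sketch (not code from this file): a typical bug the stack
 * checks catch is a copy_to_user() from an on-stack buffer with a
 * user-influenced length:
 *
 *	char buf[16];
 *	...
 *	if (copy_to_user(ubuf, buf, user_len))
 *		return -EFAULT;
 *
 * With user_len > 16, architectures providing arch_within_stack_frames()
 * (CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES) can reject the copy for leaving
 * its stack frame; without frame checking, it is rejected only once it
 * reaches past the stack boundaries (BAD_STACK above).
 */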

/*
 * If these functions are reached, then CONFIG_HARDENED_USERCOPY has found
 * an unexpected state during a copy_from_user() or copy_to_user() call.
 * Several checks are performed on the buffer by the __check_object_size()
 * function. Normal stack buffer usage should never trip the checks, and
 * kernel text addressing will always trip them. For cache objects, the
 * check ensures that only the whitelisted range of bytes for a given
 * cache is accessed (via the cache's usersize and useroffset fields). To
 * adjust a cache whitelist, use the usercopy-aware
 * kmem_cache_create_usercopy() function to create the cache (and
 * carefully audit the whitelist range).
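 *
 * Illustrative sketch (hypothetical cache, not defined in this file):
 * a cache whose objects should expose only their 'data' bytes to user
 * space can be whitelisted at creation time:
 *
 *	struct foo {
 *		u32 flags;
 *		char data[64];
 *	};
 *
 *	foo_cachep = kmem_cache_create_usercopy("foo", sizeof(struct foo),
 *				0, SLAB_HWCACHE_ALIGN,
 *				offsetof(struct foo, data),
 *				sizeof_field(struct foo, data), NULL);
 *
 * Copies confined to [useroffset, useroffset + usersize) pass; a copy
 * touching 'flags' would be reported by the functions below.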
 */
void usercopy_warn(const char *name, const char *detail, bool to_user,
		   unsigned long offset, unsigned long len)
{
	WARN_ONCE(1, "Bad or missing usercopy whitelist? Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to",
		 name ? : "unknown?!",
		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);
}

void __noreturn usercopy_abort(const char *name, const char *detail,
			       bool to_user, unsigned long offset,
			       unsigned long len)
{
	pr_emerg("Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n",
		 to_user ? "exposure" : "overwrite",
		 to_user ? "from" : "to",
		 name ? : "unknown?!",
		 detail ? " '" : "", detail ? : "", detail ? "'" : "",
		 offset, len);

	/*
	 * For greater effect, it would be nice to do do_group_exit(),
	 * but BUG() actually hooks all the lock-breaking and per-arch
	 * Oops code, so that is used here instead.
	 */
	BUG();
}

/* Returns true if any portion of [ptr,ptr+n) overlaps with [low,high). */
static bool overlaps(const unsigned long ptr, unsigned long n,
		     unsigned long low, unsigned long high)
{
	const unsigned long check_low = ptr;
	unsigned long check_high = check_low + n;

	/* Does not overlap if entirely above or entirely below. */
	if (check_low >= high || check_high <= low)
		return false;

	return true;
}

/* Is this address range in the kernel text area? */
static inline void check_kernel_text_object(const unsigned long ptr,
					    unsigned long n, bool to_user)
{
	unsigned long textlow = (unsigned long)_stext;
	unsigned long texthigh = (unsigned long)_etext;
	unsigned long textlow_linear, texthigh_linear;

	if (overlaps(ptr, n, textlow, texthigh))
		usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n);

	/*
	 * Some architectures have virtual memory mappings with a secondary
	 * mapping of the kernel text, i.e. there is more than one virtual
	 * kernel address that points to the kernel image. This usually
	 * happens when there is a separate linear physical memory mapping,
	 * in which case __pa() is not simply the reverse of __va(). This
	 * can be detected and checked:
	 */
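	/*
	 * (lm_alias() resolves a kernel-image symbol address to its
	 * linear-map alias; <linux/mm.h> defines it as __va(__pa_symbol(x)).)
	 */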
	textlow_linear = (unsigned long)lm_alias(textlow);
	/* No different mapping: we're done. */
	if (textlow_linear == textlow)
		return;

	/* Check the secondary mapping... */
	texthigh_linear = (unsigned long)lm_alias(texthigh);
	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
		usercopy_abort("linear kernel text", NULL, to_user,
			       ptr - textlow_linear, n);
}

static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
				       bool to_user)
{
	/* Reject if object wraps past end of memory. */
	if (ptr + n < ptr)
		usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n);

	/* Reject if NULL or ZERO-allocation. */
	if (ZERO_OR_NULL_PTR(ptr))
		usercopy_abort("null address", NULL, to_user, ptr, n);
}
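
/*
 * Arithmetic sketch of the wrap check above: on 64-bit, ptr =
 * 0xffffffffffffff00 with n = 0x200 yields ptr + n = 0x100, which is
 * less than ptr, so the copy is rejected before its range could alias
 * low memory.
 */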

/* Checks for allocs that are marked in some way as spanning multiple pages. */
static inline void check_page_span(const void *ptr, unsigned long n,
				   struct page *page, bool to_user)
{
#ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN
	const void *end = ptr + n - 1;
	struct page *endpage;
	bool is_reserved, is_cma;

	/*
	 * Sometimes the kernel data regions are not marked Reserved (see
	 * check below). And sometimes [_sdata,_edata) does not cover
	 * rodata and/or bss, so check each range explicitly.
	 */

	/* Allow reads of kernel rodata region (if not marked as Reserved). */
	if (ptr >= (const void *)__start_rodata &&
	    end <= (const void *)__end_rodata) {
		if (!to_user)
			usercopy_abort("rodata", NULL, to_user, 0, n);
		return;
	}

	/* Allow kernel data region (if not marked as Reserved). */
	if (ptr >= (const void *)_sdata && end <= (const void *)_edata)
		return;

	/* Allow kernel bss region (if not marked as Reserved). */
	if (ptr >= (const void *)__bss_start &&
	    end <= (const void *)__bss_stop)
		return;

	/* Is the object wholly within one base page? */
	if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
		   ((unsigned long)end & (unsigned long)PAGE_MASK)))
		return;

	/* Allow if fully inside the same compound (__GFP_COMP) page. */
	endpage = virt_to_head_page(end);
	if (likely(endpage == page))
		return;

	/*
	 * Allow if the range is entirely either Reserved (i.e. special
	 * or device memory) or CMA; the loop below verifies this page by
	 * page. Otherwise, reject, since the object spans several
	 * independently allocated pages.
	 */
	is_reserved = PageReserved(page);
	is_cma = is_migrate_cma_page(page);
	if (!is_reserved && !is_cma)
		usercopy_abort("spans multiple pages", NULL, to_user, 0, n);

	for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) {
		page = virt_to_head_page(ptr);
		if (is_reserved && !PageReserved(page))
			usercopy_abort("spans Reserved and non-Reserved pages",
				       NULL, to_user, 0, n);
		if (is_cma && !is_migrate_cma_page(page))
			usercopy_abort("spans CMA and non-CMA pages", NULL,
				       to_user, 0, n);
	}
#endif
}
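
/*
 * Sketch of the compound-page distinction above (assuming SLUB-style
 * large kmalloc and 4 KiB base pages): an 8 KiB allocation handed out by
 * the page allocator with __GFP_COMP is one compound page, so a copy
 * spanning its two base pages shares a head page and is accepted, while
 * the same copy across two independently allocated order-0 pages is
 * rejected as "spans multiple pages".
 */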

static inline void check_heap_object(const void *ptr, unsigned long n,
				     bool to_user)
{
	struct page *page;

	if (!virt_addr_valid(ptr))
		return;

	page = virt_to_head_page(ptr);

	if (PageSlab(page)) {
		/* Check slab allocator for flags and size. */
		__check_heap_object(ptr, n, page, to_user);
	} else {
		/* Verify object does not incorrectly span multiple pages. */
		check_page_span(ptr, n, page, to_user);
	}
}
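
/*
 * When true, all usercopy checks are skipped; flipped at boot by
 * set_hardened_usercopy() below when "hardened_usercopy=off" is given.
 * The static branch keeps the disabled case nearly free.
 */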
static DEFINE_STATIC_KEY_FALSE_RO(bypass_usercopy_checks);

/*
 * Validates that the given object is:
 * - not bogus address
 * - known-safe heap or stack object
 * - not in kernel text
 */
void __check_object_size(const void *ptr, unsigned long n, bool to_user)
{
	if (static_branch_unlikely(&bypass_usercopy_checks))
		return;

	/* Skip all tests if size is zero. */
	if (!n)
		return;

	/* Check for invalid addresses. */
	check_bogus_address((const unsigned long)ptr, n, to_user);

	/* Check for bad heap object. */
	check_heap_object(ptr, n, to_user);

	/* Check for bad stack object. */
	switch (check_stack_object(ptr, n)) {
	case NOT_STACK:
		/* Object is not touching the current process stack. */
		break;
	case GOOD_FRAME:
	case GOOD_STACK:
		/*
		 * Object is either in the correct frame (when it
		 * is possible to check) or just generally on the
		 * process stack (when frame checking not available).
		 */
		return;
	default:
		usercopy_abort("process stack", NULL, to_user, 0, n);
	}

	/* Check for object in kernel to avoid text exposure. */
	check_kernel_text_object((const unsigned long)ptr, n, to_user);
}
EXPORT_SYMBOL(__check_object_size);
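
/*
 * (__check_object_size() is normally reached via the check_object_size()
 * wrapper in <linux/thread_info.h>, which the copy_to_user() /
 * copy_from_user() paths call when CONFIG_HARDENED_USERCOPY is enabled.)
 */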

static bool enable_checks __initdata = true;

static int __init parse_hardened_usercopy(char *str)
{
	return strtobool(str, &enable_checks);
}

__setup("hardened_usercopy=", parse_hardened_usercopy);
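
/*
 * e.g. booting with "hardened_usercopy=off" (or "=0"/"=n"; anything
 * strtobool() parses as false) clears enable_checks so that
 * set_hardened_usercopy() below enables the bypass static branch.
 */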

static int __init set_hardened_usercopy(void)
{
	if (!enable_checks)
		static_branch_enable(&bypass_usercopy_checks);
	return 1;
}

late_initcall(set_hardened_usercopy);