2019-06-04 15:11:33 +07:00
|
|
|
// SPDX-License-Identifier: GPL-2.0-only
|
2015-02-14 05:39:53 +07:00
|
|
|
/*
|
|
|
|
*
|
|
|
|
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
|
|
|
|
* Author: Andrey Ryabinin <a.ryabinin@samsung.com>
|
|
|
|
*/
|
|
|
|
|
2019-07-12 10:53:52 +07:00
|
|
|
#include <linux/bitops.h>
|
2017-02-25 06:00:08 +07:00
|
|
|
#include <linux/delay.h>
|
2019-07-12 10:53:52 +07:00
|
|
|
#include <linux/kasan.h>
|
2015-02-14 05:39:53 +07:00
|
|
|
#include <linux/kernel.h>
|
2016-05-21 06:59:34 +07:00
|
|
|
#include <linux/mm.h>
|
2019-07-12 10:53:52 +07:00
|
|
|
#include <linux/mman.h>
|
|
|
|
#include <linux/module.h>
|
2015-02-14 05:39:53 +07:00
|
|
|
#include <linux/printk.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/string.h>
|
2016-05-21 06:59:34 +07:00
|
|
|
#include <linux/uaccess.h>
|
2019-09-24 05:34:16 +07:00
|
|
|
#include <linux/io.h>
|
2019-12-01 08:54:53 +07:00
|
|
|
#include <linux/vmalloc.h>
|
2019-09-24 05:34:16 +07:00
|
|
|
|
|
|
|
#include <asm/page.h>
|
2015-02-14 05:39:53 +07:00
|
|
|
|
2020-10-14 06:55:02 +07:00
|
|
|
#include <kunit/test.h>
|
|
|
|
|
2020-08-07 13:24:54 +07:00
|
|
|
#include "../mm/kasan/kasan.h"
|
|
|
|
|
|
|
|
#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_SHADOW_SCALE_SIZE)
|
|
|
|
|
kasan: stop tests being eliminated as dead code with FORTIFY_SOURCE
Patch series "Fix some incompatibilities between KASAN and FORTIFY_SOURCE", v4.
3 KASAN self-tests fail on a kernel with both KASAN and FORTIFY_SOURCE:
memchr, memcmp and strlen.
When FORTIFY_SOURCE is on, a number of functions are replaced with
fortified versions, which attempt to check the sizes of the operands.
However, these functions often directly invoke __builtin_foo() once they
have performed the fortify check. The compiler can detect that the
results of these functions are not used, and knows that they have no other
side effects, and so can eliminate them as dead code.
Why are only memchr, memcmp and strlen affected?
================================================
Of string and string-like functions, kasan_test tests:
* strchr -> not affected, no fortified version
* strrchr -> likewise
* strcmp -> likewise
* strncmp -> likewise
* strnlen -> not affected, the fortify source implementation calls the
underlying strnlen implementation which is instrumented, not
a builtin
* strlen -> affected, the fortify source implementation calls a __builtin
version which the compiler can determine is dead.
* memchr -> likewise
* memcmp -> likewise
* memset -> not affected, the compiler knows that memset writes to its
first argument and therefore is not dead.
Why does this not affect the functions normally?
================================================
In string.h, these functions are not marked as __pure, so the compiler
cannot know that they do not have side effects. If relevant functions are
marked as __pure in string.h, we see the following warnings and the
functions are elided:
lib/test_kasan.c: In function `kasan_memchr':
lib/test_kasan.c:606:2: warning: statement with no effect [-Wunused-value]
memchr(ptr, '1', size + 1);
^~~~~~~~~~~~~~~~~~~~~~~~~~
lib/test_kasan.c: In function `kasan_memcmp':
lib/test_kasan.c:622:2: warning: statement with no effect [-Wunused-value]
memcmp(ptr, arr, size+1);
^~~~~~~~~~~~~~~~~~~~~~~~
lib/test_kasan.c: In function `kasan_strings':
lib/test_kasan.c:645:2: warning: statement with no effect [-Wunused-value]
strchr(ptr, '1');
^~~~~~~~~~~~~~~~
...
This annotation would make sense to add and could be added at any point,
so the behaviour of test_kasan.c should change.
The fix
=======
Make all the functions that are pure write their results to a global,
which makes them live. The strlen and memchr tests now pass.
The memcmp test still fails to trigger, which is addressed in the next
patch.
[dja@axtens.net: drop patch 3]
Link: http://lkml.kernel.org/r/20200424145521.8203-2-dja@axtens.net
Fixes: 0c96350a2d2f ("lib/test_kasan.c: add tests for several string/memory API functions")
Signed-off-by: Daniel Axtens <dja@axtens.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Tested-by: David Gow <davidgow@google.com>
Reviewed-by: Dmitry Vyukov <dvyukov@google.com>
Cc: Daniel Micay <danielmicay@gmail.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Alexander Potapenko <glider@google.com>
Link: http://lkml.kernel.org/r/20200423154503.5103-1-dja@axtens.net
Link: http://lkml.kernel.org/r/20200423154503.5103-2-dja@axtens.net
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2020-06-04 05:56:43 +07:00
|
|
|
/*
 * We assign some test results to these globals to make sure the tests
 * are not eliminated as dead code.
 */

void *kasan_ptr_result;
int kasan_int_result;

/* KUnit resource registered under the name "kasan_data" by each test. */
static struct kunit_resource resource;
/* Shared expected/found report flags, consulted by the KASAN report path. */
static struct kunit_kasan_expectation fail_data;
/* Saved multi-shot state, restored in kasan_test_exit(). */
static bool multishot;
|
|
|
|
|
|
|
|
/* Per-test setup: enable KASAN multi-shot reporting for the duration. */
static int kasan_test_init(struct kunit *test)
{
	/*
	 * Temporarily enable multi-shot mode and set panic_on_warn=0.
	 * Otherwise, we'd only get a report for the first case.
	 */
	multishot = kasan_save_enable_multi_shot();

	return 0;
}
|
|
|
|
|
|
|
|
/* Per-test teardown: restore the multi-shot state saved in kasan_test_init(). */
static void kasan_test_exit(struct kunit *test)
{
	kasan_restore_multi_shot(multishot);
}
|
|
|
|
|
|
|
|
/**
 * KUNIT_EXPECT_KASAN_FAIL() - Causes a test failure when the expression does
 * not cause a KASAN error. This uses a KUnit resource named "kasan_data".
 * Do not use this name for a KUnit resource outside here.
 *
 */
#define KUNIT_EXPECT_KASAN_FAIL(test, condition) do { \
	fail_data.report_expected = true; \
	fail_data.report_found = false; \
	kunit_add_named_resource(test, \
				NULL, \
				NULL, \
				&resource, \
				"kasan_data", &fail_data); \
	condition; \
	KUNIT_EXPECT_EQ(test, \
			fail_data.report_expected, \
			fail_data.report_found); \
} while (0)
|
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
/* Write one byte past the end of a small kmalloc() buffer. */
static void kmalloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 123;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* OOB_TAG_OFF pushes the access past the tag granule in tag-based modes. */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 'x');
	kfree(ptr);
}
|
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
/* Read one byte before the start of a kmalloc() buffer. */
static void kmalloc_oob_left(struct kunit *test)
{
	char *ptr;
	size_t size = 15;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
	kfree(ptr);
}
|
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
/* Out-of-bounds write past a kmalloc_node() allocation (node 0). */
static void kmalloc_node_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 4096;

	ptr = kmalloc_node(size, GFP_KERNEL, 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	kfree(ptr);
}
|
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
/* OOB write on an allocation served by the page allocator (SLUB fallback). */
static void kmalloc_pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	if (!IS_ENABLED(CONFIG_SLUB)) {
		kunit_info(test, "CONFIG_SLUB is not enabled.");
		return;
	}

	/* Allocate a chunk that does not fit into a SLUB cache to trigger
	 * the page allocator fallback.
	 */
	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);
	kfree(ptr);
}
|
2018-02-07 06:36:23 +07:00
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
/* Use-after-free on a page-allocator-backed kmalloc() allocation. */
static void kmalloc_pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	if (!IS_ENABLED(CONFIG_SLUB)) {
		kunit_info(test, "CONFIG_SLUB is not enabled.");
		return;
	}

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
}
|
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
/* kfree() of a pointer offset into a page-allocator-backed allocation. */
static void kmalloc_pagealloc_invalid_free(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	if (!IS_ENABLED(CONFIG_SLUB)) {
		kunit_info(test, "CONFIG_SLUB is not enabled.");
		return;
	}

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Invalid free: the object is intentionally not freed afterwards. */
	KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
}
|
2016-03-26 04:21:56 +07:00
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
/* OOB write just past a large allocation that still fits in a slab. */
static void kmalloc_large_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;
	/* Allocate a chunk that is large enough, but still fits into a slab
	 * and does not trigger the page allocator fallback in SLUB.
	 */
	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	kfree(ptr);
}
|
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
/* OOB write past the end of a buffer grown with krealloc(). */
static void kmalloc_oob_krealloc_more(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size1 = 17;
	size_t size2 = 19;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2 + OOB_TAG_OFF] = 'x');
	kfree(ptr2);
}
|
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
/* OOB write past the end of a buffer shrunk with krealloc(). */
static void kmalloc_oob_krealloc_less(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size1 = 17;
	size_t size2 = 15;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* size2 < size1, so the byte at size2 is now poisoned. */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2 + OOB_TAG_OFF] = 'x');
	kfree(ptr2);
}
|
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
/* OOB via a 16-byte struct copy into a deliberately short allocation. */
static void kmalloc_oob_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	/* This test is specifically crafted for the generic mode. */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
		kunit_info(test, "CONFIG_KASAN_GENERIC required\n");
		return;
	}

	/* 3 bytes short, so the 16-byte struct assignment overflows ptr1. */
	ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
	kfree(ptr2);
}
|
|
|
|
|
2020-11-02 08:07:37 +07:00
|
|
|
/* Use-after-free via a 16-byte struct copy from a freed object. */
static void kmalloc_uaf_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	/* Reads *ptr2 after it was freed. */
	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
}
|
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
/* 2-byte memset() straddling the end of an 8-byte allocation. */
static void kmalloc_oob_memset_2(struct kunit *test)
{
	char *ptr;
	size_t size = 8;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 7 + OOB_TAG_OFF, 0, 2));
	kfree(ptr);
}
|
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
/* 4-byte memset() straddling the end of an 8-byte allocation. */
static void kmalloc_oob_memset_4(struct kunit *test)
{
	char *ptr;
	size_t size = 8;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 5 + OOB_TAG_OFF, 0, 4));
	kfree(ptr);
}
|
|
|
|
|
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
/* 8-byte memset() straddling the end of an 8-byte allocation. */
static void kmalloc_oob_memset_8(struct kunit *test)
{
	char *ptr;
	size_t size = 8;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 8));
	kfree(ptr);
}
|
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
/* 16-byte memset() straddling the end of a 16-byte allocation. */
static void kmalloc_oob_memset_16(struct kunit *test)
{
	char *ptr;
	size_t size = 16;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 16));
	kfree(ptr);
}
|
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
/* memset() with a length that extends past the end of the allocation. */
static void kmalloc_oob_in_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 666;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size + 5 + OOB_TAG_OFF));
	kfree(ptr);
}
|
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
static void kmalloc_memmove_invalid_size(struct kunit *test)
|
2020-04-02 11:09:40 +07:00
|
|
|
{
|
|
|
|
char *ptr;
|
|
|
|
size_t size = 64;
|
|
|
|
volatile size_t invalid_size = -2;
|
|
|
|
|
|
|
|
ptr = kmalloc(size, GFP_KERNEL);
|
2020-10-14 06:55:06 +07:00
|
|
|
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
|
2020-04-02 11:09:40 +07:00
|
|
|
|
|
|
|
memset((char *)ptr, 0, 64);
|
2020-10-14 06:55:06 +07:00
|
|
|
|
|
|
|
KUNIT_EXPECT_KASAN_FAIL(test,
|
|
|
|
memmove((char *)ptr, (char *)ptr + 4, invalid_size));
|
2020-04-02 11:09:40 +07:00
|
|
|
kfree(ptr);
|
|
|
|
}
|
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
/* Plain use-after-free: write into a freed kmalloc() buffer. */
static void kmalloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, *(ptr + 8) = 'x');
}
|
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
/* Use-after-free via memset() over the whole freed buffer. */
static void kmalloc_uaf_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 33;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
}
|
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
/*
 * Use-after-free where the freed slot may have been reallocated: write
 * through the stale ptr1 after allocating a same-sized ptr2.
 */
static void kmalloc_uaf2(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 43;

	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	kfree(ptr1);

	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr1[40] = 'x');
	/* The two allocations must not alias for the UAF above to be valid. */
	KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);

	kfree(ptr2);
}
|
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
/* Free through a page_address()/offset round-trip; must not report. */
static void kfree_via_page(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	struct page *page;
	unsigned long offset;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	page = virt_to_page(ptr);
	offset = offset_in_page(ptr);
	kfree(page_address(page) + offset);
}
|
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
/*
 * Free through a virt_to_phys()/phys_to_virt() round-trip; must not report.
 * NOTE(review): presumably phys_to_virt() does not preserve a KASAN pointer
 * tag — confirm this test is valid under tag-based modes.
 */
static void kfree_via_phys(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	phys_addr_t phys;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	phys = virt_to_phys(ptr);
	kfree(phys_to_virt(phys));
}
|
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
/* Out-of-bounds read past an object allocated from a dedicated cache. */
static void kmem_cache_oob(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache = kmem_cache_create("test_cache",
						size, 0,
						0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);
	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);
	kmem_cache_free(cache, p);
	kmem_cache_destroy(cache);
}
|
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
/* Exercise a SLAB_ACCOUNT cache; no KASAN report is expected here. */
static void memcg_accounted_kmem_cache(struct kunit *test)
{
	int i;
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	/*
	 * Several allocations with a delay to allow for lazy per memcg kmem
	 * cache creation.
	 */
	for (i = 0; i < 5; i++) {
		p = kmem_cache_alloc(cache, GFP_KERNEL);
		if (!p)
			goto free_cache;

		kmem_cache_free(cache, p);
		msleep(100);
	}

free_cache:
	kmem_cache_destroy(cache);
}
|
|
|
|
|
2015-02-14 05:39:53 +07:00
|
|
|
/* Target for the global out-of-bounds test below. */
static char global_array[10];

/* Out-of-bounds read past a global array (generic mode only). */
static void kasan_global_oob(struct kunit *test)
{
	/*
	 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
	 * from failing here and panicing the kernel, access the array via a
	 * volatile pointer, which will prevent the compiler from being able to
	 * determine the array bounds.
	 *
	 * This access uses a volatile pointer to char (char *volatile) rather
	 * than the more conventional pointer to volatile char (volatile char *)
	 * because we want to prevent the compiler from making inferences about
	 * the pointer itself (i.e. its array bounds), not the data that it
	 * refers to.
	 */
	char *volatile array = global_array;
	char *p = &array[ARRAY_SIZE(global_array) + 3];

	/* Only generic mode instruments globals. */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
		kunit_info(test, "CONFIG_KASAN_GENERIC required");
		return;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
|
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
/* ksize() unpoisons the whole usable size; only beyond it should report. */
static void ksize_unpoisons_memory(struct kunit *test)
{
	char *ptr;
	size_t size = 123, real_size;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	real_size = ksize(ptr);
	/* This access doesn't trigger an error. */
	ptr[size] = 'x';
	/* This one does. */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[real_size] = 'y');
	kfree(ptr);
}
|
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
/* Out-of-bounds read past a stack array (requires stack instrumentation). */
static void kasan_stack_oob(struct kunit *test)
{
	char stack_array[10];
	/* See comment in kasan_global_oob. */
	char *volatile array = stack_array;
	char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];

	if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
		kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
		return;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
|
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
/* Read one byte before a variable-length (alloca-style) stack array. */
static void kasan_alloca_oob_left(struct kunit *test)
{
	/* volatile length forces a true dynamic (VLA) allocation */
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob. */
	char *volatile array = alloca_array;
	char *p = array - 1;

	/* Only generic mode instruments dynamic allocas. */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
		kunit_info(test, "CONFIG_KASAN_GENERIC required");
		return;
	}

	if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
		kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
		return;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
|
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
/* Read one byte past a variable-length (alloca-style) stack array. */
static void kasan_alloca_oob_right(struct kunit *test)
{
	/* volatile length forces a true dynamic (VLA) allocation */
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob. */
	char *volatile array = alloca_array;
	char *p = array + i;

	/* Only generic mode instruments dynamic allocas. */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
		kunit_info(test, "CONFIG_KASAN_GENERIC required");
		return;
	}

	if (!IS_ENABLED(CONFIG_KASAN_STACK)) {
		kunit_info(test, "CONFIG_KASAN_STACK is not enabled");
		return;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}
|
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
/* Double-free of a slab object from a dedicated cache. */
static void kmem_cache_double_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	kmem_cache_free(cache, p);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
	kmem_cache_destroy(cache);
}
|
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
/* Free of a misaligned pointer into a SLAB_TYPESAFE_BY_RCU cache object. */
static void kmem_cache_invalid_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
				  NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	/* Trigger invalid free, the object doesn't get freed */
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));

	/*
	 * Properly free the object to prevent the "Objects remaining in
	 * test_cache on __kmem_cache_shutdown" BUG failure.
	 */
	kmem_cache_free(cache, p);

	kmem_cache_destroy(cache);
}
|
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
/* OOB read via memchr() scanning one byte past the allocation. */
static void kasan_memchr(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/* See https://bugzilla.kernel.org/show_bug.cgi?id=206337 */
	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
		kunit_info(test,
			"str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT");
		return;
	}

	/* In tag-based modes, align size to the tag granule. */
	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	/* Result stored to a global so the call is not eliminated as dead. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_ptr_result = memchr(ptr, '1', size + 1));

	kfree(ptr);
}
|
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
/* OOB read via memcmp() comparing one byte past the allocation. */
static void kasan_memcmp(struct kunit *test)
{
	char *ptr;
	size_t size = 24;
	int arr[9];

	/* See https://bugzilla.kernel.org/show_bug.cgi?id=206337 */
	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
		kunit_info(test,
			"str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT");
		return;
	}

	/* In tag-based modes, align size to the tag granule. */
	if (OOB_TAG_OFF)
		size = round_up(size, OOB_TAG_OFF);

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	memset(arr, 0, sizeof(arr));

	/* Result stored to a global so the call is not eliminated as dead. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		kasan_int_result = memcmp(ptr, arr, size+1));
	kfree(ptr);
}
|
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
/* Use-after-free via the str* family on a freed, zeroed buffer. */
static void kasan_strings(struct kunit *test)
{
	char *ptr;
	size_t size = 24;

	/* See https://bugzilla.kernel.org/show_bug.cgi?id=206337 */
	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
		kunit_info(test,
			"str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT");
		return;
	}

	ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);

	/*
	 * Try to cause only 1 invalid access (less spam in dmesg).
	 * For that we need ptr to point to zeroed byte.
	 * Skip metadata that could be stored in freed object so ptr
	 * will likely point to zeroed byte.
	 */
	ptr += 16;
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));

	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
}
|
|
|
|
|
2020-11-02 08:07:37 +07:00
|
|
|
/* Expect a KASAN report from every modifying bitop on an OOB bit. */
static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
}
|
|
|
|
|
|
|
|
/*
 * Exercise every test-and-modify bitop (atomic and non-atomic variants),
 * plus plain test_bit(), on bit @nr of @addr. As with kasan_bitops_modify(),
 * callers pass a @nr outside the allocation, so each access must be
 * reported by KASAN. Results are sunk into kasan_int_result where the op
 * returns a value, so the access cannot be optimized away.
 */
static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
{
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));

	/* Not every architecture provides this op; test it only where defined. */
#if defined(clear_bit_unlock_is_negative_byte)
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
				clear_bit_unlock_is_negative_byte(nr, addr));
#endif
}
|
|
|
|
|
|
|
|
static void kasan_bitops_generic(struct kunit *test)
|
2019-07-12 10:53:52 +07:00
|
|
|
{
|
2020-11-02 08:07:37 +07:00
|
|
|
long *bits;
|
|
|
|
|
|
|
|
/* This test is specifically crafted for the generic mode. */
|
|
|
|
if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
|
|
|
|
kunit_info(test, "CONFIG_KASAN_GENERIC required\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2019-07-12 10:53:52 +07:00
|
|
|
/*
|
|
|
|
* Allocate 1 more byte, which causes kzalloc to round up to 16-bytes;
|
|
|
|
* this way we do not actually corrupt other memory.
|
|
|
|
*/
|
2020-11-02 08:07:37 +07:00
|
|
|
bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
|
2020-10-14 06:55:06 +07:00
|
|
|
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
|
2019-07-12 10:53:52 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Below calls try to access bit within allocated memory; however, the
|
|
|
|
* below accesses are still out-of-bounds, since bitops are defined to
|
|
|
|
* operate on the whole long the bit is in.
|
|
|
|
*/
|
2020-11-02 08:07:37 +07:00
|
|
|
kasan_bitops_modify(test, BITS_PER_LONG, bits);
|
2019-07-12 10:53:52 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Below calls try to access bit beyond allocated memory.
|
|
|
|
*/
|
2020-11-02 08:07:37 +07:00
|
|
|
kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);
|
2019-07-12 10:53:52 +07:00
|
|
|
|
2020-11-02 08:07:37 +07:00
|
|
|
kfree(bits);
|
|
|
|
}
|
2019-07-12 10:53:52 +07:00
|
|
|
|
2020-11-02 08:07:37 +07:00
|
|
|
/*
 * Verify that tag-based KASAN catches out-of-bounds bitops. Unlike the
 * generic-mode test above, the accesses here are aimed at the redzone
 * that follows the 48-byte object inside the kmalloc-64 cache.
 */
static void kasan_bitops_tags(struct kunit *test)
{
	long *bits;

	/* This test is specifically crafted for the tag-based mode. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC)) {
		kunit_info(test, "CONFIG_KASAN_SW_TAGS required\n");
		return;
	}

	/* kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
	bits = kzalloc(48, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);

	/* Do the accesses past the 48 allocated bytes, but within the redzone. */
	kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
	kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);

	kfree(bits);
}
|
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
static void kmalloc_double_kzfree(struct kunit *test)
|
2019-07-12 10:54:11 +07:00
|
|
|
{
|
|
|
|
char *ptr;
|
|
|
|
size_t size = 16;
|
|
|
|
|
|
|
|
ptr = kmalloc(size, GFP_KERNEL);
|
2020-10-14 06:55:06 +07:00
|
|
|
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
|
2019-07-12 10:54:11 +07:00
|
|
|
|
2020-08-07 13:18:13 +07:00
|
|
|
kfree_sensitive(ptr);
|
2020-10-14 06:55:06 +07:00
|
|
|
KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
|
2019-07-12 10:54:11 +07:00
|
|
|
}
|
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
static void vmalloc_oob(struct kunit *test)
|
2019-12-01 08:54:53 +07:00
|
|
|
{
|
|
|
|
void *area;
|
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
if (!IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
|
|
|
|
kunit_info(test, "CONFIG_KASAN_VMALLOC is not enabled.");
|
|
|
|
return;
|
|
|
|
}
|
2019-12-01 08:54:53 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* We have to be careful not to hit the guard page.
|
|
|
|
* The MMU will catch that and crash us.
|
|
|
|
*/
|
|
|
|
area = vmalloc(3000);
|
2020-10-14 06:55:06 +07:00
|
|
|
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, area);
|
2019-12-01 08:54:53 +07:00
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)area)[3100]);
|
2019-12-01 08:54:53 +07:00
|
|
|
vfree(area);
|
|
|
|
}
|
2020-08-07 13:24:42 +07:00
|
|
|
|
2020-10-14 06:55:06 +07:00
|
|
|
/*
 * Ordered table of all KASAN test cases, terminated by an empty entry as
 * required by KUnit. Registered with the suite definition below.
 */
static struct kunit_case kasan_kunit_test_cases[] = {
	KUNIT_CASE(kmalloc_oob_right),
	KUNIT_CASE(kmalloc_oob_left),
	KUNIT_CASE(kmalloc_node_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_oob_right),
	KUNIT_CASE(kmalloc_pagealloc_uaf),
	KUNIT_CASE(kmalloc_pagealloc_invalid_free),
	KUNIT_CASE(kmalloc_large_oob_right),
	KUNIT_CASE(kmalloc_oob_krealloc_more),
	KUNIT_CASE(kmalloc_oob_krealloc_less),
	KUNIT_CASE(kmalloc_oob_16),
	KUNIT_CASE(kmalloc_uaf_16),
	KUNIT_CASE(kmalloc_oob_in_memset),
	KUNIT_CASE(kmalloc_oob_memset_2),
	KUNIT_CASE(kmalloc_oob_memset_4),
	KUNIT_CASE(kmalloc_oob_memset_8),
	KUNIT_CASE(kmalloc_oob_memset_16),
	KUNIT_CASE(kmalloc_memmove_invalid_size),
	KUNIT_CASE(kmalloc_uaf),
	KUNIT_CASE(kmalloc_uaf_memset),
	KUNIT_CASE(kmalloc_uaf2),
	KUNIT_CASE(kfree_via_page),
	KUNIT_CASE(kfree_via_phys),
	KUNIT_CASE(kmem_cache_oob),
	KUNIT_CASE(memcg_accounted_kmem_cache),
	KUNIT_CASE(kasan_global_oob),
	KUNIT_CASE(kasan_stack_oob),
	KUNIT_CASE(kasan_alloca_oob_left),
	KUNIT_CASE(kasan_alloca_oob_right),
	KUNIT_CASE(ksize_unpoisons_memory),
	KUNIT_CASE(kmem_cache_double_free),
	KUNIT_CASE(kmem_cache_invalid_free),
	KUNIT_CASE(kasan_memchr),
	KUNIT_CASE(kasan_memcmp),
	KUNIT_CASE(kasan_strings),
	KUNIT_CASE(kasan_bitops_generic),
	KUNIT_CASE(kasan_bitops_tags),
	KUNIT_CASE(kmalloc_double_kzfree),
	KUNIT_CASE(vmalloc_oob),
	{}
};
|
|
|
|
|
|
|
|
/*
 * The KASAN KUnit suite. KUnit runs the .init/.exit hooks around each test
 * case; kasan_test_init()/kasan_test_exit() are defined earlier in this file.
 */
static struct kunit_suite kasan_kunit_test_suite = {
	.name = "kasan",
	.init = kasan_test_init,
	.test_cases = kasan_kunit_test_cases,
	.exit = kasan_test_exit,
};
|
|
|
|
|
|
|
|
/* Register the suite with the KUnit framework so it is executed automatically. */
kunit_test_suite(kasan_kunit_test_suite);

MODULE_LICENSE("GPL");
|