Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-04 01:56:40 +07:00)

Merge branch 'kcsan' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into locking/core

Pull KCSAN updates for v5.10 from Paul E. McKenney:

 - Improve kernel messages.

 - Be more permissive with bitops races under KCSAN_ASSUME_PLAIN_WRITES_ATOMIC=y.

 - Optimize debugfs stat counters.

 - Introduce the instrument_*read_write() annotations, to provide a finer
   description of certain ops - using KCSAN's compound instrumentation.
   Use them for atomic RMW and bitops, where appropriate. Doing this
   might find new races. (Depends on the compiler having
   tsan-compound-read-before-write=1 support.)

 - Support atomic built-ins, which will help certain architectures, such
   as s390.

 - Misc enhancements and smaller fixes.

Signed-off-by: Ingo Molnar <mingo@kernel.org>

Commit: d6c4c11348

[One file's diff was suppressed because it is too large.]
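Note on the "compound instrumentation" item above: a plain read-modify-write such as (*p)++ was previously instrumented as an independent read plus an independent write; with the compiler's tsan-compound-read-before-write=1 support it is instrumented once, as a read-write, which KCSAN tags with KCSAN_ACCESS_COMPOUND. A minimal sketch of the idea (illustrative only; the exact call the compiler emits depends on access size and compiler version):

/* Illustrative only: what compound-instrumented code conceptually looks like. */
void inc(int *p)
{
        /*
         * With -fsanitize=thread and tsan-compound-read-before-write=1,
         * the compiler conceptually inserts one call:
         *
         *         __tsan_read_write4(p);
         *
         * instead of the older separate pair:
         *
         *         __tsan_read4(p);
         *         ...
         *         __tsan_write4(p);
         */
        (*p)++;
}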
@@ -67,7 +67,7 @@ static inline void change_bit(long nr, volatile unsigned long *addr)
  */
 static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
 {
-        instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
+        instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
         return arch_test_and_set_bit(nr, addr);
 }
 
@@ -80,7 +80,7 @@ static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
  */
 static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
-        instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
+        instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
         return arch_test_and_clear_bit(nr, addr);
 }
 
@@ -93,7 +93,7 @@ static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
  */
 static inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
 {
-        instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
+        instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
         return arch_test_and_change_bit(nr, addr);
 }
 
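Note: the test_and_*_bit() wrappers above instrument the whole word containing the bit, which is why they pass addr + BIT_WORD(nr) and sizeof(long); reclassifying them from atomic write to atomic read-write lets KCSAN also check the read half of the RMW. A small standalone sketch of the word addressing, with BIT_WORD defined as in the kernel's bitops headers:

#include <assert.h>
#include <limits.h>

#define BITS_PER_LONG   (sizeof(long) * CHAR_BIT)
#define BIT_WORD(nr)    ((nr) / BITS_PER_LONG)

int main(void)
{
        unsigned long bitmap[4] = { 0 };
        unsigned long nr = BITS_PER_LONG + 6;   /* bit 6 of word 1 */

        /* The checked location is the whole word containing bit nr. */
        unsigned long *watched = &bitmap[BIT_WORD(nr)];

        assert(watched == &bitmap[1]);
        assert(sizeof(*watched) == sizeof(long));
        return 0;
}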
@@ -52,7 +52,7 @@ static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
  */
 static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
 {
-        instrument_atomic_write(addr + BIT_WORD(nr), sizeof(long));
+        instrument_atomic_read_write(addr + BIT_WORD(nr), sizeof(long));
         return arch_test_and_set_bit_lock(nr, addr);
 }
 
@@ -58,6 +58,30 @@ static inline void __change_bit(long nr, volatile unsigned long *addr)
         arch___change_bit(nr, addr);
 }
 
+static inline void __instrument_read_write_bitop(long nr, volatile unsigned long *addr)
+{
+        if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC)) {
+                /*
+                 * We treat non-atomic read-write bitops a little more special.
+                 * Given the operations here only modify a single bit, assuming
+                 * non-atomicity of the writer is sufficient may be reasonable
+                 * for certain usage (and follows the permissible nature of the
+                 * assume-plain-writes-atomic rule):
+                 *   1. report read-modify-write races -> check read;
+                 *   2. do not report races with marked readers, but do report
+                 *      races with unmarked readers -> check "atomic" write.
+                 */
+                kcsan_check_read(addr + BIT_WORD(nr), sizeof(long));
+                /*
+                 * Use generic write instrumentation, in case other sanitizers
+                 * or tools are enabled alongside KCSAN.
+                 */
+                instrument_write(addr + BIT_WORD(nr), sizeof(long));
+        } else {
+                instrument_read_write(addr + BIT_WORD(nr), sizeof(long));
+        }
+}
+
 /**
  * __test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
@@ -68,7 +92,7 @@ static inline void __change_bit(long nr, volatile unsigned long *addr)
  */
 static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
 {
-        instrument_write(addr + BIT_WORD(nr), sizeof(long));
+        __instrument_read_write_bitop(nr, addr);
         return arch___test_and_set_bit(nr, addr);
 }
 
@@ -82,7 +106,7 @@ static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
  */
 static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
 {
-        instrument_write(addr + BIT_WORD(nr), sizeof(long));
+        __instrument_read_write_bitop(nr, addr);
         return arch___test_and_clear_bit(nr, addr);
 }
 
@@ -96,7 +120,7 @@ static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
  */
 static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
 {
-        instrument_write(addr + BIT_WORD(nr), sizeof(long));
+        __instrument_read_write_bitop(nr, addr);
         return arch___test_and_change_bit(nr, addr);
 }
 
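Note: the helper added above picks between two checking strategies. A compact userspace model with the checkers stubbed out may help; the stub names (check_read and friends as plain functions) are invented for the sketch:

#include <stdbool.h>
#include <stdio.h>

static bool assume_plain_writes_atomic; /* models CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC */

static void check_read(void)       { puts("check read"); }
static void check_write(void)      { puts("check write"); }
static void check_read_write(void) { puts("check compound read-write"); }

static void instrument_rw_bitop(void)
{
        if (assume_plain_writes_atomic) {
                check_read();   /* 1. still report read-modify-write races */
                check_write();  /* 2. report races with unmarked readers only */
        } else {
                check_read_write();
        }
}

int main(void)
{
        assume_plain_writes_atomic = true;
        instrument_rw_bitop();
        assume_plain_writes_atomic = false;
        instrument_rw_bitop();
        return 0;
}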
@@ -42,6 +42,21 @@ static __always_inline void instrument_write(const volatile void *v, size_t size
         kcsan_check_write(v, size);
 }
 
+/**
+ * instrument_read_write - instrument regular read-write access
+ *
+ * Instrument a regular write access. The instrumentation should be inserted
+ * before the actual write happens.
+ *
+ * @ptr address of access
+ * @size size of access
+ */
+static __always_inline void instrument_read_write(const volatile void *v, size_t size)
+{
+        kasan_check_write(v, size);
+        kcsan_check_read_write(v, size);
+}
+
 /**
  * instrument_atomic_read - instrument atomic read access
  *
@@ -72,6 +87,21 @@ static __always_inline void instrument_atomic_write(const volatile void *v, size
         kcsan_check_atomic_write(v, size);
 }
 
+/**
+ * instrument_atomic_read_write - instrument atomic read-write access
+ *
+ * Instrument an atomic read-write access. The instrumentation should be
+ * inserted before the actual write happens.
+ *
+ * @ptr address of access
+ * @size size of access
+ */
+static __always_inline void instrument_atomic_read_write(const volatile void *v, size_t size)
+{
+        kasan_check_write(v, size);
+        kcsan_check_atomic_read_write(v, size);
+}
+
 /**
  * instrument_copy_to_user - instrument reads of copy_to_user
  *
@@ -7,19 +7,13 @@
 #include <linux/compiler_attributes.h>
 #include <linux/types.h>
 
-/*
- * ACCESS TYPE MODIFIERS
- *
- *   <none>: normal read access;
- *   WRITE : write access;
- *   ATOMIC: access is atomic;
- *   ASSERT: access is not a regular access, but an assertion;
- *   SCOPED: access is a scoped access;
- */
-#define KCSAN_ACCESS_WRITE  0x1
-#define KCSAN_ACCESS_ATOMIC 0x2
-#define KCSAN_ACCESS_ASSERT 0x4
-#define KCSAN_ACCESS_SCOPED 0x8
+/* Access types -- if KCSAN_ACCESS_WRITE is not set, the access is a read. */
+#define KCSAN_ACCESS_WRITE     (1 << 0) /* Access is a write. */
+#define KCSAN_ACCESS_COMPOUND  (1 << 1) /* Compounded read-write instrumentation. */
+#define KCSAN_ACCESS_ATOMIC    (1 << 2) /* Access is atomic. */
+/* The following are special, and never due to compiler instrumentation. */
+#define KCSAN_ACCESS_ASSERT    (1 << 3) /* Access is an assertion. */
+#define KCSAN_ACCESS_SCOPED    (1 << 4) /* Access is a scoped access. */
 
 /*
  * __kcsan_*: Always calls into the runtime when KCSAN is enabled. This may be used
@@ -204,6 +198,15 @@ static inline void __kcsan_disable_current(void) { }
 #define __kcsan_check_write(ptr, size) \
         __kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
 
+/**
+ * __kcsan_check_read_write - check regular read-write access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define __kcsan_check_read_write(ptr, size) \
+        __kcsan_check_access(ptr, size, KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)
+
 /**
  * kcsan_check_read - check regular read access for races
  *
@@ -221,6 +224,15 @@ static inline void __kcsan_disable_current(void) { }
 #define kcsan_check_write(ptr, size) \
         kcsan_check_access(ptr, size, KCSAN_ACCESS_WRITE)
 
+/**
+ * kcsan_check_read_write - check regular read-write access for races
+ *
+ * @ptr: address of access
+ * @size: size of access
+ */
+#define kcsan_check_read_write(ptr, size) \
+        kcsan_check_access(ptr, size, KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)
+
 /*
  * Check for atomic accesses: if atomic accesses are not ignored, this simply
  * aliases to kcsan_check_access(), otherwise becomes a no-op.
@@ -228,11 +240,14 @@ static inline void __kcsan_disable_current(void) { }
 #ifdef CONFIG_KCSAN_IGNORE_ATOMICS
 #define kcsan_check_atomic_read(...)       do { } while (0)
 #define kcsan_check_atomic_write(...)      do { } while (0)
+#define kcsan_check_atomic_read_write(...) do { } while (0)
 #else
 #define kcsan_check_atomic_read(ptr, size) \
         kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC)
 #define kcsan_check_atomic_write(ptr, size) \
         kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE)
+#define kcsan_check_atomic_read_write(ptr, size) \
+        kcsan_check_access(ptr, size, KCSAN_ACCESS_ATOMIC | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_COMPOUND)
 #endif
 
 /**
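Note: the access types are orthogonal bit flags, so a compound read-write is just KCSAN_ACCESS_WRITE with KCSAN_ACCESS_COMPOUND or'd in, and ATOMIC can be layered on top. A tiny self-check using only the values from the hunk above:

#include <assert.h>

#define KCSAN_ACCESS_WRITE    (1 << 0)
#define KCSAN_ACCESS_COMPOUND (1 << 1)
#define KCSAN_ACCESS_ATOMIC   (1 << 2)
#define KCSAN_ACCESS_ASSERT   (1 << 3)
#define KCSAN_ACCESS_SCOPED   (1 << 4)

int main(void)
{
        int rw        = KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE;
        int atomic_rw = rw | KCSAN_ACCESS_ATOMIC;

        /* A compound access is always also a write... */
        assert(rw & KCSAN_ACCESS_WRITE);
        /* ...and the flags never alias each other. */
        assert(!(rw & (KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_SCOPED)));
        assert(atomic_rw != rw);
        return 0;
}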
@@ -1,5 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 
+#define pr_fmt(fmt) "kcsan: " fmt
+
 #include <linux/atomic.h>
 #include <linux/bug.h>
 #include <linux/delay.h>
@@ -98,6 +100,9 @@ static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];
  */
 static DEFINE_PER_CPU(long, kcsan_skip);
 
+/* For kcsan_prandom_u32_max(). */
+static DEFINE_PER_CPU(struct rnd_state, kcsan_rand_state);
+
 static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
                                                       size_t size,
                                                       bool expect_write,
@@ -223,7 +228,7 @@ is_atomic(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *ctx
 
         if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) &&
             (type & KCSAN_ACCESS_WRITE) && size <= sizeof(long) &&
-            IS_ALIGNED((unsigned long)ptr, size))
+            !(type & KCSAN_ACCESS_COMPOUND) && IS_ALIGNED((unsigned long)ptr, size))
                 return true; /* Assume aligned writes up to word size are atomic. */
 
         if (ctx->atomic_next > 0) {
@@ -269,11 +274,28 @@ should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *
         return true;
 }
 
+/*
+ * Returns a pseudo-random number in interval [0, ep_ro). See prandom_u32_max()
+ * for more details.
+ *
+ * The open-coded version here is using only safe primitives for all contexts
+ * where we can have KCSAN instrumentation. In particular, we cannot use
+ * prandom_u32() directly, as its tracepoint could cause recursion.
+ */
+static u32 kcsan_prandom_u32_max(u32 ep_ro)
+{
+        struct rnd_state *state = &get_cpu_var(kcsan_rand_state);
+        const u32 res = prandom_u32_state(state);
+
+        put_cpu_var(kcsan_rand_state);
+        return (u32)(((u64) res * ep_ro) >> 32);
+}
+
 static inline void reset_kcsan_skip(void)
 {
         long skip_count = kcsan_skip_watch -
                           (IS_ENABLED(CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE) ?
-                                   prandom_u32_max(kcsan_skip_watch) :
+                                   kcsan_prandom_u32_max(kcsan_skip_watch) :
                                    0);
         this_cpu_write(kcsan_skip, skip_count);
 }
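Note: the return expression in kcsan_prandom_u32_max() is the standard multiply-shift range reduction: a uniform 32-bit res scaled by ((u64)res * ep_ro) >> 32 lands in [0, ep_ro) without a division. A quick standalone check:

#include <assert.h>
#include <stdint.h>

static uint32_t u32_max_scale(uint32_t res, uint32_t ep_ro)
{
        return (uint32_t)(((uint64_t)res * ep_ro) >> 32);
}

int main(void)
{
        /* Extremes of the 32-bit input map to the ends of [0, ep_ro). */
        assert(u32_max_scale(0, 100) == 0);
        assert(u32_max_scale(UINT32_MAX, 100) == 99);
        /* The midpoint maps to ep_ro / 2. */
        assert(u32_max_scale(UINT32_MAX / 2 + 1, 100) == 50);
        return 0;
}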
@@ -283,12 +305,18 @@ static __always_inline bool kcsan_is_enabled(void)
         return READ_ONCE(kcsan_enabled) && get_ctx()->disable_count == 0;
 }
 
-static inline unsigned int get_delay(void)
+/* Introduce delay depending on context and configuration. */
+static void delay_access(int type)
 {
         unsigned int delay = in_task() ? kcsan_udelay_task : kcsan_udelay_interrupt;
-        return delay - (IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ?
-                                prandom_u32_max(delay) :
-                                0);
+        /* For certain access types, skew the random delay to be longer. */
+        unsigned int skew_delay_order =
+                (type & (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_ASSERT)) ? 1 : 0;
+
+        delay -= IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ?
+                               kcsan_prandom_u32_max(delay >> skew_delay_order) :
+                               0;
+        udelay(delay);
 }
 
 void kcsan_save_irqtrace(struct task_struct *task)
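Note: delay_access() subtracts a random value drawn from [0, delay >> skew_delay_order), so for compound and ASSERT accesses (skew order 1) at least about half of the configured delay always remains, keeping the watchpoint open longer for the accesses most likely to race. A rough standalone model (min_delay is an invented helper for the sketch, uniform randomness assumed):

#include <stdio.h>

/* Worst case after subtracting U[0, delay >> skew): the random draw maxes out. */
static unsigned int min_delay(unsigned int delay, int skew_order)
{
        return delay - ((delay >> skew_order) - 1);
}

int main(void)
{
        unsigned int delay = 80; /* e.g. a kcsan_udelay_task setting */

        printf("plain access:    at least %u us\n", min_delay(delay, 0)); /* 1 */
        printf("compound/assert: at least %u us\n", min_delay(delay, 1)); /* 41 */
        return 0;
}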
@@ -361,13 +389,13 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
                  * already removed the watchpoint, or another thread consumed
                  * the watchpoint before this thread.
                  */
-                kcsan_counter_inc(KCSAN_COUNTER_REPORT_RACES);
+                atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_REPORT_RACES]);
         }
 
         if ((type & KCSAN_ACCESS_ASSERT) != 0)
-                kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);
+                atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
         else
-                kcsan_counter_inc(KCSAN_COUNTER_DATA_RACES);
+                atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_DATA_RACES]);
 
         user_access_restore(flags);
 }
@@ -408,7 +436,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
                 goto out;
 
         if (!check_encodable((unsigned long)ptr, size)) {
-                kcsan_counter_inc(KCSAN_COUNTER_UNENCODABLE_ACCESSES);
+                atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_UNENCODABLE_ACCESSES]);
                 goto out;
         }
 
@@ -428,12 +456,12 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
                  * with which should_watch() returns true should be tweaked so
                  * that this case happens very rarely.
                  */
-                kcsan_counter_inc(KCSAN_COUNTER_NO_CAPACITY);
+                atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_NO_CAPACITY]);
                 goto out_unlock;
         }
 
-        kcsan_counter_inc(KCSAN_COUNTER_SETUP_WATCHPOINTS);
-        kcsan_counter_inc(KCSAN_COUNTER_USED_WATCHPOINTS);
+        atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_SETUP_WATCHPOINTS]);
+        atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);
 
         /*
          * Read the current value, to later check and infer a race if the data
@@ -459,7 +487,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
 
         if (IS_ENABLED(CONFIG_KCSAN_DEBUG)) {
                 kcsan_disable_current();
-                pr_err("KCSAN: watching %s, size: %zu, addr: %px [slot: %d, encoded: %lx]\n",
+                pr_err("watching %s, size: %zu, addr: %px [slot: %d, encoded: %lx]\n",
                        is_write ? "write" : "read", size, ptr,
                        watchpoint_slot((unsigned long)ptr),
                        encode_watchpoint((unsigned long)ptr, size, is_write));
@@ -470,7 +498,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
          * Delay this thread, to increase probability of observing a racy
          * conflicting access.
          */
-        udelay(get_delay());
+        delay_access(type);
 
         /*
          * Re-read value, and check if it is as expected; if not, we infer a
@@ -535,16 +563,16 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
                  * increment this counter.
                  */
                 if (is_assert && value_change == KCSAN_VALUE_CHANGE_TRUE)
-                        kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);
+                        atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
 
                 kcsan_report(ptr, size, type, value_change, KCSAN_REPORT_RACE_SIGNAL,
                              watchpoint - watchpoints);
         } else if (value_change == KCSAN_VALUE_CHANGE_TRUE) {
                 /* Inferring a race, since the value should not have changed. */
 
-                kcsan_counter_inc(KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN);
+                atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN]);
                 if (is_assert)
-                        kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);
+                        atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ASSERT_FAILURES]);
 
                 if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert)
                         kcsan_report(ptr, size, type, KCSAN_VALUE_CHANGE_TRUE,
@@ -557,7 +585,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
          * reused after this point.
          */
         remove_watchpoint(watchpoint);
-        kcsan_counter_dec(KCSAN_COUNTER_USED_WATCHPOINTS);
+        atomic_long_dec(&kcsan_counters[KCSAN_COUNTER_USED_WATCHPOINTS]);
 out_unlock:
         if (!kcsan_interrupt_watcher)
                 local_irq_restore(irq_flags);
@@ -614,14 +642,17 @@ void __init kcsan_init(void)
         BUG_ON(!in_task());
 
         kcsan_debugfs_init();
+        prandom_seed_full_state(&kcsan_rand_state);
 
         /*
          * We are in the init task, and no other tasks should be running;
          * WRITE_ONCE without memory barrier is sufficient.
          */
-        if (kcsan_early_enable)
+        if (kcsan_early_enable) {
+                pr_info("enabled early\n");
                 WRITE_ONCE(kcsan_enabled, true);
+        }
 }
 
 /* === Exported interface =================================================== */
 
@@ -793,7 +824,17 @@ EXPORT_SYMBOL(__kcsan_check_access);
         EXPORT_SYMBOL(__tsan_write##size); \
         void __tsan_unaligned_write##size(void *ptr) \
                 __alias(__tsan_write##size); \
-        EXPORT_SYMBOL(__tsan_unaligned_write##size)
+        EXPORT_SYMBOL(__tsan_unaligned_write##size); \
+        void __tsan_read_write##size(void *ptr); \
+        void __tsan_read_write##size(void *ptr) \
+        { \
+                check_access(ptr, size, \
+                             KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE); \
+        } \
+        EXPORT_SYMBOL(__tsan_read_write##size); \
+        void __tsan_unaligned_read_write##size(void *ptr) \
+                __alias(__tsan_read_write##size); \
+        EXPORT_SYMBOL(__tsan_unaligned_read_write##size)
 
 DEFINE_TSAN_READ_WRITE(1);
 DEFINE_TSAN_READ_WRITE(2);
@@ -879,3 +920,130 @@ void __tsan_init(void)
 {
 }
 EXPORT_SYMBOL(__tsan_init);
+
+/*
+ * Instrumentation for atomic builtins (__atomic_*, __sync_*).
+ *
+ * Normal kernel code _should not_ be using them directly, but some
+ * architectures may implement some or all atomics using the compilers'
+ * builtins.
+ *
+ * Note: If an architecture decides to fully implement atomics using the
+ * builtins, because they are implicitly instrumented by KCSAN (and KASAN,
+ * etc.), implementing the ARCH_ATOMIC interface (to get instrumentation via
+ * atomic-instrumented) is no longer necessary.
+ *
+ * TSAN instrumentation replaces atomic accesses with calls to any of the below
+ * functions, whose job is to also execute the operation itself.
+ */
+
+#define DEFINE_TSAN_ATOMIC_LOAD_STORE(bits) \
+        u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder); \
+        u##bits __tsan_atomic##bits##_load(const u##bits *ptr, int memorder) \
+        { \
+                if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
+                        check_access(ptr, bits / BITS_PER_BYTE, KCSAN_ACCESS_ATOMIC); \
+                } \
+                return __atomic_load_n(ptr, memorder); \
+        } \
+        EXPORT_SYMBOL(__tsan_atomic##bits##_load); \
+        void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder); \
+        void __tsan_atomic##bits##_store(u##bits *ptr, u##bits v, int memorder) \
+        { \
+                if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
+                        check_access(ptr, bits / BITS_PER_BYTE, \
+                                     KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC); \
+                } \
+                __atomic_store_n(ptr, v, memorder); \
+        } \
+        EXPORT_SYMBOL(__tsan_atomic##bits##_store)
+
+#define DEFINE_TSAN_ATOMIC_RMW(op, bits, suffix) \
+        u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder); \
+        u##bits __tsan_atomic##bits##_##op(u##bits *ptr, u##bits v, int memorder) \
+        { \
+                if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
+                        check_access(ptr, bits / BITS_PER_BYTE, \
+                                     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
+                                     KCSAN_ACCESS_ATOMIC); \
+                } \
+                return __atomic_##op##suffix(ptr, v, memorder); \
+        } \
+        EXPORT_SYMBOL(__tsan_atomic##bits##_##op)
+
+/*
+ * Note: CAS operations are always classified as write, even in case they
+ * fail. We cannot perform check_access() after a write, as it might lead to
+ * false positives, in cases such as:
+ *
+ *        T0: __atomic_compare_exchange_n(&p->flag, &old, 1, ...)
+ *
+ *        T1: if (__atomic_load_n(&p->flag, ...)) {
+ *                modify *p;
+ *                p->flag = 0;
+ *            }
+ *
+ * The only downside is that, if there are 3 threads, with one CAS that
+ * succeeds, another CAS that fails, and an unmarked racing operation, we may
+ * point at the wrong CAS as the source of the race. However, if we assume that
+ * all CAS can succeed in some other execution, the data race is still valid.
+ */
+#define DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strength, weak) \
+        int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp, \
+                                                              u##bits val, int mo, int fail_mo); \
+        int __tsan_atomic##bits##_compare_exchange_##strength(u##bits *ptr, u##bits *exp, \
+                                                              u##bits val, int mo, int fail_mo) \
+        { \
+                if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
+                        check_access(ptr, bits / BITS_PER_BYTE, \
+                                     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
+                                     KCSAN_ACCESS_ATOMIC); \
+                } \
+                return __atomic_compare_exchange_n(ptr, exp, val, weak, mo, fail_mo); \
+        } \
+        EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_##strength)
+
+#define DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits) \
+        u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
+                                                           int mo, int fail_mo); \
+        u##bits __tsan_atomic##bits##_compare_exchange_val(u##bits *ptr, u##bits exp, u##bits val, \
+                                                           int mo, int fail_mo) \
+        { \
+                if (!IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS)) { \
+                        check_access(ptr, bits / BITS_PER_BYTE, \
+                                     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | \
+                                     KCSAN_ACCESS_ATOMIC); \
+                } \
+                __atomic_compare_exchange_n(ptr, &exp, val, 0, mo, fail_mo); \
+                return exp; \
+        } \
+        EXPORT_SYMBOL(__tsan_atomic##bits##_compare_exchange_val)
+
+#define DEFINE_TSAN_ATOMIC_OPS(bits) \
+        DEFINE_TSAN_ATOMIC_LOAD_STORE(bits); \
+        DEFINE_TSAN_ATOMIC_RMW(exchange, bits, _n); \
+        DEFINE_TSAN_ATOMIC_RMW(fetch_add, bits, ); \
+        DEFINE_TSAN_ATOMIC_RMW(fetch_sub, bits, ); \
+        DEFINE_TSAN_ATOMIC_RMW(fetch_and, bits, ); \
+        DEFINE_TSAN_ATOMIC_RMW(fetch_or, bits, ); \
+        DEFINE_TSAN_ATOMIC_RMW(fetch_xor, bits, ); \
+        DEFINE_TSAN_ATOMIC_RMW(fetch_nand, bits, ); \
+        DEFINE_TSAN_ATOMIC_CMPXCHG(bits, strong, 0); \
+        DEFINE_TSAN_ATOMIC_CMPXCHG(bits, weak, 1); \
+        DEFINE_TSAN_ATOMIC_CMPXCHG_VAL(bits)
+
+DEFINE_TSAN_ATOMIC_OPS(8);
+DEFINE_TSAN_ATOMIC_OPS(16);
+DEFINE_TSAN_ATOMIC_OPS(32);
+DEFINE_TSAN_ATOMIC_OPS(64);
+
+void __tsan_atomic_thread_fence(int memorder);
+void __tsan_atomic_thread_fence(int memorder)
+{
+        __atomic_thread_fence(memorder);
+}
+EXPORT_SYMBOL(__tsan_atomic_thread_fence);
+
+void __tsan_atomic_signal_fence(int memorder);
+void __tsan_atomic_signal_fence(int memorder) { }
+EXPORT_SYMBOL(__tsan_atomic_signal_fence);
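Note: each DEFINE_TSAN_ATOMIC_* use above stamps out one runtime entry point per width and operation; these are the functions the compiler calls in place of directly emitted atomics when building with -fsanitize=thread. For orientation, DEFINE_TSAN_ATOMIC_RMW(fetch_add, 32, ) expands roughly to the following (the IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS) guard omitted):

/* Approximate expansion of DEFINE_TSAN_ATOMIC_RMW(fetch_add, 32, ). */
u32 __tsan_atomic32_fetch_add(u32 *ptr, u32 v, int memorder);
u32 __tsan_atomic32_fetch_add(u32 *ptr, u32 v, int memorder)
{
        /* Check the access as a compound, marked read-write... */
        check_access(ptr, 32 / BITS_PER_BYTE,
                     KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE |
                     KCSAN_ACCESS_ATOMIC);
        /* ...and perform the operation itself on behalf of the caller. */
        return __atomic_fetch_add(ptr, v, memorder);
}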
@@ -1,5 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 
+#define pr_fmt(fmt) "kcsan: " fmt
+
 #include <linux/atomic.h>
 #include <linux/bsearch.h>
 #include <linux/bug.h>
@@ -15,10 +17,19 @@
 
 #include "kcsan.h"
 
-/*
- * Statistics counters.
- */
-static atomic_long_t counters[KCSAN_COUNTER_COUNT];
+atomic_long_t kcsan_counters[KCSAN_COUNTER_COUNT];
+static const char *const counter_names[] = {
+        [KCSAN_COUNTER_USED_WATCHPOINTS]         = "used_watchpoints",
+        [KCSAN_COUNTER_SETUP_WATCHPOINTS]        = "setup_watchpoints",
+        [KCSAN_COUNTER_DATA_RACES]               = "data_races",
+        [KCSAN_COUNTER_ASSERT_FAILURES]          = "assert_failures",
+        [KCSAN_COUNTER_NO_CAPACITY]              = "no_capacity",
+        [KCSAN_COUNTER_REPORT_RACES]             = "report_races",
+        [KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN]     = "races_unknown_origin",
+        [KCSAN_COUNTER_UNENCODABLE_ACCESSES]     = "unencodable_accesses",
+        [KCSAN_COUNTER_ENCODING_FALSE_POSITIVES] = "encoding_false_positives",
+};
+static_assert(ARRAY_SIZE(counter_names) == KCSAN_COUNTER_COUNT);
 
 /*
  * Addresses for filtering functions from reporting. This list can be used as a
@@ -39,34 +50,6 @@ static struct {
 };
 static DEFINE_SPINLOCK(report_filterlist_lock);
 
-static const char *counter_to_name(enum kcsan_counter_id id)
-{
-        switch (id) {
-        case KCSAN_COUNTER_USED_WATCHPOINTS:         return "used_watchpoints";
-        case KCSAN_COUNTER_SETUP_WATCHPOINTS:        return "setup_watchpoints";
-        case KCSAN_COUNTER_DATA_RACES:               return "data_races";
-        case KCSAN_COUNTER_ASSERT_FAILURES:          return "assert_failures";
-        case KCSAN_COUNTER_NO_CAPACITY:              return "no_capacity";
-        case KCSAN_COUNTER_REPORT_RACES:             return "report_races";
-        case KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN:     return "races_unknown_origin";
-        case KCSAN_COUNTER_UNENCODABLE_ACCESSES:     return "unencodable_accesses";
-        case KCSAN_COUNTER_ENCODING_FALSE_POSITIVES: return "encoding_false_positives";
-        case KCSAN_COUNTER_COUNT:
-                BUG();
-        }
-        return NULL;
-}
-
-void kcsan_counter_inc(enum kcsan_counter_id id)
-{
-        atomic_long_inc(&counters[id]);
-}
-
-void kcsan_counter_dec(enum kcsan_counter_id id)
-{
-        atomic_long_dec(&counters[id]);
-}
-
 /*
  * The microbenchmark allows benchmarking KCSAN core runtime only. To run
  * multiple threads, pipe 'microbench=<iters>' from multiple tasks into the
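Note: the two debugfs hunks above replace the counter_to_name() switch with a designated-initializer string table guarded by a static_assert, so a new counter without a name fails the build instead of returning NULL at runtime. The same pattern in miniature, compilable standalone (names here are invented for the sketch):

#include <assert.h>
#include <stdio.h>

enum counter_id { COUNTER_A, COUNTER_B, COUNTER_COUNT };

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *const counter_names[] = {
        [COUNTER_A] = "counter_a",
        [COUNTER_B] = "counter_b",
};
/* Compile-time check that every enumerator has a name. */
static_assert(ARRAY_SIZE(counter_names) == COUNTER_COUNT, "name table incomplete");

int main(void)
{
        for (int i = 0; i < COUNTER_COUNT; ++i)
                printf("%s\n", counter_names[i]);
        return 0;
}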
@@ -86,7 +69,7 @@ static noinline void microbenchmark(unsigned long iters)
          */
         WRITE_ONCE(kcsan_enabled, false);
 
-        pr_info("KCSAN: %s begin | iters: %lu\n", __func__, iters);
+        pr_info("%s begin | iters: %lu\n", __func__, iters);
 
         cycles = get_cycles();
         while (iters--) {
@@ -97,73 +80,13 @@ static noinline void microbenchmark(unsigned long iters)
         }
         cycles = get_cycles() - cycles;
 
-        pr_info("KCSAN: %s end | cycles: %llu\n", __func__, cycles);
+        pr_info("%s end | cycles: %llu\n", __func__, cycles);
 
         WRITE_ONCE(kcsan_enabled, was_enabled);
         /* restore context */
         current->kcsan_ctx = ctx_save;
 }
 
-/*
- * Simple test to create conflicting accesses. Write 'test=<iters>' to KCSAN's
- * debugfs file from multiple tasks to generate real conflicts and show reports.
- */
-static long test_dummy;
-static long test_flags;
-static long test_scoped;
-static noinline void test_thread(unsigned long iters)
-{
-        const long CHANGE_BITS = 0xff00ff00ff00ff00L;
-        const struct kcsan_ctx ctx_save = current->kcsan_ctx;
-        cycles_t cycles;
-
-        /* We may have been called from an atomic region; reset context. */
-        memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));
-
-        pr_info("KCSAN: %s begin | iters: %lu\n", __func__, iters);
-        pr_info("test_dummy@%px, test_flags@%px, test_scoped@%px,\n",
-                &test_dummy, &test_flags, &test_scoped);
-
-        cycles = get_cycles();
-        while (iters--) {
-                /* These all should generate reports. */
-                __kcsan_check_read(&test_dummy, sizeof(test_dummy));
-                ASSERT_EXCLUSIVE_WRITER(test_dummy);
-                ASSERT_EXCLUSIVE_ACCESS(test_dummy);
-
-                ASSERT_EXCLUSIVE_BITS(test_flags, ~CHANGE_BITS); /* no report */
-                __kcsan_check_read(&test_flags, sizeof(test_flags)); /* no report */
-
-                ASSERT_EXCLUSIVE_BITS(test_flags, CHANGE_BITS); /* report */
-                __kcsan_check_read(&test_flags, sizeof(test_flags)); /* no report */
-
-                /* not actually instrumented */
-                WRITE_ONCE(test_dummy, iters); /* to observe value-change */
-                __kcsan_check_write(&test_dummy, sizeof(test_dummy));
-
-                test_flags ^= CHANGE_BITS; /* generate value-change */
-                __kcsan_check_write(&test_flags, sizeof(test_flags));
-
-                BUG_ON(current->kcsan_ctx.scoped_accesses.prev);
-                {
-                        /* Should generate reports anywhere in this block. */
-                        ASSERT_EXCLUSIVE_WRITER_SCOPED(test_scoped);
-                        ASSERT_EXCLUSIVE_ACCESS_SCOPED(test_scoped);
-                        BUG_ON(!current->kcsan_ctx.scoped_accesses.prev);
-                        /* Unrelated accesses. */
-                        __kcsan_check_access(&cycles, sizeof(cycles), 0);
-                        __kcsan_check_access(&cycles, sizeof(cycles), KCSAN_ACCESS_ATOMIC);
-                }
-                BUG_ON(current->kcsan_ctx.scoped_accesses.prev);
-        }
-        cycles = get_cycles() - cycles;
-
-        pr_info("KCSAN: %s end | cycles: %llu\n", __func__, cycles);
-
-        /* restore context */
-        current->kcsan_ctx = ctx_save;
-}
-
 static int cmp_filterlist_addrs(const void *rhs, const void *lhs)
 {
         const unsigned long a = *(const unsigned long *)rhs;
@@ -220,7 +143,7 @@ static ssize_t insert_report_filterlist(const char *func)
         ssize_t ret = 0;
 
         if (!addr) {
-                pr_err("KCSAN: could not find function: '%s'\n", func);
+                pr_err("could not find function: '%s'\n", func);
                 return -ENOENT;
         }
 
@@ -270,9 +193,10 @@ static int show_info(struct seq_file *file, void *v)
 
         /* show stats */
         seq_printf(file, "enabled: %i\n", READ_ONCE(kcsan_enabled));
-        for (i = 0; i < KCSAN_COUNTER_COUNT; ++i)
-                seq_printf(file, "%s: %ld\n", counter_to_name(i),
-                           atomic_long_read(&counters[i]));
+        for (i = 0; i < KCSAN_COUNTER_COUNT; ++i) {
+                seq_printf(file, "%s: %ld\n", counter_names[i],
+                           atomic_long_read(&kcsan_counters[i]));
+        }
 
         /* show filter functions, and filter type */
         spin_lock_irqsave(&report_filterlist_lock, flags);
@@ -307,18 +231,12 @@ debugfs_write(struct file *file, const char __user *buf, size_t count, loff_t *o
                 WRITE_ONCE(kcsan_enabled, true);
         } else if (!strcmp(arg, "off")) {
                 WRITE_ONCE(kcsan_enabled, false);
-        } else if (!strncmp(arg, "microbench=", sizeof("microbench=") - 1)) {
+        } else if (str_has_prefix(arg, "microbench=")) {
                 unsigned long iters;
 
-                if (kstrtoul(&arg[sizeof("microbench=") - 1], 0, &iters))
+                if (kstrtoul(&arg[strlen("microbench=")], 0, &iters))
                         return -EINVAL;
                 microbenchmark(iters);
-        } else if (!strncmp(arg, "test=", sizeof("test=") - 1)) {
-                unsigned long iters;
-
-                if (kstrtoul(&arg[sizeof("test=") - 1], 0, &iters))
-                        return -EINVAL;
-                test_thread(iters);
         } else if (!strcmp(arg, "whitelist")) {
                 set_report_filterlist_whitelist(true);
         } else if (!strcmp(arg, "blacklist")) {
@@ -27,6 +27,12 @@
 #include <linux/types.h>
 #include <trace/events/printk.h>
 
+#ifdef CONFIG_CC_HAS_TSAN_COMPOUND_READ_BEFORE_WRITE
+#define __KCSAN_ACCESS_RW(alt) (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE)
+#else
+#define __KCSAN_ACCESS_RW(alt) (alt)
+#endif
+
 /* Points to current test-case memory access "kernels". */
 static void (*access_kernels[2])(void);
 
@@ -186,20 +192,21 @@ static bool report_matches(const struct expect_report *r)
 
         /* Access 1 & 2 */
         for (i = 0; i < 2; ++i) {
+                const int ty = r->access[i].type;
                 const char *const access_type =
-                        (r->access[i].type & KCSAN_ACCESS_ASSERT) ?
-                                ((r->access[i].type & KCSAN_ACCESS_WRITE) ?
+                        (ty & KCSAN_ACCESS_ASSERT) ?
+                                ((ty & KCSAN_ACCESS_WRITE) ?
                                          "assert no accesses" :
                                          "assert no writes") :
-                                ((r->access[i].type & KCSAN_ACCESS_WRITE) ?
-                                         "write" :
+                                ((ty & KCSAN_ACCESS_WRITE) ?
+                                         ((ty & KCSAN_ACCESS_COMPOUND) ?
+                                                  "read-write" :
+                                                  "write") :
                                          "read");
                 const char *const access_type_aux =
-                        (r->access[i].type & KCSAN_ACCESS_ATOMIC) ?
-                                " (marked)" :
-                                ((r->access[i].type & KCSAN_ACCESS_SCOPED) ?
-                                         " (scoped)" :
-                                         "");
+                        (ty & KCSAN_ACCESS_ATOMIC) ?
+                                " (marked)" :
+                                ((ty & KCSAN_ACCESS_SCOPED) ? " (scoped)" : "");
 
                 if (i == 1) {
                         /* Access 2 */
@@ -277,6 +284,12 @@ static noinline void test_kernel_write_atomic(void)
         WRITE_ONCE(test_var, READ_ONCE_NOCHECK(test_sink) + 1);
 }
 
+static noinline void test_kernel_atomic_rmw(void)
+{
+        /* Use builtin, so we can set up the "bad" atomic/non-atomic scenario. */
+        __atomic_fetch_add(&test_var, 1, __ATOMIC_RELAXED);
+}
+
 __no_kcsan
 static noinline void test_kernel_write_uninstrumented(void) { test_var++; }
 
@@ -390,6 +403,15 @@ static noinline void test_kernel_seqlock_writer(void)
         write_sequnlock_irqrestore(&test_seqlock, flags);
 }
 
+static noinline void test_kernel_atomic_builtins(void)
+{
+        /*
+         * Generate concurrent accesses, expecting no reports, ensuring KCSAN
+         * treats builtin atomics as actually atomic.
+         */
+        __atomic_load_n(&test_var, __ATOMIC_RELAXED);
+}
+
 /* ===== Test cases ===== */
 
 /* Simple test with normal data race. */
@@ -430,8 +452,8 @@ static void test_concurrent_races(struct kunit *test)
         const struct expect_report expect = {
                 .access = {
                         /* NULL will match any address. */
-                        { test_kernel_rmw_array, NULL, 0, KCSAN_ACCESS_WRITE },
-                        { test_kernel_rmw_array, NULL, 0, 0 },
+                        { test_kernel_rmw_array, NULL, 0, __KCSAN_ACCESS_RW(KCSAN_ACCESS_WRITE) },
+                        { test_kernel_rmw_array, NULL, 0, __KCSAN_ACCESS_RW(0) },
                 },
         };
         static const struct expect_report never = {
@@ -620,6 +642,29 @@ static void test_read_plain_atomic_write(struct kunit *test)
         KUNIT_EXPECT_TRUE(test, match_expect);
 }
 
+/* Test that atomic RMWs generate correct report. */
+__no_kcsan
+static void test_read_plain_atomic_rmw(struct kunit *test)
+{
+        const struct expect_report expect = {
+                .access = {
+                        { test_kernel_read, &test_var, sizeof(test_var), 0 },
+                        { test_kernel_atomic_rmw, &test_var, sizeof(test_var),
+                                KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC },
+                },
+        };
+        bool match_expect = false;
+
+        if (IS_ENABLED(CONFIG_KCSAN_IGNORE_ATOMICS))
+                return;
+
+        begin_test_checks(test_kernel_read, test_kernel_atomic_rmw);
+        do {
+                match_expect = report_matches(&expect);
+        } while (!end_test_checks(match_expect));
+        KUNIT_EXPECT_TRUE(test, match_expect);
+}
+
 /* Zero-sized accesses should never cause data race reports. */
 __no_kcsan
 static void test_zero_size_access(struct kunit *test)
@@ -852,6 +897,59 @@ static void test_seqlock_noreport(struct kunit *test)
         KUNIT_EXPECT_FALSE(test, match_never);
 }
 
+/*
+ * Test atomic builtins work and required instrumentation functions exist. We
+ * also test that KCSAN understands they're atomic by racing with them via
+ * test_kernel_atomic_builtins(), and expect no reports.
+ *
+ * The atomic builtins _SHOULD NOT_ be used in normal kernel code!
+ */
+static void test_atomic_builtins(struct kunit *test)
+{
+        bool match_never = false;
+
+        begin_test_checks(test_kernel_atomic_builtins, test_kernel_atomic_builtins);
+        do {
+                long tmp;
+
+                kcsan_enable_current();
+
+                __atomic_store_n(&test_var, 42L, __ATOMIC_RELAXED);
+                KUNIT_EXPECT_EQ(test, 42L, __atomic_load_n(&test_var, __ATOMIC_RELAXED));
+
+                KUNIT_EXPECT_EQ(test, 42L, __atomic_exchange_n(&test_var, 20, __ATOMIC_RELAXED));
+                KUNIT_EXPECT_EQ(test, 20L, test_var);
+
+                tmp = 20L;
+                KUNIT_EXPECT_TRUE(test, __atomic_compare_exchange_n(&test_var, &tmp, 30L,
+                                                                    0, __ATOMIC_RELAXED,
+                                                                    __ATOMIC_RELAXED));
+                KUNIT_EXPECT_EQ(test, tmp, 20L);
+                KUNIT_EXPECT_EQ(test, test_var, 30L);
+                KUNIT_EXPECT_FALSE(test, __atomic_compare_exchange_n(&test_var, &tmp, 40L,
+                                                                     1, __ATOMIC_RELAXED,
+                                                                     __ATOMIC_RELAXED));
+                KUNIT_EXPECT_EQ(test, tmp, 30L);
+                KUNIT_EXPECT_EQ(test, test_var, 30L);
+
+                KUNIT_EXPECT_EQ(test, 30L, __atomic_fetch_add(&test_var, 1, __ATOMIC_RELAXED));
+                KUNIT_EXPECT_EQ(test, 31L, __atomic_fetch_sub(&test_var, 1, __ATOMIC_RELAXED));
+                KUNIT_EXPECT_EQ(test, 30L, __atomic_fetch_and(&test_var, 0xf, __ATOMIC_RELAXED));
+                KUNIT_EXPECT_EQ(test, 14L, __atomic_fetch_xor(&test_var, 0xf, __ATOMIC_RELAXED));
+                KUNIT_EXPECT_EQ(test, 1L, __atomic_fetch_or(&test_var, 0xf0, __ATOMIC_RELAXED));
+                KUNIT_EXPECT_EQ(test, 241L, __atomic_fetch_nand(&test_var, 0xf, __ATOMIC_RELAXED));
+                KUNIT_EXPECT_EQ(test, -2L, test_var);
+
+                __atomic_thread_fence(__ATOMIC_SEQ_CST);
+                __atomic_signal_fence(__ATOMIC_SEQ_CST);
+
+                kcsan_disable_current();
+
+                match_never = report_available();
+        } while (!end_test_checks(match_never));
+        KUNIT_EXPECT_FALSE(test, match_never);
+}
+
 /*
  * Each test case is run with different numbers of threads. Until KUnit supports
  * passing arguments for each test case, we encode #threads in the test case
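Note: the fetch_* expectations in test_atomic_builtins() chain old values, since each builtin returns the value before the operation: starting from 30, fetch_add/fetch_sub bounce to 31 and back, then 30 & 0xf = 14, 14 ^ 0xf = 1, 1 | 0xf0 = 241 (0xf1), and fetch_nand leaves ~(241 & 0xf) = ~1 = -2. The same chain is checkable in userspace with the identical builtins:

#include <assert.h>

int main(void)
{
        long v = 30;

        assert(__atomic_fetch_add(&v, 1, __ATOMIC_RELAXED) == 30 && v == 31);
        assert(__atomic_fetch_sub(&v, 1, __ATOMIC_RELAXED) == 31 && v == 30);
        assert(__atomic_fetch_and(&v, 0xf, __ATOMIC_RELAXED) == 30 && v == 14);
        assert(__atomic_fetch_xor(&v, 0xf, __ATOMIC_RELAXED) == 14 && v == 1);
        assert(__atomic_fetch_or(&v, 0xf0, __ATOMIC_RELAXED) == 1 && v == 241);
        assert(__atomic_fetch_nand(&v, 0xf, __ATOMIC_RELAXED) == 241 && v == -2);
        return 0;
}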
@@ -880,6 +978,7 @@ static struct kunit_case kcsan_test_cases[] = {
         KCSAN_KUNIT_CASE(test_write_write_struct_part),
         KCSAN_KUNIT_CASE(test_read_atomic_write_atomic),
         KCSAN_KUNIT_CASE(test_read_plain_atomic_write),
+        KCSAN_KUNIT_CASE(test_read_plain_atomic_rmw),
         KCSAN_KUNIT_CASE(test_zero_size_access),
         KCSAN_KUNIT_CASE(test_data_race),
         KCSAN_KUNIT_CASE(test_assert_exclusive_writer),
@@ -891,6 +990,7 @@ static struct kunit_case kcsan_test_cases[] = {
         KCSAN_KUNIT_CASE(test_assert_exclusive_access_scoped),
         KCSAN_KUNIT_CASE(test_jiffies_noreport),
         KCSAN_KUNIT_CASE(test_seqlock_noreport),
+        KCSAN_KUNIT_CASE(test_atomic_builtins),
         {},
 };
 
@@ -8,6 +8,7 @@
 #ifndef _KERNEL_KCSAN_KCSAN_H
 #define _KERNEL_KCSAN_KCSAN_H
 
+#include <linux/atomic.h>
 #include <linux/kcsan.h>
 #include <linux/sched.h>
 
@@ -34,6 +35,10 @@ void kcsan_restore_irqtrace(struct task_struct *task);
  */
 void kcsan_debugfs_init(void);
 
+/*
+ * Statistics counters displayed via debugfs; should only be modified in
+ * slow-paths.
+ */
 enum kcsan_counter_id {
         /*
          * Number of watchpoints currently in use.
@@ -86,12 +91,7 @@ enum kcsan_counter_id {
 
         KCSAN_COUNTER_COUNT, /* number of counters */
 };
-
-/*
- * Increment/decrement counter with given id; avoid calling these in fast-path.
- */
-extern void kcsan_counter_inc(enum kcsan_counter_id id);
-extern void kcsan_counter_dec(enum kcsan_counter_id id);
+extern atomic_long_t kcsan_counters[KCSAN_COUNTER_COUNT];
 
 /*
  * Returns true if data races in the function symbol that maps to func_addr
@@ -228,6 +228,10 @@ static const char *get_access_type(int type)
                 return "write";
         case KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
                 return "write (marked)";
+        case KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE:
+                return "read-write";
+        case KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
+                return "read-write (marked)";
         case KCSAN_ACCESS_SCOPED:
                 return "read (scoped)";
         case KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_ATOMIC:
@@ -275,8 +279,8 @@ static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries
 
                 cur = strnstr(buf, "kcsan_", len);
                 if (cur) {
-                        cur += sizeof("kcsan_") - 1;
-                        if (strncmp(cur, "test", sizeof("test") - 1))
+                        cur += strlen("kcsan_");
+                        if (!str_has_prefix(cur, "test"))
                                 continue; /* KCSAN runtime function. */
                         /* KCSAN related test. */
                 }
@@ -555,7 +559,7 @@ static bool prepare_report_consumer(unsigned long *flags,
                  * If the actual accesses to not match, this was a false
                  * positive due to watchpoint encoding.
                  */
-                kcsan_counter_inc(KCSAN_COUNTER_ENCODING_FALSE_POSITIVES);
+                atomic_long_inc(&kcsan_counters[KCSAN_COUNTER_ENCODING_FALSE_POSITIVES]);
                 goto discard;
         }
 
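Note: with the two new cases, get_access_type() distinguishes compound accesses in race reports. A condensed standalone model of the mapping, reusing the flag values introduced earlier in this series:

#include <stdio.h>

#define KCSAN_ACCESS_WRITE    (1 << 0)
#define KCSAN_ACCESS_COMPOUND (1 << 1)
#define KCSAN_ACCESS_ATOMIC   (1 << 2)

static const char *access_str(int type)
{
        switch (type) {
        case 0:
                return "read";
        case KCSAN_ACCESS_WRITE:
                return "write";
        case KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE:
                return "read-write";
        case KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
                return "read-write (marked)";
        default:
                return "?";
        }
}

int main(void)
{
        printf("%s\n", access_str(KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_WRITE));
        return 0;
}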
@@ -1,5 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 
+#define pr_fmt(fmt) "kcsan: " fmt
+
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/printk.h>
@@ -116,16 +118,16 @@ static int __init kcsan_selftest(void)
                 if (do_test()) \
                         ++passed; \
                 else \
-                        pr_err("KCSAN selftest: " #do_test " failed"); \
+                        pr_err("selftest: " #do_test " failed"); \
         } while (0)
 
         RUN_TEST(test_requires);
         RUN_TEST(test_encode_decode);
         RUN_TEST(test_matching_access);
 
-        pr_info("KCSAN selftest: %d/%d tests passed\n", passed, total);
+        pr_info("selftest: %d/%d tests passed\n", passed, total);
         if (passed != total)
-                panic("KCSAN selftests failed");
+                panic("selftests failed");
         return 0;
 }
 postcore_initcall(kcsan_selftest);
@@ -40,6 +40,11 @@ menuconfig KCSAN
 
 if KCSAN
 
+# Compiler capabilities that should not fail the test if they are unavailable.
+config CC_HAS_TSAN_COMPOUND_READ_BEFORE_WRITE
+        def_bool (CC_IS_CLANG && $(cc-option,-fsanitize=thread -mllvm -tsan-compound-read-before-write=1)) || \
+                 (CC_IS_GCC && $(cc-option,-fsanitize=thread --param tsan-compound-read-before-write=1))
+
 config KCSAN_VERBOSE
         bool "Show verbose reports with more information about system state"
         depends on PROVE_LOCKING
@@ -11,5 +11,5 @@ endif
 # of some options does not break KCSAN nor causes false positive reports.
 CFLAGS_KCSAN := -fsanitize=thread \
         $(call cc-option,$(call cc-param,tsan-instrument-func-entry-exit=0) -fno-optimize-sibling-calls) \
-        $(call cc-option,$(call cc-param,tsan-instrument-read-before-write=1)) \
+        $(call cc-option,$(call cc-param,tsan-compound-read-before-write=1),$(call cc-option,$(call cc-param,tsan-instrument-read-before-write=1))) \
         $(call cc-param,tsan-distinguish-volatile=1)
@@ -5,9 +5,10 @@ ATOMICDIR=$(dirname $0)
 
 . ${ATOMICDIR}/atomic-tbl.sh
 
-#gen_param_check(arg)
+#gen_param_check(meta, arg)
 gen_param_check()
 {
+        local meta="$1"; shift
         local arg="$1"; shift
         local type="${arg%%:*}"
         local name="$(gen_param_name "${arg}")"
@@ -17,17 +18,25 @@ gen_param_check()
         i) return;;
         esac
 
-        # We don't write to constant parameters
-        [ ${type#c} != ${type} ] && rw="read"
+        if [ ${type#c} != ${type} ]; then
+                # We don't write to constant parameters.
+                rw="read"
+        elif [ "${meta}" != "s" ]; then
+                # An atomic RMW: if this parameter is not a constant, and this atomic is
+                # not just a 's'tore, this parameter is both read from and written to.
+                rw="read_write"
+        fi
 
         printf "\tinstrument_atomic_${rw}(${name}, sizeof(*${name}));\n"
 }
 
-#gen_param_check(arg...)
+#gen_params_checks(meta, arg...)
 gen_params_checks()
 {
+        local meta="$1"; shift
+
         while [ "$#" -gt 0 ]; do
-                gen_param_check "$1"
+                gen_param_check "$meta" "$1"
                 shift;
         done
 }
@@ -77,7 +86,7 @@ gen_proto_order_variant()
 
         local ret="$(gen_ret_type "${meta}" "${int}")"
         local params="$(gen_params "${int}" "${atomic}" "$@")"
-        local checks="$(gen_params_checks "$@")"
+        local checks="$(gen_params_checks "${meta}" "$@")"
         local args="$(gen_args "$@")"
         local retstmt="$(gen_ret_stmt "${meta}")"
 
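Note: with the meta parameter threaded through, the generator now selects instrument_atomic_read_write() for value-returning RMW atomics, while stores keep plain write instrumentation and const parameters keep read instrumentation. A wrapper it emits then looks roughly like this (a sketch of the generated atomic-instrumented header output, not verbatim):

/* Sketch of a generated instrumented wrapper for a value-returning RMW. */
static __always_inline int
atomic_fetch_add(int i, atomic_t *v)
{
        /* meta is not 's'tore and v is not const => read_write. */
        instrument_atomic_read_write(v, sizeof(*v));
        return arch_atomic_fetch_add(i, v);
}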
@@ -528,6 +528,61 @@ static const char *uaccess_safe_builtin[] = {
         "__tsan_write4",
         "__tsan_write8",
         "__tsan_write16",
+        "__tsan_read_write1",
+        "__tsan_read_write2",
+        "__tsan_read_write4",
+        "__tsan_read_write8",
+        "__tsan_read_write16",
+        "__tsan_atomic8_load",
+        "__tsan_atomic16_load",
+        "__tsan_atomic32_load",
+        "__tsan_atomic64_load",
+        "__tsan_atomic8_store",
+        "__tsan_atomic16_store",
+        "__tsan_atomic32_store",
+        "__tsan_atomic64_store",
+        "__tsan_atomic8_exchange",
+        "__tsan_atomic16_exchange",
+        "__tsan_atomic32_exchange",
+        "__tsan_atomic64_exchange",
+        "__tsan_atomic8_fetch_add",
+        "__tsan_atomic16_fetch_add",
+        "__tsan_atomic32_fetch_add",
+        "__tsan_atomic64_fetch_add",
+        "__tsan_atomic8_fetch_sub",
+        "__tsan_atomic16_fetch_sub",
+        "__tsan_atomic32_fetch_sub",
+        "__tsan_atomic64_fetch_sub",
+        "__tsan_atomic8_fetch_and",
+        "__tsan_atomic16_fetch_and",
+        "__tsan_atomic32_fetch_and",
+        "__tsan_atomic64_fetch_and",
+        "__tsan_atomic8_fetch_or",
+        "__tsan_atomic16_fetch_or",
+        "__tsan_atomic32_fetch_or",
+        "__tsan_atomic64_fetch_or",
+        "__tsan_atomic8_fetch_xor",
+        "__tsan_atomic16_fetch_xor",
+        "__tsan_atomic32_fetch_xor",
+        "__tsan_atomic64_fetch_xor",
+        "__tsan_atomic8_fetch_nand",
+        "__tsan_atomic16_fetch_nand",
+        "__tsan_atomic32_fetch_nand",
+        "__tsan_atomic64_fetch_nand",
+        "__tsan_atomic8_compare_exchange_strong",
+        "__tsan_atomic16_compare_exchange_strong",
+        "__tsan_atomic32_compare_exchange_strong",
+        "__tsan_atomic64_compare_exchange_strong",
+        "__tsan_atomic8_compare_exchange_weak",
+        "__tsan_atomic16_compare_exchange_weak",
+        "__tsan_atomic32_compare_exchange_weak",
+        "__tsan_atomic64_compare_exchange_weak",
+        "__tsan_atomic8_compare_exchange_val",
+        "__tsan_atomic16_compare_exchange_val",
+        "__tsan_atomic32_compare_exchange_val",
+        "__tsan_atomic64_compare_exchange_val",
+        "__tsan_atomic_thread_fence",
+        "__tsan_atomic_signal_fence",
         /* KCOV */
         "write_comp_data",
         "check_kcov_mode",