linux_dsm_epyc7002/include/linux/compiler.h
Will Deacon dee081bf8f READ_ONCE: Drop pointer qualifiers when reading from scalar types
Passing a volatile-qualified pointer to READ_ONCE() is an absolute
trainwreck for code generation: the use of 'typeof()' to define a
temporary variable inside the macro means that the final evaluation in
macro scope ends up forcing a read back from the stack. When stack
protector is enabled (the default for arm64, at least), this causes
the compiler to vomit up all sorts of junk.

Unfortunately, dropping pointer qualifiers inside the macro poses quite
a challenge, especially since the pointed-to type is permitted to be an
aggregate, and this is relied upon by mm/ code accessing things like
'pmd_t'. Based on numerous hacks and discussions on the mailing list,
this is the best I've managed to come up with.

Introduce '__unqual_scalar_typeof()' which takes an expression and, if
the expression is an optionally qualified 8, 16, 32 or 64-bit scalar
type, evaluates to the unqualified type. Other input types, including
aggregates, remain unchanged. Hopefully READ_ONCE() on volatile aggregate
pointers isn't something we do on a fast-path.
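
The shape of the trick can be sketched as follows (an illustrative sketch
only; the helper name '__pick_scalar_type' is made up here and is not
necessarily what was merged). Compare the expression against each
unqualified scalar type with __same_type(), which ignores top-level
qualifiers, pick a literal of the matching unqualified type with
__builtin_choose_expr(), and take typeof() of the result:

  #define __pick_scalar_type(x, type, otherwise) \
          __builtin_choose_expr(__same_type(x, type), (type)0, otherwise)

  #define __unqual_scalar_typeof(x) typeof( \
          __pick_scalar_type(x, char, \
          __pick_scalar_type(x, unsigned char, \
          /* ...and so on for the remaining 8/16/32/64-bit types... */ \
          (x))))

For anything that does not match, including aggregates, the final (x) arm
is selected and the type is left untouched.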

Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Reported-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Will Deacon <will@kernel.org>
2020-04-16 12:28:34 +01:00

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H
#include <linux/compiler_types.h>
#ifndef __ASSEMBLY__
#ifdef __KERNEL__
/*
* Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
* to disable branch tracing on a per-file basis.
*/
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_likely_data *f, int val,
                          int expect, int is_constant);
#define likely_notrace(x) __builtin_expect(!!(x), 1)
#define unlikely_notrace(x) __builtin_expect(!!(x), 0)
#define __branch_check__(x, expect, is_constant) ({ \
        long ______r; \
        static struct ftrace_likely_data \
                __aligned(4) \
                __section(_ftrace_annotated_branch) \
                ______f = { \
                        .data.func = __func__, \
                        .data.file = __FILE__, \
                        .data.line = __LINE__, \
                }; \
        ______r = __builtin_expect(!!(x), expect); \
        ftrace_likely_update(&______f, ______r, \
                             expect, is_constant); \
        ______r; \
})
/*
* Using __builtin_constant_p(x) to ignore cases where the return
* value is always the same. This idea is taken from a similar patch
* written by Daniel Walker.
*/
# ifndef likely
# define likely(x) (__branch_check__(x, 1, __builtin_constant_p(x)))
# endif
# ifndef unlikely
# define unlikely(x) (__branch_check__(x, 0, __builtin_constant_p(x)))
# endif
#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
* "Define 'is'", Bill Clinton
* "Define 'if'", Steven Rostedt
*/
#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) )
#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond))
#define __trace_if_value(cond) ({ \
        static struct ftrace_branch_data \
                __aligned(4) \
                __section(_ftrace_branch) \
                __if_trace = { \
                        .func = __func__, \
                        .file = __FILE__, \
                        .line = __LINE__, \
                }; \
        (cond) ? \
                (__if_trace.miss_hit[1]++, 1) : \
                (__if_trace.miss_hit[0]++, 0); \
})
#endif /* CONFIG_PROFILE_ALL_BRANCHES */
#else
# define likely(x) __builtin_expect(!!(x), 1)
# define unlikely(x) __builtin_expect(!!(x), 0)
#endif
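/*
* Illustrative usage (assumed example, not part of this header): the hints
* only tell the compiler which way a branch is expected to go, e.g.
*
*        if (unlikely(!ptr))
*                return -EINVAL;
*
* With CONFIG_TRACE_BRANCH_PROFILING the same call sites additionally
* record hit/miss counts via ftrace_likely_update() above.
*/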
/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif
#ifndef barrier_data
# define barrier_data(ptr) barrier()
#endif
/* workaround for GCC PR82365 if needed */
#ifndef barrier_before_unreachable
# define barrier_before_unreachable() do { } while (0)
#endif
/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
/*
* These macros help objtool understand GCC code flow for unreachable code.
* The __COUNTER__ based labels are a hack to make each instance of the macros
* unique, to convince GCC not to merge duplicate inline asm statements.
*/
#define annotate_reachable() ({ \
        asm volatile("%c0:\n\t" \
                     ".pushsection .discard.reachable\n\t" \
                     ".long %c0b - .\n\t" \
                     ".popsection\n\t" : : "i" (__COUNTER__)); \
})
#define annotate_unreachable() ({ \
        asm volatile("%c0:\n\t" \
                     ".pushsection .discard.unreachable\n\t" \
                     ".long %c0b - .\n\t" \
                     ".popsection\n\t" : : "i" (__COUNTER__)); \
})
#define ASM_UNREACHABLE \
        "999:\n\t" \
        ".pushsection .discard.unreachable\n\t" \
        ".long 999b - .\n\t" \
        ".popsection\n\t"
/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(.rodata..c_jump_table)
#else
#define annotate_reachable()
#define annotate_unreachable()
#define __annotate_jump_table
#endif
#ifndef ASM_UNREACHABLE
# define ASM_UNREACHABLE
#endif
#ifndef unreachable
# define unreachable() do { \
        annotate_unreachable(); \
        __builtin_unreachable(); \
} while (0)
#endif
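/*
* Illustrative usage (assumed example): a code path that provably cannot
* continue, say after a hypothetical noreturn helper,
*
*        my_noreturn_helper();
*        unreachable();
*
* tells both the compiler and objtool that control flow stops here.
*/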
/*
* KENTRY - kernel entry point
* This can be used to annotate symbols (functions or data) that are used
* without their linker symbol being referenced explicitly. For example,
* interrupt vector handlers, or functions in the kernel image that are found
* programmatically.
*
* Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those
* are handled in their own way (with KEEP() in linker scripts).
*
* KENTRY can be avoided if the symbols in question are marked as KEEP() in the
* linker script. For example, an architecture could KEEP() its entire
* boot/exception vector code rather than annotate each function and data.
*/
#ifndef KENTRY
# define KENTRY(sym) \
        extern typeof(sym) sym; \
        static const unsigned long __kentry_##sym \
        __used \
        __section("___kentry" "+" #sym) \
        = (unsigned long)&sym;
#endif
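/*
* Illustrative usage (assumed example): a handler referenced only from
* assembly or a vector table, say
*
*        KENTRY(my_vector_entry);
*
* emits a __kentry_my_vector_entry entry holding the symbol's address in the
* "___kentry+my_vector_entry" section, which the linker script can KEEP() so
* the symbol survives. 'my_vector_entry' is a hypothetical name.
*/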
#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off) \
        ({ unsigned long __ptr; \
           __ptr = (unsigned long) (ptr); \
           (typeof(ptr)) (__ptr + (off)); })
#endif
#ifndef OPTIMIZER_HIDE_VAR
/* Make the optimizer believe the variable can be manipulated arbitrarily. */
#define OPTIMIZER_HIDE_VAR(var) \
        __asm__ ("" : "=r" (var) : "0" (var))
#endif
/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
/*
* Prevent the compiler from merging or refetching reads or writes. The
* compiler is also forbidden from reordering successive instances of
* READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
* particular ordering. One way to make the compiler aware of ordering is to
* put the two invocations of READ_ONCE or WRITE_ONCE in different C
* statements.
*
* These two macros will also work on aggregate data types like structs or
* unions.
*
* Their two major use cases are: (1) Mediating communication between
* process-level code and irq/NMI handlers, all running on the same CPU,
* and (2) Ensuring that the compiler does not fold, spindle, or otherwise
* mutilate accesses that either do not require ordering or that interact
* with an explicit memory barrier or atomic instruction that provides the
* required ordering.
*/
#include <asm/barrier.h>
#include <linux/kasan-checks.h>
/*
* Use __READ_ONCE() instead of READ_ONCE() if you do not require any
* atomicity or dependency ordering guarantees. Note that this may result
* in tears!
*/
#define __READ_ONCE(x) (*(const volatile __unqual_scalar_typeof(x) *)&(x))
#define __READ_ONCE_SCALAR(x) \
({ \
        __unqual_scalar_typeof(x) __x = __READ_ONCE(x); \
        smp_read_barrier_depends(); \
        (typeof(x))__x; \
})
#define READ_ONCE(x) \
({ \
        compiletime_assert_rwonce_type(x); \
        __READ_ONCE_SCALAR(x); \
})
#define __WRITE_ONCE(x, val) \
do { \
        *(volatile typeof(x) *)&(x) = (val); \
} while (0)
#define WRITE_ONCE(x, val) \
do { \
        compiletime_assert_rwonce_type(x); \
        __WRITE_ONCE(x, val); \
} while (0)
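/*
* Illustrative usage (assumed example): a flag shared between process
* context and an IRQ handler on the same CPU can be accessed without
* compiler tearing, merging or refetching as
*
*        producer:  WRITE_ONCE(shared->ready, 1);
*
*        consumer:  while (!READ_ONCE(shared->ready))
*                           cpu_relax();
*
* 'shared->ready' is a hypothetical field; no CPU memory barriers beyond
* the dependency ordering noted above are implied.
*/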
#ifdef CONFIG_KASAN
/*
* We can't declare the function 'inline' because __no_sanitize_address conflicts
* with inlining. Attempting to inline it may cause a build failure.
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
* '__maybe_unused' allows us to avoid defined-but-not-used warnings.
*/
# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
#else
# define __no_kasan_or_inline __always_inline
#endif
static __no_kasan_or_inline
unsigned long __read_once_word_nocheck(const void *addr)
{
        return __READ_ONCE(*(unsigned long *)addr);
}
/*
* Use READ_ONCE_NOCHECK() instead of READ_ONCE() if you need to load a
* word from memory atomically but without telling KASAN. This is usually
* used by unwinding code when walking the stack of a running process.
*/
#define READ_ONCE_NOCHECK(x) \
({ \
        unsigned long __x = __read_once_word_nocheck(&(x)); \
        smp_read_barrier_depends(); \
        __x; \
})
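/*
* Illustrative usage (assumed example): an unwinder walking another task's
* stack might do
*
*        fp = READ_ONCE_NOCHECK(frame->fp);
*
* where 'frame' points at memory KASAN would otherwise complain about.
* 'frame->fp' is a hypothetical field name.
*/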
static __no_kasan_or_inline
unsigned long read_word_at_a_time(const void *addr)
{
        kasan_check_read(addr, 1);
        return *(unsigned long *)addr;
}
#endif /* __KERNEL__ */
/*
* Force the compiler to emit 'sym' as a symbol, so that we can reference
* it from inline assembler. Necessary in case 'sym' could be inlined
* otherwise, or eliminated entirely due to lack of references that are
* visible to the compiler.
*/
#define __ADDRESSABLE(sym) \
        static void * __section(.discard.addressable) __used \
        __PASTE(__addressable_##sym, __LINE__) = (void *)&sym;
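/*
* Illustrative usage (assumed example): code that names a function only
* inside an asm() template can add
*
*        __ADDRESSABLE(my_target_func);
*
* so the compiler still emits the my_target_func symbol for the assembler
* and linker to resolve. 'my_target_func' is a hypothetical name.
*/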
/**
* offset_to_ptr - convert a relative memory offset to an absolute pointer
* @off: the address of the 32-bit offset value
*/
static inline void *offset_to_ptr(const int *off)
{
        return (void *)((unsigned long)off + *off);
}
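/*
* Worked example (assumed values): if the 32-bit value stored at address
* 0x1000 is 0x20, offset_to_ptr() returns (void *)(0x1000 + 0x20), i.e.
* (void *)0x1020: the offset is interpreted relative to its own location,
* the inverse of storing a "sym - ." style relative reference.
*/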
#endif /* __ASSEMBLY__ */
/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
#endif
#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix) \
        do { \
                extern void prefix ## suffix(void) __compiletime_error(msg); \
                if (!(condition)) \
                        prefix ## suffix(); \
        } while (0)
#else
# define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0)
#endif
#define _compiletime_assert(condition, msg, prefix, suffix) \
        __compiletime_assert(condition, msg, prefix, suffix)
/**
* compiletime_assert - break build and emit msg if condition is false
* @condition: a compile-time constant condition to check
* @msg: a message to emit if condition is false
*
* In the tradition of POSIX assert, this macro will break the build if the
* supplied condition is *false*, emitting the supplied error message if the
* compiler has support to do so.
*/
#define compiletime_assert(condition, msg) \
        _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
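/*
* Illustrative usage (assumed example):
*
*        compiletime_assert(sizeof(long long) == 8,
*                           "long long must be 64 bits");
*
* When optimising, a condition that is not provably true leaves the call to
* the __compiletime_error()-annotated function in place, so the build fails
* with the supplied message.
*/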
#define compiletime_assert_atomic_type(t) \
        compiletime_assert(__native_word(t), \
                "Need native word sized stores/loads for atomicity.")
/*
* Yes, this permits 64-bit accesses on 32-bit architectures. These will
* actually be atomic in many cases (namely x86), but for others we rely on
* the access being split into 2x32-bit accesses for a 32-bit quantity (e.g.
* a virtual address) and a strong prevailing wind.
*/
#define compiletime_assert_rwonce_type(t) \
        compiletime_assert(__native_word(t) || sizeof(t) == sizeof(long long), \
                "Unsupported access size for {READ,WRITE}_ONCE().")
/* &a[0] degrades to a pointer: a different type from an array */
#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
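/*
* Illustrative usage (assumed example): ARRAY_SIZE()-style macros add
* __must_be_array(a) so that passing a plain pointer, for which 'a' and
* '&a[0]' have the same type, becomes a build error rather than a silently
* wrong element count.
*/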
#endif /* __LINUX_COMPILER_H */