
The full memory barrier in the XDP socket rings on the consumer side, between the load of the data and the store of the consumer ring index, is there to protect the store from being executed before the load of the data. If that were allowed to happen, the producer might overwrite the data field with a new entry before the consumer had a chance to read it.

On x86, stores are guaranteed not to be reordered with older loads, so a full memory barrier is not needed there; a compile-time barrier is enough. This patch introduces a new primitive in libbpf_util.h that implements a new barrier type, libbpf_smp_rwmb(), hindering stores from being reordered with older loads. It is then used in the XDP socket ring access code in libbpf to improve performance.

Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
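For context, here is a minimal consumer-side sketch of where the new barrier sits, loosely modeled on the peek/release pattern in libbpf's xsk.h. The struct layout and the names cons_ring and cons_read_one() are illustrative, not the real libbpf API; only the barrier macros come from the header listed below, which follows the sketch.

/* Hedged sketch: consumer side of an XDP socket ring. The data load in
 * step 2 must complete before the consumer-index store in step 3 becomes
 * visible, or the producer could reuse the slot and overwrite the
 * descriptor before we read it. libbpf_smp_rwmb() enforces exactly that
 * ordering; on x86 it compiles down to a pure compiler barrier.
 */
#include <stdint.h>
#include "libbpf_util.h"	/* barrier macros from the header below */

struct cons_ring {		/* illustrative layout */
	uint32_t *producer;	/* advanced by the producer */
	uint32_t *consumer;	/* advanced by us, read by the producer */
	uint64_t *ring;		/* descriptor array, power-of-two size */
	uint32_t mask;		/* size - 1 */
	uint32_t cached_cons;	/* local copy of *consumer */
};

/* Returns true and fills *desc if an entry was available. */
static inline bool cons_read_one(struct cons_ring *r, uint64_t *desc)
{
	/* 1. Check that the producer has published at least one entry. */
	if (r->cached_cons == *r->producer)
		return false;

	/* Do not speculatively read the descriptor before the load of
	 * the producer index above has completed.
	 */
	libbpf_smp_rmb();

	/* 2. Load the descriptor written by the producer. */
	*desc = r->ring[r->cached_cons++ & r->mask];

	/* 3. Make sure the data load above is done before the store
	 * below is observed; otherwise the producer might overwrite
	 * the slot before we actually read it.
	 */
	libbpf_smp_rwmb();

	/* 4. Hand the slot back to the producer. */
	*r->consumer = r->cached_cons;
	return true;
}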
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
/* Copyright (c) 2019 Facebook */

#ifndef __LIBBPF_LIBBPF_UTIL_H
#define __LIBBPF_LIBBPF_UTIL_H

#include <stdbool.h>

#ifdef __cplusplus
extern "C" {
#endif

extern void libbpf_print(enum libbpf_print_level level,
			 const char *format, ...)
	__attribute__((format(printf, 2, 3)));

#define __pr(level, fmt, ...)	\
do {				\
	libbpf_print(level, "libbpf: " fmt, ##__VA_ARGS__);	\
} while (0)

#define pr_warning(fmt, ...)	__pr(LIBBPF_WARN, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...)	__pr(LIBBPF_INFO, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...)	__pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__)

/* Use these barrier functions instead of smp_[rw]mb() when they are
 * used in a libbpf header file. That way they can be built into the
 * application that uses libbpf.
 */
#if defined(__i386__) || defined(__x86_64__)
# define libbpf_smp_rmb() asm volatile("" : : : "memory")
# define libbpf_smp_wmb() asm volatile("" : : : "memory")
/* A locked instruction on the stack acts as a full barrier. Use the
 * 32-bit stack pointer on i386; %rsp only exists in 64-bit mode.
 */
# if defined(__x86_64__)
#  define libbpf_smp_mb() \
	asm volatile("lock; addl $0,-4(%%rsp)" : : : "memory", "cc")
# else
#  define libbpf_smp_mb() \
	asm volatile("lock; addl $0,-4(%%esp)" : : : "memory", "cc")
# endif
/* Hinders stores to be observed before older loads. */
# define libbpf_smp_rwmb() asm volatile("" : : : "memory")
#elif defined(__aarch64__)
# define libbpf_smp_rmb() asm volatile("dmb ishld" : : : "memory")
# define libbpf_smp_wmb() asm volatile("dmb ishst" : : : "memory")
# define libbpf_smp_mb() asm volatile("dmb ish" : : : "memory")
# define libbpf_smp_rwmb() libbpf_smp_mb()
#elif defined(__arm__)
/* These are only valid for armv7 and above */
# define libbpf_smp_rmb() asm volatile("dmb ish" : : : "memory")
# define libbpf_smp_wmb() asm volatile("dmb ishst" : : : "memory")
# define libbpf_smp_mb() asm volatile("dmb ish" : : : "memory")
# define libbpf_smp_rwmb() libbpf_smp_mb()
#else
# warning Architecture missing native barrier functions in libbpf_util.h.
# define libbpf_smp_rmb() __sync_synchronize()
# define libbpf_smp_wmb() __sync_synchronize()
# define libbpf_smp_mb() __sync_synchronize()
# define libbpf_smp_rwmb() __sync_synchronize()
#endif

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* __LIBBPF_LIBBPF_UTIL_H */