Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Alexei Starovoitov says:

====================
pull-request: bpf 2020-11-06

1) Pre-allocated per-cpu hashmap needs to zero-fill reused elements, from David (see the sketch after this list).

2) Tighten bpf_lsm function check, from KP.

3) Fix bpftool attaching to flow dissector, from Lorenz.

4) Use -fno-gcse for the whole kernel/bpf/core.c instead of function attribute, from Ard.
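
A minimal user-space sketch of the guarantee behind item 1), mirroring the new map_init selftest included in the diff below (the helper name dump_percpu_value and the u64 value size are illustrative assumptions; the libbpf calls are existing API):

  /* Dump one element of a BPF_MAP_TYPE_PERCPU_HASH map. After a BPF program
   * re-inserts a pre-allocated element, only the CPU that ran the program
   * should report a non-zero value; with the zero-fill fix, every other CPU
   * reads 0 instead of stale data left by the slot's previous owner.
   */
  #include <stdio.h>
  #include <bpf/bpf.h>
  #include <bpf/libbpf.h>

  static void dump_percpu_value(int map_fd, unsigned long long key)
  {
          int nr_cpus = libbpf_num_possible_cpus();

          if (nr_cpus <= 0)
                  return;

          unsigned long long values[nr_cpus];

          if (bpf_map_lookup_elem(map_fd, &key, values))
                  return;

          for (int cpu = 0; cpu < nr_cpus; cpu++)
                  printf("cpu%d: 0x%llx\n", cpu, values[cpu]);
  }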

* git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  bpf: Update verification logic for LSM programs
  bpf: Zero-fill re-used per-cpu map element
  bpf: BPF_PRELOAD depends on BPF_SYSCALL
  tools/bpftool: Fix attaching flow dissector
  libbpf: Fix possible use after free in xsk_socket__delete
  libbpf: Fix null dereference in xsk_socket__delete
  libbpf, hashmap: Fix undefined behavior in hash_bits
  bpf: Don't rely on GCC __attribute__((optimize)) to disable GCSE
  tools, bpftool: Remove two unused variables.
  tools, bpftool: Avoid array index warnings.
  xsk: Fix possible memory leak at socket close
  bpf: Add struct bpf_redir_neigh forward declaration to BPF helper defs
  samples/bpf: Set rlimit for memlock to infinity in all samples
  bpf: Fix -Wshadow warnings
  selftest/bpf: Fix profiler test using CO-RE relocation for enums
====================

Link: https://lore.kernel.org/r/20201106221759.24143-1-alexei.starovoitov@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Committed by Jakub Kicinski on 2020-11-06 17:49:31 -08:00
commit 86bbf01977
25 changed files with 346 additions and 49 deletions


@@ -175,5 +175,3 @@
 #else
 #define __diag_GCC_8(s)
 #endif
-
-#define __no_fgcse __attribute__((optimize("-fno-gcse")))


@@ -247,10 +247,6 @@ struct ftrace_likely_data {
 #define asm_inline asm
 #endif
 
-#ifndef __no_fgcse
-# define __no_fgcse
-#endif
-
 /* Are two types/vars the same type (ignoring qualifiers)? */
 #define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))


@@ -558,21 +558,21 @@ struct sk_filter {
 DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
 
 #define __BPF_PROG_RUN(prog, ctx, dfunc)	({ \
-	u32 ret; \
+	u32 __ret; \
 	cant_migrate(); \
 	if (static_branch_unlikely(&bpf_stats_enabled_key)) { \
-		struct bpf_prog_stats *stats; \
-		u64 start = sched_clock(); \
-		ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
-		stats = this_cpu_ptr(prog->aux->stats); \
-		u64_stats_update_begin(&stats->syncp); \
-		stats->cnt++; \
-		stats->nsecs += sched_clock() - start; \
-		u64_stats_update_end(&stats->syncp); \
+		struct bpf_prog_stats *__stats; \
+		u64 __start = sched_clock(); \
+		__ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
+		__stats = this_cpu_ptr(prog->aux->stats); \
+		u64_stats_update_begin(&__stats->syncp); \
+		__stats->cnt++; \
+		__stats->nsecs += sched_clock() - __start; \
+		u64_stats_update_end(&__stats->syncp); \
 	} else { \
-		ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
+		__ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
 	} \
-	ret; })
+	__ret; })
 
 #define BPF_PROG_RUN(prog, ctx) \
 	__BPF_PROG_RUN(prog, ctx, bpf_dispatcher_nop_func)


@@ -86,7 +86,7 @@ int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
 void xp_destroy(struct xsk_buff_pool *pool);
 void xp_release(struct xdp_buff_xsk *xskb);
 void xp_get_pool(struct xsk_buff_pool *pool);
-void xp_put_pool(struct xsk_buff_pool *pool);
+bool xp_put_pool(struct xsk_buff_pool *pool);
 void xp_clear_dev(struct xsk_buff_pool *pool);
 void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);
 void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs);


@@ -1,6 +1,10 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-y := core.o
-CFLAGS_core.o += $(call cc-disable-warning, override-init)
+ifneq ($(CONFIG_BPF_JIT_ALWAYS_ON),y)
+# ___bpf_prog_run() needs GCSE disabled on x86; see 3193c0836f203 for details
+cflags-nogcse-$(CONFIG_X86)$(CONFIG_CC_IS_GCC) := -fno-gcse
+endif
+CFLAGS_core.o += $(call cc-disable-warning, override-init) $(cflags-nogcse-yy)
 
 obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o bpf_iter.o map_iter.o task_iter.o prog_iter.o
 obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o


@@ -13,6 +13,7 @@
 #include <linux/bpf_verifier.h>
 #include <net/bpf_sk_storage.h>
 #include <linux/bpf_local_storage.h>
+#include <linux/btf_ids.h>
 
 /* For every LSM hook that allows attachment of BPF programs, declare a nop
  * function where a BPF program can be attached.
@@ -26,7 +27,11 @@ noinline RET bpf_lsm_##NAME(__VA_ARGS__) \
 #include <linux/lsm_hook_defs.h>
 #undef LSM_HOOK
 
-#define BPF_LSM_SYM_PREFX "bpf_lsm_"
+#define LSM_HOOK(RET, DEFAULT, NAME, ...) BTF_ID(func, bpf_lsm_##NAME)
+BTF_SET_START(bpf_lsm_hooks)
+#include <linux/lsm_hook_defs.h>
+#undef LSM_HOOK
+BTF_SET_END(bpf_lsm_hooks)
 
 int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
 			const struct bpf_prog *prog)
@@ -37,8 +42,7 @@ int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
 		return -EINVAL;
 	}
 
-	if (strncmp(BPF_LSM_SYM_PREFX, prog->aux->attach_func_name,
-		    sizeof(BPF_LSM_SYM_PREFX) - 1)) {
+	if (!btf_id_set_contains(&bpf_lsm_hooks, prog->aux->attach_btf_id)) {
 		bpf_log(vlog, "attach_btf_id %u points to wrong type name %s\n",
 			prog->aux->attach_btf_id, prog->aux->attach_func_name);
 		return -EINVAL;


@@ -1369,7 +1369,7 @@ u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
  *
  * Decode and execute eBPF instructions.
  */
-static u64 __no_fgcse ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
+static u64 ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u64 *stack)
 {
 #define BPF_INSN_2_LBL(x, y)    [BPF_##x | BPF_##y] = &&x##_##y
 #define BPF_INSN_3_LBL(x, y, z) [BPF_##x | BPF_##y | BPF_##z] = &&x##_##y##_##z


@@ -821,6 +821,32 @@ static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
 	}
 }
 
+static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr,
+			    void *value, bool onallcpus)
+{
+	/* When using prealloc and not setting the initial value on all cpus,
+	 * zero-fill element values for other cpus (just as what happens when
+	 * not using prealloc). Otherwise, bpf program has no way to ensure
+	 * known initial values for cpus other than current one
+	 * (onallcpus=false always when coming from bpf prog).
+	 */
+	if (htab_is_prealloc(htab) && !onallcpus) {
+		u32 size = round_up(htab->map.value_size, 8);
+		int current_cpu = raw_smp_processor_id();
+		int cpu;
+
+		for_each_possible_cpu(cpu) {
+			if (cpu == current_cpu)
+				bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value,
+						size);
+			else
+				memset(per_cpu_ptr(pptr, cpu), 0, size);
+		}
+	} else {
+		pcpu_copy_value(htab, pptr, value, onallcpus);
+	}
+}
+
 static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
 {
 	return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
@@ -891,7 +917,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 			}
 		}
 
-		pcpu_copy_value(htab, pptr, value, onallcpus);
+		pcpu_init_value(htab, pptr, value, onallcpus);
 
 		if (!prealloc)
 			htab_elem_set_ptr(l_new, key_size, pptr);
@@ -1183,7 +1209,7 @@ static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
 		pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
 				value, onallcpus);
 	} else {
-		pcpu_copy_value(htab, htab_elem_get_ptr(l_new, key_size),
+		pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size),
 				value, onallcpus);
 		hlist_nulls_add_head_rcu(&l_new->hash_node, head);
 		l_new = NULL;


@@ -6,6 +6,7 @@ config USERMODE_DRIVER
 menuconfig BPF_PRELOAD
 	bool "Preload BPF file system with kernel specific program and map iterators"
 	depends on BPF
+	depends on BPF_SYSCALL
 	# The dependency on !COMPILE_TEST prevents it from being enabled
 	# in allmodconfig or allyesconfig configurations
 	depends on !COMPILE_TEST


@@ -1146,7 +1146,8 @@ static void xsk_destruct(struct sock *sk)
 	if (!sock_flag(sk, SOCK_DEAD))
 		return;
 
-	xp_put_pool(xs->pool);
+	if (!xp_put_pool(xs->pool))
+		xdp_put_umem(xs->umem);
 
 	sk_refcnt_debug_dec(sk);
 }


@@ -251,15 +251,18 @@ void xp_get_pool(struct xsk_buff_pool *pool)
 	refcount_inc(&pool->users);
 }
 
-void xp_put_pool(struct xsk_buff_pool *pool)
+bool xp_put_pool(struct xsk_buff_pool *pool)
 {
 	if (!pool)
-		return;
+		return false;
 
 	if (refcount_dec_and_test(&pool->users)) {
 		INIT_WORK(&pool->work, xp_release_deferred);
 		schedule_work(&pool->work);
+		return true;
 	}
+
+	return false;
 }
 
 static struct xsk_dma_map *xp_find_dma_map(struct xsk_buff_pool *pool)


@@ -290,7 +290,7 @@ static int test_debug_fs_uprobe(char *binary_path, long offset, bool is_return)
 
 int main(int argc, char **argv)
 {
-	struct rlimit r = {1024*1024, RLIM_INFINITY};
+	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
 	extern char __executable_start;
 	char filename[256], buf[256];
 	__u64 uprobe_file_offset;


@@ -116,7 +116,7 @@ static void int_exit(int sig)
 
 int main(int ac, char **argv)
 {
-	struct rlimit r = {1024*1024, RLIM_INFINITY};
+	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
 	long key, next_key, value;
 	struct bpf_link *links[2];
 	struct bpf_program *prog;


@@ -107,7 +107,7 @@ static void print_hist(int fd)
 
 int main(int ac, char **argv)
 {
-	struct rlimit r = {1024*1024, RLIM_INFINITY};
+	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
 	struct bpf_link *links[2];
 	struct bpf_program *prog;
 	struct bpf_object *obj;


@@ -765,7 +765,7 @@ static int load_cpumap_prog(char *file_name, char *prog_name,
 
 int main(int argc, char **argv)
 {
-	struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY};
+	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
 	char *prog_name = "xdp_cpu_map5_lb_hash_ip_pairs";
 	char *mprog_filename = "xdp_redirect_kern.o";
 	char *redir_interface = NULL, *redir_map = NULL;


@@ -450,7 +450,7 @@ static void stats_poll(int interval, int action, __u32 cfg_opt)
 int main(int argc, char **argv)
 {
 	__u32 cfg_options= NO_TOUCH ; /* Default: Don't touch packet memory */
-	struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY};
+	struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
 	struct bpf_prog_load_attr prog_load_attr = {
 		.prog_type	= BPF_PROG_TYPE_XDP,
 	};


@@ -408,6 +408,7 @@ class PrinterHelpers(Printer):
             'struct bpf_perf_event_data',
             'struct bpf_perf_event_value',
             'struct bpf_pidns_info',
+            'struct bpf_redir_neigh',
             'struct bpf_sock',
             'struct bpf_sock_addr',
             'struct bpf_sock_ops',


@@ -843,9 +843,14 @@ static int handle_perms(void)
 	else
 		p_err("missing %s%s%s%s%s%s%s%srequired for full feature probing; run as root or use 'unprivileged'",
 		      capability_msg(bpf_caps, 0),
+#ifdef CAP_BPF
 		      capability_msg(bpf_caps, 1),
 		      capability_msg(bpf_caps, 2),
-		      capability_msg(bpf_caps, 3));
+		      capability_msg(bpf_caps, 3)
+#else
+			"", "", "", "", "", ""
+#endif /* CAP_BPF */
+		);
 
 	goto exit_free;
 }


@@ -940,7 +940,7 @@ static int parse_attach_detach_args(int argc, char **argv, int *progfd,
 	}
 
 	if (*attach_type == BPF_FLOW_DISSECTOR) {
-		*mapfd = -1;
+		*mapfd = 0;
 		return 0;
 	}


@@ -70,7 +70,7 @@ int BPF_PROG(fentry_XXX)
 static inline void
 fexit_update_maps(u32 id, struct bpf_perf_event_value *after)
 {
-	struct bpf_perf_event_value *before, diff, *accum;
+	struct bpf_perf_event_value *before, diff;
 
 	before = bpf_map_lookup_elem(&fentry_readings, &id);
 	/* only account samples with a valid fentry_reading */
@@ -95,7 +95,7 @@ int BPF_PROG(fexit_XXX)
 {
 	struct bpf_perf_event_value readings[MAX_NUM_MATRICS];
 	u32 cpu = bpf_get_smp_processor_id();
-	u32 i, one = 1, zero = 0;
+	u32 i, zero = 0;
 	int err;
 	u64 *count;


@@ -15,6 +15,9 @@
 static inline size_t hash_bits(size_t h, int bits)
 {
 	/* shuffle bits and return requested number of upper bits */
+	if (bits == 0)
+		return 0;
+
 #if (__SIZEOF_SIZE_T__ == __SIZEOF_LONG_LONG__)
 	/* LP64 case */
 	return (h * 11400714819323198485llu) >> (__SIZEOF_LONG_LONG__ * 8 - bits);
@@ -174,17 +177,17 @@ bool hashmap__find(const struct hashmap *map, const void *key, void **value);
  * @key: key to iterate entries for
  */
 #define hashmap__for_each_key_entry(map, cur, _key) \
-	for (cur = ({ size_t bkt = hash_bits(map->hash_fn((_key), map->ctx),\
-					     map->cap_bits); \
-		      map->buckets ? map->buckets[bkt] : NULL; }); \
+	for (cur = map->buckets \
+		     ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
+		     : NULL; \
 	     cur; \
 	     cur = cur->next) \
 		if (map->equal_fn(cur->key, (_key), map->ctx))
 
 #define hashmap__for_each_key_entry_safe(map, cur, tmp, _key) \
-	for (cur = ({ size_t bkt = hash_bits(map->hash_fn((_key), map->ctx),\
-					     map->cap_bits); \
-		      cur = map->buckets ? map->buckets[bkt] : NULL; }); \
+	for (cur = map->buckets \
+		     ? map->buckets[hash_bits(map->hash_fn((_key), map->ctx), map->cap_bits)] \
+		     : NULL; \
 	     cur && ({ tmp = cur->next; true; }); \
 	     cur = tmp) \
 		if (map->equal_fn(cur->key, (_key), map->ctx))


@@ -891,13 +891,16 @@ int xsk_umem__delete(struct xsk_umem *umem)
 void xsk_socket__delete(struct xsk_socket *xsk)
 {
 	size_t desc_sz = sizeof(struct xdp_desc);
-	struct xsk_ctx *ctx = xsk->ctx;
 	struct xdp_mmap_offsets off;
+	struct xsk_umem *umem;
+	struct xsk_ctx *ctx;
 	int err;
 
 	if (!xsk)
 		return;
 
+	ctx = xsk->ctx;
+	umem = ctx->umem;
 	if (ctx->prog_fd != -1) {
 		xsk_delete_bpf_maps(xsk);
 		close(ctx->prog_fd);
@@ -917,11 +920,11 @@ void xsk_socket__delete(struct xsk_socket *xsk)
 
 	xsk_put_ctx(ctx);
 
-	ctx->umem->refcount--;
+	umem->refcount--;
 	/* Do not close an fd that also has an associated umem connected
 	 * to it.
 	 */
-	if (xsk->fd != ctx->umem->fd)
+	if (xsk->fd != umem->fd)
 		close(xsk->fd);
 	free(xsk);
 }


@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2020 Tessares SA <http://www.tessares.net> */
+
+#include <test_progs.h>
+#include "test_map_init.skel.h"
+
+#define TEST_VALUE 0x1234
+#define FILL_VALUE 0xdeadbeef
+
+static int nr_cpus;
+static int duration;
+
+typedef unsigned long long map_key_t;
+typedef unsigned long long map_value_t;
+typedef struct {
+	map_value_t v; /* padding */
+} __bpf_percpu_val_align pcpu_map_value_t;
+
+static int map_populate(int map_fd, int num)
+{
+	pcpu_map_value_t value[nr_cpus];
+	int i, err;
+	map_key_t key;
+
+	for (i = 0; i < nr_cpus; i++)
+		bpf_percpu(value, i) = FILL_VALUE;
+
+	for (key = 1; key <= num; key++) {
+		err = bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST);
+		if (!ASSERT_OK(err, "bpf_map_update_elem"))
+			return -1;
+	}
+
+	return 0;
+}
+
+static struct test_map_init *setup(enum bpf_map_type map_type, int map_sz,
+				   int *map_fd, int populate)
+{
+	struct test_map_init *skel;
+	int err;
+
+	skel = test_map_init__open();
+	if (!ASSERT_OK_PTR(skel, "skel_open"))
+		return NULL;
+
+	err = bpf_map__set_type(skel->maps.hashmap1, map_type);
+	if (!ASSERT_OK(err, "bpf_map__set_type"))
+		goto error;
+
+	err = bpf_map__set_max_entries(skel->maps.hashmap1, map_sz);
+	if (!ASSERT_OK(err, "bpf_map__set_max_entries"))
+		goto error;
+
+	err = test_map_init__load(skel);
+	if (!ASSERT_OK(err, "skel_load"))
+		goto error;
+
+	*map_fd = bpf_map__fd(skel->maps.hashmap1);
+	if (CHECK(*map_fd < 0, "bpf_map__fd", "failed\n"))
+		goto error;
+
+	err = map_populate(*map_fd, populate);
+	if (!ASSERT_OK(err, "map_populate"))
+		goto error_map;
+
+	return skel;
+
+error_map:
+	close(*map_fd);
+
+error:
+	test_map_init__destroy(skel);
+	return NULL;
+}
+
+/* executes bpf program that updates map with key, value */
+static int prog_run_insert_elem(struct test_map_init *skel, map_key_t key,
+				map_value_t value)
+{
+	struct test_map_init__bss *bss;
+
+	bss = skel->bss;
+	bss->inKey = key;
+	bss->inValue = value;
+	bss->inPid = getpid();
+
+	if (!ASSERT_OK(test_map_init__attach(skel), "skel_attach"))
+		return -1;
+
+	/* Let tracepoint trigger */
+	syscall(__NR_getpgid);
+
+	test_map_init__detach(skel);
+
+	return 0;
+}
+
+static int check_values_one_cpu(pcpu_map_value_t *value, map_value_t expected)
+{
+	int i, nzCnt = 0;
+	map_value_t val;
+
+	for (i = 0; i < nr_cpus; i++) {
+		val = bpf_percpu(value, i);
+		if (val) {
+			if (CHECK(val != expected, "map value",
+				  "unexpected for cpu %d: 0x%llx\n", i, val))
+				return -1;
+			nzCnt++;
+		}
+	}
+
+	if (CHECK(nzCnt != 1, "map value", "set for %d CPUs instead of 1!\n",
+		  nzCnt))
+		return -1;
+
+	return 0;
+}
+
+/* Add key=1 elem with values set for all CPUs
+ * Delete elem key=1
+ * Run bpf prog that inserts new key=1 elem with value=0x1234
+ * (bpf prog can only set value for current CPU)
+ * Lookup Key=1 and check value is as expected for all CPUs:
+ * value set by bpf prog for one CPU, 0 for all others
+ */
+static void test_pcpu_map_init(void)
+{
+	pcpu_map_value_t value[nr_cpus];
+	struct test_map_init *skel;
+	int map_fd, err;
+	map_key_t key;
+
+	/* max 1 elem in map so insertion is forced to reuse freed entry */
+	skel = setup(BPF_MAP_TYPE_PERCPU_HASH, 1, &map_fd, 1);
+	if (!ASSERT_OK_PTR(skel, "prog_setup"))
+		return;
+
+	/* delete element so the entry can be re-used*/
+	key = 1;
+	err = bpf_map_delete_elem(map_fd, &key);
+	if (!ASSERT_OK(err, "bpf_map_delete_elem"))
+		goto cleanup;
+
+	/* run bpf prog that inserts new elem, re-using the slot just freed */
+	err = prog_run_insert_elem(skel, key, TEST_VALUE);
+	if (!ASSERT_OK(err, "prog_run_insert_elem"))
+		goto cleanup;
+
+	/* check that key=1 was re-created by bpf prog */
+	err = bpf_map_lookup_elem(map_fd, &key, value);
+	if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
+		goto cleanup;
+
+	/* and has expected values */
+	check_values_one_cpu(value, TEST_VALUE);
+
+cleanup:
+	test_map_init__destroy(skel);
+}
+
+/* Add key=1 and key=2 elems with values set for all CPUs
+ * Run bpf prog that inserts new key=3 elem
+ * (only for current cpu; other cpus should have initial value = 0)
+ * Lookup Key=1 and check value is as expected for all CPUs
+ */
+static void test_pcpu_lru_map_init(void)
+{
+	pcpu_map_value_t value[nr_cpus];
+	struct test_map_init *skel;
+	int map_fd, err;
+	map_key_t key;
+
+	/* Set up LRU map with 2 elements, values filled for all CPUs.
+	 * With these 2 elements, the LRU map is full
+	 */
+	skel = setup(BPF_MAP_TYPE_LRU_PERCPU_HASH, 2, &map_fd, 2);
+	if (!ASSERT_OK_PTR(skel, "prog_setup"))
+		return;
+
+	/* run bpf prog that inserts new key=3 element, re-using LRU slot */
+	key = 3;
+	err = prog_run_insert_elem(skel, key, TEST_VALUE);
+	if (!ASSERT_OK(err, "prog_run_insert_elem"))
+		goto cleanup;
+
+	/* check that key=3 replaced one of earlier elements */
+	err = bpf_map_lookup_elem(map_fd, &key, value);
+	if (!ASSERT_OK(err, "bpf_map_lookup_elem"))
+		goto cleanup;
+
+	/* and has expected values */
+	check_values_one_cpu(value, TEST_VALUE);
+
+cleanup:
+	test_map_init__destroy(skel);
+}
+
+void test_map_init(void)
+{
+	nr_cpus = bpf_num_possible_cpus();
+	if (nr_cpus <= 1) {
+		printf("%s:SKIP: >1 cpu needed for this test\n", __func__);
+		test__skip();
+		return;
+	}
+
+	if (test__start_subtest("pcpu_map_init"))
+		test_pcpu_map_init();
+
+	if (test__start_subtest("pcpu_lru_map_init"))
+		test_pcpu_lru_map_init();
+}


@@ -243,7 +243,10 @@ static ino_t get_inode_from_kernfs(struct kernfs_node* node)
 	}
 }
 
-int pids_cgrp_id = 1;
+extern bool CONFIG_CGROUP_PIDS __kconfig __weak;
+enum cgroup_subsys_id___local {
+	pids_cgrp_id___local = 123, /* value doesn't matter */
+};
 
 static INLINE void* populate_cgroup_info(struct cgroup_data_t* cgroup_data,
 					 struct task_struct* task,
@@ -253,7 +256,9 @@ static INLINE void* populate_cgroup_info(struct cgroup_data_t* cgroup_data,
 		BPF_CORE_READ(task, nsproxy, cgroup_ns, root_cset, dfl_cgrp, kn);
 	struct kernfs_node* proc_kernfs = BPF_CORE_READ(task, cgroups, dfl_cgrp, kn);
 
-	if (ENABLE_CGROUP_V1_RESOLVER) {
+	if (ENABLE_CGROUP_V1_RESOLVER && CONFIG_CGROUP_PIDS) {
+		int cgrp_id = bpf_core_enum_value(enum cgroup_subsys_id___local,
+						  pids_cgrp_id___local);
 #ifdef UNROLL
 #pragma unroll
 #endif
@@ -262,7 +267,7 @@ static INLINE void* populate_cgroup_info(struct cgroup_data_t* cgroup_data,
 				BPF_CORE_READ(task, cgroups, subsys[i]);
 			if (subsys != NULL) {
 				int subsys_id = BPF_CORE_READ(subsys, ss, id);
-				if (subsys_id == pids_cgrp_id) {
+				if (subsys_id == cgrp_id) {
 					proc_kernfs = BPF_CORE_READ(subsys, cgroup, kn);
 					root_kernfs = BPF_CORE_READ(subsys, ss, root, kf_root, kn);
 					break;


@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020 Tessares SA <http://www.tessares.net> */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+__u64 inKey = 0;
+__u64 inValue = 0;
+__u32 inPid = 0;
+
+struct {
+	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+	__uint(max_entries, 2);
+	__type(key, __u64);
+	__type(value, __u64);
+} hashmap1 SEC(".maps");
+
+SEC("tp/syscalls/sys_enter_getpgid")
+int sysenter_getpgid(const void *ctx)
+{
+	/* Just do it for once, when called from our own test prog. This
+	 * ensures the map value is only updated for a single CPU.
+	 */
+	int cur_pid = bpf_get_current_pid_tgid() >> 32;
+
+	if (cur_pid == inPid)
+		bpf_map_update_elem(&hashmap1, &inKey, &inValue, BPF_NOEXIST);
+
+	return 0;
+}
+
+char _license[] SEC("license") = "GPL";