bpf: Add bpf_map iterator

Implement seq_file operations to traverse all bpf_maps.

Signed-off-by: Yonghong Song <yhs@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Andrii Nakryiko <andriin@fb.com>
Link: https://lore.kernel.org/bpf/20200509175909.2476096-1-yhs@fb.com
parent e5158d987b
commit 6086d29def
include/linux/bpf.h
@@ -1082,6 +1082,7 @@ int generic_map_update_batch(struct bpf_map *map,
 int generic_map_delete_batch(struct bpf_map *map,
			     const union bpf_attr *attr,
			     union bpf_attr __user *uattr);
+struct bpf_map *bpf_map_get_curr_or_next(u32 *id);

 extern int sysctl_unprivileged_bpf_disabled;

kernel/bpf/Makefile
@@ -2,7 +2,7 @@
 obj-y := core.o
 CFLAGS_core.o += $(call cc-disable-warning, override-init)

-obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o bpf_iter.o
+obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o bpf_iter.o map_iter.o
 obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o
 obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o
 obj-$(CONFIG_BPF_SYSCALL) += disasm.o
kernel/bpf/map_iter.c (new file, 97 lines)
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2020 Facebook */
+#include <linux/bpf.h>
+#include <linux/fs.h>
+#include <linux/filter.h>
+#include <linux/kernel.h>
+
+struct bpf_iter_seq_map_info {
+	u32 mid;
+};
+
+static void *bpf_map_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	struct bpf_iter_seq_map_info *info = seq->private;
+	struct bpf_map *map;
+
+	map = bpf_map_get_curr_or_next(&info->mid);
+	if (!map)
+		return NULL;
+
+	++*pos;
+	return map;
+}
+
+static void *bpf_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	struct bpf_iter_seq_map_info *info = seq->private;
+	struct bpf_map *map;
+
+	++*pos;
+	++info->mid;
+	bpf_map_put((struct bpf_map *)v);
+	map = bpf_map_get_curr_or_next(&info->mid);
+	if (!map)
+		return NULL;
+
+	return map;
+}
+
+struct bpf_iter__bpf_map {
+	__bpf_md_ptr(struct bpf_iter_meta *, meta);
+	__bpf_md_ptr(struct bpf_map *, map);
+};
+
+DEFINE_BPF_ITER_FUNC(bpf_map, struct bpf_iter_meta *meta, struct bpf_map *map)
+
+static int __bpf_map_seq_show(struct seq_file *seq, void *v, bool in_stop)
+{
+	struct bpf_iter__bpf_map ctx;
+	struct bpf_iter_meta meta;
+	struct bpf_prog *prog;
+	int ret = 0;
+
+	ctx.meta = &meta;
+	ctx.map = v;
+	meta.seq = seq;
+	prog = bpf_iter_get_info(&meta, in_stop);
+	if (prog)
+		ret = bpf_iter_run_prog(prog, &ctx);
+
+	return ret;
+}
+
+static int bpf_map_seq_show(struct seq_file *seq, void *v)
+{
+	return __bpf_map_seq_show(seq, v, false);
+}
+
+static void bpf_map_seq_stop(struct seq_file *seq, void *v)
+{
+	if (!v)
+		(void)__bpf_map_seq_show(seq, v, true);
+	else
+		bpf_map_put((struct bpf_map *)v);
+}
+
+static const struct seq_operations bpf_map_seq_ops = {
+	.start	= bpf_map_seq_start,
+	.next	= bpf_map_seq_next,
+	.stop	= bpf_map_seq_stop,
+	.show	= bpf_map_seq_show,
+};
+
+static int __init bpf_map_iter_init(void)
+{
+	struct bpf_iter_reg reg_info = {
+		.target			= "bpf_map",
+		.seq_ops		= &bpf_map_seq_ops,
+		.init_seq_private	= NULL,
+		.fini_seq_private	= NULL,
+		.seq_priv_size		= sizeof(struct bpf_iter_seq_map_info),
+	};
+
+	return bpf_iter_reg_target(&reg_info);
+}
+
+late_initcall(bpf_map_iter_init);
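The struct bpf_iter__bpf_map context defined in the new file above is what a BPF iterator program attached to the "bpf_map" target sees for each map. As a rough, illustrative sketch only (not part of this commit): it assumes the rest of the bpf_iter series plus libbpf's SEC("iter/bpf_map") convention, a generated vmlinux.h, and the bpf_seq_printf() helper from the companion patches; the program name dump_bpf_map is made up.

/* Illustrative sketch: a BPF program consuming the "bpf_map" iterator
 * target registered above. Assumes companion bpf_iter patches and libbpf
 * SEC("iter/...") support; not part of this diff.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char LICENSE[] SEC("license") = "GPL";

SEC("iter/bpf_map")
int dump_bpf_map(struct bpf_iter__bpf_map *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	struct bpf_map *map = ctx->map;
	static const char fmt[] = "map id: %u\n";
	__u64 id;

	/* bpf_map_seq_stop() runs the program one last time with map == NULL */
	if (!map)
		return 0;

	id = map->id;
	/* bpf_seq_printf() writes into the seq_file backing the iterator read() */
	bpf_seq_printf(seq, fmt, sizeof(fmt), &id, sizeof(id));
	return 0;
}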
kernel/bpf/syscall.c
@@ -2934,6 +2934,25 @@ static int bpf_obj_get_next_id(const union bpf_attr *attr,
 	return err;
 }

+struct bpf_map *bpf_map_get_curr_or_next(u32 *id)
+{
+	struct bpf_map *map;
+
+	spin_lock_bh(&map_idr_lock);
+again:
+	map = idr_get_next(&map_idr, id);
+	if (map) {
+		map = __bpf_map_inc_not_zero(map, false);
+		if (IS_ERR(map)) {
+			(*id)++;
+			goto again;
+		}
+	}
+	spin_unlock_bh(&map_idr_lock);
+
+	return map;
+}
+
 #define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id

 struct bpf_prog *bpf_prog_by_id(u32 id)
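For completeness, a sketch of how user space would eventually drive this iterator. This is illustrative only and leans on libbpf support added elsewhere in the series (bpf_program__attach_iter(), bpf_iter_create()); dump_all_maps and the program name dump_bpf_map are hypothetical.

/* Illustrative only: reading the "bpf_map" iterator from user space.
 * Assumes libbpf's bpf_program__attach_iter() and bpf_iter_create();
 * "dump_bpf_map" is the hypothetical program shown earlier.
 */
#include <stdio.h>
#include <unistd.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

static int dump_all_maps(struct bpf_object *obj)
{
	struct bpf_program *prog;
	struct bpf_link *link;
	char buf[4096];
	ssize_t n;
	int iter_fd, err = 0;

	prog = bpf_object__find_program_by_name(obj, "dump_bpf_map");
	if (!prog)
		return -1;

	/* No per-target attach options are needed for "bpf_map" */
	link = bpf_program__attach_iter(prog, NULL);
	if (libbpf_get_error(link))
		return -1;

	/* Each iterator fd created from the link is one full traversal of all maps */
	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (iter_fd < 0) {
		err = -1;
		goto out;
	}

	/* read() drives bpf_map_seq_start/next/show/stop in the kernel */
	while ((n = read(iter_fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);

	close(iter_fd);
out:
	bpf_link__destroy(link);
	return err;
}

Once bpffs pinning of iterator links is supported, pinning the link and reading the pinned file with cat performs the same traversal.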