mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-28 11:18:45 +07:00
5e7b30205c
Commit a5cbe05a66
("bpf: Implement bpf iterator for map elements") added bpf iterator support for map elements. The map element bpf iterator requires info to identify a particular map. In the above commit, the attr->link_create.target_fd is used to carry map_fd and an enum bpf_iter_link_info is added to uapi to specify the target_fd actually representing a map_fd: enum bpf_iter_link_info { BPF_ITER_LINK_UNSPEC = 0, BPF_ITER_LINK_MAP_FD = 1, MAX_BPF_ITER_LINK_INFO, }; This is an extensible approach as we can grow enumerator for pid, cgroup_id, etc. and we can unionize target_fd for pid, cgroup_id, etc. But in the future, there are chances that more complex customization may happen, e.g., for tasks, it could be filtered based on both cgroup_id and user_id. This patch changed the uapi to have fields __aligned_u64 iter_info; __u32 iter_info_len; for additional iter_info for link_create. The iter_info is defined as union bpf_iter_link_info { struct { __u32 map_fd; } map; }; So future extension for additional customization will be easier. The bpf_iter_link_info will be passed to target callback to validate and generic bpf_iter framework does not need to deal it any more. Note that map_fd = 0 will be considered invalid and -EBADF will be returned to user space. Fixes:a5cbe05a66
("bpf: Implement bpf iterator for map elements") Signed-off-by: Yonghong Song <yhs@fb.com> Signed-off-by: Alexei Starovoitov <ast@kernel.org> Acked-by: Andrii Nakryiko <andriin@fb.com> Acked-by: John Fastabend <john.fastabend@gmail.com> Link: https://lore.kernel.org/bpf/20200805055056.1457463-1-yhs@fb.com
181 lines
4.1 KiB
C
181 lines
4.1 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
|
|
/* Copyright (c) 2020 Facebook */
|
|
#include <linux/bpf.h>
|
|
#include <linux/fs.h>
|
|
#include <linux/filter.h>
|
|
#include <linux/kernel.h>
|
|
#include <linux/btf_ids.h>
|
|
|
|
/* Per-seq_file iterator state for walking all bpf maps in the system. */
struct bpf_iter_seq_map_info {
	/* id of the map to resume from; advanced in ->next */
	u32 map_id;
};
/* seq_file ->start: take a reference on the first map whose id is at
 * least info->map_id (bpf_map_get_curr_or_next updates info->map_id to
 * the id actually found).  Returns NULL when no map remains.  On the
 * very first call (*pos == 0) the position is bumped so that position
 * zero is reported only once.
 */
static void *bpf_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_map_info *info = seq->private;
	struct bpf_map *first;

	first = bpf_map_get_curr_or_next(&info->map_id);
	if (first && *pos == 0)
		++*pos;

	return first;
}
/* seq_file ->next: drop the reference on the current map and fetch the
 * next one by id.  Returns NULL at the end of the map id space.
 */
static void *bpf_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_map_info *info = seq->private;
	struct bpf_map *cur = v;

	++*pos;
	/* move past the map we just showed */
	++info->map_id;
	bpf_map_put(cur);

	return bpf_map_get_curr_or_next(&info->map_id);
}
/* Context passed to a bpf_map iterator program: iteration metadata plus
 * the map currently being visited (NULL on the final stop invocation).
 */
struct bpf_iter__bpf_map {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
};

/* Declare the "bpf_map" iterator target's program entry point. */
DEFINE_BPF_ITER_FUNC(bpf_map, struct bpf_iter_meta *meta, struct bpf_map *map)
/* Common show path for ->show and the final ->stop call: build the
 * iterator context and run the attached bpf program, if any.
 * Returns 0 when no program is attached, otherwise the program
 * runner's result.
 */
static int __bpf_map_seq_show(struct seq_file *seq, void *v, bool in_stop)
{
	struct bpf_iter__bpf_map ctx;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = seq;
	ctx.meta = &meta;
	ctx.map = v;

	prog = bpf_iter_get_info(&meta, in_stop);
	if (!prog)
		return 0;

	return bpf_iter_run_prog(prog, &ctx);
}
/* seq_file ->show: run the iterator program for one map (not in stop). */
static int bpf_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_map_seq_show(seq, v, false);
}
static void bpf_map_seq_stop(struct seq_file *seq, void *v)
|
|
{
|
|
if (!v)
|
|
(void)__bpf_map_seq_show(seq, v, true);
|
|
else
|
|
bpf_map_put((struct bpf_map *)v);
|
|
}
|
|
|
|
/* seq_file operations backing the bpf_map iterator. */
static const struct seq_operations bpf_map_seq_ops = {
	.start = bpf_map_seq_start,
	.next = bpf_map_seq_next,
	.stop = bpf_map_seq_stop,
	.show = bpf_map_seq_show,
};
/* BTF id of struct bpf_map, resolved at build time; patched into
 * bpf_map_reg_info's ctx_arg_info during init.
 */
BTF_ID_LIST(btf_bpf_map_id)
BTF_ID(struct, bpf_map)
/* seq_file glue for the bpf_map target: no extra private init/fini is
 * needed beyond the zero-initialized bpf_iter_seq_map_info.
 */
static const struct bpf_iter_seq_info bpf_map_seq_info = {
	.seq_ops = &bpf_map_seq_ops,
	.init_seq_private = NULL,
	.fini_seq_private = NULL,
	.seq_priv_size = sizeof(struct bpf_iter_seq_map_info),
};
/* Registration info for the "bpf_map" iterator target.  Not const:
 * ctx_arg_info[0].btf_id is filled in by bpf_map_iter_init().
 */
static struct bpf_iter_reg bpf_map_reg_info = {
	.target = "bpf_map",
	.ctx_arg_info_size = 1,
	.ctx_arg_info = {
		/* ctx->map may be NULL on the final stop call */
		{ offsetof(struct bpf_iter__bpf_map, map),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.seq_info = &bpf_map_seq_info,
};
static int bpf_iter_attach_map(struct bpf_prog *prog,
|
|
union bpf_iter_link_info *linfo,
|
|
struct bpf_iter_aux_info *aux)
|
|
{
|
|
u32 key_acc_size, value_acc_size, key_size, value_size;
|
|
struct bpf_map *map;
|
|
bool is_percpu = false;
|
|
int err = -EINVAL;
|
|
|
|
if (!linfo->map.map_fd)
|
|
return -EBADF;
|
|
|
|
map = bpf_map_get_with_uref(linfo->map.map_fd);
|
|
if (IS_ERR(map))
|
|
return PTR_ERR(map);
|
|
|
|
if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
|
|
map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
|
|
map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
|
|
is_percpu = true;
|
|
else if (map->map_type != BPF_MAP_TYPE_HASH &&
|
|
map->map_type != BPF_MAP_TYPE_LRU_HASH &&
|
|
map->map_type != BPF_MAP_TYPE_ARRAY)
|
|
goto put_map;
|
|
|
|
key_acc_size = prog->aux->max_rdonly_access;
|
|
value_acc_size = prog->aux->max_rdwr_access;
|
|
key_size = map->key_size;
|
|
if (!is_percpu)
|
|
value_size = map->value_size;
|
|
else
|
|
value_size = round_up(map->value_size, 8) * num_possible_cpus();
|
|
|
|
if (key_acc_size > key_size || value_acc_size > value_size) {
|
|
err = -EACCES;
|
|
goto put_map;
|
|
}
|
|
|
|
aux->map = map;
|
|
return 0;
|
|
|
|
put_map:
|
|
bpf_map_put_with_uref(map);
|
|
return err;
|
|
}
|
|
|
|
/* Drop the uref-holding map reference taken by bpf_iter_attach_map(). */
static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux)
{
	bpf_map_put_with_uref(aux->map);
}
/* Declare the "bpf_map_elem" iterator target's program entry point:
 * the program sees the map plus the current key/value pair.
 */
DEFINE_BPF_ITER_FUNC(bpf_map_elem, struct bpf_iter_meta *meta,
		     struct bpf_map *map, void *key, void *value)
/* Registration info for the "bpf_map_elem" iterator target.  Attach
 * validates the target map fd; key is read-only to the program, value
 * is read-write.  Both may be NULL on the final stop call.
 */
static const struct bpf_iter_reg bpf_map_elem_reg_info = {
	.target = "bpf_map_elem",
	.attach_target = bpf_iter_attach_map,
	.detach_target = bpf_iter_detach_map,
	.ctx_arg_info_size = 2,
	.ctx_arg_info = {
		{ offsetof(struct bpf_iter__bpf_map_elem, key),
		  PTR_TO_RDONLY_BUF_OR_NULL },
		{ offsetof(struct bpf_iter__bpf_map_elem, value),
		  PTR_TO_RDWR_BUF_OR_NULL },
	},
};
/* Register both iterator targets at boot.  The struct bpf_map BTF id,
 * resolved at build time, is patched into bpf_map_reg_info first.
 */
static int __init bpf_map_iter_init(void)
{
	int err;

	bpf_map_reg_info.ctx_arg_info[0].btf_id = *btf_bpf_map_id;

	err = bpf_iter_reg_target(&bpf_map_reg_info);
	if (err)
		return err;

	return bpf_iter_reg_target(&bpf_map_elem_reg_info);
}

late_initcall(bpf_map_iter_init);