Merge branch 'bpf-libbpf-btf-defined-maps'

Andrii Nakryiko says:

====================
This patch set implements an initial version (as discussed at the LSF/MM 2019
conference) of a new way to specify BPF maps, relying on BTF type information,
which allows for easy extensibility while preserving forward and backward
compatibility. See details and examples in the description for patch #7.
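
For a quick illustration, here is one of the BTF-defined maps from the
converted selftests in the diffs below (assuming the usual bpf_helpers.h
definitions); the key and value types are expressed as pointer members, so
their sizes are derived from BTF instead of explicit key_size/value_size
fields:

struct {
	__u32 type;
	__u32 max_entries;
	__u32 *key;
	__u64 *value;
} result_number SEC(".maps") = {
	.type = BPF_MAP_TYPE_ARRAY,
	.max_entries = 11,
};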

[0] contains an outline of follow-up extensions to be added after this basic
set of features lands. They are useful by themselves, but they also allow
bringing libbpf to feature parity with the iproute2 BPF loader. That should
open a path forward for unifying BPF loaders.

Patch #1 centralizes the commonly used min/max macros in libbpf_internal.h.
Patch #2 extracts the .BTF and .BTF.ext loading logic from elf_collect().
Patch #3 simplifies elf_collect() error-handling logic.
Patch #4 refactors map initialization logic into separate handling of
user-provided maps and global data maps, in preparation for adding another
way (BTF-defined maps).
Patch #5 adds support for map definitions in multiple ELF sections and
deprecates the bpf_object__find_map_by_offset() API, which doesn't appear to
be used anymore and assumes that all map definitions reside in a single ELF
section.
Patch #6 splits BTF initialization from BTF sanitization/loading into the
kernel, so that the original BTF is preserved at the time of map
initialization.
Patch #7 adds support for BTF-defined maps.
Patch #8 adds a new test for BTF-defined map definitions.
Patches #9-11 convert test BPF map definitions to the BTF-defined way; an
example conversion is sketched below.
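
As a rough before/after illustration (reconstructed from one of the hunks
below; SEC() and the map types come from bpf_helpers.h and linux/bpf.h, as in
the tests themselves):

/* legacy definition, removed by the conversion */
struct bpf_map_def SEC("maps") global_map = {
	.type = BPF_MAP_TYPE_ARRAY,
	.key_size = sizeof(__u32),
	.value_size = sizeof(struct tcpbpf_globals),
	.max_entries = 4,
};

/* BTF-defined replacement; key/value sizes are now derived from BTF */
struct {
	__u32 type;
	__u32 max_entries;
	__u32 *key;
	struct tcpbpf_globals *value;
} global_map SEC(".maps") = {
	.type = BPF_MAP_TYPE_ARRAY,
	.max_entries = 4,
};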

[0] https://lore.kernel.org/bpf/CAEf4BzbfdG2ub7gCi0OYqBrUoChVHWsmOntWAkJt47=FE+km+A@mail.gmail.com/

v1->v2:
- more BTF sanity checks when parsing map definitions (Song);
- removed confusing usage of "attribute", switched to "field";
- split off elf_collect() refactor from btf loading refactor (Song);
- split selftests conversion into 3 patches (Stanislav):
  1. tests already relying on BTF;
  2. tests w/ custom types as key/value (so benefiting from BTF);
  3. all the remaining tests (integers as key/value, special maps w/o BTF support).
- smaller code improvements (Song);

rfc->v1:
- error out on unknown fields by default (Stanislav, Jakub, Lorenz);
====================

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Committed by Daniel Borkmann on 2019-06-18 00:11:14 +02:00
commit 32b88d3743
28 changed files with 1045 additions and 429 deletions


@@ -26,10 +26,11 @@
#include <memory.h>
#include <unistd.h>
#include <asm/unistd.h>
#include <errno.h>
#include <linux/bpf.h>
#include "bpf.h"
#include "libbpf.h"
#include <errno.h>
#include "libbpf_internal.h"
/*
* When building perf, unistd.h is overridden. __NR_bpf is
@@ -53,10 +54,6 @@
# endif
#endif
#ifndef min
#define min(x, y) ((x) < (y) ? (x) : (y))
#endif
static inline __u64 ptr_to_u64(const void *ptr)
{
return (__u64) (unsigned long) ptr;


@@ -6,10 +6,7 @@
#include <linux/err.h>
#include <linux/bpf.h>
#include "libbpf.h"
#ifndef min
#define min(x, y) ((x) < (y) ? (x) : (y))
#endif
#include "libbpf_internal.h"
struct bpf_prog_linfo {
void *raw_linfo;


@@ -16,9 +16,6 @@
#include "libbpf_internal.h"
#include "hashmap.h"
#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))
#define BTF_MAX_NR_TYPES 0x7fffffff
#define BTF_MAX_STR_OFFSET 0x7fffffff


@@ -17,6 +17,7 @@ extern "C" {
#define BTF_ELF_SEC ".BTF"
#define BTF_EXT_ELF_SEC ".BTF.ext"
#define MAPS_ELF_SEC ".maps"
struct btf;
struct btf_ext;


@@ -18,9 +18,6 @@
#include "libbpf.h"
#include "libbpf_internal.h"
#define min(x, y) ((x) < (y) ? (x) : (y))
#define max(x, y) ((x) < (y) ? (y) : (x))
static const char PREFIXES[] = "\t\t\t\t\t\t\t\t\t\t\t\t\t";
static const size_t PREFIX_CNT = sizeof(PREFIXES) - 1;

File diff suppressed because it is too large.


@@ -23,6 +23,13 @@
#define BTF_PARAM_ENC(name, type) (name), (type)
#define BTF_VAR_SECINFO_ENC(type, offset, size) (type), (offset), (size)
#ifndef min
# define min(x, y) ((x) < (y) ? (x) : (y))
#endif
#ifndef max
# define max(x, y) ((x) < (y) ? (y) : (x))
#endif
extern void libbpf_print(enum libbpf_print_level level,
const char *format, ...)
__attribute__((format(printf, 2, 3)));


@@ -57,17 +57,25 @@ struct frag_hdr {
__be32 identification;
};
struct bpf_map_def SEC("maps") jmp_table = {
struct {
__u32 type;
__u32 max_entries;
__u32 key_size;
__u32 value_size;
} jmp_table SEC(".maps") = {
.type = BPF_MAP_TYPE_PROG_ARRAY,
.max_entries = 8,
.key_size = sizeof(__u32),
.value_size = sizeof(__u32),
.max_entries = 8
};
struct bpf_map_def SEC("maps") last_dissection = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct bpf_flow_keys *value;
} last_dissection SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct bpf_flow_keys),
.max_entries = 1,
};


@@ -10,24 +10,22 @@
#define REFRESH_TIME_NS 100000000
#define NS_PER_SEC 1000000000
struct bpf_map_def SEC("maps") percpu_netcnt = {
struct {
__u32 type;
struct bpf_cgroup_storage_key *key;
struct percpu_net_cnt *value;
} percpu_netcnt SEC(".maps") = {
.type = BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
.key_size = sizeof(struct bpf_cgroup_storage_key),
.value_size = sizeof(struct percpu_net_cnt),
};
BPF_ANNOTATE_KV_PAIR(percpu_netcnt, struct bpf_cgroup_storage_key,
struct percpu_net_cnt);
struct bpf_map_def SEC("maps") netcnt = {
struct {
__u32 type;
struct bpf_cgroup_storage_key *key;
struct net_cnt *value;
} netcnt SEC(".maps") = {
.type = BPF_MAP_TYPE_CGROUP_STORAGE,
.key_size = sizeof(struct bpf_cgroup_storage_key),
.value_size = sizeof(struct net_cnt),
};
BPF_ANNOTATE_KV_PAIR(netcnt, struct bpf_cgroup_storage_key,
struct net_cnt);
SEC("cgroup/skb")
int bpf_nextcnt(struct __sk_buff *skb)
{


@@ -12,15 +12,16 @@ struct socket_cookie {
__u32 cookie_value;
};
struct bpf_map_def SEC("maps") socket_cookies = {
struct {
__u32 type;
__u32 map_flags;
int *key;
struct socket_cookie *value;
} socket_cookies SEC(".maps") = {
.type = BPF_MAP_TYPE_SK_STORAGE,
.key_size = sizeof(int),
.value_size = sizeof(struct socket_cookie),
.map_flags = BPF_F_NO_PREALLOC,
};
BPF_ANNOTATE_KV_PAIR(socket_cookies, int, struct socket_cookie);
SEC("cgroup/connect6")
int set_cookie(struct bpf_sock_addr *ctx)
{


@@ -0,0 +1,73 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018 Facebook */
#include <linux/bpf.h>
#include "bpf_helpers.h"
int _version SEC("version") = 1;
struct ipv_counts {
unsigned int v4;
unsigned int v6;
};
/* just to validate we can handle maps in multiple sections */
struct bpf_map_def SEC("maps") btf_map_legacy = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(int),
.value_size = sizeof(long long),
.max_entries = 4,
};
BPF_ANNOTATE_KV_PAIR(btf_map_legacy, int, struct ipv_counts);
struct {
int *key;
struct ipv_counts *value;
unsigned int type;
unsigned int max_entries;
} btf_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.max_entries = 4,
};
struct dummy_tracepoint_args {
unsigned long long pad;
struct sock *sock;
};
__attribute__((noinline))
static int test_long_fname_2(struct dummy_tracepoint_args *arg)
{
struct ipv_counts *counts;
int key = 0;
if (!arg->sock)
return 0;
counts = bpf_map_lookup_elem(&btf_map, &key);
if (!counts)
return 0;
counts->v6++;
/* just verify we can reference both maps */
counts = bpf_map_lookup_elem(&btf_map_legacy, &key);
if (!counts)
return 0;
return 0;
}
__attribute__((noinline))
static int test_long_fname_1(struct dummy_tracepoint_args *arg)
{
return test_long_fname_2(arg);
}
SEC("dummy_tracepoint")
int _dummy_tracepoint(struct dummy_tracepoint_args *arg)
{
return test_long_fname_1(arg);
}
char _license[] SEC("license") = "GPL";


@@ -15,17 +15,25 @@ struct stack_trace_t {
struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
};
struct bpf_map_def SEC("maps") perfmap = {
struct {
__u32 type;
__u32 max_entries;
__u32 key_size;
__u32 value_size;
} perfmap SEC(".maps") = {
.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
.max_entries = 2,
.key_size = sizeof(int),
.value_size = sizeof(__u32),
.max_entries = 2,
};
struct bpf_map_def SEC("maps") stackdata_map = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct stack_trace_t *value;
} stackdata_map SEC(".maps") = {
.type = BPF_MAP_TYPE_PERCPU_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct stack_trace_t),
.max_entries = 1,
};
@@ -47,10 +55,13 @@ struct bpf_map_def SEC("maps") stackdata_map = {
* issue and avoid complicated C programming massaging.
* This is an acceptable workaround since there is one entry here.
*/
struct bpf_map_def SEC("maps") rawdata_map = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
__u64 (*value)[2 * MAX_STACK_RAWTP];
} rawdata_map SEC(".maps") = {
.type = BPF_MAP_TYPE_PERCPU_ARRAY,
.key_size = sizeof(__u32),
.value_size = MAX_STACK_RAWTP * sizeof(__u64) * 2,
.max_entries = 1,
};


@@ -7,17 +7,23 @@
#include "bpf_helpers.h"
struct bpf_map_def SEC("maps") result_number = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
__u64 *value;
} result_number SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u64),
.max_entries = 11,
};
struct bpf_map_def SEC("maps") result_string = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
const char (*value)[32];
} result_string SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = 32,
.max_entries = 5,
};
@@ -27,10 +33,13 @@ struct foo {
__u64 c;
};
struct bpf_map_def SEC("maps") result_struct = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct foo *value;
} result_struct SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct foo),
.max_entries = 5,
};


@@ -169,38 +169,53 @@ struct eth_hdr {
unsigned short eth_proto;
};
struct bpf_map_def SEC("maps") vip_map = {
struct {
__u32 type;
__u32 max_entries;
struct vip *key;
struct vip_meta *value;
} vip_map SEC(".maps") = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(struct vip),
.value_size = sizeof(struct vip_meta),
.max_entries = MAX_VIPS,
};
struct bpf_map_def SEC("maps") ch_rings = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
__u32 *value;
} ch_rings SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u32),
.max_entries = CH_RINGS_SIZE,
};
struct bpf_map_def SEC("maps") reals = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct real_definition *value;
} reals SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct real_definition),
.max_entries = MAX_REALS,
};
struct bpf_map_def SEC("maps") stats = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct vip_stats *value;
} stats SEC(".maps") = {
.type = BPF_MAP_TYPE_PERCPU_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct vip_stats),
.max_entries = MAX_VIPS,
};
struct bpf_map_def SEC("maps") ctl_array = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct ctl_value *value;
} ctl_array SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct ctl_value),
.max_entries = CTL_MAP_SIZE,
};


@@ -165,38 +165,53 @@ struct eth_hdr {
unsigned short eth_proto;
};
struct bpf_map_def SEC("maps") vip_map = {
struct {
__u32 type;
__u32 max_entries;
struct vip *key;
struct vip_meta *value;
} vip_map SEC(".maps") = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(struct vip),
.value_size = sizeof(struct vip_meta),
.max_entries = MAX_VIPS,
};
struct bpf_map_def SEC("maps") ch_rings = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
__u32 *value;
} ch_rings SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u32),
.max_entries = CH_RINGS_SIZE,
};
struct bpf_map_def SEC("maps") reals = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct real_definition *value;
} reals SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct real_definition),
.max_entries = MAX_REALS,
};
struct bpf_map_def SEC("maps") stats = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct vip_stats *value;
} stats SEC(".maps") = {
.type = BPF_MAP_TYPE_PERCPU_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct vip_stats),
.max_entries = MAX_VIPS,
};
struct bpf_map_def SEC("maps") ctl_array = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct ctl_value *value;
} ctl_array SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct ctl_value),
.max_entries = CTL_MAP_SIZE,
};


@@ -11,29 +11,31 @@ struct hmap_elem {
int var[VAR_NUM];
};
struct bpf_map_def SEC("maps") hash_map = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct hmap_elem *value;
} hash_map SEC(".maps") = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(int),
.value_size = sizeof(struct hmap_elem),
.max_entries = 1,
};
BPF_ANNOTATE_KV_PAIR(hash_map, int, struct hmap_elem);
struct array_elem {
struct bpf_spin_lock lock;
int var[VAR_NUM];
};
struct bpf_map_def SEC("maps") array_map = {
struct {
__u32 type;
__u32 max_entries;
int *key;
struct array_elem *value;
} array_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(int),
.value_size = sizeof(struct array_elem),
.max_entries = 1,
};
BPF_ANNOTATE_KV_PAIR(array_map, int, struct array_elem);
SEC("map_lock_demo")
int bpf_map_lock_test(struct __sk_buff *skb)
{


@@ -21,38 +21,55 @@ int _version SEC("version") = 1;
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif
struct bpf_map_def SEC("maps") outer_map = {
struct {
__u32 type;
__u32 max_entries;
__u32 key_size;
__u32 value_size;
} outer_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY_OF_MAPS,
.max_entries = 1,
.key_size = sizeof(__u32),
.value_size = sizeof(__u32),
.max_entries = 1,
};
struct bpf_map_def SEC("maps") result_map = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
__u32 *value;
} result_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u32),
.max_entries = NR_RESULTS,
};
struct bpf_map_def SEC("maps") tmp_index_ovr_map = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
int *value;
} tmp_index_ovr_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(int),
.max_entries = 1,
};
struct bpf_map_def SEC("maps") linum_map = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
__u32 *value;
} linum_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u32),
.max_entries = 1,
};
struct bpf_map_def SEC("maps") data_check_map = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct data_check *value;
} data_check_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct data_check),
.max_entries = 1,
};


@@ -4,24 +4,26 @@
#include <linux/version.h>
#include "bpf_helpers.h"
struct bpf_map_def SEC("maps") info_map = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
__u64 *value;
} info_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u64),
.max_entries = 1,
};
BPF_ANNOTATE_KV_PAIR(info_map, __u32, __u64);
struct bpf_map_def SEC("maps") status_map = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
__u64 *value;
} status_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u64),
.max_entries = 1,
};
BPF_ANNOTATE_KV_PAIR(status_map, __u32, __u64);
SEC("send_signal_demo")
int bpf_send_signal_test(void *ctx)
{


@@ -27,31 +27,43 @@ enum bpf_linum_array_idx {
__NR_BPF_LINUM_ARRAY_IDX,
};
struct bpf_map_def SEC("maps") addr_map = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct sockaddr_in6 *value;
} addr_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct sockaddr_in6),
.max_entries = __NR_BPF_ADDR_ARRAY_IDX,
};
struct bpf_map_def SEC("maps") sock_result_map = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct bpf_sock *value;
} sock_result_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct bpf_sock),
.max_entries = __NR_BPF_RESULT_ARRAY_IDX,
};
struct bpf_map_def SEC("maps") tcp_sock_result_map = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct bpf_tcp_sock *value;
} tcp_sock_result_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct bpf_tcp_sock),
.max_entries = __NR_BPF_RESULT_ARRAY_IDX,
};
struct bpf_map_def SEC("maps") linum_map = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
__u32 *value;
} linum_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u32),
.max_entries = __NR_BPF_LINUM_ARRAY_IDX,
};
@@ -60,26 +72,26 @@ struct bpf_spinlock_cnt {
__u32 cnt;
};
struct bpf_map_def SEC("maps") sk_pkt_out_cnt = {
struct {
__u32 type;
__u32 map_flags;
int *key;
struct bpf_spinlock_cnt *value;
} sk_pkt_out_cnt SEC(".maps") = {
.type = BPF_MAP_TYPE_SK_STORAGE,
.key_size = sizeof(int),
.value_size = sizeof(struct bpf_spinlock_cnt),
.max_entries = 0,
.map_flags = BPF_F_NO_PREALLOC,
};
BPF_ANNOTATE_KV_PAIR(sk_pkt_out_cnt, int, struct bpf_spinlock_cnt);
struct bpf_map_def SEC("maps") sk_pkt_out_cnt10 = {
struct {
__u32 type;
__u32 map_flags;
int *key;
struct bpf_spinlock_cnt *value;
} sk_pkt_out_cnt10 SEC(".maps") = {
.type = BPF_MAP_TYPE_SK_STORAGE,
.key_size = sizeof(int),
.value_size = sizeof(struct bpf_spinlock_cnt),
.max_entries = 0,
.map_flags = BPF_F_NO_PREALLOC,
};
BPF_ANNOTATE_KV_PAIR(sk_pkt_out_cnt10, int, struct bpf_spinlock_cnt);
static bool is_loopback6(__u32 *a6)
{
return !a6[0] && !a6[1] && !a6[2] && a6[3] == bpf_htonl(1);


@@ -10,30 +10,29 @@ struct hmap_elem {
int test_padding;
};
struct bpf_map_def SEC("maps") hmap = {
struct {
__u32 type;
__u32 max_entries;
int *key;
struct hmap_elem *value;
} hmap SEC(".maps") = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(int),
.value_size = sizeof(struct hmap_elem),
.max_entries = 1,
};
BPF_ANNOTATE_KV_PAIR(hmap, int, struct hmap_elem);
struct cls_elem {
struct bpf_spin_lock lock;
volatile int cnt;
};
struct bpf_map_def SEC("maps") cls_map = {
struct {
__u32 type;
struct bpf_cgroup_storage_key *key;
struct cls_elem *value;
} cls_map SEC(".maps") = {
.type = BPF_MAP_TYPE_CGROUP_STORAGE,
.key_size = sizeof(struct bpf_cgroup_storage_key),
.value_size = sizeof(struct cls_elem),
};
BPF_ANNOTATE_KV_PAIR(cls_map, struct bpf_cgroup_storage_key,
struct cls_elem);
struct bpf_vqueue {
struct bpf_spin_lock lock;
/* 4 byte hole */
@@ -42,14 +41,16 @@ struct bpf_vqueue {
unsigned int rate;
};
struct bpf_map_def SEC("maps") vqueue = {
struct {
__u32 type;
__u32 max_entries;
int *key;
struct bpf_vqueue *value;
} vqueue SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(int),
.value_size = sizeof(struct bpf_vqueue),
.max_entries = 1,
};
BPF_ANNOTATE_KV_PAIR(vqueue, int, struct bpf_vqueue);
#define CREDIT_PER_NS(delta, rate) (((delta) * rate) >> 20)
SEC("spin_lock_demo")


@@ -8,34 +8,50 @@
#define PERF_MAX_STACK_DEPTH 127
#endif
struct bpf_map_def SEC("maps") control_map = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
__u32 *value;
} control_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u32),
.max_entries = 1,
};
struct bpf_map_def SEC("maps") stackid_hmap = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
__u32 *value;
} stackid_hmap SEC(".maps") = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(__u32),
.value_size = sizeof(__u32),
.max_entries = 16384,
};
struct bpf_map_def SEC("maps") stackmap = {
typedef struct bpf_stack_build_id stack_trace_t[PERF_MAX_STACK_DEPTH];
struct {
__u32 type;
__u32 max_entries;
__u32 map_flags;
__u32 key_size;
__u32 value_size;
} stackmap SEC(".maps") = {
.type = BPF_MAP_TYPE_STACK_TRACE,
.key_size = sizeof(__u32),
.value_size = sizeof(struct bpf_stack_build_id)
* PERF_MAX_STACK_DEPTH,
.max_entries = 128,
.map_flags = BPF_F_STACK_BUILD_ID,
.key_size = sizeof(__u32),
.value_size = sizeof(stack_trace_t),
};
struct bpf_map_def SEC("maps") stack_amap = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
/* there seems to be a bug in kernel not handling typedef properly */
struct bpf_stack_build_id (*value)[PERF_MAX_STACK_DEPTH];
} stack_amap SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct bpf_stack_build_id)
* PERF_MAX_STACK_DEPTH,
.max_entries = 128,
};


@@ -8,31 +8,47 @@
#define PERF_MAX_STACK_DEPTH 127
#endif
struct bpf_map_def SEC("maps") control_map = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
__u32 *value;
} control_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u32),
.max_entries = 1,
};
struct bpf_map_def SEC("maps") stackid_hmap = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
__u32 *value;
} stackid_hmap SEC(".maps") = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(__u32),
.value_size = sizeof(__u32),
.max_entries = 16384,
};
struct bpf_map_def SEC("maps") stackmap = {
typedef __u64 stack_trace_t[PERF_MAX_STACK_DEPTH];
struct {
__u32 type;
__u32 max_entries;
__u32 key_size;
__u32 value_size;
} stackmap SEC(".maps") = {
.type = BPF_MAP_TYPE_STACK_TRACE,
.key_size = sizeof(__u32),
.value_size = sizeof(__u64) * PERF_MAX_STACK_DEPTH,
.max_entries = 16384,
.key_size = sizeof(__u32),
.value_size = sizeof(stack_trace_t),
};
struct bpf_map_def SEC("maps") stack_amap = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
__u64 (*value)[PERF_MAX_STACK_DEPTH];
} stack_amap SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u64) * PERF_MAX_STACK_DEPTH,
.max_entries = 16384,
};


@@ -148,10 +148,13 @@ struct tcp_estats_basic_event {
struct tcp_estats_conn_id conn_id;
};
struct bpf_map_def SEC("maps") ev_record_map = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct tcp_estats_basic_event *value;
} ev_record_map SEC(".maps") = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(__u32),
.value_size = sizeof(struct tcp_estats_basic_event),
.max_entries = 1024,
};


@@ -14,17 +14,23 @@
#include "bpf_endian.h"
#include "test_tcpbpf.h"
struct bpf_map_def SEC("maps") global_map = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct tcpbpf_globals *value;
} global_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct tcpbpf_globals),
.max_entries = 4,
};
struct bpf_map_def SEC("maps") sockopt_results = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
int *value;
} sockopt_results SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(int),
.max_entries = 2,
};


@@ -14,18 +14,26 @@
#include "bpf_endian.h"
#include "test_tcpnotify.h"
struct bpf_map_def SEC("maps") global_map = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct tcpnotify_globals *value;
} global_map SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct tcpnotify_globals),
.max_entries = 4,
};
struct bpf_map_def SEC("maps") perf_event_map = {
struct {
__u32 type;
__u32 max_entries;
__u32 key_size;
__u32 value_size;
} perf_event_map SEC(".maps") = {
.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
.max_entries = 2,
.key_size = sizeof(int),
.value_size = sizeof(__u32),
.max_entries = 2,
};
int _version SEC("version") = 1;


@@ -22,17 +22,23 @@
int _version SEC("version") = 1;
struct bpf_map_def SEC("maps") rxcnt = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
__u64 *value;
} rxcnt SEC(".maps") = {
.type = BPF_MAP_TYPE_PERCPU_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u64),
.max_entries = 256,
};
struct bpf_map_def SEC("maps") vip2tnl = {
struct {
__u32 type;
__u32 max_entries;
struct vip *key;
struct iptnl_info *value;
} vip2tnl SEC(".maps") = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(struct vip),
.value_size = sizeof(struct iptnl_info),
.max_entries = MAX_IPTNL_ENTRIES,
};


@@ -163,52 +163,66 @@ struct lb_stats {
__u64 v1;
};
struct bpf_map_def __attribute__ ((section("maps"), used)) vip_map = {
struct {
__u32 type;
__u32 max_entries;
struct vip_definition *key;
struct vip_meta *value;
} vip_map SEC(".maps") = {
.type = BPF_MAP_TYPE_HASH,
.key_size = sizeof(struct vip_definition),
.value_size = sizeof(struct vip_meta),
.max_entries = 512,
.map_flags = 0,
};
struct bpf_map_def __attribute__ ((section("maps"), used)) lru_cache = {
struct {
__u32 type;
__u32 max_entries;
__u32 map_flags;
struct flow_key *key;
struct real_pos_lru *value;
} lru_cache SEC(".maps") = {
.type = BPF_MAP_TYPE_LRU_HASH,
.key_size = sizeof(struct flow_key),
.value_size = sizeof(struct real_pos_lru),
.max_entries = 300,
.map_flags = 1U << 1,
};
struct bpf_map_def __attribute__ ((section("maps"), used)) ch_rings = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
__u32 *value;
} ch_rings SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(__u32),
.max_entries = 12 * 655,
.map_flags = 0,
};
struct bpf_map_def __attribute__ ((section("maps"), used)) reals = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct real_definition *value;
} reals SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct real_definition),
.max_entries = 40,
.map_flags = 0,
};
struct bpf_map_def __attribute__ ((section("maps"), used)) stats = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct lb_stats *value;
} stats SEC(".maps") = {
.type = BPF_MAP_TYPE_PERCPU_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct lb_stats),
.max_entries = 515,
.map_flags = 0,
};
struct bpf_map_def __attribute__ ((section("maps"), used)) ctl_array = {
struct {
__u32 type;
__u32 max_entries;
__u32 *key;
struct ctl_value *value;
} ctl_array SEC(".maps") = {
.type = BPF_MAP_TYPE_ARRAY,
.key_size = sizeof(__u32),
.value_size = sizeof(struct ctl_value),
.max_entries = 16,
.map_flags = 0,
};
struct eth_hdr {


@@ -4016,13 +4016,9 @@ struct btf_file_test {
};
static struct btf_file_test file_tests[] = {
{
.file = "test_btf_haskv.o",
},
{
.file = "test_btf_nokv.o",
.btf_kv_notfound = true,
},
{ .file = "test_btf_haskv.o", },
{ .file = "test_btf_newkv.o", },
{ .file = "test_btf_nokv.o", .btf_kv_notfound = true, },
};
static int do_test_file(unsigned int test_num)