linux_dsm_epyc7002/tools/lib/bpf/libbpf.map
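/*
 * Symbol version script for libbpf.so. Each LIBBPF_0.0.x node lists the
 * global symbols that release added to the ABI; "local: *;" in the base
 * node hides every other symbol. The version name after a node's closing
 * brace is the node it inherits from, chaining back to LIBBPF_0.0.1.
 */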

LIBBPF_0.0.1 {
global:
bpf_btf_get_fd_by_id;
bpf_create_map;
bpf_create_map_in_map;
bpf_create_map_in_map_node;
bpf_create_map_name;
bpf_create_map_node;
bpf_create_map_xattr;
bpf_load_btf;
bpf_load_program;
bpf_load_program_xattr;
bpf_map__btf_key_type_id;
bpf_map__btf_value_type_id;
bpf_map__def;
bpf_map__fd;
bpf_map__is_offload_neutral;
bpf_map__name;
bpf_map__next;
bpf_map__pin;
bpf_map__prev;
bpf_map__priv;
bpf_map__reuse_fd;
bpf_map__set_ifindex;
bpf_map__set_inner_map_fd;
bpf_map__set_priv;
bpf_map__unpin;
bpf_map_delete_elem;
bpf_map_get_fd_by_id;
bpf_map_get_next_id;
bpf_map_get_next_key;
bpf_map_lookup_and_delete_elem;
bpf_map_lookup_elem;
bpf_map_update_elem;
bpf_obj_get;
bpf_obj_get_info_by_fd;
bpf_obj_pin;
bpf_object__btf_fd;
bpf_object__close;
bpf_object__find_map_by_name;
bpf_object__find_map_by_offset;
bpf_object__find_program_by_title;
bpf_object__kversion;
bpf_object__load;
bpf_object__name;
bpf_object__next;
bpf_object__open;
bpf_object__open_buffer;
bpf_object__open_xattr;
bpf_object__pin;
bpf_object__pin_maps;
bpf_object__pin_programs;
bpf_object__priv;
bpf_object__set_priv;
bpf_object__unload;
bpf_object__unpin_maps;
bpf_object__unpin_programs;
bpf_perf_event_read_simple;
bpf_prog_attach;
bpf_prog_detach;
bpf_prog_detach2;
bpf_prog_get_fd_by_id;
bpf_prog_get_next_id;
bpf_prog_load;
bpf_prog_load_xattr;
bpf_prog_query;
bpf_prog_test_run;
bpf_prog_test_run_xattr;
bpf_program__fd;
bpf_program__is_kprobe;
bpf_program__is_perf_event;
bpf_program__is_raw_tracepoint;
bpf_program__is_sched_act;
bpf_program__is_sched_cls;
bpf_program__is_socket_filter;
bpf_program__is_tracepoint;
bpf_program__is_xdp;
bpf_program__load;
bpf_program__next;
bpf_program__nth_fd;
bpf_program__pin;
bpf_program__pin_instance;
bpf_program__prev;
bpf_program__priv;
bpf_program__set_expected_attach_type;
bpf_program__set_ifindex;
bpf_program__set_kprobe;
bpf_program__set_perf_event;
bpf_program__set_prep;
bpf_program__set_priv;
bpf_program__set_raw_tracepoint;
bpf_program__set_sched_act;
bpf_program__set_sched_cls;
bpf_program__set_socket_filter;
bpf_program__set_tracepoint;
bpf_program__set_type;
bpf_program__set_xdp;
bpf_program__title;
bpf_program__unload;
bpf_program__unpin;
bpf_program__unpin_instance;
bpf_prog_linfo__free;
bpf_prog_linfo__new;
bpf_prog_linfo__lfind_addr_func;
bpf_prog_linfo__lfind;
bpf_raw_tracepoint_open;
bpf_set_link_xdp_fd;
bpf_task_fd_query;
bpf_verify_program;
btf__fd;
btf__find_by_name;
btf__free;
btf__get_from_id;
btf__name_by_offset;
btf__new;
btf__resolve_size;
btf__resolve_type;
btf__type_by_id;
libbpf_attach_type_by_name;
libbpf_get_error;
libbpf_prog_type_by_name;
libbpf_set_print;
libbpf_strerror;
local:
*;
};
LIBBPF_0.0.2 {
global:
bpf_probe_helper;
bpf_probe_map_type;
bpf_probe_prog_type;
bpf_map__resize;
bpf_map_lookup_elem_flags;
bpf_object__btf;
bpf_object__find_map_fd_by_name;
bpf_get_link_xdp_id;
btf__dedup;
btf__get_map_kv_tids;
btf__get_nr_types;
btf__get_raw_data;
btf__load;
btf_ext__free;
btf_ext__func_info_rec_size;
btf_ext__get_raw_data;
btf_ext__line_info_rec_size;
btf_ext__new;
btf_ext__reloc_func_info;
btf_ext__reloc_line_info;
xsk_umem__create;
xsk_socket__create;
xsk_umem__delete;
xsk_socket__delete;
xsk_umem__fd;
xsk_socket__fd;
bpf_program__get_prog_info_linear;
bpf_program__bpil_addr_to_offs;
bpf_program__bpil_offs_to_addr;
} LIBBPF_0.0.1;
LIBBPF_0.0.3 {
global:
bpf_map__is_internal;
bpf_map_freeze;
btf__finalize_data;
} LIBBPF_0.0.2;
LIBBPF_0.0.4 {
global:
bpf_link__destroy;
bpf_object__load_xattr;
bpf_program__attach_kprobe;
bpf_program__attach_perf_event;
bpf_program__attach_raw_tracepoint;
bpf_program__attach_tracepoint;
bpf_program__attach_uprobe;
btf_dump__dump_type;
btf_dump__free;
btf_dump__new;
btf__parse_elf;
libbpf_num_possible_cpus;
perf_buffer__free;
perf_buffer__new;
perf_buffer__new_raw;
perf_buffer__poll;
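/*
 * xsk_umem__create also appears in LIBBPF_0.0.2 above: listing it here
 * binds a second, newer version of the symbol, presumably so the changed
 * interface can become the default while the 0.0.2 binding is kept for
 * ABI compatibility.
 */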
xsk_umem__create;
} LIBBPF_0.0.3;
LIBBPF_0.0.5 {
global:
bpf_btf_get_next_id;
} LIBBPF_0.0.4;
LIBBPF_0.0.6 {
global:
bpf_get_link_xdp_info;
bpf_map__get_pin_path;
bpf_map__is_pinned;
bpf_map__set_pin_path;
bpf_object__open_file;
bpf_object__open_mem;
bpf_program__attach_trace;
bpf_program__get_expected_attach_type;
bpf_program__get_type;
bpf_program__is_tracing;
bpf_program__set_tracing;
bpf_program__size;
btf__find_by_name_kind;
libbpf_find_vmlinux_btf_id;
} LIBBPF_0.0.5;
LIBBPF_0.0.7 {
global:
btf_dump__emit_type_decl;
bpf_link__disconnect;
bpf_map__attach_struct_ops;
bpf_object__find_program_by_name;
bpf_object__attach_skeleton;
bpf_object__destroy_skeleton;
bpf_object__detach_skeleton;
bpf_object__load_skeleton;
bpf_object__open_skeleton;
bpf_probe_large_insn_limit;
bpf_prog_attach_xattr;
bpf_program__attach;
bpf_program__name;
bpf_program__is_struct_ops;
bpf_program__set_struct_ops;
btf__align_of;
} LIBBPF_0.0.6;