/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#ifndef __BPF_TOOL_H
#define __BPF_TOOL_H

/* BFD and kernel.h both define GCC_VERSION, differently */
#undef GCC_VERSION
#include <stdbool.h>
#include <stdio.h>
#include <linux/bpf.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/hashtable.h>
#include <tools/libc_compat.h>

#include <bpf/libbpf.h>

#include "json_writer.h"

/* Make sure we do not use kernel-only integer typedefs */
#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64

#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr))

#define NEXT_ARG()  ({ argc--; argv++; if (argc < 0) usage(); })
#define NEXT_ARGP() ({ (*argc)--; (*argv)++; if (*argc < 0) usage(); })
#define BAD_ARG()   ({ p_err("what is '%s'?", *argv); -1; })
#define GET_ARG()   ({ argc--; *argv++; })
#define REQ_ARGS(cnt) \
        ({ \
                int _cnt = (cnt); \
                bool _res; \
 \
                if (argc < _cnt) { \
                        p_err("'%s' needs at least %d arguments, %d found", \
                              argv[-1], _cnt, argc); \
                        _res = false; \
                } else { \
                        _res = true; \
                } \
                _res; \
        })
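
/*
 * Illustrative sketch of how a subcommand handler typically walks its
 * arguments with the macros above; the handler name and the "id" keyword
 * below are hypothetical, not taken from this header.
 *
 *      static int do_example(int argc, char **argv)
 *      {
 *              if (!REQ_ARGS(2))
 *                      return -1;
 *              if (is_prefix(*argv, "id"))
 *                      NEXT_ARG();
 *              else
 *                      return BAD_ARG();
 *              return 0;
 *      }
 */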

#define ERR_MAX_LEN 1024

#define BPF_TAG_FMT "%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx"

#define HELP_SPEC_PROGRAM \
        "PROG := { id PROG_ID | pinned FILE | tag PROG_TAG | name PROG_NAME }"
#define HELP_SPEC_OPTIONS \
        "OPTIONS := { {-j|--json} [{-p|--pretty}] | {-f|--bpffs} |\n" \
        "\t {-m|--mapcompat} | {-n|--nomount} }"
#define HELP_SPEC_MAP \
        "MAP := { id MAP_ID | pinned FILE | name MAP_NAME }"
#define HELP_SPEC_LINK \
        "LINK := { id LINK_ID | pinned FILE }"

extern const char * const prog_type_name[];
extern const size_t prog_type_name_size;

extern const char * const attach_type_name[__MAX_BPF_ATTACH_TYPE];

extern const char * const map_type_name[];
extern const size_t map_type_name_size;

/* keep in sync with the definition in skeleton/pid_iter.bpf.c */
enum bpf_obj_type {
        BPF_OBJ_UNKNOWN,
        BPF_OBJ_PROG,
        BPF_OBJ_MAP,
        BPF_OBJ_LINK,
        BPF_OBJ_BTF,
};

extern const char *bin_name;

extern json_writer_t *json_wtr;
extern bool json_output;
extern bool show_pinned;
extern bool show_pids;
extern bool block_mount;
extern bool verifier_logs;
extern bool relaxed_maps;
extern struct pinned_obj_table prog_table;
extern struct pinned_obj_table map_table;
extern struct pinned_obj_table link_table;
extern struct obj_refs_table refs_table;

void __printf(1, 2) p_err(const char *fmt, ...);
void __printf(1, 2) p_info(const char *fmt, ...);

bool is_prefix(const char *pfx, const char *str);
int detect_common_prefix(const char *arg, ...);
void fprint_hex(FILE *f, void *arg, unsigned int n, const char *sep);
void usage(void) __noreturn;
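
/*
 * Illustrative sketch (an assumption about the helpers above): p_err() and
 * p_info() are used instead of raw fprintf() so diagnostics also come out
 * correctly when JSON output is selected, e.g.:
 *
 *      fd = open(path, O_RDONLY);
 *      if (fd < 0) {
 *              p_err("can't open %s: %s", path, strerror(errno));
 *              return -1;
 *      }
 */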

void set_max_rlimit(void);

int mount_tracefs(const char *target);

struct pinned_obj_table {
        DECLARE_HASHTABLE(table, 16);
};

struct pinned_obj {
        __u32 id;
        char *path;
        struct hlist_node hash;
};

struct obj_refs_table {
        DECLARE_HASHTABLE(table, 16);
};

struct obj_ref {
        int pid;
        char comm[16];
};

struct obj_refs {
        struct hlist_node node;
        __u32 id;
        int ref_cnt;
        struct obj_ref *refs;
};

struct btf;
struct bpf_line_info;

int build_pinned_obj_table(struct pinned_obj_table *table,
                           enum bpf_obj_type type);
void delete_pinned_obj_table(struct pinned_obj_table *tab);
__weak int build_obj_refs_table(struct obj_refs_table *table,
                                enum bpf_obj_type type);
__weak void delete_obj_refs_table(struct obj_refs_table *table);
__weak void emit_obj_refs_json(struct obj_refs_table *table, __u32 id,
                               json_writer_t *json_wtr);
__weak void emit_obj_refs_plain(struct obj_refs_table *table, __u32 id,
                                const char *prefix);
void print_dev_plain(__u32 ifindex, __u64 ns_dev, __u64 ns_inode);
void print_dev_json(__u32 ifindex, __u64 ns_dev, __u64 ns_inode);
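
/*
 * Illustrative sketch (an assumption about how a listing command might use
 * the pinned-object table above); "info" stands for a bpf_prog_info obtained
 * elsewhere and is a placeholder.
 *
 *      struct pinned_obj *obj;
 *
 *      if (show_pinned)
 *              build_pinned_obj_table(&prog_table, BPF_OBJ_PROG);
 *
 *      hash_for_each_possible(prog_table.table, obj, hash, info.id) {
 *              if (obj->id == info.id)
 *                      printf("\tpinned %s\n", obj->path);
 *      }
 *
 *      delete_pinned_obj_table(&prog_table);
 */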

struct cmd {
        const char *cmd;
        int (*func)(int argc, char **argv);
};

int cmd_select(const struct cmd *cmds, int argc, char **argv,
               int (*help)(int argc, char **argv));
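
/*
 * Illustrative sketch (hypothetical command): a top-level command defines a
 * table of struct cmd entries and lets cmd_select() dispatch on the next
 * keyword, falling back to the help callback.
 *
 *      static const struct cmd example_cmds[] = {
 *              { "show",       do_show },
 *              { "help",       do_help },
 *              { 0 }
 *      };
 *
 *      static int do_example(int argc, char **argv)
 *      {
 *              return cmd_select(example_cmds, argc, argv, do_help);
 *      }
 */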

int get_fd_type(int fd);
const char *get_fd_type_name(enum bpf_obj_type type);
char *get_fdinfo(int fd, const char *key);
int open_obj_pinned(const char *path, bool quiet);
int open_obj_pinned_any(const char *path, enum bpf_obj_type exp_type);
int mount_bpffs_for_pin(const char *name);
int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(int *, char ***));
int do_pin_fd(int fd, const char *name);

/* commands available in bootstrap mode */
int do_gen(int argc, char **argv);
int do_btf(int argc, char **argv);

/* non-bootstrap only commands */
int do_prog(int argc, char **arg) __weak;
int do_map(int argc, char **arg) __weak;
int do_link(int argc, char **arg) __weak;
int do_event_pipe(int argc, char **argv) __weak;
int do_cgroup(int argc, char **arg) __weak;
int do_perf(int argc, char **arg) __weak;
int do_net(int argc, char **arg) __weak;
int do_tracelog(int argc, char **arg) __weak;
int do_feature(int argc, char **argv) __weak;
int do_struct_ops(int argc, char **argv) __weak;
int do_iter(int argc, char **argv) __weak;

int parse_u32_arg(int *argc, char ***argv, __u32 *val, const char *what);
int prog_parse_fd(int *argc, char ***argv);
int prog_parse_fds(int *argc, char ***argv, int **fds);
int map_parse_fd(int *argc, char ***argv);
int map_parse_fds(int *argc, char ***argv, int **fds);
int map_parse_fd_and_info(int *argc, char ***argv, void *info, __u32 *info_len);
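
/*
 * Illustrative sketch (an assumption about the parse helpers above): they
 * consume an "id ID | pinned FILE | ..." reference from the command line and
 * return an open file descriptor, so a handler does roughly:
 *
 *      struct bpf_map_info info = {};
 *      __u32 len = sizeof(info);
 *      int fd;
 *
 *      fd = map_parse_fd_and_info(&argc, &argv, &info, &len);
 *      if (fd < 0)
 *              return -1;
 *      ... use fd and info, then close(fd) ...
 */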

struct bpf_prog_linfo;
#ifdef HAVE_LIBBFD_SUPPORT
void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
                       const char *arch, const char *disassembler_options,
                       const struct btf *btf,
                       const struct bpf_prog_linfo *prog_linfo,
                       __u64 func_ksym, unsigned int func_idx,
                       bool linum);
int disasm_init(void);
#else
static inline
void disasm_print_insn(unsigned char *image, ssize_t len, int opcodes,
                       const char *arch, const char *disassembler_options,
                       const struct btf *btf,
                       const struct bpf_prog_linfo *prog_linfo,
                       __u64 func_ksym, unsigned int func_idx,
                       bool linum)
{
}
static inline int disasm_init(void)
{
        p_err("No libbfd support");
        return -1;
}
#endif
void print_data_json(uint8_t *data, size_t len);
void print_hex_data_json(uint8_t *data, size_t len);

unsigned int get_page_size(void);
unsigned int get_possible_cpus(void);
const char *
ifindex_to_bfd_params(__u32 ifindex, __u64 ns_dev, __u64 ns_ino,
                      const char **opt);

struct btf_dumper {
        const struct btf *btf;
        json_writer_t *jw;
        bool is_plain_text;
        bool prog_id_as_func_ptr;
};

/* btf_dumper_type - print data along with type information
 * @d: an instance containing context for dumping types
 * @type_id: index in btf->types array. This points to the type to be dumped
 * @data: pointer to the actual data, i.e. the values to be printed
 *
 * Returns zero on success and negative error code otherwise
 */
int btf_dumper_type(const struct btf_dumper *d, __u32 type_id,
                    const void *data);
void btf_dumper_type_only(const struct btf *btf, __u32 func_type_id,
                          char *func_only, int size);
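
/*
 * Illustrative sketch (an assumption based on the fields above): dumping a
 * map value with its BTF type through the JSON writer. "value_type_id" and
 * "value" are placeholders for data obtained elsewhere.
 *
 *      struct btf_dumper d = {
 *              .btf = btf,
 *              .jw = json_wtr,
 *              .is_plain_text = false,
 *      };
 *
 *      err = btf_dumper_type(&d, value_type_id, value);
 */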
|
tools/bpf: bpftool: add net support
Add "bpftool net" support. Networking devices are enumerated
to dump device index/name associated with xdp progs.
For each networking device, tc classes and qdiscs are enumerated
in order to check their bpf filters.
In addition, root handle and clsact ingress/egress are also checked for
bpf filters. Not all filter information is printed out. Only ifindex,
kind, filter name, prog_id and tag are printed out, which are good
enough to show attachment information. If the filter action
is a bpf action, its bpf program id, bpf name and tag will be
printed out as well.
For example,
$ ./bpftool net
xdp [
ifindex 2 devname eth0 prog_id 198
]
tc_filters [
ifindex 2 kind qdisc_htb name prefix_matcher.o:[cls_prefix_matcher_htb]
prog_id 111727 tag d08fe3b4319bc2fd act []
ifindex 2 kind qdisc_clsact_ingress name fbflow_icmp
prog_id 130246 tag 3f265c7f26db62c9 act []
ifindex 2 kind qdisc_clsact_egress name prefix_matcher.o:[cls_prefix_matcher_clsact]
prog_id 111726 tag 99a197826974c876
ifindex 2 kind qdisc_clsact_egress name cls_fg_dscp
prog_id 108619 tag dc4630674fd72dcc act []
ifindex 2 kind qdisc_clsact_egress name fbflow_egress
prog_id 130245 tag 72d2d830d6888d2c
]
$ ./bpftool -jp net
[{
"xdp": [{
"ifindex": 2,
"devname": "eth0",
"prog_id": 198
}
],
"tc_filters": [{
"ifindex": 2,
"kind": "qdisc_htb",
"name": "prefix_matcher.o:[cls_prefix_matcher_htb]",
"prog_id": 111727,
"tag": "d08fe3b4319bc2fd",
"act": []
},{
"ifindex": 2,
"kind": "qdisc_clsact_ingress",
"name": "fbflow_icmp",
"prog_id": 130246,
"tag": "3f265c7f26db62c9",
"act": []
},{
"ifindex": 2,
"kind": "qdisc_clsact_egress",
"name": "prefix_matcher.o:[cls_prefix_matcher_clsact]",
"prog_id": 111726,
"tag": "99a197826974c876"
},{
"ifindex": 2,
"kind": "qdisc_clsact_egress",
"name": "cls_fg_dscp",
"prog_id": 108619,
"tag": "dc4630674fd72dcc",
"act": []
},{
"ifindex": 2,
"kind": "qdisc_clsact_egress",
"name": "fbflow_egress",
"prog_id": 130245,
"tag": "72d2d830d6888d2c"
}
]
}
]
Signed-off-by: Yonghong Song <yhs@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
2018-09-06 06:58:06 +07:00
|
|
|
|
bpf: libbpf: bpftool: Print bpf_line_info during prog dump
This patch adds print bpf_line_info function in 'prog dump jitted'
and 'prog dump xlated':
[root@arch-fb-vm1 bpf]# ~/devshare/fb-kernel/linux/tools/bpf/bpftool/bpftool prog dump jited pinned /sys/fs/bpf/test_btf_haskv
[...]
int test_long_fname_2(struct dummy_tracepoint_args * arg):
bpf_prog_44a040bf25481309_test_long_fname_2:
; static int test_long_fname_2(struct dummy_tracepoint_args *arg)
0: push %rbp
1: mov %rsp,%rbp
4: sub $0x30,%rsp
b: sub $0x28,%rbp
f: mov %rbx,0x0(%rbp)
13: mov %r13,0x8(%rbp)
17: mov %r14,0x10(%rbp)
1b: mov %r15,0x18(%rbp)
1f: xor %eax,%eax
21: mov %rax,0x20(%rbp)
25: xor %esi,%esi
; int key = 0;
27: mov %esi,-0x4(%rbp)
; if (!arg->sock)
2a: mov 0x8(%rdi),%rdi
; if (!arg->sock)
2e: cmp $0x0,%rdi
32: je 0x0000000000000070
34: mov %rbp,%rsi
; counts = bpf_map_lookup_elem(&btf_map, &key);
37: add $0xfffffffffffffffc,%rsi
3b: movabs $0xffff8881139d7480,%rdi
45: add $0x110,%rdi
4c: mov 0x0(%rsi),%eax
4f: cmp $0x4,%rax
53: jae 0x000000000000005e
55: shl $0x3,%rax
59: add %rdi,%rax
5c: jmp 0x0000000000000060
5e: xor %eax,%eax
; if (!counts)
60: cmp $0x0,%rax
64: je 0x0000000000000070
; counts->v6++;
66: mov 0x4(%rax),%edi
69: add $0x1,%rdi
6d: mov %edi,0x4(%rax)
70: mov 0x0(%rbp),%rbx
74: mov 0x8(%rbp),%r13
78: mov 0x10(%rbp),%r14
7c: mov 0x18(%rbp),%r15
80: add $0x28,%rbp
84: leaveq
85: retq
[...]
With linum:
[root@arch-fb-vm1 bpf]# ~/devshare/fb-kernel/linux/tools/bpf/bpftool/bpftool prog dump jited pinned /sys/fs/bpf/test_btf_haskv linum
int _dummy_tracepoint(struct dummy_tracepoint_args * arg):
bpf_prog_b07ccb89267cf242__dummy_tracepoint:
; return test_long_fname_1(arg); [file:/data/users/kafai/fb-kernel/linux/tools/testing/selftests/bpf/test_btf_haskv.c line_num:54 line_col:9]
0: push %rbp
1: mov %rsp,%rbp
4: sub $0x28,%rsp
b: sub $0x28,%rbp
f: mov %rbx,0x0(%rbp)
13: mov %r13,0x8(%rbp)
17: mov %r14,0x10(%rbp)
1b: mov %r15,0x18(%rbp)
1f: xor %eax,%eax
21: mov %rax,0x20(%rbp)
25: callq 0x000000000000851e
; return test_long_fname_1(arg); [file:/data/users/kafai/fb-kernel/linux/tools/testing/selftests/bpf/test_btf_haskv.c line_num:54 line_col:2]
2a: xor %eax,%eax
2c: mov 0x0(%rbp),%rbx
30: mov 0x8(%rbp),%r13
34: mov 0x10(%rbp),%r14
38: mov 0x18(%rbp),%r15
3c: add $0x28,%rbp
40: leaveq
41: retq
[...]
Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Acked-by: Yonghong Song <yhs@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
2018-12-08 07:42:32 +07:00
void btf_dump_linfo_plain(const struct btf *btf,
			  const struct bpf_line_info *linfo,
			  const char *prefix, bool linum);
void btf_dump_linfo_json(const struct btf *btf,
			 const struct bpf_line_info *linfo, bool linum);
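The two helpers declared above take a BTF object and a bpf_line_info record and
render it as plain text or JSON. For orientation, here is a minimal sketch of
what the plain-text side of such a helper can look like, built only on public
libbpf/uapi calls (btf__name_by_offset() and the BPF_LINE_INFO_* macros); this
illustrates the idea and is not the actual bpftool code:

/* Sketch: render one bpf_line_info record in plain text.
 * Error handling is omitted for brevity.
 */
#include <stdbool.h>
#include <stdio.h>
#include <bpf/btf.h>
#include <linux/bpf.h>

static void print_one_linfo(const struct btf *btf,
			    const struct bpf_line_info *linfo,
			    const char *prefix, bool linum)
{
	/* line_off points at the source line text in the BTF string
	 * section, file_name_off at the file name.
	 */
	const char *line = btf__name_by_offset(btf, linfo->line_off);
	const char *file = btf__name_by_offset(btf, linfo->file_name_off);

	printf("%s; %s", prefix, line);
	if (linum)
		/* line number and column are packed into line_col */
		printf(" [file:%s line_num:%u line_col:%u]",
		       file,
		       BPF_LINE_INFO_LINE_NUM(linfo->line_col),
		       BPF_LINE_INFO_LINE_COL(linfo->line_col));
	printf("\n");
}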
tools/bpf: bpftool: add net support
Add "bpftool net" support. Networking devices are enumerated
to dump device index/name associated with xdp progs.
For each networking device, tc classes and qdiscs are enumerated
in order to check their bpf filters.
In addition, root handle and clsact ingress/egress are also checked for
bpf filters. Not all filter information is printed out. Only ifindex,
kind, filter name, prog_id and tag are printed out, which are good
enough to show attachment information. If the filter action
is a bpf action, its bpf program id, bpf name and tag will be
printed out as well.
For example,
$ ./bpftool net
xdp [
ifindex 2 devname eth0 prog_id 198
]
tc_filters [
ifindex 2 kind qdisc_htb name prefix_matcher.o:[cls_prefix_matcher_htb]
prog_id 111727 tag d08fe3b4319bc2fd act []
ifindex 2 kind qdisc_clsact_ingress name fbflow_icmp
prog_id 130246 tag 3f265c7f26db62c9 act []
ifindex 2 kind qdisc_clsact_egress name prefix_matcher.o:[cls_prefix_matcher_clsact]
prog_id 111726 tag 99a197826974c876
ifindex 2 kind qdisc_clsact_egress name cls_fg_dscp
prog_id 108619 tag dc4630674fd72dcc act []
ifindex 2 kind qdisc_clsact_egress name fbflow_egress
prog_id 130245 tag 72d2d830d6888d2c
]
$ ./bpftool -jp net
[{
"xdp": [{
"ifindex": 2,
"devname": "eth0",
"prog_id": 198
}
],
"tc_filters": [{
"ifindex": 2,
"kind": "qdisc_htb",
"name": "prefix_matcher.o:[cls_prefix_matcher_htb]",
"prog_id": 111727,
"tag": "d08fe3b4319bc2fd",
"act": []
},{
"ifindex": 2,
"kind": "qdisc_clsact_ingress",
"name": "fbflow_icmp",
"prog_id": 130246,
"tag": "3f265c7f26db62c9",
"act": []
},{
"ifindex": 2,
"kind": "qdisc_clsact_egress",
"name": "prefix_matcher.o:[cls_prefix_matcher_clsact]",
"prog_id": 111726,
"tag": "99a197826974c876"
},{
"ifindex": 2,
"kind": "qdisc_clsact_egress",
"name": "cls_fg_dscp",
"prog_id": 108619,
"tag": "dc4630674fd72dcc",
"act": []
},{
"ifindex": 2,
"kind": "qdisc_clsact_egress",
"name": "fbflow_egress",
"prog_id": 130245,
"tag": "72d2d830d6888d2c"
}
]
}
]
Signed-off-by: Yonghong Song <yhs@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
2018-09-06 06:58:06 +07:00
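As a rough illustration of the enumeration the commit message describes, the
sketch below walks all network interfaces and reports any attached XDP program
id, similar in spirit to the "xdp" section of "bpftool net". It assumes a
libbpf version that provides bpf_xdp_query_id(); bpftool itself gathers this
information over netlink, so this is not the tool's implementation:

#include <net/if.h>
#include <stdio.h>
#include <linux/types.h>
#include <bpf/libbpf.h>

int main(void)
{
	struct if_nameindex *head, *ifni;

	head = if_nameindex();
	if (!head)
		return 1;

	/* the array is terminated by an entry with if_index == 0 */
	for (ifni = head; ifni->if_index; ifni++) {
		__u32 prog_id = 0;

		/* flags == 0: report the attached program regardless of
		 * XDP mode; skip interfaces we cannot query
		 */
		if (bpf_xdp_query_id(ifni->if_index, 0, &prog_id))
			continue;
		if (prog_id)
			printf("ifindex %u devname %s prog_id %u\n",
			       ifni->if_index, ifni->if_name, prog_id);
	}
	if_freenameindex(head);
	return 0;
}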
struct nlattr;
struct ifinfomsg;
struct tcmsg;

int do_xdp_dump(struct ifinfomsg *ifinfo, struct nlattr **tb);
tools/bpf: bpftool: improve output format for bpftool net
This is a follow-up to commit f6f3bac08ff9
("tools/bpf: bpftool: add net support").
Some improvements are made to the bpftool net output.
Specifically, the plain output is more concise, so that
each attachment nicely fits on one line.
Compared to the previous output, the prog tag is removed
since it can easily be obtained from the program id.
As with xdp attachments, the device name is added
to tc attachments.
Bpf programs attached through the shared block
mechanism are supported as well.
$ ip link add dev v1 type veth peer name v2
$ tc qdisc add dev v1 ingress_block 10 egress_block 20 clsact
$ tc qdisc add dev v2 ingress_block 10 egress_block 20 clsact
$ tc filter add block 10 protocol ip prio 25 bpf obj bpf_shared.o sec ingress flowid 1:1
$ tc filter add block 20 protocol ip prio 30 bpf obj bpf_cyclic.o sec classifier flowid 1:1
$ bpftool net
xdp:
tc:
v2(7) clsact/ingress bpf_shared.o:[ingress] id 23
v2(7) clsact/egress bpf_cyclic.o:[classifier] id 24
v1(8) clsact/ingress bpf_shared.o:[ingress] id 23
v1(8) clsact/egress bpf_cyclic.o:[classifier] id 24
The documentation and "bpftool net help" are updated
to make it clear that the current implementation only
supports xdp and tc attachments. For programs
attached to cgroups, "bpftool cgroup" can be used
to dump attachments. For other programs, e.g.
sk_{filter,skb,msg,reuseport} and lwt/seg6,
the iproute2 tools should be used.
The new output:
$ bpftool net
xdp:
eth0(2) driver id 198
tc:
eth0(2) clsact/ingress fbflow_icmp id 335 act [{icmp_action id 336}]
eth0(2) clsact/egress fbflow_egress id 334
$ bpftool -jp net
[{
"xdp": [{
"devname": "eth0",
"ifindex": 2,
"mode": "driver",
"id": 198
}
],
"tc": [{
"devname": "eth0",
"ifindex": 2,
"kind": "clsact/ingress",
"name": "fbflow_icmp",
"id": 335,
"act": [{
"name": "icmp_action",
"id": 336
}
]
},{
"devname": "eth0",
"ifindex": 2,
"kind": "clsact/egress",
"name": "fbflow_egress",
"id": 334
}
]
}
]
Signed-off-by: Yonghong Song <yhs@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
2018-09-18 06:13:00 +07:00
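In the new plain output each attachment starts with the device in
devname(ifindex) form, e.g. "eth0(2)". A minimal sketch of one way to produce
that prefix from an ifindex is shown below, using the POSIX if_indextoname()
helper from <net/if.h>; this is purely illustrative and not taken from the
bpftool source:

#include <net/if.h>
#include <stdio.h>

static void print_dev(unsigned int ifindex)
{
	char name[IF_NAMESIZE] = "";

	/* if_indextoname() returns NULL if the interface is gone */
	if (!if_indextoname(ifindex, name))
		snprintf(name, sizeof(name), "?");
	printf("%s(%u) ", name, ifindex);
}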
int do_filter_dump(struct tcmsg *ifinfo, struct nlattr **tb, const char *kind,
		   const char *devname, int ifindex);
tools: bpftool: Restore message on failure to guess program type
In commit 4a3d6c6a6e4d ("libbpf: Reduce log level for custom section
names"), log level for messages for libbpf_attach_type_by_name() and
libbpf_prog_type_by_name() was downgraded from "info" to "debug". The
latter function, in particular, is used by bpftool when attempting to
load programs, and this change caused bpftool to exit with no hint or
error message when it fails to detect the type of the program to load
(unless "-d" option was provided).
To help users understand why bpftool fails to load the program, let's do
a second run of the function with log level in "debug" mode in case of
failure.
Before:
# bpftool prog load sample_ret0.o /sys/fs/bpf/sample_ret0
# echo $?
255
Or really verbose with -d flag:
# bpftool -d prog load sample_ret0.o /sys/fs/bpf/sample_ret0
libbpf: loading sample_ret0.o
libbpf: section(1) .strtab, size 134, link 0, flags 0, type=3
libbpf: skip section(1) .strtab
libbpf: section(2) .text, size 16, link 0, flags 6, type=1
libbpf: found program .text
libbpf: section(3) .debug_abbrev, size 55, link 0, flags 0, type=1
libbpf: skip section(3) .debug_abbrev
libbpf: section(4) .debug_info, size 75, link 0, flags 0, type=1
libbpf: skip section(4) .debug_info
libbpf: section(5) .rel.debug_info, size 32, link 14, flags 0, type=9
libbpf: skip relo .rel.debug_info(5) for section(4)
libbpf: section(6) .debug_str, size 150, link 0, flags 30, type=1
libbpf: skip section(6) .debug_str
libbpf: section(7) .BTF, size 155, link 0, flags 0, type=1
libbpf: section(8) .BTF.ext, size 80, link 0, flags 0, type=1
libbpf: section(9) .rel.BTF.ext, size 32, link 14, flags 0, type=9
libbpf: skip relo .rel.BTF.ext(9) for section(8)
libbpf: section(10) .debug_frame, size 40, link 0, flags 0, type=1
libbpf: skip section(10) .debug_frame
libbpf: section(11) .rel.debug_frame, size 16, link 14, flags 0, type=9
libbpf: skip relo .rel.debug_frame(11) for section(10)
libbpf: section(12) .debug_line, size 74, link 0, flags 0, type=1
libbpf: skip section(12) .debug_line
libbpf: section(13) .rel.debug_line, size 16, link 14, flags 0, type=9
libbpf: skip relo .rel.debug_line(13) for section(12)
libbpf: section(14) .symtab, size 96, link 1, flags 0, type=2
libbpf: looking for externs among 4 symbols...
libbpf: collected 0 externs total
libbpf: failed to guess program type from ELF section '.text'
libbpf: supported section(type) names are: socket sk_reuseport kprobe/ [...]
After:
# bpftool prog load sample_ret0.o /sys/fs/bpf/sample_ret0
libbpf: failed to guess program type from ELF section '.text'
libbpf: supported section(type) names are: socket sk_reuseport kprobe/ [...]
Signed-off-by: Quentin Monnet <quentin@isovalent.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: John Fastabend <john.fastabend@gmail.com>
Link: https://lore.kernel.org/bpf/20200311021205.9755-1-quentin@isovalent.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
2020-03-11 09:12:05 +07:00
int print_all_levels(__maybe_unused enum libbpf_print_level level,
		     const char *format, va_list args);
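print_all_levels(), declared above, is the verbose printer used for the retry
described in the commit message. A minimal sketch of that approach is shown
below: if the first lookup fails silently (its messages are at "debug" level),
temporarily install print_all_levels() as libbpf's print callback and run the
lookup again so the reason for the failure is shown. The wrapper name is made
up for illustration; the libbpf calls themselves are real:

#include <bpf/libbpf.h>

static int guess_prog_type(const char *sec_name,
			   enum bpf_prog_type *prog_type,
			   enum bpf_attach_type *expected_attach_type)
{
	libbpf_print_fn_t prev;
	int ret;

	ret = libbpf_prog_type_by_name(sec_name, prog_type,
				       expected_attach_type);
	if (!ret)
		return 0;

	/* Second run, this time printing every log level */
	prev = libbpf_set_print(print_all_levels);
	ret = libbpf_prog_type_by_name(sec_name, prog_type,
				       expected_attach_type);
	libbpf_set_print(prev);
	return ret;
}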
2017-10-05 10:10:04 +07:00
#endif