mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-18 18:26:44 +07:00
3ffab54602
This tool xdp_monitor demonstrates how to use the different xdp_redirect tracepoints xdp_redirect{,_map}{,_err} from a BPF program. The default mode is to only monitor the error counters, to avoid affecting the per-packet performance. Tracepoints come with a base overhead of 25 nanosec for an attached bpf_prog, and 48 nanosec for using a full perf record (with a non-matching filter). Thus, by default, loading the --stats mode could affect the maximum performance. This version of the tool is very simple and counts all types of errors as one. It will be natural to extend this later with the different types of errors that can occur, which should help users quickly identify common mistakes. Because the TP_STRUCT was kept in sync, all the tracepoints load the same BPF code. It would also be natural to extend the map version to demonstrate how the map information could be used. Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com> Acked-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: David S. Miller <davem@davemloft.net>
89 lines
2.4 KiB
C
89 lines
2.4 KiB
C
/* XDP monitor tool, based on tracepoints
 *
 * Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
|
|
#include <uapi/linux/bpf.h>
|
|
#include "bpf_helpers.h"
|
|
|
|
/* Per-CPU counters indexed by redirect outcome (see enum below).
 * Per-CPU avoids cache-line bouncing on the fast path; userspace
 * sums the per-CPU values when reading.
 */
struct bpf_map_def SEC("maps") redirect_err_cnt = {
	.key_size	= sizeof(u32),
	.value_size	= sizeof(u64),
	.type		= BPF_MAP_TYPE_PERCPU_ARRAY,
	.max_entries	= 2, /* TODO: have entries for all possible errno's */
};
|
|
|
|
/* Tracepoint format: /sys/kernel/debug/tracing/events/xdp/xdp_redirect/format
 * Code in: kernel/include/trace/events/xdp.h
 */
|
|
/* Hand-written mirror of the tracepoint record layout. Member order,
 * sizes, and offsets must stay in sync with the kernel's TP_STRUCT
 * definition — do not reorder or resize fields.
 */
struct xdp_redirect_ctx {
	/* Common tracepoint header, present in every tracepoint record */
	unsigned short common_type;	// offset:0; size:2; signed:0;
	unsigned char common_flags;	// offset:2; size:1; signed:0;
	unsigned char common_preempt_count;// offset:3; size:1; signed:0;
	int common_pid;			// offset:4; size:4; signed:1;

	/* xdp_redirect-specific payload */
	int prog_id;			// offset:8;  size:4; signed:1;
	u32 act;			// offset:12  size:4; signed:0;
	int ifindex;			// offset:16  size:4; signed:1;
	int err;			// offset:20  size:4; signed:1; 0 = success
	int to_ifindex;			// offset:24  size:4; signed:1;
	u32 map_id;			// offset:28  size:4; signed:0;
	int map_index;			// offset:32  size:4; signed:1;
};					// offset:36
|
|
|
|
/* Key values for redirect_err_cnt: one slot per redirect outcome. */
enum {
	XDP_REDIRECT_ERROR   = 1,
	XDP_REDIRECT_SUCCESS = 0,
};
|
|
|
|
static __always_inline
|
|
int xdp_redirect_collect_stat(struct xdp_redirect_ctx *ctx)
|
|
{
|
|
u32 key = XDP_REDIRECT_ERROR;
|
|
int err = ctx->err;
|
|
u64 *cnt;
|
|
|
|
if (!err)
|
|
key = XDP_REDIRECT_SUCCESS;
|
|
|
|
cnt = bpf_map_lookup_elem(&redirect_err_cnt, &key);
|
|
if (!cnt)
|
|
return 0;
|
|
*cnt += 1;
|
|
|
|
return 0; /* Indicate event was filtered (no further processing)*/
|
|
/*
|
|
* Returning 1 here would allow e.g. a perf-record tracepoint
|
|
* to see and record these events, but it doesn't work well
|
|
* in-practice as stopping perf-record also unload this
|
|
* bpf_prog. Plus, there is additional overhead of doing so.
|
|
*/
|
|
}
|
|
|
|
/* Attached to xdp_redirect_err: counts failed non-map redirects. */
SEC("tracepoint/xdp/xdp_redirect_err")
int trace_xdp_redirect_err(struct xdp_redirect_ctx *ctx)
{
	int ret = xdp_redirect_collect_stat(ctx);

	return ret;
}
|
|
|
|
|
|
/* Attached to xdp_redirect_map_err: counts failed map-based redirects. */
SEC("tracepoint/xdp/xdp_redirect_map_err")
int trace_xdp_redirect_map_err(struct xdp_redirect_ctx *ctx)
{
	int ret = xdp_redirect_collect_stat(ctx);

	return ret;
}
|
|
|
|
/* Likely unloaded when prog starts */
|
|
SEC("tracepoint/xdp/xdp_redirect")
|
|
int trace_xdp_redirect(struct xdp_redirect_ctx *ctx)
|
|
{
|
|
return xdp_redirect_collect_stat(ctx);
|
|
}
|
|
|
|
/* Likely unloaded when prog starts */
|
|
SEC("tracepoint/xdp/xdp_redirect_map")
|
|
int trace_xdp_redirect_map(struct xdp_redirect_ctx *ctx)
|
|
{
|
|
return xdp_redirect_collect_stat(ctx);
|
|
}
|