/* SPDX-License-Identifier: GPL-2.0
 * Copyright(c) 2017-2018 Jesper Dangaard Brouer, Red Hat Inc.
 *
 * XDP monitor tool, based on tracepoints
 */
#include <uapi/linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__type(key, u32);
	__type(value, u64);
	__uint(max_entries, 2);
	/* TODO: have entries for all possible errno's */
} redirect_err_cnt SEC(".maps");

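/* Userspace readout note: a BPF_MAP_TYPE_PERCPU_ARRAY stores one value per
 * possible CPU for every key, so a reader must sum the per-CPU slots.
 * A minimal sketch of that readout with libbpf (map_fd is a placeholder
 * for the fd obtained by the loader, not something defined in this file):
 *
 *	unsigned int nr_cpus = libbpf_num_possible_cpus();
 *	__u64 values[nr_cpus], sum = 0;
 *	__u32 key = XDP_REDIRECT_ERROR;
 *
 *	if (bpf_map_lookup_elem(map_fd, &key, values) == 0)
 *		for (unsigned int i = 0; i < nr_cpus; i++)
 *			sum += values[i];
 */
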
#define XDP_UNKNOWN (XDP_REDIRECT + 1)
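/* Counters of xdp_exception events, indexed by the XDP action code
 * (XDP_ABORTED = 0 through XDP_REDIRECT = 4), plus one slot for
 * out-of-range actions:
 */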
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__type(key, u32);
	__type(value, u64);
	__uint(max_entries, XDP_UNKNOWN + 1);
} exception_cnt SEC(".maps");

/* Tracepoint format: /sys/kernel/debug/tracing/events/xdp/xdp_redirect/format
 * Code in: kernel/include/trace/events/xdp.h
 */
struct xdp_redirect_ctx {
	u64 __pad;		// First 8 bytes are not accessible by bpf code
	int prog_id;		//	offset:8;  size:4; signed:1;
	u32 act;		//	offset:12; size:4; signed:0;
	int ifindex;		//	offset:16; size:4; signed:1;
	int err;		//	offset:20; size:4; signed:1;
	int to_ifindex;		//	offset:24; size:4; signed:1;
	u32 map_id;		//	offset:28; size:4; signed:0;
	int map_index;		//	offset:32; size:4; signed:1;
};				//	offset:36

enum {
	XDP_REDIRECT_SUCCESS = 0,
	XDP_REDIRECT_ERROR = 1
};

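/* xdp_redirect_collect_stat() below uses these two values as keys into
 * redirect_err_cnt, which is why that map has max_entries = 2.
 */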
static __always_inline
int xdp_redirect_collect_stat(struct xdp_redirect_ctx *ctx)
{
	u32 key = XDP_REDIRECT_ERROR;
	int err = ctx->err;
	u64 *cnt;

	if (!err)
		key = XDP_REDIRECT_SUCCESS;

	cnt = bpf_map_lookup_elem(&redirect_err_cnt, &key);
	if (!cnt)
		return 1;
	*cnt += 1;

	return 0; /* Indicate event was filtered (no further processing) */
	/*
	 * Returning 1 here would allow e.g. a perf-record tracepoint
	 * to see and record these events, but it doesn't work well in
	 * practice, as stopping perf-record also unloads this bpf_prog.
	 * Plus, there is the additional overhead of doing so.
	 */
}

SEC("tracepoint/xdp/xdp_redirect_err")
|
|
|
|
int trace_xdp_redirect_err(struct xdp_redirect_ctx *ctx)
|
|
|
|
{
|
|
|
|
return xdp_redirect_collect_stat(ctx);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
SEC("tracepoint/xdp/xdp_redirect_map_err")
|
|
|
|
int trace_xdp_redirect_map_err(struct xdp_redirect_ctx *ctx)
|
|
|
|
{
|
|
|
|
return xdp_redirect_collect_stat(ctx);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Likely unloaded when prog starts */
SEC("tracepoint/xdp/xdp_redirect")
int trace_xdp_redirect(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}

/* Likely unloaded when prog starts */
SEC("tracepoint/xdp/xdp_redirect_map")
int trace_xdp_redirect_map(struct xdp_redirect_ctx *ctx)
{
	return xdp_redirect_collect_stat(ctx);
}

/* Tracepoint format: /sys/kernel/debug/tracing/events/xdp/xdp_exception/format
 * Code in: kernel/include/trace/events/xdp.h
 */
struct xdp_exception_ctx {
	u64 __pad;	// First 8 bytes are not accessible by bpf code
	int prog_id;	//	offset:8;  size:4; signed:1;
	u32 act;	//	offset:12; size:4; signed:0;
	int ifindex;	//	offset:16; size:4; signed:1;
};

SEC("tracepoint/xdp/xdp_exception")
|
|
|
|
int trace_xdp_exception(struct xdp_exception_ctx *ctx)
|
|
|
|
{
|
2018-01-16 21:15:30 +07:00
|
|
|
u64 *cnt;
|
2017-10-06 15:41:46 +07:00
|
|
|
u32 key;
|
|
|
|
|
|
|
|
key = ctx->act;
|
|
|
|
if (key > XDP_REDIRECT)
|
|
|
|
key = XDP_UNKNOWN;
|
|
|
|
|
|
|
|
cnt = bpf_map_lookup_elem(&exception_cnt, &key);
|
|
|
|
if (!cnt)
|
|
|
|
return 1;
|
|
|
|
*cnt += 1;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2018-01-19 23:15:50 +07:00
|
|
|
|
|
|
|
/* Common stats data record shared with _user.c */
struct datarec {
	u64 processed;
	u64 dropped;
	u64 info;
	u64 err;
};
#define MAX_CPUS 64

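/* Note: the meaning of datarec.info depends on the map it lives in: it
 * counts non-empty enqueue bulks in cpumap_enqueue_cnt, kthread schedule
 * calls in cpumap_kthread_cnt, and xmit bulks in devmap_xmit_cnt.
 */
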
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__type(key, u32);
	__type(value, struct datarec);
	__uint(max_entries, MAX_CPUS);
} cpumap_enqueue_cnt SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__type(key, u32);
	__type(value, struct datarec);
	__uint(max_entries, 1);
} cpumap_kthread_cnt SEC(".maps");

/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_cpumap_enqueue/format
 * Code in: kernel/include/trace/events/xdp.h
 */
struct cpumap_enqueue_ctx {
	u64 __pad;		// First 8 bytes are not accessible by bpf code
	int map_id;		//	offset:8;  size:4; signed:1;
	u32 act;		//	offset:12; size:4; signed:0;
	int cpu;		//	offset:16; size:4; signed:1;
	unsigned int drops;	//	offset:20; size:4; signed:0;
	unsigned int processed;	//	offset:24; size:4; signed:0;
	int to_cpu;		//	offset:28; size:4; signed:1;
};

SEC("tracepoint/xdp/xdp_cpumap_enqueue")
|
|
|
|
int trace_xdp_cpumap_enqueue(struct cpumap_enqueue_ctx *ctx)
|
|
|
|
{
|
|
|
|
u32 to_cpu = ctx->to_cpu;
|
|
|
|
struct datarec *rec;
|
|
|
|
|
|
|
|
if (to_cpu >= MAX_CPUS)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
rec = bpf_map_lookup_elem(&cpumap_enqueue_cnt, &to_cpu);
|
|
|
|
if (!rec)
|
|
|
|
return 0;
|
|
|
|
rec->processed += ctx->processed;
|
|
|
|
rec->dropped += ctx->drops;
|
|
|
|
|
|
|
|
/* Record bulk events, then userspace can calc average bulk size */
|
|
|
|
if (ctx->processed > 0)
|
|
|
|
rec->info += 1;
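	/* I.e. userspace can derive the average bulk size for this
	 * per-CPU slot as processed / info.
	 */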

	return 0;
}

/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_cpumap_kthread/format
 * Code in: kernel/include/trace/events/xdp.h
 */
struct cpumap_kthread_ctx {
	u64 __pad;		// First 8 bytes are not accessible by bpf code
	int map_id;		//	offset:8;  size:4; signed:1;
	u32 act;		//	offset:12; size:4; signed:0;
	int cpu;		//	offset:16; size:4; signed:1;
	unsigned int drops;	//	offset:20; size:4; signed:0;
	unsigned int processed;	//	offset:24; size:4; signed:0;
	int sched;		//	offset:28; size:4; signed:1;
};

SEC("tracepoint/xdp/xdp_cpumap_kthread")
|
|
|
|
int trace_xdp_cpumap_kthread(struct cpumap_kthread_ctx *ctx)
|
|
|
|
{
|
|
|
|
struct datarec *rec;
|
|
|
|
u32 key = 0;
|
|
|
|
|
|
|
|
rec = bpf_map_lookup_elem(&cpumap_kthread_cnt, &key);
|
|
|
|
if (!rec)
|
|
|
|
return 0;
|
|
|
|
rec->processed += ctx->processed;
|
|
|
|
rec->dropped += ctx->drops;
|
|
|
|
|
|
|
|
/* Count times kthread yielded CPU via schedule call */
|
|
|
|
if (ctx->sched)
|
|
|
|
rec->info++;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2018-05-24 21:46:02 +07:00
|
|
|
|
2020-10-11 01:17:34 +07:00
|
|
|
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__type(key, u32);
	__type(value, struct datarec);
	__uint(max_entries, 1);
} devmap_xmit_cnt SEC(".maps");

/* Tracepoint: /sys/kernel/debug/tracing/events/xdp/xdp_devmap_xmit/format
 * Code in: kernel/include/trace/events/xdp.h
 */
struct devmap_xmit_ctx {
	u64 __pad;		// First 8 bytes are not accessible by bpf code
	int from_ifindex;	//	offset:8;  size:4; signed:1;
	u32 act;		//	offset:12; size:4; signed:0;
	int to_ifindex;		//	offset:16; size:4; signed:1;
	int drops;		//	offset:20; size:4; signed:1;
	int sent;		//	offset:24; size:4; signed:1;
	int err;		//	offset:28; size:4; signed:1;
};

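/* Note: drops and sent are signed ints in this tracepoint. The driver
 * side typically computes drops as (frames handed in - frames sent), so
 * a buggy ndo_xdp_xmit() claiming to send more than it was given shows
 * up as a negative drops value; the handler below counts that case.
 */
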
SEC("tracepoint/xdp/xdp_devmap_xmit")
|
|
|
|
int trace_xdp_devmap_xmit(struct devmap_xmit_ctx *ctx)
|
|
|
|
{
|
|
|
|
struct datarec *rec;
|
|
|
|
u32 key = 0;
|
|
|
|
|
|
|
|
rec = bpf_map_lookup_elem(&devmap_xmit_cnt, &key);
|
|
|
|
if (!rec)
|
|
|
|
return 0;
|
|
|
|
rec->processed += ctx->sent;
|
|
|
|
rec->dropped += ctx->drops;
|
|
|
|
|
|
|
|
/* Record bulk events, then userspace can calc average bulk size */
|
|
|
|
rec->info += 1;
|
|
|
|
|
2018-05-24 21:46:22 +07:00
|
|
|
/* Record error cases, where no frame were sent */
|
|
|
|
if (ctx->err)
|
|
|
|
rec->err++;
|
|
|
|
|
|
|
|
/* Catch API error of drv ndo_xdp_xmit sent more than count */
|
|
|
|
if (ctx->drops < 0)
|
|
|
|
rec->err++;
|
|
|
|
|
2018-05-24 21:46:02 +07:00
|
|
|
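
	/* Unlike the handlers above, return 1 here: per the note in
	 * xdp_redirect_collect_stat(), a non-zero return leaves the event
	 * visible to other tracepoint consumers such as perf-record.
	 */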
	return 1;
}