mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-27 05:05:29 +07:00
e312747b49
Now that 'perf trace' fills in that "filtered_pids" BPF map, remove the
set of filtered pids used as an example to test that feature.

That feature works like this: starting a system-wide, 'strace'-like
'perf trace' augmented session, we noticed that lots of events take
place for one pid, which turns out to be the feedback loop of perf
trace's own syscalls being processed by the 'gnome-terminal' process:

  # perf trace -e tools/perf/examples/bpf/augmented_raw_syscalls.c
     0.391 ( 0.002 ms): gnome-terminal/2469 read(fd: 17</dev/ptmx>, buf: 0x564b79f750bc, count: 8176) = 453
     0.394 ( 0.001 ms): gnome-terminal/2469 read(fd: 17</dev/ptmx>, buf: 0x564b79f75280, count: 7724) = -1 EAGAIN Resource temporarily unavailable
     0.438 ( 0.001 ms): gnome-terminal/2469 read(fd: 4<anon_inode:[eventfd]>, buf: 0x7fffc696aeb0, count: 16) = 8
     0.519 ( 0.001 ms): gnome-terminal/2469 read(fd: 17</dev/ptmx>, buf: 0x564b79f75280, count: 7724) = 114
     0.522 ( 0.001 ms): gnome-terminal/2469 read(fd: 17</dev/ptmx>, buf: 0x564b79f752f1, count: 7611) = -1 EAGAIN Resource temporarily unavailable
  ^C

So we can use --filter-pids to get rid of that one. What implements that
functionality is the "filtered_pids" BPF map that
tools/perf/examples/bpf/augmented_raw_syscalls.c creates and that the
'perf trace' bpf loader notices, associating a "struct bpf_map" with it
that then gets populated by 'perf trace':

  # perf trace --filter-pids 2469 -e tools/perf/examples/bpf/augmented_raw_syscalls.c
     0.020 ( 0.002 ms): gnome-shell/1663 epoll_pwait(epfd: 12<anon_inode:[eventpoll]>, events: 0x7ffd8f3ef960, maxevents: 32, sigsetsize: 8) = 1
     0.025 ( 0.002 ms): gnome-shell/1663 read(fd: 24</dev/input/event4>, buf: 0x560c01bb8240, count: 8112) = 48
     0.029 ( 0.001 ms): gnome-shell/1663 read(fd: 24</dev/input/event4>, buf: 0x560c01bb8258, count: 8088) = -1 EAGAIN Resource temporarily unavailable
     0.032 ( 0.001 ms): gnome-shell/1663 read(fd: 24</dev/input/event4>, buf: 0x560c01bb8240, count: 8112) = -1 EAGAIN Resource temporarily unavailable
     0.040 ( 0.003 ms): gnome-shell/1663 recvmsg(fd: 46<socket:[35893]>, msg: 0x7ffd8f3ef950) = -1 EAGAIN Resource temporarily unavailable
    21.529 ( 0.002 ms): gnome-shell/1663 epoll_pwait(epfd: 5<anon_inode:[eventpoll]>, events: 0x7ffd8f3ef960, maxevents: 32, sigsetsize: 8) = 1
    21.533 ( 0.004 ms): gnome-shell/1663 recvmsg(fd: 82<socket:[42826]>, msg: 0x7ffd8f3ef7b0, flags: DONTWAIT|CMSG_CLOEXEC) = 236
    21.581 ( 0.006 ms): gnome-shell/1663 ioctl(fd: 8</dev/dri/card0>, cmd: DRM_I915_GEM_BUSY, arg: 0x7ffd8f3ef060) = 0
    21.605 ( 0.020 ms): gnome-shell/1663 ioctl(fd: 8</dev/dri/card0>, cmd: DRM_I915_GEM_CREATE, arg: 0x7ffd8f3eeea0) = 0
    21.626 ( 0.119 ms): gnome-shell/1663 ioctl(fd: 8</dev/dri/card0>, cmd: DRM_I915_GEM_SET_DOMAIN, arg: 0x7ffd8f3eee94) = 0
    21.746 ( 0.081 ms): gnome-shell/1663 ioctl(fd: 8</dev/dri/card0>, cmd: DRM_I915_GEM_PWRITE, arg: 0x7ffd8f3eeea0) = 0
  ^C

Oops, yet another GNOME process involved with the output that 'perf
trace' generates; let's filter that one out too:

  # perf trace --filter-pids 2469,1663 -e tools/perf/examples/bpf/augmented_raw_syscalls.c
         ? (         ): wpa_supplicant/1366  ... [continued]: select()) = 0 Timeout
     0.006 ( 0.002 ms): wpa_supplicant/1366 clock_gettime(which_clock: BOOTTIME, tp: 0x7fffe5b1e430) = 0
     0.011 ( 0.001 ms): wpa_supplicant/1366 clock_gettime(which_clock: BOOTTIME, tp: 0x7fffe5b1e3e0) = 0
     0.014 ( 0.001 ms): wpa_supplicant/1366 clock_gettime(which_clock: BOOTTIME, tp: 0x7fffe5b1e430) = 0
         ? (         ): gmain/1791  ... [continued]: poll()) = 0 Timeout
     0.017 (         ): wpa_supplicant/1366 select(n: 6, inp: 0x55646fed3ad0, outp: 0x55646fed3b60, exp: 0x55646fed3bf0, tvp: 0x7fffe5b1e4a0) ...
   157.879 ( 0.019 ms): gmain/1791 inotify_add_watch(fd: 8<anon_inode:inotify>, pathname: , mask: 16789454) = -1 ENOENT No such file or directory
         ? (         ): cupsd/1001  ... [continued]: epoll_pwait()) = 0
         ? (         ): gsd-color/1908  ... [continued]: poll()) = 0 Timeout
   499.615 (         ): cupsd/1001 epoll_pwait(epfd: 4<anon_inode:[eventpoll]>, events: 0x557a21166500, maxevents: 4096, timeout: 1000, sigsetsize: 8) ...
   586.593 ( 0.004 ms): gsd-color/1908 recvmsg(fd: 3<socket:[38074]>, msg: 0x7ffdef34e800) = -1 EAGAIN Resource temporarily unavailable
         ? (         ): fwupd/2230  ... [continued]: poll()) = 0 Timeout
         ? (         ): rtkit-daemon/906  ... [continued]: poll()) = 0 Timeout
         ? (         ): rtkit-daemon/907  ... [continued]: poll()) = 1
   724.603 ( 0.007 ms): rtkit-daemon/907 read(fd: 6<anon_inode:[eventfd]>, buf: 0x7f05ff768d08, count: 8) = 8
         ? (         ): ssh/5461  ... [continued]: select()) = 1
   810.431 ( 0.002 ms): ssh/5461 clock_gettime(which_clock: BOOTTIME, tp: 0x7ffd7f39f870) = 0
  ^C

What is left is mostly syscall exit events for syscalls that were
already in flight when 'perf trace' started, etc. Saner :-)

Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: https://lkml.kernel.org/n/tip-c3tu5yg204p5mvr9kvwew07n@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
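For reference, the pid filtering used in the file below comes from the pid_filter() and pid_filter__has() helpers pulled in via <pid_filter.h>, which is not reproduced on this page. Here is a minimal sketch of what those helpers might expand to, assuming a BPF hash map keyed by pid that 'perf trace' fills in from --filter-pids; the map parameters and helper body are illustrative, not the actual perf header:

  /*
   * Hypothetical sketch of what pid_filter(pids_filtered) and
   * pid_filter__has() from <pid_filter.h> might expand to; the real
   * perf header may differ.  Declared in the same style the file below
   * uses for its bpf-output map.
   */
  struct bpf_map SEC("maps") pids_filtered = {
          .type         = BPF_MAP_TYPE_HASH,
          .key_size     = sizeof(pid_t),
          .value_size   = sizeof(bool),
          .max_entries  = 64,
  };

  static bool pid_filter__has(struct bpf_map *pids, pid_t pid)
  {
          /* A pid is filtered if 'perf trace' added an entry for it. */
          return bpf_map_lookup_elem(pids, &pid) != NULL;
  }

With something like that in place, any pid that 'perf trace' adds to the map makes pid_filter__has() return true, so the BPF program below bails out early for events from that pid.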
142 lines
4.0 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * Augment the raw_syscalls tracepoints with the contents of the pointer arguments.
 *
 * Test it with:
 *
 * perf trace -e tools/perf/examples/bpf/augmented_raw_syscalls.c cat /etc/passwd > /dev/null
 *
 * This exactly matches what is marshalled into the raw_syscall:sys_enter
 * payload expected by the 'perf trace' beautifiers.
 *
 * For now it just uses the existing tracepoint augmentation code in 'perf
 * trace', in the next csets we'll hook up these with the sys_enter/sys_exit
 * code that will combine entry/exit in a strace like way.
 */

#include <stdio.h>
#include <unistd.h>
#include <pid_filter.h>

/* bpf-output associated map */
struct bpf_map SEC("maps") __augmented_syscalls__ = {
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size = sizeof(int),
	.value_size = sizeof(u32),
	.max_entries = __NR_CPUS__,
};

struct syscall_enter_args {
	unsigned long long common_tp_fields;
	long		   syscall_nr;
	unsigned long	   args[6];
};

struct syscall_exit_args {
	unsigned long long common_tp_fields;
	long		   syscall_nr;
	long		   ret;
};

struct augmented_filename {
	unsigned int	size;
	int		reserved;
	char		value[256];
};

#define SYS_WRITE 1
#define SYS_OPEN 2
#define SYS_POLL 7
#define SYS_OPENAT 257

pid_filter(pids_filtered);

SEC("raw_syscalls:sys_enter")
int sys_enter(struct syscall_enter_args *args)
{
	struct {
		struct syscall_enter_args args;
		struct augmented_filename filename;
	} augmented_args;
	unsigned int len = sizeof(augmented_args);
	const void *filename_arg = NULL;

	if (pid_filter__has(&pids_filtered, getpid()))
		return 0;

	probe_read(&augmented_args.args, sizeof(augmented_args.args), args);
	/*
	 * Yonghong and Edward Cree sayz:
	 *
	 * https://www.spinics.net/lists/netdev/msg531645.html
	 *
	 * >> R0=inv(id=0) R1=inv2 R6=ctx(id=0,off=0,imm=0) R7=inv64 R10=fp0,call_-1
	 * >> 10: (bf) r1 = r6
	 * >> 11: (07) r1 += 16
	 * >> 12: (05) goto pc+2
	 * >> 15: (79) r3 = *(u64 *)(r1 +0)
	 * >> dereference of modified ctx ptr R1 off=16 disallowed
	 * > Aha, we at least got a different error message this time.
	 * > And indeed llvm has done that optimisation, rather than the more obvious
	 * > 11: r3 = *(u64 *)(r1 +16)
	 * > because it wants to have lots of reads share a single insn. You may be able
	 * > to defeat that optimisation by adding compiler barriers, idk. Maybe someone
	 * > with llvm knowledge can figure out how to stop it (ideally, llvm would know
	 * > when it's generating for bpf backend and not do that). -O0? ¯\_(ツ)_/¯
	 *
	 * The optimization mostly likes below:
	 *
	 *	br1:
	 *	...
	 *	r1 += 16
	 *	goto merge
	 *	br2:
	 *	...
	 *	r1 += 20
	 *	goto merge
	 *	merge:
	 *	*(u64 *)(r1 + 0)
	 *
	 * The compiler tries to merge common loads. There is no easy way to
	 * stop this compiler optimization without turning off a lot of other
	 * optimizations. The easiest way is to add barriers:
	 *
	 *	 __asm__ __volatile__("": : :"memory")
	 *
	 * after the ctx memory access to prevent their down stream merging.
	 */
	switch (augmented_args.args.syscall_nr) {
	case SYS_WRITE:
	case SYS_POLL:	 return 0;
	case SYS_OPEN:	 filename_arg = (const void *)args->args[0];
			 __asm__ __volatile__("": : :"memory");
			 break;
	case SYS_OPENAT: filename_arg = (const void *)args->args[1];
			 break;
	}

	if (filename_arg != NULL) {
		augmented_args.filename.reserved = 0;
		augmented_args.filename.size = probe_read_str(&augmented_args.filename.value,
							      sizeof(augmented_args.filename.value),
							      filename_arg);
		if (augmented_args.filename.size < sizeof(augmented_args.filename.value)) {
			len -= sizeof(augmented_args.filename.value) - augmented_args.filename.size;
			len &= sizeof(augmented_args.filename.value) - 1;
		}
	} else {
		len = sizeof(augmented_args.args);
	}

	perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, &augmented_args, len);
	return 0;
}

SEC("raw_syscalls:sys_exit")
int sys_exit(struct syscall_exit_args *args)
{
	return !pid_filter__has(&pids_filtered, getpid());
}

license(GPL);
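To tie this back to the commit message: once the 'perf trace' bpf loader has associated a "struct bpf_map" with pids_filtered, filling it in from --filter-pids amounts to updating that map through its file descriptor from user space. A rough, hypothetical sketch using libbpf's low-level bpf_map_update_elem(); the function name, map fd plumbing and key/value types are assumptions, not perf's actual code:

  /*
   * Hypothetical userspace sketch: push the pids from --filter-pids into
   * the "pids_filtered" map whose fd the bpf loader resolved.  Assumes
   * the map is keyed by pid_t with a bool value, as in the sketch above.
   */
  #include <bpf/bpf.h>          /* bpf_map_update_elem(), BPF_ANY */
  #include <stdbool.h>
  #include <sys/types.h>

  static int filter_pids(int map_fd, const pid_t *pids, int nr_pids)
  {
          bool value = true;
          int i, err;

          for (i = 0; i < nr_pids; ++i) {
                  /* Any entry makes pid_filter__has() return true in the BPF program. */
                  err = bpf_map_update_elem(map_fd, &pids[i], &value, BPF_ANY);
                  if (err)
                          return err;
          }

          return 0;
  }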