From 313e7f6fb1d9a8814424c2c6878848648c9c090f Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Mon, 28 Oct 2019 11:20:49 +0100 Subject: [PATCH 01/30] selftest/bpf: Use -m{little, big}-endian for clang When cross-compiling tests from x86 to s390, the resulting BPF objects fail to load due to endianness mismatch. Fix by using BPF-GCC endianness check for clang as well. Signed-off-by: Ilya Leoshkevich Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20191028102049.7489-1-iii@linux.ibm.com --- tools/testing/selftests/bpf/Makefile | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 933f39381039..3209c208f3b3 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -131,8 +131,13 @@ $(shell $(1) -v -E - &1 \ | sed -n '/<...> search starts here:/,/End of search list./{ s| \(/.*\)|-idirafter \1|p }') endef +# Determine target endianness. +IS_LITTLE_ENDIAN = $(shell $(CC) -dM -E - Date: Mon, 28 Oct 2019 11:21:10 +0100 Subject: [PATCH 02/30] selftests/bpf: Restore $(OUTPUT)/test_stub.o rule `make O=/linux-build kselftest TARGETS=bpf` fails with make[3]: *** No rule to make target '/linux-build/bpf/test_stub.o', needed by '/linux-build/bpf/test_verifier' The same command without the O= part works, presumably thanks to the implicit rule. Fix by restoring the explicit $(OUTPUT)/test_stub.o rule. Fixes: 74b5a5968fe8 ("selftests/bpf: Replace test_progs and test_maps w/ general rule") Signed-off-by: Ilya Leoshkevich Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20191028102110.7545-1-iii@linux.ibm.com --- tools/testing/selftests/bpf/Makefile | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 3209c208f3b3..b334a6db15c1 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -89,6 +89,9 @@ $(notdir $(TEST_GEN_PROGS) \ $(OUTPUT)/urandom_read: urandom_read.c $(CC) -o $@ $< -Wl,--build-id +$(OUTPUT)/test_stub.o: test_stub.c + $(CC) -c $(CFLAGS) -o $@ $< + BPFOBJ := $(OUTPUT)/libbpf.a $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): $(OUTPUT)/test_stub.o $(BPFOBJ) From 94ff9ebb49a546b7f009ed840bafa235c96d4c4b Mon Sep 17 00:00:00 2001 From: Magnus Karlsson Date: Fri, 25 Oct 2019 11:17:15 +0200 Subject: [PATCH 03/30] libbpf: Fix compatibility for kernels without need_wakeup When the need_wakeup flag was added to AF_XDP, the format of the XDP_MMAP_OFFSETS getsockopt was extended. Code was added to the kernel to take care of compatibility issues arrising from running applications using any of the two formats. However, libbpf was not extended to take care of the case when the application/libbpf uses the new format but the kernel only supports the old format. This patch adds support in libbpf for parsing the old format, before the need_wakeup flag was added, and emulating a set of static need_wakeup flags that will always work for the application. 
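For comparison, the extended format returned by need_wakeup-capable kernels looks roughly as follows (cf. the >= 5.4 include/uapi/linux/if_xdp.h); the v1 structs added below mirror it minus the flags member:

  struct xdp_ring_offset {
          __u64 producer;
          __u64 consumer;
          __u64 desc;
          __u64 flags;                /* added together with need_wakeup */
  };

  struct xdp_mmap_offsets {
          struct xdp_ring_offset rx;
          struct xdp_ring_offset tx;
          struct xdp_ring_offset fr;  /* fill ring */
          struct xdp_ring_offset cr;  /* completion ring */
  };
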
v2 -> v3: * Incorporated code improvements suggested by Jonathan Lemon v1 -> v2: * Rebased to bpf-next * Rewrote the code as the previous version made you blind Fixes: a4500432c2587cb2a ("libbpf: add support for need_wakeup flag in AF_XDP part") Reported-by: Eloy Degen Signed-off-by: Magnus Karlsson Signed-off-by: Alexei Starovoitov Acked-by: Jonathan Lemon Link: https://lore.kernel.org/bpf/1571995035-21889-1-git-send-email-magnus.karlsson@intel.com --- tools/lib/bpf/xsk.c | 83 ++++++++++++++++++++++++++++++++++++++------- 1 file changed, 71 insertions(+), 12 deletions(-) diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c index 0b6287d1b15e..d54111133123 100644 --- a/tools/lib/bpf/xsk.c +++ b/tools/lib/bpf/xsk.c @@ -73,6 +73,21 @@ struct xsk_nl_info { int fd; }; +/* Up until and including Linux 5.3 */ +struct xdp_ring_offset_v1 { + __u64 producer; + __u64 consumer; + __u64 desc; +}; + +/* Up until and including Linux 5.3 */ +struct xdp_mmap_offsets_v1 { + struct xdp_ring_offset_v1 rx; + struct xdp_ring_offset_v1 tx; + struct xdp_ring_offset_v1 fr; + struct xdp_ring_offset_v1 cr; +}; + int xsk_umem__fd(const struct xsk_umem *umem) { return umem ? umem->fd : -EINVAL; @@ -133,6 +148,58 @@ static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg, return 0; } +static void xsk_mmap_offsets_v1(struct xdp_mmap_offsets *off) +{ + struct xdp_mmap_offsets_v1 off_v1; + + /* getsockopt on a kernel <= 5.3 has no flags fields. + * Copy over the offsets to the correct places in the >=5.4 format + * and put the flags where they would have been on that kernel. + */ + memcpy(&off_v1, off, sizeof(off_v1)); + + off->rx.producer = off_v1.rx.producer; + off->rx.consumer = off_v1.rx.consumer; + off->rx.desc = off_v1.rx.desc; + off->rx.flags = off_v1.rx.consumer + sizeof(u32); + + off->tx.producer = off_v1.tx.producer; + off->tx.consumer = off_v1.tx.consumer; + off->tx.desc = off_v1.tx.desc; + off->tx.flags = off_v1.tx.consumer + sizeof(u32); + + off->fr.producer = off_v1.fr.producer; + off->fr.consumer = off_v1.fr.consumer; + off->fr.desc = off_v1.fr.desc; + off->fr.flags = off_v1.fr.consumer + sizeof(u32); + + off->cr.producer = off_v1.cr.producer; + off->cr.consumer = off_v1.cr.consumer; + off->cr.desc = off_v1.cr.desc; + off->cr.flags = off_v1.cr.consumer + sizeof(u32); +} + +static int xsk_get_mmap_offsets(int fd, struct xdp_mmap_offsets *off) +{ + socklen_t optlen; + int err; + + optlen = sizeof(*off); + err = getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, off, &optlen); + if (err) + return err; + + if (optlen == sizeof(*off)) + return 0; + + if (optlen == sizeof(struct xdp_mmap_offsets_v1)) { + xsk_mmap_offsets_v1(off); + return 0; + } + + return -EINVAL; +} + int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area, __u64 size, struct xsk_ring_prod *fill, struct xsk_ring_cons *comp, @@ -141,7 +208,6 @@ int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area, struct xdp_mmap_offsets off; struct xdp_umem_reg mr; struct xsk_umem *umem; - socklen_t optlen; void *map; int err; @@ -190,8 +256,7 @@ int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area, goto out_socket; } - optlen = sizeof(off); - err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen); + err = xsk_get_mmap_offsets(umem->fd, &off); if (err) { err = -errno; goto out_socket; @@ -514,7 +579,6 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname, struct sockaddr_xdp sxdp = {}; struct xdp_mmap_offsets off; struct xsk_socket *xsk; - socklen_t optlen; int err; if 
(!umem || !xsk_ptr || !rx || !tx) @@ -573,8 +637,7 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname, } } - optlen = sizeof(off); - err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen); + err = xsk_get_mmap_offsets(xsk->fd, &off); if (err) { err = -errno; goto out_socket; @@ -660,7 +723,6 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname, int xsk_umem__delete(struct xsk_umem *umem) { struct xdp_mmap_offsets off; - socklen_t optlen; int err; if (!umem) @@ -669,8 +731,7 @@ int xsk_umem__delete(struct xsk_umem *umem) if (umem->refcount) return -EBUSY; - optlen = sizeof(off); - err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen); + err = xsk_get_mmap_offsets(umem->fd, &off); if (!err) { munmap(umem->fill->ring - off.fr.desc, off.fr.desc + umem->config.fill_size * sizeof(__u64)); @@ -688,7 +749,6 @@ void xsk_socket__delete(struct xsk_socket *xsk) { size_t desc_sz = sizeof(struct xdp_desc); struct xdp_mmap_offsets off; - socklen_t optlen; int err; if (!xsk) @@ -699,8 +759,7 @@ void xsk_socket__delete(struct xsk_socket *xsk) close(xsk->prog_fd); } - optlen = sizeof(off); - err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen); + err = xsk_get_mmap_offsets(xsk->fd, &off); if (!err) { if (xsk->rx) { munmap(xsk->rx->ring - off.rx.desc, From d3a3aa0c59e83d8c13a758128b10dd459ce0d866 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 28 Oct 2019 16:37:27 -0700 Subject: [PATCH 04/30] libbpf: Fix off-by-one error in ELF sanity check libbpf's bpf_object__elf_collect() does simple sanity check after iterating over all ELF sections, if checks that .strtab index is correct. Unfortunately, due to section indices being 1-based, the check breaks for cases when .strtab ends up being the very last section in ELF. Fixes: 77ba9a5b48a7 ("tools lib bpf: Fetch map names from correct strtab") Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Link: https://lore.kernel.org/bpf/20191028233727.1286699-1-andriin@fb.com --- tools/lib/bpf/libbpf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index d71631a01926..5d15cc4dfcd6 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -1664,7 +1664,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj, bool relaxed_maps) } } - if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) { + if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) { pr_warn("Corrupted ELF file: index of strtab invalid\n"); return -LIBBPF_ERRNO__FORMAT; } From a566e35f1e8b4b3be1e96a804d1cca38b578167c Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 28 Oct 2019 22:59:53 -0700 Subject: [PATCH 05/30] libbpf: Don't use kernel-side u32 type in xsk.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit u32 is a kernel-side typedef. User-space library is supposed to use __u32. This breaks Github's projection of libbpf. Do u32 -> __u32 fix. 
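The distinction in a nutshell (user space must stick to the types exported through the uapi headers):

  #include <linux/types.h>

  __u32 flags;    /* uapi type, available to user-space builds */
  u32   flags2;   /* kernel-internal typedef, undefined outside the kernel tree */
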
Fixes: 94ff9ebb49a5 ("libbpf: Fix compatibility for kernels without need_wakeup") Signed-off-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov Acked-by: Björn Töpel Cc: Magnus Karlsson Link: https://lore.kernel.org/bpf/20191029055953.2461336-1-andriin@fb.com --- tools/lib/bpf/xsk.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/lib/bpf/xsk.c b/tools/lib/bpf/xsk.c index d54111133123..74d84f36a5b2 100644 --- a/tools/lib/bpf/xsk.c +++ b/tools/lib/bpf/xsk.c @@ -161,22 +161,22 @@ static void xsk_mmap_offsets_v1(struct xdp_mmap_offsets *off) off->rx.producer = off_v1.rx.producer; off->rx.consumer = off_v1.rx.consumer; off->rx.desc = off_v1.rx.desc; - off->rx.flags = off_v1.rx.consumer + sizeof(u32); + off->rx.flags = off_v1.rx.consumer + sizeof(__u32); off->tx.producer = off_v1.tx.producer; off->tx.consumer = off_v1.tx.consumer; off->tx.desc = off_v1.tx.desc; - off->tx.flags = off_v1.tx.consumer + sizeof(u32); + off->tx.flags = off_v1.tx.consumer + sizeof(__u32); off->fr.producer = off_v1.fr.producer; off->fr.consumer = off_v1.fr.consumer; off->fr.desc = off_v1.fr.desc; - off->fr.flags = off_v1.fr.consumer + sizeof(u32); + off->fr.flags = off_v1.fr.consumer + sizeof(__u32); off->cr.producer = off_v1.cr.producer; off->cr.consumer = off_v1.cr.consumer; off->cr.desc = off_v1.cr.desc; - off->cr.flags = off_v1.cr.consumer + sizeof(u32); + off->cr.flags = off_v1.cr.consumer + sizeof(__u32); } static int xsk_get_mmap_offsets(int fd, struct xdp_mmap_offsets *off) From 15ab09bdca616538597fe4d2eb4db3c2d28716ba Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Mon, 28 Oct 2019 20:24:26 -0700 Subject: [PATCH 06/30] bpf: Enforce 'return 0' in BTF-enabled raw_tp programs The return value of raw_tp programs is ignored by __bpf_trace_run() that calls them. The verifier also allows any value to be returned. For BTF-enabled raw_tp lets enforce 'return 0', so that return value can be used for something in the future. Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20191029032426.1206762-1-ast@kernel.org --- kernel/bpf/verifier.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index c59778c0fc4d..6b0de04f8b91 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -6279,6 +6279,11 @@ static int check_return_code(struct bpf_verifier_env *env) case BPF_PROG_TYPE_CGROUP_SYSCTL: case BPF_PROG_TYPE_CGROUP_SOCKOPT: break; + case BPF_PROG_TYPE_RAW_TRACEPOINT: + if (!env->prog->aux->attach_btf_id) + return 0; + range = tnum_const(0); + break; default: return 0; } From 9ffccb76062ab882e45bbfb9d370e366c27fa04b Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Tue, 29 Oct 2019 15:30:27 +0100 Subject: [PATCH 07/30] selftests/bpf: Test narrow load from bpf_sysctl.write There are tests for full and narrows loads from bpf_sysctl.file_pos, but for bpf_sysctl.write only full load is tested. Add the missing test. 
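For context, the program context being loaded from is defined in the uapi roughly as below. A BPF_H (2-byte) load reads only half of the 4-byte write field, and the half holding the 0/1 value sits at offset 0 on little-endian but at offset 2 on big-endian, which is what the #if in the test handles:

  struct bpf_sysctl {
          __u32 write;        /* 0 = sysctl is being read, 1 = written */
          __u32 file_pos;
  };
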
Suggested-by: Andrey Ignatov Signed-off-by: Ilya Leoshkevich Signed-off-by: Daniel Borkmann Acked-by: Andrey Ignatov Link: https://lore.kernel.org/bpf/20191029143027.28681-1-iii@linux.ibm.com --- tools/testing/selftests/bpf/test_sysctl.c | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/tools/testing/selftests/bpf/test_sysctl.c b/tools/testing/selftests/bpf/test_sysctl.c index a320e3844b17..7aff907003d3 100644 --- a/tools/testing/selftests/bpf/test_sysctl.c +++ b/tools/testing/selftests/bpf/test_sysctl.c @@ -120,6 +120,29 @@ static struct sysctl_test tests[] = { .newval = "(none)", /* same as default, should fail anyway */ .result = OP_EPERM, }, + { + .descr = "ctx:write sysctl:write read ok narrow", + .insns = { + /* u64 w = (u16)write & 1; */ +#if __BYTE_ORDER == __LITTLE_ENDIAN + BPF_LDX_MEM(BPF_H, BPF_REG_7, BPF_REG_1, + offsetof(struct bpf_sysctl, write)), +#else + BPF_LDX_MEM(BPF_H, BPF_REG_7, BPF_REG_1, + offsetof(struct bpf_sysctl, write) + 2), +#endif + BPF_ALU64_IMM(BPF_AND, BPF_REG_7, 1), + /* return 1 - w; */ + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_7), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SYSCTL, + .sysctl = "kernel/domainname", + .open_flags = O_WRONLY, + .newval = "(none)", /* same as default, should fail anyway */ + .result = OP_EPERM, + }, { .descr = "ctx:write sysctl:read write reject", .insns = { From 7e07e7aec56984364c7e16d242a0ec97b91861a8 Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Tue, 29 Oct 2019 18:29:16 +0100 Subject: [PATCH 08/30] bpf: Add s390 testing documentation This commits adds a document that explains how to test BPF in an s390 QEMU guest. Signed-off-by: Ilya Leoshkevich Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20191029172916.36528-1-iii@linux.ibm.com --- Documentation/bpf/index.rst | 9 ++ Documentation/bpf/s390.rst | 205 ++++++++++++++++++++++++++++++++++++ 2 files changed, 214 insertions(+) create mode 100644 Documentation/bpf/s390.rst diff --git a/Documentation/bpf/index.rst b/Documentation/bpf/index.rst index 801a6ed3f2e5..4f5410b61441 100644 --- a/Documentation/bpf/index.rst +++ b/Documentation/bpf/index.rst @@ -47,6 +47,15 @@ Program types prog_flow_dissector +Testing BPF +=========== + +.. toctree:: + :maxdepth: 1 + + s390 + + .. Links: .. _Documentation/networking/filter.txt: ../networking/filter.txt .. _man-pages: https://www.kernel.org/doc/man-pages/ diff --git a/Documentation/bpf/s390.rst b/Documentation/bpf/s390.rst new file mode 100644 index 000000000000..21ecb309daea --- /dev/null +++ b/Documentation/bpf/s390.rst @@ -0,0 +1,205 @@ +=================== +Testing BPF on s390 +=================== + +1. Introduction +*************** + +IBM Z are mainframe computers, which are descendants of IBM System/360 from +year 1964. They are supported by the Linux kernel under the name "s390". This +document describes how to test BPF in an s390 QEMU guest. + +2. One-time setup +***************** + +The following is required to build and run the test suite: + + * s390 GCC + * s390 development headers and libraries + * Clang with BPF support + * QEMU with s390 support + * Disk image with s390 rootfs + +Debian supports installing compiler and libraries for s390 out of the box. 
+Users of other distros may use debootstrap in order to set up a Debian chroot:: + + sudo debootstrap \ + --variant=minbase \ + --include=sudo \ + testing \ + ./s390-toolchain + sudo mount --rbind /dev ./s390-toolchain/dev + sudo mount --rbind /proc ./s390-toolchain/proc + sudo mount --rbind /sys ./s390-toolchain/sys + sudo chroot ./s390-toolchain + +Once on Debian, the build prerequisites can be installed as follows:: + + sudo dpkg --add-architecture s390x + sudo apt-get update + sudo apt-get install \ + bc \ + bison \ + cmake \ + debootstrap \ + dwarves \ + flex \ + g++ \ + gcc \ + g++-s390x-linux-gnu \ + gcc-s390x-linux-gnu \ + gdb-multiarch \ + git \ + make \ + python3 \ + qemu-system-misc \ + qemu-utils \ + rsync \ + libcap-dev:s390x \ + libelf-dev:s390x \ + libncurses-dev + +Latest Clang targeting BPF can be installed as follows:: + + git clone https://github.com/llvm/llvm-project.git + ln -s ../../clang llvm-project/llvm/tools/ + mkdir llvm-project-build + cd llvm-project-build + cmake \ + -DLLVM_TARGETS_TO_BUILD=BPF \ + -DCMAKE_BUILD_TYPE=Release \ + -DCMAKE_INSTALL_PREFIX=/opt/clang-bpf \ + ../llvm-project/llvm + make + sudo make install + export PATH=/opt/clang-bpf/bin:$PATH + +The disk image can be prepared using a loopback mount and debootstrap:: + + qemu-img create -f raw ./s390.img 1G + sudo losetup -f ./s390.img + sudo mkfs.ext4 /dev/loopX + mkdir ./s390.rootfs + sudo mount /dev/loopX ./s390.rootfs + sudo debootstrap \ + --foreign \ + --arch=s390x \ + --variant=minbase \ + --include=" \ + iproute2, \ + iputils-ping, \ + isc-dhcp-client, \ + kmod, \ + libcap2, \ + libelf1, \ + netcat, \ + procps" \ + testing \ + ./s390.rootfs + sudo umount ./s390.rootfs + sudo losetup -d /dev/loopX + +3. Compilation +************** + +In addition to the usual Kconfig options required to run the BPF test suite, it +is also helpful to select:: + + CONFIG_NET_9P=y + CONFIG_9P_FS=y + CONFIG_NET_9P_VIRTIO=y + CONFIG_VIRTIO_PCI=y + +as that would enable a very easy way to share files with the s390 virtual +machine. + +Compiling kernel, modules and testsuite, as well as preparing gdb scripts to +simplify debugging, can be done using the following commands:: + + make ARCH=s390 CROSS_COMPILE=s390x-linux-gnu- menuconfig + make ARCH=s390 CROSS_COMPILE=s390x-linux-gnu- bzImage modules scripts_gdb + make ARCH=s390 CROSS_COMPILE=s390x-linux-gnu- \ + -C tools/testing/selftests \ + TARGETS=bpf \ + INSTALL_PATH=$PWD/tools/testing/selftests/kselftest_install \ + install + +4. Running the test suite +************************* + +The virtual machine can be started as follows:: + + qemu-system-s390x \ + -cpu max,zpci=on \ + -smp 2 \ + -m 4G \ + -kernel linux/arch/s390/boot/compressed/vmlinux \ + -drive file=./s390.img,if=virtio,format=raw \ + -nographic \ + -append 'root=/dev/vda rw console=ttyS1' \ + -virtfs local,path=./linux,security_model=none,mount_tag=linux \ + -object rng-random,filename=/dev/urandom,id=rng0 \ + -device virtio-rng-ccw,rng=rng0 \ + -netdev user,id=net0 \ + -device virtio-net-ccw,netdev=net0 + +When using this on a real IBM Z, ``-enable-kvm`` may be added for better +performance. 
When starting the virtual machine for the first time, disk image +setup must be finalized using the following command:: + + /debootstrap/debootstrap --second-stage + +Directory with the code built on the host as well as ``/proc`` and ``/sys`` +need to be mounted as follows:: + + mkdir -p /linux + mount -t 9p linux /linux + mount -t proc proc /proc + mount -t sysfs sys /sys + +After that, the test suite can be run using the following commands:: + + cd /linux/tools/testing/selftests/kselftest_install + ./run_kselftest.sh + +As usual, tests can be also run individually:: + + cd /linux/tools/testing/selftests/bpf + ./test_verifier + +5. Debugging +************ + +It is possible to debug the s390 kernel using QEMU GDB stub, which is activated +by passing ``-s`` to QEMU. + +It is preferable to turn KASLR off, so that gdb would know where to find the +kernel image in memory, by building the kernel with:: + + RANDOMIZE_BASE=n + +GDB can then be attached using the following command:: + + gdb-multiarch -ex 'target remote localhost:1234' ./vmlinux + +6. Network +********** + +In case one needs to use the network in the virtual machine in order to e.g. +install additional packages, it can be configured using:: + + dhclient eth0 + +7. Links +******** + +This document is a compilation of techniques, whose more comprehensive +descriptions can be found by following these links: + +- `Debootstrap `_ +- `Multiarch `_ +- `Building LLVM `_ +- `Cross-compiling the kernel `_ +- `QEMU s390x Guest Support `_ +- `Plan 9 folder sharing over Virtio `_ +- `Using GDB with QEMU `_ From af21c717f4757985771e2cee0b6e0cf21b831477 Mon Sep 17 00:00:00 2001 From: Shmulik Ladkani Date: Fri, 25 Oct 2019 16:42:22 +0300 Subject: [PATCH 09/30] bpf, testing: Refactor test_skb_segment() for testing skb_segment() on different skbs Currently, test_skb_segment() builds a single test skb and runs skb_segment() on it. Extend test_skb_segment() so it processes an array of numerous skb/feature pairs to test. 
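With the refactor, adding a case only means appending an entry to the skb_segment_tests[] array introduced below; a hypothetical example (builder and feature set made up purely for illustration):

  {
          .descr     = "my_new_case",
          .build_skb = build_my_test_skb,   /* skb builder to be provided */
          .features  = NETIF_F_SG | NETIF_F_HW_CSUM,
  },
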
Signed-off-by: Shmulik Ladkani Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20191025134223.2761-2-shmulik.ladkani@gmail.com --- lib/test_bpf.c | 51 ++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 41 insertions(+), 10 deletions(-) diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 5ef3eccee27c..c952df82b515 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -6859,34 +6859,65 @@ static __init struct sk_buff *build_test_skb(void) return NULL; } -static __init int test_skb_segment(void) -{ +struct skb_segment_test { + const char *descr; + struct sk_buff *(*build_skb)(void); netdev_features_t features; +}; + +static struct skb_segment_test skb_segment_tests[] __initconst = { + { + .descr = "gso_with_rx_frags", + .build_skb = build_test_skb, + .features = NETIF_F_SG | NETIF_F_GSO_PARTIAL | NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM + } +}; + +static __init int test_skb_segment_single(const struct skb_segment_test *test) +{ struct sk_buff *skb, *segs; int ret = -1; - features = NETIF_F_SG | NETIF_F_GSO_PARTIAL | NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM; - features |= NETIF_F_RXCSUM; - skb = build_test_skb(); + skb = test->build_skb(); if (!skb) { pr_info("%s: failed to build_test_skb", __func__); goto done; } - segs = skb_segment(skb, features); + segs = skb_segment(skb, test->features); if (!IS_ERR(segs)) { kfree_skb_list(segs); ret = 0; - pr_info("%s: success in skb_segment!", __func__); - } else { - pr_info("%s: failed in skb_segment!", __func__); } kfree_skb(skb); done: return ret; } +static __init int test_skb_segment(void) +{ + int i, err_cnt = 0, pass_cnt = 0; + + for (i = 0; i < ARRAY_SIZE(skb_segment_tests); i++) { + const struct skb_segment_test *test = &skb_segment_tests[i]; + + pr_info("#%d %s ", i, test->descr); + + if (test_skb_segment_single(test)) { + pr_cont("FAIL\n"); + err_cnt++; + } else { + pr_cont("PASS\n"); + pass_cnt++; + } + } + + pr_info("%s: Summary: %d PASSED, %d FAILED\n", __func__, + pass_cnt, err_cnt); + return err_cnt ? -EINVAL : 0; +} + static __init int test_bpf(void) { int i, err_cnt = 0, pass_cnt = 0; From cf204a718357c3c28557cc6bdc77a3adc33d0741 Mon Sep 17 00:00:00 2001 From: Shmulik Ladkani Date: Fri, 25 Oct 2019 16:42:23 +0300 Subject: [PATCH 10/30] bpf, testing: Introduce 'gso_linear_no_head_frag' skb_segment test Following reports of skb_segment() hitting a BUG_ON when working on GROed skbs which have their gso_size mangled (e.g. after a bpf_skb_change_proto call), add a reproducer test that mimics the input skbs that lead to the mentioned BUG_ON as in [1] and validates the fix submitted in [2]. 
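The in-the-wild trigger is, roughly, a tc BPF program doing 4-to-6 protocol translation on GROed traffic. A minimal sketch follows; the section name, headers and program body are illustrative only (selftests-style helper headers assumed):

  #include <linux/bpf.h>
  #include <linux/if_ether.h>
  #include <linux/pkt_cls.h>
  #include "bpf_helpers.h"
  #include "bpf_endian.h"

  SEC("classifier")
  int ipv4_to_ipv6(struct __sk_buff *skb)
  {
          /* on GSO skbs this reduces gso_size, resets gso_segs and sets
           * SKB_GSO_DODGY - exactly what the new test case mimics
           */
          if (bpf_skb_change_proto(skb, bpf_htons(ETH_P_IPV6), 0))
                  return TC_ACT_SHOT;
          /* ... rewrite the IPv4 header into an IPv6 one here ... */
          return TC_ACT_OK;
  }

  char _license[] SEC("license") = "GPL";
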
[1] https://lists.openwall.net/netdev/2019/08/26/110 [2] commit 3dcbdb134f32 ("net: gso: Fix skb_segment splat when splitting gso_size mangled skb having linear-headed frag_list") Signed-off-by: Shmulik Ladkani Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20191025134223.2761-3-shmulik.ladkani@gmail.com --- lib/test_bpf.c | 63 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 63 insertions(+) diff --git a/lib/test_bpf.c b/lib/test_bpf.c index c952df82b515..cecb230833be 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -6859,6 +6859,60 @@ static __init struct sk_buff *build_test_skb(void) return NULL; } +static __init struct sk_buff *build_test_skb_linear_no_head_frag(void) +{ + unsigned int alloc_size = 2000; + unsigned int headroom = 102, doffset = 72, data_size = 1308; + struct sk_buff *skb[2]; + int i; + + /* skbs linked in a frag_list, both with linear data, with head_frag=0 + * (data allocated by kmalloc), both have tcp data of 1308 bytes + * (total payload is 2616 bytes). + * Data offset is 72 bytes (40 ipv6 hdr, 32 tcp hdr). Some headroom. + */ + for (i = 0; i < 2; i++) { + skb[i] = alloc_skb(alloc_size, GFP_KERNEL); + if (!skb[i]) { + if (i == 0) + goto err_skb0; + else + goto err_skb1; + } + + skb[i]->protocol = htons(ETH_P_IPV6); + skb_reserve(skb[i], headroom); + skb_put(skb[i], doffset + data_size); + skb_reset_network_header(skb[i]); + if (i == 0) + skb_reset_mac_header(skb[i]); + else + skb_set_mac_header(skb[i], -ETH_HLEN); + __skb_pull(skb[i], doffset); + } + + /* setup shinfo. + * mimic bpf_skb_proto_4_to_6, which resets gso_segs and assigns a + * reduced gso_size. + */ + skb_shinfo(skb[0])->gso_size = 1288; + skb_shinfo(skb[0])->gso_type = SKB_GSO_TCPV6 | SKB_GSO_DODGY; + skb_shinfo(skb[0])->gso_segs = 0; + skb_shinfo(skb[0])->frag_list = skb[1]; + + /* adjust skb[0]'s len */ + skb[0]->len += skb[1]->len; + skb[0]->data_len += skb[1]->len; + skb[0]->truesize += skb[1]->truesize; + + return skb[0]; + +err_skb1: + kfree_skb(skb[0]); +err_skb0: + return NULL; +} + struct skb_segment_test { const char *descr; struct sk_buff *(*build_skb)(void); @@ -6871,6 +6925,15 @@ static struct skb_segment_test skb_segment_tests[] __initconst = { .build_skb = build_test_skb, .features = NETIF_F_SG | NETIF_F_GSO_PARTIAL | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM + }, + { + .descr = "gso_linear_no_head_frag", + .build_skb = build_test_skb_linear_no_head_frag, + .features = NETIF_F_SG | NETIF_F_FRAGLIST | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_GSO | + NETIF_F_LLTX_BIT | NETIF_F_GRO | + NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_STAG_TX_BIT } }; From af91acbc62999d62e2ca1e80f660d20561ca55d3 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Wed, 30 Oct 2019 16:30:19 -0700 Subject: [PATCH 11/30] bpf: Fix bpf jit kallsym access Jiri reported crash when JIT is on, but net.core.bpf_jit_kallsyms is off. bpf_prog_kallsyms_find() was skipping addr->bpf_prog resolution logic in oops and stack traces. That's incorrect. It should only skip addr->name resolution for 'cat /proc/kallsyms'. That's what bpf_jit_kallsyms and bpf_jit_harden protect. 
Fixes: 3dec541b2e63 ("bpf: Add support for BTF pointers to x86 JIT") Reported-by: Jiri Olsa Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20191030233019.1187404-1-ast@kernel.org --- kernel/bpf/core.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 673f5d40a93e..8d3fbc86ca5e 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -668,9 +668,6 @@ static struct bpf_prog *bpf_prog_kallsyms_find(unsigned long addr) { struct latch_tree_node *n; - if (!bpf_jit_kallsyms_enabled()) - return NULL; - n = latch_tree_find((void *)addr, &bpf_tree, &bpf_tree_ops); return n ? container_of(n, struct bpf_prog_aux, ksym_tnode)->prog : From f1b9509c2fb0ef4db8d22dac9aef8e856a5d81f6 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Wed, 30 Oct 2019 15:32:11 -0700 Subject: [PATCH 12/30] bpf: Replace prog_raw_tp+btf_id with prog_tracing The bpf program type raw_tp together with 'expected_attach_type' was the most appropriate api to indicate BTF-enabled raw_tp programs. But during development it became apparent that 'expected_attach_type' cannot be used and new 'attach_btf_id' field had to be introduced. Which means that the information is duplicated in two fields where one of them is ignored. Clean it up by introducing new program type where both 'expected_attach_type' and 'attach_btf_id' fields have specific meaning. In the future 'expected_attach_type' will be extended with other attach points that have similar semantics to raw_tp. This patch is replacing BTF-enabled BPF_PROG_TYPE_RAW_TRACEPOINT with prog_type = BPF_RPOG_TYPE_TRACING expected_attach_type = BPF_TRACE_RAW_TP attach_btf_id = btf_id of raw tracepoint inside the kernel Future patches will add expected_attach_type = BPF_TRACE_FENTRY or BPF_TRACE_FEXIT where programs have the same input context and the same helpers, but different attach points. Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20191030223212.953010-2-ast@kernel.org --- include/linux/bpf.h | 5 +++++ include/linux/bpf_types.h | 1 + include/uapi/linux/bpf.h | 2 ++ kernel/bpf/syscall.c | 6 +++--- kernel/bpf/verifier.c | 34 +++++++++++++++++++++--------- kernel/trace/bpf_trace.c | 44 ++++++++++++++++++++++++++++++++------- 6 files changed, 71 insertions(+), 21 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 171be30fe0ae..80158cff44bd 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -373,6 +373,11 @@ enum bpf_cgroup_storage_type { #define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX +/* The longest tracepoint has 12 args. 
+ * See include/trace/bpf_probe.h + */ +#define MAX_BPF_FUNC_ARGS 12 + struct bpf_prog_stats { u64 cnt; u64 nsecs; diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index 36a9c2325176..de14872b01ba 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h @@ -26,6 +26,7 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint) BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event) BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT, raw_tracepoint) BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, raw_tracepoint_writable) +BPF_PROG_TYPE(BPF_PROG_TYPE_TRACING, tracing) #endif #ifdef CONFIG_CGROUP_BPF BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 4af8b0819a32..a6bf19dabaab 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -173,6 +173,7 @@ enum bpf_prog_type { BPF_PROG_TYPE_CGROUP_SYSCTL, BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, BPF_PROG_TYPE_CGROUP_SOCKOPT, + BPF_PROG_TYPE_TRACING, }; enum bpf_attach_type { @@ -199,6 +200,7 @@ enum bpf_attach_type { BPF_CGROUP_UDP6_RECVMSG, BPF_CGROUP_GETSOCKOPT, BPF_CGROUP_SETSOCKOPT, + BPF_TRACE_RAW_TP, __MAX_BPF_ATTACH_TYPE }; diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index ff5225759553..985d01ced196 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -1571,7 +1571,7 @@ bpf_prog_load_check_attach(enum bpf_prog_type prog_type, u32 btf_id) { switch (prog_type) { - case BPF_PROG_TYPE_RAW_TRACEPOINT: + case BPF_PROG_TYPE_TRACING: if (btf_id > BTF_MAX_TYPE) return -EINVAL; break; @@ -1833,13 +1833,13 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr) return PTR_ERR(prog); if (prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT && + prog->type != BPF_PROG_TYPE_TRACING && prog->type != BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE) { err = -EINVAL; goto out_put_prog; } - if (prog->type == BPF_PROG_TYPE_RAW_TRACEPOINT && - prog->aux->attach_btf_id) { + if (prog->type == BPF_PROG_TYPE_TRACING) { if (attr->raw_tracepoint.name) { /* raw_tp name should not be specified in raw_tp * programs that were verified via in-kernel BTF info diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 6b0de04f8b91..2f2374967b36 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -9381,24 +9381,36 @@ static int check_attach_btf_id(struct bpf_verifier_env *env) { struct bpf_prog *prog = env->prog; u32 btf_id = prog->aux->attach_btf_id; + const char prefix[] = "btf_trace_"; const struct btf_type *t; const char *tname; - if (prog->type == BPF_PROG_TYPE_RAW_TRACEPOINT && btf_id) { - const char prefix[] = "btf_trace_"; + if (prog->type != BPF_PROG_TYPE_TRACING) + return 0; - t = btf_type_by_id(btf_vmlinux, btf_id); - if (!t) { - verbose(env, "attach_btf_id %u is invalid\n", btf_id); - return -EINVAL; - } + if (!btf_id) { + verbose(env, "Tracing programs must provide btf_id\n"); + return -EINVAL; + } + t = btf_type_by_id(btf_vmlinux, btf_id); + if (!t) { + verbose(env, "attach_btf_id %u is invalid\n", btf_id); + return -EINVAL; + } + tname = btf_name_by_offset(btf_vmlinux, t->name_off); + if (!tname) { + verbose(env, "attach_btf_id %u doesn't have a name\n", btf_id); + return -EINVAL; + } + + switch (prog->expected_attach_type) { + case BPF_TRACE_RAW_TP: if (!btf_type_is_typedef(t)) { verbose(env, "attach_btf_id %u is not a typedef\n", btf_id); return -EINVAL; } - tname = btf_name_by_offset(btf_vmlinux, t->name_off); - if (!tname || strncmp(prefix, tname, sizeof(prefix) - 1)) { + if (strncmp(prefix, tname, sizeof(prefix) - 1)) { 
verbose(env, "attach_btf_id %u points to wrong type name %s\n", btf_id, tname); return -EINVAL; @@ -9419,8 +9431,10 @@ static int check_attach_btf_id(struct bpf_verifier_env *env) prog->aux->attach_func_name = tname; prog->aux->attach_func_proto = t; prog->aux->attach_btf_trace = true; + return 0; + default: + return -EINVAL; } - return 0; } int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 571c25d60710..f50bf19f7a05 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1055,10 +1055,6 @@ raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) switch (func_id) { case BPF_FUNC_perf_event_output: return &bpf_perf_event_output_proto_raw_tp; -#ifdef CONFIG_NET - case BPF_FUNC_skb_output: - return &bpf_skb_output_proto; -#endif case BPF_FUNC_get_stackid: return &bpf_get_stackid_proto_raw_tp; case BPF_FUNC_get_stack: @@ -1068,20 +1064,44 @@ raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) } } +static const struct bpf_func_proto * +tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) +{ + switch (func_id) { +#ifdef CONFIG_NET + case BPF_FUNC_skb_output: + return &bpf_skb_output_proto; +#endif + default: + return raw_tp_prog_func_proto(func_id, prog); + } +} + static bool raw_tp_prog_is_valid_access(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { - /* largest tracepoint in the kernel has 12 args */ - if (off < 0 || off >= sizeof(__u64) * 12) + if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) + return false; + if (type != BPF_READ) + return false; + if (off % size != 0) + return false; + return true; +} + +static bool tracing_prog_is_valid_access(int off, int size, + enum bpf_access_type type, + const struct bpf_prog *prog, + struct bpf_insn_access_aux *info) +{ + if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) return false; if (type != BPF_READ) return false; if (off % size != 0) return false; - if (!prog->aux->attach_btf_id) - return true; return btf_ctx_access(off, size, type, prog, info); } @@ -1093,6 +1113,14 @@ const struct bpf_verifier_ops raw_tracepoint_verifier_ops = { const struct bpf_prog_ops raw_tracepoint_prog_ops = { }; +const struct bpf_verifier_ops tracing_verifier_ops = { + .get_func_proto = tracing_prog_func_proto, + .is_valid_access = tracing_prog_is_valid_access, +}; + +const struct bpf_prog_ops tracing_prog_ops = { +}; + static bool raw_tp_writable_prog_is_valid_access(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, From 12a8654b2e5aab37b22c9608d008f9f0565862c0 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Wed, 30 Oct 2019 15:32:12 -0700 Subject: [PATCH 13/30] libbpf: Add support for prog_tracing Cleanup libbpf from expected_attach_type == attach_btf_id hack and introduce BPF_PROG_TYPE_TRACING. 
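Program sources are unaffected. A BTF-enabled raw_tp program still looks like the sketch below (modelled on the kfree_skb selftest; the args struct is declared by the program itself), but libbpf now loads it as prog_type BPF_PROG_TYPE_TRACING with expected_attach_type BPF_TRACE_RAW_TP and resolves attach_btf_id from vmlinux BTF:

  #include <linux/bpf.h>
  #include "bpf_helpers.h"

  /* layout of the first tracepoint args, declared by the program itself */
  struct trace_kfree_skb {
          void *skb;
          void *location;
  };

  SEC("tp_btf/kfree_skb")
  int trace_kfree_skb(struct trace_kfree_skb *ctx)
  {
          /* ctx->skb is verified as a BTF-typed pointer */
          return 0;   /* BTF-enabled raw_tp must return 0, see patch 6 */
  }

  char _license[] SEC("license") = "GPL";
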
Signed-off-by: Alexei Starovoitov Signed-off-by: Daniel Borkmann Acked-by: Andrii Nakryiko Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/bpf/20191030223212.953010-3-ast@kernel.org --- tools/include/uapi/linux/bpf.h | 2 + tools/lib/bpf/bpf.c | 8 ++-- tools/lib/bpf/bpf.h | 5 ++- tools/lib/bpf/libbpf.c | 79 ++++++++++++++++++++++++---------- tools/lib/bpf/libbpf.h | 2 + tools/lib/bpf/libbpf.map | 2 + tools/lib/bpf/libbpf_probes.c | 1 + 7 files changed, 71 insertions(+), 28 deletions(-) diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 4af8b0819a32..a6bf19dabaab 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -173,6 +173,7 @@ enum bpf_prog_type { BPF_PROG_TYPE_CGROUP_SYSCTL, BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, BPF_PROG_TYPE_CGROUP_SOCKOPT, + BPF_PROG_TYPE_TRACING, }; enum bpf_attach_type { @@ -199,6 +200,7 @@ enum bpf_attach_type { BPF_CGROUP_UDP6_RECVMSG, BPF_CGROUP_GETSOCKOPT, BPF_CGROUP_SETSOCKOPT, + BPF_TRACE_RAW_TP, __MAX_BPF_ATTACH_TYPE }; diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index 79046067720f..ca0d635b1d5e 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -228,9 +228,10 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, memset(&attr, 0, sizeof(attr)); attr.prog_type = load_attr->prog_type; attr.expected_attach_type = load_attr->expected_attach_type; - if (attr.prog_type == BPF_PROG_TYPE_RAW_TRACEPOINT) - /* expected_attach_type is ignored for tracing progs */ - attr.attach_btf_id = attr.expected_attach_type; + if (attr.prog_type == BPF_PROG_TYPE_TRACING) + attr.attach_btf_id = load_attr->attach_btf_id; + else + attr.prog_ifindex = load_attr->prog_ifindex; attr.insn_cnt = (__u32)load_attr->insns_cnt; attr.insns = ptr_to_u64(load_attr->insns); attr.license = ptr_to_u64(load_attr->license); @@ -245,7 +246,6 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr, } attr.kern_version = load_attr->kern_version; - attr.prog_ifindex = load_attr->prog_ifindex; attr.prog_btf_fd = load_attr->prog_btf_fd; attr.func_info_rec_size = load_attr->func_info_rec_size; attr.func_info_cnt = load_attr->func_info_cnt; diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 0db01334740f..1c53bc5b4b3c 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -78,7 +78,10 @@ struct bpf_load_program_attr { size_t insns_cnt; const char *license; __u32 kern_version; - __u32 prog_ifindex; + union { + __u32 prog_ifindex; + __u32 attach_btf_id; + }; __u32 prog_btf_fd; __u32 func_info_rec_size; const void *func_info; diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 5d15cc4dfcd6..c80f316f1320 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -188,6 +188,7 @@ struct bpf_program { bpf_program_clear_priv_t clear_priv; enum bpf_attach_type expected_attach_type; + __u32 attach_btf_id; void *func_info; __u32 func_info_rec_size; __u32 func_info_cnt; @@ -3446,6 +3447,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, load_attr.line_info_cnt = prog->line_info_cnt; load_attr.log_level = prog->log_level; load_attr.prog_flags = prog->prog_flags; + load_attr.attach_btf_id = prog->attach_btf_id; retry_load: log_buf = malloc(log_buf_size); @@ -3607,6 +3609,8 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level) return 0; } +static int libbpf_attach_btf_id_by_name(const char *name, __u32 *btf_id); + static struct bpf_object * __bpf_object__open(const char *path, const void *obj_buf, size_t 
obj_buf_sz, struct bpf_object_open_opts *opts) @@ -3656,6 +3660,7 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz, bpf_object__for_each_program(prog, obj) { enum bpf_prog_type prog_type; enum bpf_attach_type attach_type; + __u32 btf_id; err = libbpf_prog_type_by_name(prog->section_name, &prog_type, &attach_type); @@ -3667,6 +3672,12 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz, bpf_program__set_type(prog, prog_type); bpf_program__set_expected_attach_type(prog, attach_type); + if (prog_type == BPF_PROG_TYPE_TRACING) { + err = libbpf_attach_btf_id_by_name(prog->section_name, &btf_id); + if (err) + goto out; + prog->attach_btf_id = btf_id; + } } return obj; @@ -4518,6 +4529,7 @@ BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT); BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT); BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP); BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT); +BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING); enum bpf_attach_type bpf_program__get_expected_attach_type(struct bpf_program *prog) @@ -4546,7 +4558,8 @@ void bpf_program__set_expected_attach_type(struct bpf_program *prog, BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, 0, eatype) /* Programs that use BTF to identify attach point */ -#define BPF_PROG_BTF(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 1, 0) +#define BPF_PROG_BTF(string, ptype, eatype) \ + BPF_PROG_SEC_IMPL(string, ptype, eatype, 0, 1, 0) /* Programs that can be attached but attach type can't be identified by section * name. Kept for backward compatibility. @@ -4573,7 +4586,8 @@ static const struct { BPF_PROG_SEC("tp/", BPF_PROG_TYPE_TRACEPOINT), BPF_PROG_SEC("raw_tracepoint/", BPF_PROG_TYPE_RAW_TRACEPOINT), BPF_PROG_SEC("raw_tp/", BPF_PROG_TYPE_RAW_TRACEPOINT), - BPF_PROG_BTF("tp_btf/", BPF_PROG_TYPE_RAW_TRACEPOINT), + BPF_PROG_BTF("tp_btf/", BPF_PROG_TYPE_TRACING, + BPF_TRACE_RAW_TP), BPF_PROG_SEC("xdp", BPF_PROG_TYPE_XDP), BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT), BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN), @@ -4678,27 +4692,6 @@ int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type, continue; *prog_type = section_names[i].prog_type; *expected_attach_type = section_names[i].expected_attach_type; - if (section_names[i].is_attach_btf) { - struct btf *btf = bpf_core_find_kernel_btf(); - char raw_tp_btf_name[128] = "btf_trace_"; - char *dst = raw_tp_btf_name + sizeof("btf_trace_") - 1; - int ret; - - if (IS_ERR(btf)) { - pr_warn("vmlinux BTF is not found\n"); - return -EINVAL; - } - /* prepend "btf_trace_" prefix per kernel convention */ - strncat(dst, name + section_names[i].len, - sizeof(raw_tp_btf_name) - sizeof("btf_trace_")); - ret = btf__find_by_name(btf, raw_tp_btf_name); - btf__free(btf); - if (ret <= 0) { - pr_warn("%s is not found in vmlinux BTF\n", dst); - return -EINVAL; - } - *expected_attach_type = ret; - } return 0; } pr_warn("failed to guess program type based on ELF section name '%s'\n", name); @@ -4711,6 +4704,46 @@ int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type, return -ESRCH; } +#define BTF_PREFIX "btf_trace_" +static int libbpf_attach_btf_id_by_name(const char *name, __u32 *btf_id) +{ + struct btf *btf = bpf_core_find_kernel_btf(); + char raw_tp_btf_name[128] = BTF_PREFIX; + char *dst = raw_tp_btf_name + sizeof(BTF_PREFIX) - 1; + int ret, i, err = -EINVAL; + + if (IS_ERR(btf)) { + pr_warn("vmlinux BTF is not found\n"); + return -EINVAL; + } + + if (!name) + goto out; + + for (i 
= 0; i < ARRAY_SIZE(section_names); i++) { + if (!section_names[i].is_attach_btf) + continue; + if (strncmp(name, section_names[i].sec, section_names[i].len)) + continue; + /* prepend "btf_trace_" prefix per kernel convention */ + strncat(dst, name + section_names[i].len, + sizeof(raw_tp_btf_name) - sizeof(BTF_PREFIX)); + ret = btf__find_by_name(btf, raw_tp_btf_name); + if (ret <= 0) { + pr_warn("%s is not found in vmlinux BTF\n", dst); + goto out; + } + *btf_id = ret; + err = 0; + goto out; + } + pr_warn("failed to identify btf_id based on ELF section name '%s'\n", name); + err = -ESRCH; +out: + btf__free(btf); + return err; +} + int libbpf_attach_type_by_name(const char *name, enum bpf_attach_type *attach_type) { diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index c63e2ff84abc..2b126ee5e173 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -307,6 +307,7 @@ LIBBPF_API int bpf_program__set_sched_cls(struct bpf_program *prog); LIBBPF_API int bpf_program__set_sched_act(struct bpf_program *prog); LIBBPF_API int bpf_program__set_xdp(struct bpf_program *prog); LIBBPF_API int bpf_program__set_perf_event(struct bpf_program *prog); +LIBBPF_API int bpf_program__set_tracing(struct bpf_program *prog); LIBBPF_API enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog); LIBBPF_API void bpf_program__set_type(struct bpf_program *prog, @@ -326,6 +327,7 @@ LIBBPF_API bool bpf_program__is_sched_cls(const struct bpf_program *prog); LIBBPF_API bool bpf_program__is_sched_act(const struct bpf_program *prog); LIBBPF_API bool bpf_program__is_xdp(const struct bpf_program *prog); LIBBPF_API bool bpf_program__is_perf_event(const struct bpf_program *prog); +LIBBPF_API bool bpf_program__is_tracing(const struct bpf_program *prog); /* * No need for __attribute__((packed)), all members of 'bpf_map_def' diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index d1473ea4d7a5..69dded5af00b 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -197,4 +197,6 @@ LIBBPF_0.0.6 { bpf_object__open_mem; bpf_program__get_expected_attach_type; bpf_program__get_type; + bpf_program__is_tracing; + bpf_program__set_tracing; } LIBBPF_0.0.5; diff --git a/tools/lib/bpf/libbpf_probes.c b/tools/lib/bpf/libbpf_probes.c index 4b0b0364f5fc..a9eb8b322671 100644 --- a/tools/lib/bpf/libbpf_probes.c +++ b/tools/lib/bpf/libbpf_probes.c @@ -102,6 +102,7 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns, case BPF_PROG_TYPE_FLOW_DISSECTOR: case BPF_PROG_TYPE_CGROUP_SYSCTL: case BPF_PROG_TYPE_CGROUP_SOCKOPT: + case BPF_PROG_TYPE_TRACING: default: break; } From 75b0bfd2e1a7d0c698b617f5e5ceaf056bdcaef7 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Thu, 31 Oct 2019 17:51:27 -0700 Subject: [PATCH 14/30] Revert "selftests: bpf: Don't try to read files without read permission" This reverts commit 5bc60de50dfe ("selftests: bpf: Don't try to read files without read permission"). Quoted commit does not work at all, and was never tested. Script requires root permissions (and tests for them) and os.access() will always return true for root. The correct fix is needed in the bpf tree, so let's just revert and save ourselves the merge conflict. 
Signed-off-by: Jakub Kicinski Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Cc: Jiri Pirko Link: https://lore.kernel.org/bpf/20191101005127.1355-1-jakub.kicinski@netronome.com --- tools/testing/selftests/bpf/test_offload.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py index c44c650bde3a..15a666329a34 100755 --- a/tools/testing/selftests/bpf/test_offload.py +++ b/tools/testing/selftests/bpf/test_offload.py @@ -312,7 +312,7 @@ class DebugfsDir: if f == "ports": continue p = os.path.join(path, f) - if os.path.isfile(p) and os.access(p, os.R_OK): + if os.path.isfile(p): _, out = cmd('cat %s/%s' % (path, f)) dfs[f] = out.strip() elif os.path.isdir(p): From 64fe8c061de7d7ffdfdc104a57d48d645ae0ac4f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20T=C3=B6pel?= Date: Fri, 1 Nov 2019 12:03:44 +0100 Subject: [PATCH 15/30] xsk: Store struct xdp_sock as a flexible array member of the XSKMAP MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Prior this commit, the array storing XDP socket instances were stored in a separate allocated array of the XSKMAP. Now, we store the sockets as a flexible array member in a similar fashion as the arraymap. Doing so, we do less pointer chasing in the lookup. Signed-off-by: Björn Töpel Signed-off-by: Daniel Borkmann Acked-by: Jonathan Lemon Link: https://lore.kernel.org/bpf/20191101110346.15004-2-bjorn.topel@gmail.com --- kernel/bpf/xskmap.c | 55 +++++++++++++++++++-------------------------- 1 file changed, 23 insertions(+), 32 deletions(-) diff --git a/kernel/bpf/xskmap.c b/kernel/bpf/xskmap.c index 82a1ffe15dfa..edcbd863650e 100644 --- a/kernel/bpf/xskmap.c +++ b/kernel/bpf/xskmap.c @@ -11,9 +11,9 @@ struct xsk_map { struct bpf_map map; - struct xdp_sock **xsk_map; struct list_head __percpu *flush_list; spinlock_t lock; /* Synchronize map updates */ + struct xdp_sock *xsk_map[]; }; int xsk_map_inc(struct xsk_map *map) @@ -80,9 +80,10 @@ static void xsk_map_sock_delete(struct xdp_sock *xs, static struct bpf_map *xsk_map_alloc(union bpf_attr *attr) { + struct bpf_map_memory mem; + int cpu, err, numa_node; struct xsk_map *m; - int cpu, err; - u64 cost; + u64 cost, size; if (!capable(CAP_NET_ADMIN)) return ERR_PTR(-EPERM); @@ -92,44 +93,35 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr) attr->map_flags & ~(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)) return ERR_PTR(-EINVAL); - m = kzalloc(sizeof(*m), GFP_USER); - if (!m) + numa_node = bpf_map_attr_numa_node(attr); + size = struct_size(m, xsk_map, attr->max_entries); + cost = size + array_size(sizeof(*m->flush_list), num_possible_cpus()); + + err = bpf_map_charge_init(&mem, cost); + if (err < 0) + return ERR_PTR(err); + + m = bpf_map_area_alloc(size, numa_node); + if (!m) { + bpf_map_charge_finish(&mem); return ERR_PTR(-ENOMEM); + } bpf_map_init_from_attr(&m->map, attr); + bpf_map_charge_move(&m->map.memory, &mem); spin_lock_init(&m->lock); - cost = (u64)m->map.max_entries * sizeof(struct xdp_sock *); - cost += sizeof(struct list_head) * num_possible_cpus(); - - /* Notice returns -EPERM on if map size is larger than memlock limit */ - err = bpf_map_charge_init(&m->map.memory, cost); - if (err) - goto free_m; - - err = -ENOMEM; - m->flush_list = alloc_percpu(struct list_head); - if (!m->flush_list) - goto free_charge; + if (!m->flush_list) { + bpf_map_charge_finish(&m->map.memory); + bpf_map_area_free(m); + return ERR_PTR(-ENOMEM); + } 
for_each_possible_cpu(cpu) INIT_LIST_HEAD(per_cpu_ptr(m->flush_list, cpu)); - m->xsk_map = bpf_map_area_alloc(m->map.max_entries * - sizeof(struct xdp_sock *), - m->map.numa_node); - if (!m->xsk_map) - goto free_percpu; return &m->map; - -free_percpu: - free_percpu(m->flush_list); -free_charge: - bpf_map_charge_finish(&m->map.memory); -free_m: - kfree(m); - return ERR_PTR(err); } static void xsk_map_free(struct bpf_map *map) @@ -139,8 +131,7 @@ static void xsk_map_free(struct bpf_map *map) bpf_clear_redirect_map(map); synchronize_net(); free_percpu(m->flush_list); - bpf_map_area_free(m->xsk_map); - kfree(m); + bpf_map_area_free(m); } static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key) From e65650f291ee72fc578d6346080fc4699204ef2c Mon Sep 17 00:00:00 2001 From: Maciej Fijalkowski Date: Fri, 1 Nov 2019 12:03:45 +0100 Subject: [PATCH 16/30] bpf: Implement map_gen_lookup() callback for XSKMAP Inline the xsk_map_lookup_elem() via implementing the map_gen_lookup() callback. This results in emitting the bpf instructions in place of bpf_map_lookup_elem() helper call and better performance of bpf programs. Signed-off-by: Maciej Fijalkowski Signed-off-by: Daniel Borkmann Acked-by: Jonathan Lemon Link: https://lore.kernel.org/bpf/20191101110346.15004-3-bjorn.topel@gmail.com --- kernel/bpf/xskmap.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/kernel/bpf/xskmap.c b/kernel/bpf/xskmap.c index edcbd863650e..554939f78b83 100644 --- a/kernel/bpf/xskmap.c +++ b/kernel/bpf/xskmap.c @@ -163,6 +163,22 @@ struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, u32 key) return xs; } +static u32 xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) +{ + const int ret = BPF_REG_0, mp = BPF_REG_1, index = BPF_REG_2; + struct bpf_insn *insn = insn_buf; + + *insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0); + *insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5); + *insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(sizeof(struct xsk_sock *))); + *insn++ = BPF_ALU64_IMM(BPF_ADD, mp, offsetof(struct xsk_map, xsk_map)); + *insn++ = BPF_ALU64_REG(BPF_ADD, ret, mp); + *insn++ = BPF_LDX_MEM(BPF_SIZEOF(struct xsk_sock *), ret, ret, 0); + *insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1); + *insn++ = BPF_MOV64_IMM(ret, 0); + return insn - insn_buf; +} + int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp, struct xdp_sock *xs) { @@ -303,6 +319,7 @@ const struct bpf_map_ops xsk_map_ops = { .map_free = xsk_map_free, .map_get_next_key = xsk_map_get_next_key, .map_lookup_elem = xsk_map_lookup_elem, + .map_gen_lookup = xsk_map_gen_lookup, .map_lookup_elem_sys_only = xsk_map_lookup_elem_sys_only, .map_update_elem = xsk_map_update_elem, .map_delete_elem = xsk_map_delete_elem, From d817991cc7486ab83f6c7188b0bc80eebee872f6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20T=C3=B6pel?= Date: Fri, 1 Nov 2019 12:03:46 +0100 Subject: [PATCH 17/30] xsk: Restructure/inline XSKMAP lookup/redirect/flush MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In this commit the XSKMAP entry lookup function used by the XDP redirect code is moved from the xskmap.c file to the xdp_sock.h header, so the lookup can be inlined from, e.g., the bpf_xdp_redirect_map() function. Further the __xsk_map_redirect() and __xsk_map_flush() is moved to the xsk.c, which lets the compiler inline the xsk_rcv() and xsk_flush() functions. 
Finally, all the XDP socket functions were moved from linux/bpf.h to net/xdp_sock.h, where most of the XDP sockets functions are anyway. This yields a ~2% performance boost for the xdpsock "rx_drop" scenario. Signed-off-by: Björn Töpel Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20191101110346.15004-4-bjorn.topel@gmail.com --- include/linux/bpf.h | 25 --------------------- include/net/xdp_sock.h | 51 ++++++++++++++++++++++++++++++++---------- kernel/bpf/xskmap.c | 48 --------------------------------------- net/xdp/xsk.c | 33 +++++++++++++++++++++++++-- 4 files changed, 70 insertions(+), 87 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 80158cff44bd..7c7f518811a6 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1009,31 +1009,6 @@ static inline int sock_map_get_from_fd(const union bpf_attr *attr, } #endif -#if defined(CONFIG_XDP_SOCKETS) -struct xdp_sock; -struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, u32 key); -int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp, - struct xdp_sock *xs); -void __xsk_map_flush(struct bpf_map *map); -#else -struct xdp_sock; -static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, - u32 key) -{ - return NULL; -} - -static inline int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp, - struct xdp_sock *xs) -{ - return -EOPNOTSUPP; -} - -static inline void __xsk_map_flush(struct bpf_map *map) -{ -} -#endif - #if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) void bpf_sk_reuseport_detach(struct sock *sk); int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h index c9398ce7960f..e3780e4b74e1 100644 --- a/include/net/xdp_sock.h +++ b/include/net/xdp_sock.h @@ -69,7 +69,14 @@ struct xdp_umem { /* Nodes are linked in the struct xdp_sock map_list field, and used to * track which maps a certain socket reside in. 
*/ -struct xsk_map; + +struct xsk_map { + struct bpf_map map; + struct list_head __percpu *flush_list; + spinlock_t lock; /* Synchronize map updates */ + struct xdp_sock *xsk_map[]; +}; + struct xsk_map_node { struct list_head node; struct xsk_map *map; @@ -109,8 +116,6 @@ struct xdp_sock { struct xdp_buff; #ifdef CONFIG_XDP_SOCKETS int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp); -int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp); -void xsk_flush(struct xdp_sock *xs); bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs); /* Used from netdev driver */ bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt); @@ -134,6 +139,22 @@ void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs, struct xdp_sock **map_entry); int xsk_map_inc(struct xsk_map *map); void xsk_map_put(struct xsk_map *map); +int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp, + struct xdp_sock *xs); +void __xsk_map_flush(struct bpf_map *map); + +static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, + u32 key) +{ + struct xsk_map *m = container_of(map, struct xsk_map, map); + struct xdp_sock *xs; + + if (key >= map->max_entries) + return NULL; + + xs = READ_ONCE(m->xsk_map[key]); + return xs; +} static inline u64 xsk_umem_extract_addr(u64 addr) { @@ -224,15 +245,6 @@ static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp) return -ENOTSUPP; } -static inline int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp) -{ - return -ENOTSUPP; -} - -static inline void xsk_flush(struct xdp_sock *xs) -{ -} - static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs) { return false; @@ -357,6 +369,21 @@ static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle, return 0; } +static inline int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp, + struct xdp_sock *xs) +{ + return -EOPNOTSUPP; +} + +static inline void __xsk_map_flush(struct bpf_map *map) +{ +} + +static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, + u32 key) +{ + return NULL; +} #endif /* CONFIG_XDP_SOCKETS */ #endif /* _LINUX_XDP_SOCK_H */ diff --git a/kernel/bpf/xskmap.c b/kernel/bpf/xskmap.c index 554939f78b83..da16c30868f3 100644 --- a/kernel/bpf/xskmap.c +++ b/kernel/bpf/xskmap.c @@ -9,13 +9,6 @@ #include #include -struct xsk_map { - struct bpf_map map; - struct list_head __percpu *flush_list; - spinlock_t lock; /* Synchronize map updates */ - struct xdp_sock *xsk_map[]; -}; - int xsk_map_inc(struct xsk_map *map) { struct bpf_map *m = &map->map; @@ -151,18 +144,6 @@ static int xsk_map_get_next_key(struct bpf_map *map, void *key, void *next_key) return 0; } -struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, u32 key) -{ - struct xsk_map *m = container_of(map, struct xsk_map, map); - struct xdp_sock *xs; - - if (key >= map->max_entries) - return NULL; - - xs = READ_ONCE(m->xsk_map[key]); - return xs; -} - static u32 xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) { const int ret = BPF_REG_0, mp = BPF_REG_1, index = BPF_REG_2; @@ -179,35 +160,6 @@ static u32 xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf) return insn - insn_buf; } -int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp, - struct xdp_sock *xs) -{ - struct xsk_map *m = container_of(map, struct xsk_map, map); - struct list_head *flush_list = this_cpu_ptr(m->flush_list); - int err; - - err = xsk_rcv(xs, xdp); - if (err) - return err; - - if (!xs->flush_node.prev) - list_add(&xs->flush_node, flush_list); - - return 0; 
-} - -void __xsk_map_flush(struct bpf_map *map) -{ - struct xsk_map *m = container_of(map, struct xsk_map, map); - struct list_head *flush_list = this_cpu_ptr(m->flush_list); - struct xdp_sock *xs, *tmp; - - list_for_each_entry_safe(xs, tmp, flush_list, flush_node) { - xsk_flush(xs); - __list_del_clearprev(&xs->flush_node); - } -} - static void *xsk_map_lookup_elem(struct bpf_map *map, void *key) { WARN_ON_ONCE(!rcu_read_lock_held()); diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 9044073fbf22..6040bc2b0088 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -196,7 +196,7 @@ static bool xsk_is_bound(struct xdp_sock *xs) return false; } -int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp) +static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp) { u32 len; @@ -212,7 +212,7 @@ int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp) __xsk_rcv_zc(xs, xdp, len) : __xsk_rcv(xs, xdp, len); } -void xsk_flush(struct xdp_sock *xs) +static void xsk_flush(struct xdp_sock *xs) { xskq_produce_flush_desc(xs->rx); xs->sk.sk_data_ready(&xs->sk); @@ -264,6 +264,35 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp) return err; } +int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp, + struct xdp_sock *xs) +{ + struct xsk_map *m = container_of(map, struct xsk_map, map); + struct list_head *flush_list = this_cpu_ptr(m->flush_list); + int err; + + err = xsk_rcv(xs, xdp); + if (err) + return err; + + if (!xs->flush_node.prev) + list_add(&xs->flush_node, flush_list); + + return 0; +} + +void __xsk_map_flush(struct bpf_map *map) +{ + struct xsk_map *m = container_of(map, struct xsk_map, map); + struct list_head *flush_list = this_cpu_ptr(m->flush_list); + struct xdp_sock *xs, *tmp; + + list_for_each_entry_safe(xs, tmp, flush_list, flush_node) { + xsk_flush(xs); + __list_del_clearprev(&xs->flush_node); + } +} + void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries) { xskq_produce_flush_addr_n(umem->cq, nb_entries); From d1b4574a4b86565325ef2e545eda8dfc9aa07c60 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= Date: Sat, 2 Nov 2019 12:09:37 +0100 Subject: [PATCH 18/30] libbpf: Fix error handling in bpf_map__reuse_fd() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit bpf_map__reuse_fd() was calling close() in the error path before returning an error value based on errno. However, close can change errno, so that can lead to potentially misleading error messages. Instead, explicitly store errno in the err variable before each goto. 
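A minimal user-space sketch of the pattern being applied here (purely illustrative; write_one_byte() is a hypothetical helper, not part of the patch):

  #include <errno.h>
  #include <fcntl.h>
  #include <unistd.h>

  static int write_one_byte(const char *path)
  {
          int err = 0, fd;

          fd = open(path, O_WRONLY);
          if (fd < 0)
                  return -errno;

          if (write(fd, "x", 1) < 0) {
                  err = -errno;   /* capture errno before any cleanup */
                  goto out_close;
          }

  out_close:
          close(fd);              /* may clobber errno; err already holds it */
          return err;
  }

Returning the saved err rather than -errno from the error path avoids reporting whatever value close() happened to leave in errno.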
Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/157269297769.394725.12634985106772698611.stgit@toke.dk --- tools/lib/bpf/libbpf.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index c80f316f1320..36152c8729ea 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -1917,16 +1917,22 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd) return -errno; new_fd = open("/", O_RDONLY | O_CLOEXEC); - if (new_fd < 0) + if (new_fd < 0) { + err = -errno; goto err_free_new_name; + } new_fd = dup3(fd, new_fd, O_CLOEXEC); - if (new_fd < 0) + if (new_fd < 0) { + err = -errno; goto err_close_new_fd; + } err = zclose(map->fd); - if (err) + if (err) { + err = -errno; goto err_close_new_fd; + } free(map->name); map->fd = new_fd; @@ -1945,7 +1951,7 @@ int bpf_map__reuse_fd(struct bpf_map *map, int fd) close(new_fd); err_free_new_name: free(new_name); - return -errno; + return err; } int bpf_map__resize(struct bpf_map *map, __u32 max_entries) From 4580b25fcee5347327aaffcec31c615ec28a889a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= Date: Sat, 2 Nov 2019 12:09:38 +0100 Subject: [PATCH 19/30] libbpf: Store map pin path and status in struct bpf_map MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Support storing and setting a pin path in struct bpf_map, which can be used for automatic pinning. Also store the pin status so we can avoid attempts to re-pin a map that has already been pinned (or reused from a previous pinning). The behaviour of bpf_object__{un,}pin_maps() is changed so that if it is called with a NULL path argument (which was previously illegal), it will (un)pin only those maps that have a pin_path set. 
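A minimal usage sketch of the new API (the object file and map name are illustrative):

  #include <bpf/libbpf.h>

  int pin_my_map(void)
  {
          struct bpf_object *obj;
          struct bpf_map *map;
          int err;

          obj = bpf_object__open("prog.o");
          if (libbpf_get_error(obj))
                  return -1;

          map = bpf_object__find_map_by_name(obj, "my_map");
          if (map)
                  bpf_map__set_pin_path(map, "/sys/fs/bpf/my_map");

          err = bpf_object__load(obj);
          if (!err)
                  /* NULL path: only (un)pin maps that have a pin_path set */
                  err = bpf_object__pin_maps(obj, NULL);

          bpf_object__close(obj);
          return err;
  }

bpf_map__get_pin_path() and bpf_map__is_pinned() can then be used to inspect the result.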
Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/157269297876.394725.14782206533681896279.stgit@toke.dk --- tools/lib/bpf/libbpf.c | 164 +++++++++++++++++++++++++++++---------- tools/lib/bpf/libbpf.h | 8 ++ tools/lib/bpf/libbpf.map | 3 + 3 files changed, 134 insertions(+), 41 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 36152c8729ea..22f6dd18de6f 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -227,6 +227,8 @@ struct bpf_map { void *priv; bpf_map_clear_priv_t clear_priv; enum libbpf_map_type libbpf_type; + char *pin_path; + bool pinned; }; struct bpf_secdata { @@ -4036,47 +4038,119 @@ int bpf_map__pin(struct bpf_map *map, const char *path) char *cp, errmsg[STRERR_BUFSIZE]; int err; - err = check_path(path); - if (err) - return err; - if (map == NULL) { pr_warn("invalid map pointer\n"); return -EINVAL; } - if (bpf_obj_pin(map->fd, path)) { - cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); - pr_warn("failed to pin map: %s\n", cp); - return -errno; + if (map->pin_path) { + if (path && strcmp(path, map->pin_path)) { + pr_warn("map '%s' already has pin path '%s' different from '%s'\n", + bpf_map__name(map), map->pin_path, path); + return -EINVAL; + } else if (map->pinned) { + pr_debug("map '%s' already pinned at '%s'; not re-pinning\n", + bpf_map__name(map), map->pin_path); + return 0; + } + } else { + if (!path) { + pr_warn("missing a path to pin map '%s' at\n", + bpf_map__name(map)); + return -EINVAL; + } else if (map->pinned) { + pr_warn("map '%s' already pinned\n", bpf_map__name(map)); + return -EEXIST; + } + + map->pin_path = strdup(path); + if (!map->pin_path) { + err = -errno; + goto out_err; + } } - pr_debug("pinned map '%s'\n", path); + err = check_path(map->pin_path); + if (err) + return err; + + if (bpf_obj_pin(map->fd, map->pin_path)) { + err = -errno; + goto out_err; + } + + map->pinned = true; + pr_debug("pinned map '%s'\n", map->pin_path); return 0; + +out_err: + cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); + pr_warn("failed to pin map: %s\n", cp); + return err; } int bpf_map__unpin(struct bpf_map *map, const char *path) { int err; - err = check_path(path); - if (err) - return err; - if (map == NULL) { pr_warn("invalid map pointer\n"); return -EINVAL; } + if (map->pin_path) { + if (path && strcmp(path, map->pin_path)) { + pr_warn("map '%s' already has pin path '%s' different from '%s'\n", + bpf_map__name(map), map->pin_path, path); + return -EINVAL; + } + path = map->pin_path; + } else if (!path) { + pr_warn("no path to unpin map '%s' from\n", + bpf_map__name(map)); + return -EINVAL; + } + + err = check_path(path); + if (err) + return err; + err = unlink(path); if (err != 0) return -errno; - pr_debug("unpinned map '%s'\n", path); + + map->pinned = false; + pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path); return 0; } +int bpf_map__set_pin_path(struct bpf_map *map, const char *path) +{ + char *new = NULL; + + if (path) { + new = strdup(path); + if (!new) + return -errno; + } + + free(map->pin_path); + map->pin_path = new; + return 0; +} + +const char *bpf_map__get_pin_path(const struct bpf_map *map) +{ + return map->pin_path; +} + +bool bpf_map__is_pinned(const struct bpf_map *map) +{ + return map->pinned; +} + int bpf_object__pin_maps(struct bpf_object *obj, const char *path) { struct bpf_map *map; @@ -4095,20 +4169,27 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char *path) return err; 
bpf_object__for_each_map(map, obj) { + char *pin_path = NULL; char buf[PATH_MAX]; - int len; - len = snprintf(buf, PATH_MAX, "%s/%s", path, - bpf_map__name(map)); - if (len < 0) { - err = -EINVAL; - goto err_unpin_maps; - } else if (len >= PATH_MAX) { - err = -ENAMETOOLONG; - goto err_unpin_maps; + if (path) { + int len; + + len = snprintf(buf, PATH_MAX, "%s/%s", path, + bpf_map__name(map)); + if (len < 0) { + err = -EINVAL; + goto err_unpin_maps; + } else if (len >= PATH_MAX) { + err = -ENAMETOOLONG; + goto err_unpin_maps; + } + pin_path = buf; + } else if (!map->pin_path) { + continue; } - err = bpf_map__pin(map, buf); + err = bpf_map__pin(map, pin_path); if (err) goto err_unpin_maps; } @@ -4117,17 +4198,10 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char *path) err_unpin_maps: while ((map = bpf_map__prev(map, obj))) { - char buf[PATH_MAX]; - int len; - - len = snprintf(buf, PATH_MAX, "%s/%s", path, - bpf_map__name(map)); - if (len < 0) - continue; - else if (len >= PATH_MAX) + if (!map->pin_path) continue; - bpf_map__unpin(map, buf); + bpf_map__unpin(map, NULL); } return err; @@ -4142,17 +4216,24 @@ int bpf_object__unpin_maps(struct bpf_object *obj, const char *path) return -ENOENT; bpf_object__for_each_map(map, obj) { + char *pin_path = NULL; char buf[PATH_MAX]; - int len; - len = snprintf(buf, PATH_MAX, "%s/%s", path, - bpf_map__name(map)); - if (len < 0) - return -EINVAL; - else if (len >= PATH_MAX) - return -ENAMETOOLONG; + if (path) { + int len; - err = bpf_map__unpin(map, buf); + len = snprintf(buf, PATH_MAX, "%s/%s", path, + bpf_map__name(map)); + if (len < 0) + return -EINVAL; + else if (len >= PATH_MAX) + return -ENAMETOOLONG; + pin_path = buf; + } else if (!map->pin_path) { + continue; + } + + err = bpf_map__unpin(map, pin_path); if (err) return err; } @@ -4277,6 +4358,7 @@ void bpf_object__close(struct bpf_object *obj) for (i = 0; i < obj->nr_maps; i++) { zfree(&obj->maps[i].name); + zfree(&obj->maps[i].pin_path); if (obj->maps[i].clear_priv) obj->maps[i].clear_priv(&obj->maps[i], obj->maps[i].priv); diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 2b126ee5e173..e71773a6bfdf 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -124,6 +124,11 @@ int bpf_object__section_size(const struct bpf_object *obj, const char *name, __u32 *size); int bpf_object__variable_offset(const struct bpf_object *obj, const char *name, __u32 *off); + +/* pin_maps and unpin_maps can both be called with a NULL path, in which case + * they will use the pin_path attribute of each map (and ignore all maps that + * don't have a pin_path set). 
+ */ LIBBPF_API int bpf_object__pin_maps(struct bpf_object *obj, const char *path); LIBBPF_API int bpf_object__unpin_maps(struct bpf_object *obj, const char *path); @@ -387,6 +392,9 @@ LIBBPF_API int bpf_map__resize(struct bpf_map *map, __u32 max_entries); LIBBPF_API bool bpf_map__is_offload_neutral(const struct bpf_map *map); LIBBPF_API bool bpf_map__is_internal(const struct bpf_map *map); LIBBPF_API void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex); +LIBBPF_API int bpf_map__set_pin_path(struct bpf_map *map, const char *path); +LIBBPF_API const char *bpf_map__get_pin_path(const struct bpf_map *map); +LIBBPF_API bool bpf_map__is_pinned(const struct bpf_map *map); LIBBPF_API int bpf_map__pin(struct bpf_map *map, const char *path); LIBBPF_API int bpf_map__unpin(struct bpf_map *map, const char *path); diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 69dded5af00b..86173cbb159d 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -193,6 +193,9 @@ LIBBPF_0.0.5 { LIBBPF_0.0.6 { global: + bpf_map__get_pin_path; + bpf_map__is_pinned; + bpf_map__set_pin_path; bpf_object__open_file; bpf_object__open_mem; bpf_program__get_expected_attach_type; From 196f8487f51ee6e2a46f51e10ac3f4ca67574ba9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= Date: Sat, 2 Nov 2019 12:09:39 +0100 Subject: [PATCH 20/30] libbpf: Move directory creation into _pin() functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The existing pin_*() functions all try to create the parent directory before pinning. Move this check into the per-object _pin() functions instead. This ensures consistent behaviour when auto-pinning is added (which doesn't go through the top-level pin_maps() function), at the cost of a few more calls to mkdir(). 
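As a consequence, pinning to a nested path no longer requires the caller to create the parent directory up front; a minimal sketch (the path is illustrative):

  #include <bpf/libbpf.h>

  /* bpf_map__pin() now creates "/sys/fs/bpf/myapp" itself (via
   * make_parent_dir()) if it does not already exist.
   */
  static int pin_map_nested(struct bpf_map *map)
  {
          return bpf_map__pin(map, "/sys/fs/bpf/myapp/my_map");
  }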
Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/157269297985.394725.5882630952992598610.stgit@toke.dk --- tools/lib/bpf/libbpf.c | 61 +++++++++++++++++++++++------------------- 1 file changed, 34 insertions(+), 27 deletions(-) diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 22f6dd18de6f..fa15f3b315ba 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -3816,6 +3816,28 @@ int bpf_object__load(struct bpf_object *obj) return bpf_object__load_xattr(&attr); } +static int make_parent_dir(const char *path) +{ + char *cp, errmsg[STRERR_BUFSIZE]; + char *dname, *dir; + int err = 0; + + dname = strdup(path); + if (dname == NULL) + return -ENOMEM; + + dir = dirname(dname); + if (mkdir(dir, 0700) && errno != EEXIST) + err = -errno; + + free(dname); + if (err) { + cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); + pr_warn("failed to mkdir %s: %s\n", path, cp); + } + return err; +} + static int check_path(const char *path) { char *cp, errmsg[STRERR_BUFSIZE]; @@ -3852,6 +3874,10 @@ int bpf_program__pin_instance(struct bpf_program *prog, const char *path, char *cp, errmsg[STRERR_BUFSIZE]; int err; + err = make_parent_dir(path); + if (err) + return err; + err = check_path(path); if (err) return err; @@ -3905,25 +3931,14 @@ int bpf_program__unpin_instance(struct bpf_program *prog, const char *path, return 0; } -static int make_dir(const char *path) -{ - char *cp, errmsg[STRERR_BUFSIZE]; - int err = 0; - - if (mkdir(path, 0700) && errno != EEXIST) - err = -errno; - - if (err) { - cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); - pr_warn("failed to mkdir %s: %s\n", path, cp); - } - return err; -} - int bpf_program__pin(struct bpf_program *prog, const char *path) { int i, err; + err = make_parent_dir(path); + if (err) + return err; + err = check_path(path); if (err) return err; @@ -3944,10 +3959,6 @@ int bpf_program__pin(struct bpf_program *prog, const char *path) return bpf_program__pin_instance(prog, path, 0); } - err = make_dir(path); - if (err) - return err; - for (i = 0; i < prog->instances.nr; i++) { char buf[PATH_MAX]; int len; @@ -4070,6 +4081,10 @@ int bpf_map__pin(struct bpf_map *map, const char *path) } } + err = make_parent_dir(map->pin_path); + if (err) + return err; + err = check_path(map->pin_path); if (err) return err; @@ -4164,10 +4179,6 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char *path) return -ENOENT; } - err = make_dir(path); - if (err) - return err; - bpf_object__for_each_map(map, obj) { char *pin_path = NULL; char buf[PATH_MAX]; @@ -4254,10 +4265,6 @@ int bpf_object__pin_programs(struct bpf_object *obj, const char *path) return -ENOENT; } - err = make_dir(path); - if (err) - return err; - bpf_object__for_each_program(prog, obj) { char buf[PATH_MAX]; int len; From 57a00f41644f20b11c12a27061d814655f633544 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= Date: Sat, 2 Nov 2019 12:09:41 +0100 Subject: [PATCH 21/30] libbpf: Add auto-pinning of maps when loading BPF objects MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds support to libbpf for setting map pinning information as part of the BTF map declaration, to get automatic map pinning (and reuse) on load. The pinning type currently only supports a single PIN_BY_NAME mode, where each map will be pinned by its name in a path that can be overridden, but defaults to /sys/fs/bpf. 
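For example, a BTF-defined map opting into pinning looks roughly like this (a minimal sketch; the map layout is illustrative and the __uint()/__type() macros are assumed to come from bpf_helpers.h):

  #include <linux/bpf.h>
  #include "bpf_helpers.h"

  struct {
          __uint(type, BPF_MAP_TYPE_ARRAY);
          __uint(max_entries, 1);
          __type(key, __u32);
          __type(value, __u64);
          /* pinned at <pin_root_path>/<map name>, /sys/fs/bpf/my_map by default */
          __uint(pinning, LIBBPF_PIN_BY_NAME);
  } my_map SEC(".maps");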
Since auto-pinning only does something if any maps actually have a 'pinning' BTF attribute set, we default the new option to enabled, on the assumption that seamless pinning is what most callers want. When a map has a pin_path set at load time, libbpf will compare the map pinned at that location (if any), and if the attributes match, will re-use that map instead of creating a new one. If no existing map is found, the newly created map will instead be pinned at the location. Programs wanting to customise the pinning can override the pinning paths using bpf_map__set_pin_path() before calling bpf_object__load() (including setting it to NULL to disable pinning of a particular map). Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/157269298092.394725.3966306029218559681.stgit@toke.dk --- tools/lib/bpf/bpf_helpers.h | 6 ++ tools/lib/bpf/libbpf.c | 146 ++++++++++++++++++++++++++++++++++-- tools/lib/bpf/libbpf.h | 13 +++- 3 files changed, 156 insertions(+), 9 deletions(-) diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h index 2203595f38c3..0c7d28292898 100644 --- a/tools/lib/bpf/bpf_helpers.h +++ b/tools/lib/bpf/bpf_helpers.h @@ -38,4 +38,10 @@ struct bpf_map_def { unsigned int map_flags; }; +enum libbpf_pin_type { + LIBBPF_PIN_NONE, + /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */ + LIBBPF_PIN_BY_NAME, +}; + #endif diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index fa15f3b315ba..7aa2a2a22cef 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -1093,10 +1093,32 @@ static bool get_map_field_int(const char *map_name, const struct btf *btf, return true; } +static int build_map_pin_path(struct bpf_map *map, const char *path) +{ + char buf[PATH_MAX]; + int err, len; + + if (!path) + path = "/sys/fs/bpf"; + + len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map)); + if (len < 0) + return -EINVAL; + else if (len >= PATH_MAX) + return -ENAMETOOLONG; + + err = bpf_map__set_pin_path(map, buf); + if (err) + return err; + + return 0; +} + static int bpf_object__init_user_btf_map(struct bpf_object *obj, const struct btf_type *sec, int var_idx, int sec_idx, - const Elf_Data *data, bool strict) + const Elf_Data *data, bool strict, + const char *pin_root_path) { const struct btf_type *var, *def, *t; const struct btf_var_secinfo *vi; @@ -1271,6 +1293,30 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj, } map->def.value_size = sz; map->btf_value_type_id = t->type; + } else if (strcmp(name, "pinning") == 0) { + __u32 val; + int err; + + if (!get_map_field_int(map_name, obj->btf, def, m, + &val)) + return -EINVAL; + pr_debug("map '%s': found pinning = %u.\n", + map_name, val); + + if (val != LIBBPF_PIN_NONE && + val != LIBBPF_PIN_BY_NAME) { + pr_warn("map '%s': invalid pinning value %u.\n", + map_name, val); + return -EINVAL; + } + if (val == LIBBPF_PIN_BY_NAME) { + err = build_map_pin_path(map, pin_root_path); + if (err) { + pr_warn("map '%s': couldn't build pin path.\n", + map_name); + return err; + } + } } else { if (strict) { pr_warn("map '%s': unknown field '%s'.\n", @@ -1290,7 +1336,8 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj, return 0; } -static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict) +static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict, + const char *pin_root_path) { const struct btf_type *sec = NULL; int nr_types, i, vlen, err; @@ -1332,7 +1379,7 
@@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict) for (i = 0; i < vlen; i++) { err = bpf_object__init_user_btf_map(obj, sec, i, obj->efile.btf_maps_shndx, - data, strict); + data, strict, pin_root_path); if (err) return err; } @@ -1340,7 +1387,8 @@ static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict) return 0; } -static int bpf_object__init_maps(struct bpf_object *obj, bool relaxed_maps) +static int bpf_object__init_maps(struct bpf_object *obj, bool relaxed_maps, + const char *pin_root_path) { bool strict = !relaxed_maps; int err; @@ -1349,7 +1397,7 @@ static int bpf_object__init_maps(struct bpf_object *obj, bool relaxed_maps) if (err) return err; - err = bpf_object__init_user_btf_maps(obj, strict); + err = bpf_object__init_user_btf_maps(obj, strict, pin_root_path); if (err) return err; @@ -1538,7 +1586,8 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj) return 0; } -static int bpf_object__elf_collect(struct bpf_object *obj, bool relaxed_maps) +static int bpf_object__elf_collect(struct bpf_object *obj, bool relaxed_maps, + const char *pin_root_path) { Elf *elf = obj->efile.elf; GElf_Ehdr *ep = &obj->efile.ehdr; @@ -1673,7 +1722,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj, bool relaxed_maps) } err = bpf_object__init_btf(obj, btf_data, btf_ext_data); if (!err) - err = bpf_object__init_maps(obj, relaxed_maps); + err = bpf_object__init_maps(obj, relaxed_maps, pin_root_path); if (!err) err = bpf_object__sanitize_and_load_btf(obj); if (!err) @@ -2129,6 +2178,66 @@ bpf_object__probe_caps(struct bpf_object *obj) return 0; } +static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd) +{ + struct bpf_map_info map_info = {}; + char msg[STRERR_BUFSIZE]; + __u32 map_info_len; + + map_info_len = sizeof(map_info); + + if (bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len)) { + pr_warn("failed to get map info for map FD %d: %s\n", + map_fd, libbpf_strerror_r(errno, msg, sizeof(msg))); + return false; + } + + return (map_info.type == map->def.type && + map_info.key_size == map->def.key_size && + map_info.value_size == map->def.value_size && + map_info.max_entries == map->def.max_entries && + map_info.map_flags == map->def.map_flags); +} + +static int +bpf_object__reuse_map(struct bpf_map *map) +{ + char *cp, errmsg[STRERR_BUFSIZE]; + int err, pin_fd; + + pin_fd = bpf_obj_get(map->pin_path); + if (pin_fd < 0) { + err = -errno; + if (err == -ENOENT) { + pr_debug("found no pinned map to reuse at '%s'\n", + map->pin_path); + return 0; + } + + cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); + pr_warn("couldn't retrieve pinned map '%s': %s\n", + map->pin_path, cp); + return err; + } + + if (!map_is_reuse_compat(map, pin_fd)) { + pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n", + map->pin_path); + close(pin_fd); + return -EINVAL; + } + + err = bpf_map__reuse_fd(map, pin_fd); + if (err) { + close(pin_fd); + return err; + } + map->pinned = true; + pr_debug("reused pinned map at '%s'\n", map->pin_path); + + return 0; +} + static int bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map) { @@ -2171,6 +2280,15 @@ bpf_object__create_maps(struct bpf_object *obj) char *cp, errmsg[STRERR_BUFSIZE]; int *pfd = &map->fd; + if (map->pin_path) { + err = bpf_object__reuse_map(map); + if (err) { + pr_warn("error reusing pinned map %s\n", + map->name); + return err; + } + } + if (map->fd >= 0) { pr_debug("skip map create (preset) %s: fd=%d\n", map->name, map->fd); @@ 
-2249,6 +2367,15 @@ bpf_object__create_maps(struct bpf_object *obj) } } + if (map->pin_path && !map->pinned) { + err = bpf_map__pin(map, NULL); + if (err) { + pr_warn("failed to auto-pin map name '%s' at '%s'\n", + map->name, map->pin_path); + return err; + } + } + pr_debug("created map %s: fd=%d\n", map->name, *pfd); } @@ -3623,6 +3750,7 @@ static struct bpf_object * __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz, struct bpf_object_open_opts *opts) { + const char *pin_root_path; struct bpf_program *prog; struct bpf_object *obj; const char *obj_name; @@ -3657,11 +3785,13 @@ __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz, obj->relaxed_core_relocs = OPTS_GET(opts, relaxed_core_relocs, false); relaxed_maps = OPTS_GET(opts, relaxed_maps, false); + pin_root_path = OPTS_GET(opts, pin_root_path, NULL); CHECK_ERR(bpf_object__elf_init(obj), err, out); CHECK_ERR(bpf_object__check_endianness(obj), err, out); CHECK_ERR(bpf_object__probe_caps(obj), err, out); - CHECK_ERR(bpf_object__elf_collect(obj, relaxed_maps), err, out); + CHECK_ERR(bpf_object__elf_collect(obj, relaxed_maps, pin_root_path), + err, out); CHECK_ERR(bpf_object__collect_reloc(obj), err, out); bpf_object__elf_finish(obj); diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index e71773a6bfdf..6ddc0419337b 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -103,8 +103,13 @@ struct bpf_object_open_opts { bool relaxed_maps; /* process CO-RE relocations non-strictly, allowing them to fail */ bool relaxed_core_relocs; + /* maps that set the 'pinning' attribute in their definition will have + * their pin_path attribute set to a file in this directory, and be + * auto-pinned to that path on load; defaults to "/sys/fs/bpf". + */ + const char *pin_root_path; }; -#define bpf_object_open_opts__last_field relaxed_core_relocs +#define bpf_object_open_opts__last_field pin_root_path LIBBPF_API struct bpf_object *bpf_object__open(const char *path); LIBBPF_API struct bpf_object * @@ -125,6 +130,12 @@ int bpf_object__section_size(const struct bpf_object *obj, const char *name, int bpf_object__variable_offset(const struct bpf_object *obj, const char *name, __u32 *off); +enum libbpf_pin_type { + LIBBPF_PIN_NONE, + /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */ + LIBBPF_PIN_BY_NAME, +}; + /* pin_maps and unpin_maps can both be called with a NULL path, in which case * they will use the pin_path attribute of each map (and ignore all maps that * don't have a pin_path set). From 2f4a32cc83a584ac3669d44fa2aa1d7f115d44c1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Toke=20H=C3=B8iland-J=C3=B8rgensen?= Date: Sat, 2 Nov 2019 12:09:42 +0100 Subject: [PATCH 22/30] selftests: Add tests for automatic map pinning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This adds a new BPF selftest to exercise the new automatic map pinning code. 
Signed-off-by: Toke Høiland-Jørgensen Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/157269298209.394725.15420085139296213182.stgit@toke.dk --- .../selftests/bpf/prog_tests/pinning.c | 210 ++++++++++++++++++ .../selftests/bpf/progs/test_pinning.c | 31 +++ .../bpf/progs/test_pinning_invalid.c | 16 ++ 3 files changed, 257 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/pinning.c create mode 100644 tools/testing/selftests/bpf/progs/test_pinning.c create mode 100644 tools/testing/selftests/bpf/progs/test_pinning_invalid.c diff --git a/tools/testing/selftests/bpf/prog_tests/pinning.c b/tools/testing/selftests/bpf/prog_tests/pinning.c new file mode 100644 index 000000000000..525388971e08 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/pinning.c @@ -0,0 +1,210 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include + +__u32 get_map_id(struct bpf_object *obj, const char *name) +{ + struct bpf_map_info map_info = {}; + __u32 map_info_len, duration = 0; + struct bpf_map *map; + int err; + + map_info_len = sizeof(map_info); + + map = bpf_object__find_map_by_name(obj, name); + if (CHECK(!map, "find map", "NULL map")) + return 0; + + err = bpf_obj_get_info_by_fd(bpf_map__fd(map), + &map_info, &map_info_len); + CHECK(err, "get map info", "err %d errno %d", err, errno); + return map_info.id; +} + +void test_pinning(void) +{ + const char *file_invalid = "./test_pinning_invalid.o"; + const char *custpinpath = "/sys/fs/bpf/custom/pinmap"; + const char *nopinpath = "/sys/fs/bpf/nopinmap"; + const char *nopinpath2 = "/sys/fs/bpf/nopinmap2"; + const char *custpath = "/sys/fs/bpf/custom"; + const char *pinpath = "/sys/fs/bpf/pinmap"; + const char *file = "./test_pinning.o"; + __u32 map_id, map_id2, duration = 0; + struct stat statbuf = {}; + struct bpf_object *obj; + struct bpf_map *map; + int err; + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, + .pin_root_path = custpath, + ); + + /* check that opening fails with invalid pinning value in map def */ + obj = bpf_object__open_file(file_invalid, NULL); + err = libbpf_get_error(obj); + if (CHECK(err != -EINVAL, "invalid open", "err %d errno %d\n", err, errno)) { + obj = NULL; + goto out; + } + + /* open the valid object file */ + obj = bpf_object__open_file(file, NULL); + err = libbpf_get_error(obj); + if (CHECK(err, "default open", "err %d errno %d\n", err, errno)) { + obj = NULL; + goto out; + } + + err = bpf_object__load(obj); + if (CHECK(err, "default load", "err %d errno %d\n", err, errno)) + goto out; + + /* check that pinmap was pinned */ + err = stat(pinpath, &statbuf); + if (CHECK(err, "stat pinpath", "err %d errno %d\n", err, errno)) + goto out; + + /* check that nopinmap was *not* pinned */ + err = stat(nopinpath, &statbuf); + if (CHECK(!err || errno != ENOENT, "stat nopinpath", + "err %d errno %d\n", err, errno)) + goto out; + + /* check that nopinmap2 was *not* pinned */ + err = stat(nopinpath2, &statbuf); + if (CHECK(!err || errno != ENOENT, "stat nopinpath2", + "err %d errno %d\n", err, errno)) + goto out; + + map_id = get_map_id(obj, "pinmap"); + if (!map_id) + goto out; + + bpf_object__close(obj); + + obj = bpf_object__open_file(file, NULL); + if (CHECK_FAIL(libbpf_get_error(obj))) { + obj = NULL; + goto out; + } + + err = bpf_object__load(obj); + if (CHECK(err, "default load", "err %d errno %d\n", err, errno)) + goto out; + + /* check that same map ID was reused for second load */ + map_id2 = get_map_id(obj, "pinmap"); + if (CHECK(map_id 
!= map_id2, "check reuse", + "err %d errno %d id %d id2 %d\n", err, errno, map_id, map_id2)) + goto out; + + /* should be no-op to re-pin same map */ + map = bpf_object__find_map_by_name(obj, "pinmap"); + if (CHECK(!map, "find map", "NULL map")) + goto out; + + err = bpf_map__pin(map, NULL); + if (CHECK(err, "re-pin map", "err %d errno %d\n", err, errno)) + goto out; + + /* but error to pin at different location */ + err = bpf_map__pin(map, "/sys/fs/bpf/other"); + if (CHECK(!err, "pin map different", "err %d errno %d\n", err, errno)) + goto out; + + /* unpin maps with a pin_path set */ + err = bpf_object__unpin_maps(obj, NULL); + if (CHECK(err, "unpin maps", "err %d errno %d\n", err, errno)) + goto out; + + /* and re-pin them... */ + err = bpf_object__pin_maps(obj, NULL); + if (CHECK(err, "pin maps", "err %d errno %d\n", err, errno)) + goto out; + + /* set pinning path of other map and re-pin all */ + map = bpf_object__find_map_by_name(obj, "nopinmap"); + if (CHECK(!map, "find map", "NULL map")) + goto out; + + err = bpf_map__set_pin_path(map, custpinpath); + if (CHECK(err, "set pin path", "err %d errno %d\n", err, errno)) + goto out; + + /* should only pin the one unpinned map */ + err = bpf_object__pin_maps(obj, NULL); + if (CHECK(err, "pin maps", "err %d errno %d\n", err, errno)) + goto out; + + /* check that nopinmap was pinned at the custom path */ + err = stat(custpinpath, &statbuf); + if (CHECK(err, "stat custpinpath", "err %d errno %d\n", err, errno)) + goto out; + + /* remove the custom pin path to re-test it with auto-pinning below */ + err = unlink(custpinpath); + if (CHECK(err, "unlink custpinpath", "err %d errno %d\n", err, errno)) + goto out; + + err = rmdir(custpath); + if (CHECK(err, "rmdir custpindir", "err %d errno %d\n", err, errno)) + goto out; + + bpf_object__close(obj); + + /* open the valid object file again */ + obj = bpf_object__open_file(file, NULL); + err = libbpf_get_error(obj); + if (CHECK(err, "default open", "err %d errno %d\n", err, errno)) { + obj = NULL; + goto out; + } + + /* swap pin paths of the two maps */ + bpf_object__for_each_map(map, obj) { + if (!strcmp(bpf_map__name(map), "nopinmap")) + err = bpf_map__set_pin_path(map, pinpath); + else if (!strcmp(bpf_map__name(map), "pinmap")) + err = bpf_map__set_pin_path(map, NULL); + else + continue; + + if (CHECK(err, "set pin path", "err %d errno %d\n", err, errno)) + goto out; + } + + /* should fail because of map parameter mismatch */ + err = bpf_object__load(obj); + if (CHECK(err != -EINVAL, "param mismatch load", "err %d errno %d\n", err, errno)) + goto out; + + bpf_object__close(obj); + + /* test auto-pinning at custom path with open opt */ + obj = bpf_object__open_file(file, &opts); + if (CHECK_FAIL(libbpf_get_error(obj))) { + obj = NULL; + goto out; + } + + err = bpf_object__load(obj); + if (CHECK(err, "custom load", "err %d errno %d\n", err, errno)) + goto out; + + /* check that pinmap was pinned at the custom path */ + err = stat(custpinpath, &statbuf); + if (CHECK(err, "stat custpinpath", "err %d errno %d\n", err, errno)) + goto out; + +out: + unlink(pinpath); + unlink(nopinpath); + unlink(nopinpath2); + unlink(custpinpath); + rmdir(custpath); + if (obj) + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/progs/test_pinning.c b/tools/testing/selftests/bpf/progs/test_pinning.c new file mode 100644 index 000000000000..f69a4a50d056 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_pinning.c @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include 
"bpf_helpers.h" + +int _version SEC("version") = 1; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u64); + __uint(pinning, LIBBPF_PIN_BY_NAME); +} pinmap SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u64); +} nopinmap SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u64); + __uint(pinning, LIBBPF_PIN_NONE); +} nopinmap2 SEC(".maps"); + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_pinning_invalid.c b/tools/testing/selftests/bpf/progs/test_pinning_invalid.c new file mode 100644 index 000000000000..51b38abe7ba1 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_pinning_invalid.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include "bpf_helpers.h" + +int _version SEC("version") = 1; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u64); + __uint(pinning, 2); /* invalid */ +} nopinmap3 SEC(".maps"); + +char _license[] SEC("license") = "GPL"; From 1d1585ca0f48fe7ed95c3571f3e4a82b2b5045dc Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Sat, 2 Nov 2019 00:17:56 +0100 Subject: [PATCH 23/30] uaccess: Add non-pagefault user-space write function Commit 3d7081822f7f ("uaccess: Add non-pagefault user-space read functions") missed to add probe write function, therefore factor out a probe_write_common() helper with most logic of probe_kernel_write() except setting KERNEL_DS, and add a new probe_user_write() helper so it can be used from BPF side. Again, on some archs, the user address space and kernel address space can co-exist and be overlapping, so in such case, setting KERNEL_DS would mean that the given address is treated as being in kernel address space. Signed-off-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Cc: Masami Hiramatsu Link: https://lore.kernel.org/bpf/9df2542e68141bfa3addde631441ee45503856a8.1572649915.git.daniel@iogearbox.net --- include/linux/uaccess.h | 12 +++++++++++ mm/maccess.c | 45 +++++++++++++++++++++++++++++++++++++---- 2 files changed, 53 insertions(+), 4 deletions(-) diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index d4ee6e942562..38555435a64a 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -337,6 +337,18 @@ extern long __probe_user_read(void *dst, const void __user *src, size_t size); extern long notrace probe_kernel_write(void *dst, const void *src, size_t size); extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size); +/* + * probe_user_write(): safely attempt to write to a location in user space + * @dst: address to write to + * @src: pointer to the data that shall be written + * @size: size of the data chunk + * + * Safely write to address @dst from the buffer at @src. If a kernel fault + * happens, handle that and return -EFAULT. 
+ */ +extern long notrace probe_user_write(void __user *dst, const void *src, size_t size); +extern long notrace __probe_user_write(void __user *dst, const void *src, size_t size); + extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr, long count); diff --git a/mm/maccess.c b/mm/maccess.c index d065736f6b87..2d3c3d01064c 100644 --- a/mm/maccess.c +++ b/mm/maccess.c @@ -18,6 +18,18 @@ probe_read_common(void *dst, const void __user *src, size_t size) return ret ? -EFAULT : 0; } +static __always_inline long +probe_write_common(void __user *dst, const void *src, size_t size) +{ + long ret; + + pagefault_disable(); + ret = __copy_to_user_inatomic(dst, src, size); + pagefault_enable(); + + return ret ? -EFAULT : 0; +} + /** * probe_kernel_read(): safely attempt to read from a kernel-space location * @dst: pointer to the buffer that shall take the data @@ -85,6 +97,7 @@ EXPORT_SYMBOL_GPL(probe_user_read); * Safely write to address @dst from the buffer at @src. If a kernel fault * happens, handle that and return -EFAULT. */ + long __weak probe_kernel_write(void *dst, const void *src, size_t size) __attribute__((alias("__probe_kernel_write"))); @@ -94,15 +107,39 @@ long __probe_kernel_write(void *dst, const void *src, size_t size) mm_segment_t old_fs = get_fs(); set_fs(KERNEL_DS); - pagefault_disable(); - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size); - pagefault_enable(); + ret = probe_write_common((__force void __user *)dst, src, size); set_fs(old_fs); - return ret ? -EFAULT : 0; + return ret; } EXPORT_SYMBOL_GPL(probe_kernel_write); +/** + * probe_user_write(): safely attempt to write to a user-space location + * @dst: address to write to + * @src: pointer to the data that shall be written + * @size: size of the data chunk + * + * Safely write to address @dst from the buffer at @src. If a kernel fault + * happens, handle that and return -EFAULT. + */ + +long __weak probe_user_write(void __user *dst, const void *src, size_t size) + __attribute__((alias("__probe_user_write"))); + +long __probe_user_write(void __user *dst, const void *src, size_t size) +{ + long ret = -EFAULT; + mm_segment_t old_fs = get_fs(); + + set_fs(USER_DS); + if (access_ok(dst, size)) + ret = probe_write_common(dst, src, size); + set_fs(old_fs); + + return ret; +} +EXPORT_SYMBOL_GPL(probe_user_write); /** * strncpy_from_unsafe: - Copy a NUL terminated string from unsafe address. From 75a1a607bb7e6d918be3aca11ec2214a275392f4 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Sat, 2 Nov 2019 00:17:57 +0100 Subject: [PATCH 24/30] uaccess: Add strict non-pagefault kernel-space read function Add two new probe_kernel_read_strict() and strncpy_from_unsafe_strict() helpers which by default alias to the __probe_kernel_read() and the __strncpy_from_unsafe(), respectively, but can be overridden by archs which have non-overlapping address ranges for kernel space and user space in order to bail out with -EFAULT when attempting to probe user memory including non-canonical user access addresses [0]: 4-level page tables: user-space mem: 0x0000000000000000 - 0x00007fffffffffff non-canonical: 0x0000800000000000 - 0xffff7fffffffffff 5-level page tables: user-space mem: 0x0000000000000000 - 0x00ffffffffffffff non-canonical: 0x0100000000000000 - 0xfeffffffffffffff The idea is that these helpers are complementary to the probe_user_read() and strncpy_from_unsafe_user() which probe user-only memory. 
Both added helpers here do the same, but for kernel-only addresses. Both set of helpers are going to be used for BPF tracing. They also explicitly avoid throwing the splat for non-canonical user addresses from 00c42373d397 ("x86-64: add warning for non-canonical user access address dereferences"). For compat, the current probe_kernel_read() and strncpy_from_unsafe() are left as-is. [0] Documentation/x86/x86_64/mm.txt Signed-off-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov Cc: Linus Torvalds Cc: Masami Hiramatsu Cc: x86@kernel.org Link: https://lore.kernel.org/bpf/eefeefd769aa5a013531f491a71f0936779e916b.1572649915.git.daniel@iogearbox.net --- arch/x86/mm/Makefile | 2 +- arch/x86/mm/maccess.c | 43 +++++++++++++++++++++++++++++++++++++++++ include/linux/uaccess.h | 4 ++++ mm/maccess.c | 25 +++++++++++++++++++++++- 4 files changed, 72 insertions(+), 2 deletions(-) create mode 100644 arch/x86/mm/maccess.c diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 84373dc9b341..bbc68a54795e 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -13,7 +13,7 @@ CFLAGS_REMOVE_mem_encrypt_identity.o = -pg endif obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ - pat.o pgtable.o physaddr.o setup_nx.o tlb.o cpu_entry_area.o + pat.o pgtable.o physaddr.o setup_nx.o tlb.o cpu_entry_area.o maccess.o # Make sure __phys_addr has no stackprotector nostackp := $(call cc-option, -fno-stack-protector) diff --git a/arch/x86/mm/maccess.c b/arch/x86/mm/maccess.c new file mode 100644 index 000000000000..f5b85bdc0535 --- /dev/null +++ b/arch/x86/mm/maccess.c @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include +#include + +#ifdef CONFIG_X86_64 +static __always_inline u64 canonical_address(u64 vaddr, u8 vaddr_bits) +{ + return ((s64)vaddr << (64 - vaddr_bits)) >> (64 - vaddr_bits); +} + +static __always_inline bool invalid_probe_range(u64 vaddr) +{ + /* + * Range covering the highest possible canonical userspace address + * as well as non-canonical address range. For the canonical range + * we also need to include the userspace guard page. + */ + return vaddr < TASK_SIZE_MAX + PAGE_SIZE || + canonical_address(vaddr, boot_cpu_data.x86_virt_bits) != vaddr; +} +#else +static __always_inline bool invalid_probe_range(u64 vaddr) +{ + return vaddr < TASK_SIZE_MAX; +} +#endif + +long probe_kernel_read_strict(void *dst, const void *src, size_t size) +{ + if (unlikely(invalid_probe_range((unsigned long)src))) + return -EFAULT; + + return __probe_kernel_read(dst, src, size); +} + +long strncpy_from_unsafe_strict(char *dst, const void *unsafe_addr, long count) +{ + if (unlikely(invalid_probe_range((unsigned long)unsafe_addr))) + return -EFAULT; + + return __strncpy_from_unsafe(dst, unsafe_addr, count); +} diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 38555435a64a..67f016010aad 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -311,6 +311,7 @@ copy_struct_from_user(void *dst, size_t ksize, const void __user *src, * happens, handle that and return -EFAULT. 
*/ extern long probe_kernel_read(void *dst, const void *src, size_t size); +extern long probe_kernel_read_strict(void *dst, const void *src, size_t size); extern long __probe_kernel_read(void *dst, const void *src, size_t size); /* @@ -350,6 +351,9 @@ extern long notrace probe_user_write(void __user *dst, const void *src, size_t s extern long notrace __probe_user_write(void __user *dst, const void *src, size_t size); extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); +extern long strncpy_from_unsafe_strict(char *dst, const void *unsafe_addr, + long count); +extern long __strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr, long count); extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count); diff --git a/mm/maccess.c b/mm/maccess.c index 2d3c3d01064c..3ca8d97e5010 100644 --- a/mm/maccess.c +++ b/mm/maccess.c @@ -43,11 +43,20 @@ probe_write_common(void __user *dst, const void *src, size_t size) * do_page_fault() doesn't attempt to take mmap_sem. This makes * probe_kernel_read() suitable for use within regions where the caller * already holds mmap_sem, or other locks which nest inside mmap_sem. + * + * probe_kernel_read_strict() is the same as probe_kernel_read() except for + * the case where architectures have non-overlapping user and kernel address + * ranges: probe_kernel_read_strict() will additionally return -EFAULT for + * probing memory on a user address range where probe_user_read() is supposed + * to be used instead. */ long __weak probe_kernel_read(void *dst, const void *src, size_t size) __attribute__((alias("__probe_kernel_read"))); +long __weak probe_kernel_read_strict(void *dst, const void *src, size_t size) + __attribute__((alias("__probe_kernel_read"))); + long __probe_kernel_read(void *dst, const void *src, size_t size) { long ret; @@ -157,8 +166,22 @@ EXPORT_SYMBOL_GPL(probe_user_write); * * If @count is smaller than the length of the string, copies @count-1 bytes, * sets the last byte of @dst buffer to NUL and returns @count. + * + * strncpy_from_unsafe_strict() is the same as strncpy_from_unsafe() except + * for the case where architectures have non-overlapping user and kernel address + * ranges: strncpy_from_unsafe_strict() will additionally return -EFAULT for + * probing memory on a user address range where strncpy_from_unsafe_user() is + * supposed to be used instead. */ -long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count) + +long __weak strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count) + __attribute__((alias("__strncpy_from_unsafe"))); + +long __weak strncpy_from_unsafe_strict(char *dst, const void *unsafe_addr, + long count) + __attribute__((alias("__strncpy_from_unsafe"))); + +long __strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count) { mm_segment_t old_fs = get_fs(); const void *src = unsafe_addr; From eb1b66887472eaa7342305b7890ae510dd9d1a79 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Sat, 2 Nov 2019 00:17:58 +0100 Subject: [PATCH 25/30] bpf: Make use of probe_user_write in probe write helper Convert the bpf_probe_write_user() helper to probe_user_write() such that writes are not attempted under KERNEL_DS anymore which is buggy as kernel and user space pointers can have overlapping addresses. Also, given we have the access_ok() check inside probe_user_write(), the helper doesn't need to do it twice. 
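For context, a minimal tracing-program sketch that exercises the helper (the probed function and the ctx->si register access are illustrative, following the x86-64 convention; helper declarations are assumed to come from bpf_helpers.h):

  #include <linux/bpf.h>
  #include <linux/ptrace.h>
  #include "bpf_helpers.h"

  /* Overwrite the first byte of the user buffer passed as the second
   * argument of write(); purely illustrative.
   */
  SEC("kprobe/sys_write")
  int kprobe_sys_write(struct pt_regs *ctx)
  {
          char byte = '#';

          bpf_probe_write_user((void *)ctx->si, &byte, sizeof(byte));
          return 0;
  }

  char _license[] SEC("license") = "GPL";

With this change, the write above goes through probe_user_write() under USER_DS instead of probe_kernel_write() under KERNEL_DS.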
Fixes: 96ae52279594 ("bpf: Add bpf_probe_write_user BPF helper to be called in tracers") Signed-off-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/841c461781874c07a0ee404a454c3bc0459eed30.1572649915.git.daniel@iogearbox.net --- kernel/trace/bpf_trace.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index f50bf19f7a05..2d87fcdcb19b 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -163,7 +163,7 @@ static const struct bpf_func_proto bpf_probe_read_proto = { .arg3_type = ARG_ANYTHING, }; -BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src, +BPF_CALL_3(bpf_probe_write_user, void __user *, unsafe_ptr, const void *, src, u32, size) { /* @@ -186,10 +186,8 @@ BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src, return -EPERM; if (unlikely(!nmi_uaccess_okay())) return -EPERM; - if (!access_ok(unsafe_ptr, size)) - return -EPERM; - return probe_kernel_write(unsafe_ptr, src, size); + return probe_user_write(unsafe_ptr, src, size); } static const struct bpf_func_proto bpf_probe_write_user_proto = { From 6ae08ae3dea2cfa03dd3665a3c8475c2d429ef47 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Sat, 2 Nov 2019 00:17:59 +0100 Subject: [PATCH 26/30] bpf: Add probe_read_{user, kernel} and probe_read_{user, kernel}_str helpers The current bpf_probe_read() and bpf_probe_read_str() helpers are broken in that they assume they can be used for probing memory access for kernel space addresses /as well as/ user space addresses. However, plain use of probe_kernel_read() for both cases will attempt to always access kernel space address space given access is performed under KERNEL_DS and some archs in-fact have overlapping address spaces where a kernel pointer and user pointer would have the /same/ address value and therefore accessing application memory via bpf_probe_read{,_str}() would read garbage values. Lets fix BPF side by making use of recently added 3d7081822f7f ("uaccess: Add non-pagefault user-space read functions"). Unfortunately, the only way to fix this status quo is to add dedicated bpf_probe_read_{user,kernel}() and bpf_probe_read_{user,kernel}_str() helpers. The bpf_probe_read{,_str}() helpers are kept as-is to retain their current behavior. The two *_user() variants attempt the access always under USER_DS set, the two *_kernel() variants will -EFAULT when accessing user memory if the underlying architecture has non-overlapping address ranges, also avoiding throwing the kernel warning via 00c42373d397 ("x86-64: add warning for non-canonical user access address dereferences"). 
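A minimal sketch of picking the right variant in a tracing program (the probed function and the ctx->di register access are illustrative, mirroring the x86-64 convention used in the helper documentation; helper declarations are assumed to be available from bpf_helpers.h):

  #include <linux/bpf.h>
  #include <linux/ptrace.h>
  #include "bpf_helpers.h"

  SEC("kprobe/sys_open")
  int kprobe_sys_open(struct pt_regs *ctx)
  {
          char filename[256];

          /* The filename argument is a user-space pointer, so use the
           * new _user_str() variant; kernel structures would instead be
           * read with bpf_probe_read_kernel{,_str}().
           */
          bpf_probe_read_user_str(filename, sizeof(filename),
                                  (const void *)ctx->di);
          return 0;
  }

  char _license[] SEC("license") = "GPL";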
Fixes: a5e8c07059d0 ("bpf: add bpf_probe_read_str helper") Fixes: 2541517c32be ("tracing, perf: Implement BPF programs attached to kprobes") Signed-off-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/796ee46e948bc808d54891a1108435f8652c6ca4.1572649915.git.daniel@iogearbox.net --- include/uapi/linux/bpf.h | 122 +++++++++++++++------- kernel/trace/bpf_trace.c | 185 ++++++++++++++++++++++++--------- tools/include/uapi/linux/bpf.h | 122 +++++++++++++++------- 3 files changed, 301 insertions(+), 128 deletions(-) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index a6bf19dabaab..df6809a76404 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -563,10 +563,13 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read(void *dst, u32 size, const void *src) + * int bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr) * Description * For tracing programs, safely attempt to read *size* bytes from - * address *src* and store the data in *dst*. + * kernel space address *unsafe_ptr* and store the data in *dst*. + * + * Generally, use bpf_probe_read_user() or bpf_probe_read_kernel() + * instead. * Return * 0 on success, or a negative error in case of failure. * @@ -1428,45 +1431,14 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr) + * int bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr) * Description - * Copy a NUL terminated string from an unsafe address - * *unsafe_ptr* to *dst*. The *size* should include the - * terminating NUL byte. In case the string length is smaller than - * *size*, the target is not padded with further NUL bytes. If the - * string length is larger than *size*, just *size*-1 bytes are - * copied and the last byte is set to NUL. + * Copy a NUL terminated string from an unsafe kernel address + * *unsafe_ptr* to *dst*. See bpf_probe_read_kernel_str() for + * more details. * - * On success, the length of the copied string is returned. This - * makes this helper useful in tracing programs for reading - * strings, and more importantly to get its length at runtime. See - * the following snippet: - * - * :: - * - * SEC("kprobe/sys_open") - * void bpf_sys_open(struct pt_regs *ctx) - * { - * char buf[PATHLEN]; // PATHLEN is defined to 256 - * int res = bpf_probe_read_str(buf, sizeof(buf), - * ctx->di); - * - * // Consume buf, for example push it to - * // userspace via bpf_perf_event_output(); we - * // can use res (the string length) as event - * // size, after checking its boundaries. - * } - * - * In comparison, using **bpf_probe_read()** helper here instead - * to read the string would require to estimate the length at - * compile time, and would often result in copying more memory - * than necessary. - * - * Another useful use case is when parsing individual process - * arguments or individual environment variables navigating - * *current*\ **->mm->arg_start** and *current*\ - * **->mm->env_start**: using this helper and the return value, - * one can quickly iterate at the right offset of the memory area. + * Generally, use bpf_probe_read_user_str() or bpf_probe_read_kernel_str() + * instead. * Return * On success, the strictly positive length of the string, * including the trailing NUL character. On error, a negative @@ -2777,6 +2749,72 @@ union bpf_attr { * restricted to raw_tracepoint bpf programs. 
* Return * 0 on success, or a negative error in case of failure. + * + * int bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr) + * Description + * Safely attempt to read *size* bytes from user space address + * *unsafe_ptr* and store the data in *dst*. + * Return + * 0 on success, or a negative error in case of failure. + * + * int bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) + * Description + * Safely attempt to read *size* bytes from kernel space address + * *unsafe_ptr* and store the data in *dst*. + * Return + * 0 on success, or a negative error in case of failure. + * + * int bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr) + * Description + * Copy a NUL terminated string from an unsafe user address + * *unsafe_ptr* to *dst*. The *size* should include the + * terminating NUL byte. In case the string length is smaller than + * *size*, the target is not padded with further NUL bytes. If the + * string length is larger than *size*, just *size*-1 bytes are + * copied and the last byte is set to NUL. + * + * On success, the length of the copied string is returned. This + * makes this helper useful in tracing programs for reading + * strings, and more importantly to get its length at runtime. See + * the following snippet: + * + * :: + * + * SEC("kprobe/sys_open") + * void bpf_sys_open(struct pt_regs *ctx) + * { + * char buf[PATHLEN]; // PATHLEN is defined to 256 + * int res = bpf_probe_read_user_str(buf, sizeof(buf), + * ctx->di); + * + * // Consume buf, for example push it to + * // userspace via bpf_perf_event_output(); we + * // can use res (the string length) as event + * // size, after checking its boundaries. + * } + * + * In comparison, using **bpf_probe_read_user()** helper here + * instead to read the string would require to estimate the length + * at compile time, and would often result in copying more memory + * than necessary. + * + * Another useful use case is when parsing individual process + * arguments or individual environment variables navigating + * *current*\ **->mm->arg_start** and *current*\ + * **->mm->env_start**: using this helper and the return value, + * one can quickly iterate at the right offset of the memory area. + * Return + * On success, the strictly positive length of the string, + * including the trailing NUL character. On error, a negative + * value. + * + * int bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr) + * Description + * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr* + * to *dst*. Same semantics as with bpf_probe_read_user_str() apply. + * Return + * On success, the strictly positive length of the string, including + * the trailing NUL character. On error, a negative value. 
*/ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -2890,7 +2928,11 @@ union bpf_attr { FN(sk_storage_delete), \ FN(send_signal), \ FN(tcp_gen_syncookie), \ - FN(skb_output), + FN(skb_output), \ + FN(probe_read_user), \ + FN(probe_read_kernel), \ + FN(probe_read_user_str), \ + FN(probe_read_kernel_str), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 2d87fcdcb19b..ffc91d4935ac 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -138,24 +138,140 @@ static const struct bpf_func_proto bpf_override_return_proto = { }; #endif -BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr) +BPF_CALL_3(bpf_probe_read_user, void *, dst, u32, size, + const void __user *, unsafe_ptr) { - int ret; + int ret = probe_user_read(dst, unsafe_ptr, size); - ret = security_locked_down(LOCKDOWN_BPF_READ); - if (ret < 0) - goto out; - - ret = probe_kernel_read(dst, unsafe_ptr, size); if (unlikely(ret < 0)) -out: memset(dst, 0, size); return ret; } -static const struct bpf_func_proto bpf_probe_read_proto = { - .func = bpf_probe_read, +static const struct bpf_func_proto bpf_probe_read_user_proto = { + .func = bpf_probe_read_user, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_UNINIT_MEM, + .arg2_type = ARG_CONST_SIZE_OR_ZERO, + .arg3_type = ARG_ANYTHING, +}; + +BPF_CALL_3(bpf_probe_read_user_str, void *, dst, u32, size, + const void __user *, unsafe_ptr) +{ + int ret = strncpy_from_unsafe_user(dst, unsafe_ptr, size); + + if (unlikely(ret < 0)) + memset(dst, 0, size); + + return ret; +} + +static const struct bpf_func_proto bpf_probe_read_user_str_proto = { + .func = bpf_probe_read_user_str, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_UNINIT_MEM, + .arg2_type = ARG_CONST_SIZE_OR_ZERO, + .arg3_type = ARG_ANYTHING, +}; + +static __always_inline int +bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr, + const bool compat) +{ + int ret = security_locked_down(LOCKDOWN_BPF_READ); + + if (unlikely(ret < 0)) + goto out; + ret = compat ? 
probe_kernel_read(dst, unsafe_ptr, size) : + probe_kernel_read_strict(dst, unsafe_ptr, size); + if (unlikely(ret < 0)) +out: + memset(dst, 0, size); + return ret; +} + +BPF_CALL_3(bpf_probe_read_kernel, void *, dst, u32, size, + const void *, unsafe_ptr) +{ + return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, false); +} + +static const struct bpf_func_proto bpf_probe_read_kernel_proto = { + .func = bpf_probe_read_kernel, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_UNINIT_MEM, + .arg2_type = ARG_CONST_SIZE_OR_ZERO, + .arg3_type = ARG_ANYTHING, +}; + +BPF_CALL_3(bpf_probe_read_compat, void *, dst, u32, size, + const void *, unsafe_ptr) +{ + return bpf_probe_read_kernel_common(dst, size, unsafe_ptr, true); +} + +static const struct bpf_func_proto bpf_probe_read_compat_proto = { + .func = bpf_probe_read_compat, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_UNINIT_MEM, + .arg2_type = ARG_CONST_SIZE_OR_ZERO, + .arg3_type = ARG_ANYTHING, +}; + +static __always_inline int +bpf_probe_read_kernel_str_common(void *dst, u32 size, const void *unsafe_ptr, + const bool compat) +{ + int ret = security_locked_down(LOCKDOWN_BPF_READ); + + if (unlikely(ret < 0)) + goto out; + /* + * The strncpy_from_unsafe_*() call will likely not fill the entire + * buffer, but that's okay in this circumstance as we're probing + * arbitrary memory anyway similar to bpf_probe_read_*() and might + * as well probe the stack. Thus, memory is explicitly cleared + * only in error case, so that improper users ignoring return + * code altogether don't copy garbage; otherwise length of string + * is returned that can be used for bpf_perf_event_output() et al. + */ + ret = compat ? strncpy_from_unsafe(dst, unsafe_ptr, size) : + strncpy_from_unsafe_strict(dst, unsafe_ptr, size); + if (unlikely(ret < 0)) +out: + memset(dst, 0, size); + return ret; +} + +BPF_CALL_3(bpf_probe_read_kernel_str, void *, dst, u32, size, + const void *, unsafe_ptr) +{ + return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, false); +} + +static const struct bpf_func_proto bpf_probe_read_kernel_str_proto = { + .func = bpf_probe_read_kernel_str, + .gpl_only = true, + .ret_type = RET_INTEGER, + .arg1_type = ARG_PTR_TO_UNINIT_MEM, + .arg2_type = ARG_CONST_SIZE_OR_ZERO, + .arg3_type = ARG_ANYTHING, +}; + +BPF_CALL_3(bpf_probe_read_compat_str, void *, dst, u32, size, + const void *, unsafe_ptr) +{ + return bpf_probe_read_kernel_str_common(dst, size, unsafe_ptr, true); +} + +static const struct bpf_func_proto bpf_probe_read_compat_str_proto = { + .func = bpf_probe_read_compat_str, .gpl_only = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_UNINIT_MEM, @@ -583,41 +699,6 @@ static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = { .arg2_type = ARG_ANYTHING, }; -BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size, - const void *, unsafe_ptr) -{ - int ret; - - ret = security_locked_down(LOCKDOWN_BPF_READ); - if (ret < 0) - goto out; - - /* - * The strncpy_from_unsafe() call will likely not fill the entire - * buffer, but that's okay in this circumstance as we're probing - * arbitrary memory anyway similar to bpf_probe_read() and might - * as well probe the stack. Thus, memory is explicitly cleared - * only in error case, so that improper users ignoring return - * code altogether don't copy garbage; otherwise length of string - * is returned that can be used for bpf_perf_event_output() et al. 
- */ - ret = strncpy_from_unsafe(dst, unsafe_ptr, size); - if (unlikely(ret < 0)) -out: - memset(dst, 0, size); - - return ret; -} - -static const struct bpf_func_proto bpf_probe_read_str_proto = { - .func = bpf_probe_read_str, - .gpl_only = true, - .ret_type = RET_INTEGER, - .arg1_type = ARG_PTR_TO_UNINIT_MEM, - .arg2_type = ARG_CONST_SIZE_OR_ZERO, - .arg3_type = ARG_ANYTHING, -}; - struct send_signal_irq_work { struct irq_work irq_work; struct task_struct *task; @@ -697,8 +778,6 @@ tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_map_pop_elem_proto; case BPF_FUNC_map_peek_elem: return &bpf_map_peek_elem_proto; - case BPF_FUNC_probe_read: - return &bpf_probe_read_proto; case BPF_FUNC_ktime_get_ns: return &bpf_ktime_get_ns_proto; case BPF_FUNC_tail_call: @@ -725,8 +804,18 @@ tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) return &bpf_current_task_under_cgroup_proto; case BPF_FUNC_get_prandom_u32: return &bpf_get_prandom_u32_proto; + case BPF_FUNC_probe_read_user: + return &bpf_probe_read_user_proto; + case BPF_FUNC_probe_read_kernel: + return &bpf_probe_read_kernel_proto; + case BPF_FUNC_probe_read: + return &bpf_probe_read_compat_proto; + case BPF_FUNC_probe_read_user_str: + return &bpf_probe_read_user_str_proto; + case BPF_FUNC_probe_read_kernel_str: + return &bpf_probe_read_kernel_str_proto; case BPF_FUNC_probe_read_str: - return &bpf_probe_read_str_proto; + return &bpf_probe_read_compat_str_proto; #ifdef CONFIG_CGROUPS case BPF_FUNC_get_current_cgroup_id: return &bpf_get_current_cgroup_id_proto; diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index a6bf19dabaab..df6809a76404 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -563,10 +563,13 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read(void *dst, u32 size, const void *src) + * int bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr) * Description * For tracing programs, safely attempt to read *size* bytes from - * address *src* and store the data in *dst*. + * kernel space address *unsafe_ptr* and store the data in *dst*. + * + * Generally, use bpf_probe_read_user() or bpf_probe_read_kernel() + * instead. * Return * 0 on success, or a negative error in case of failure. * @@ -1428,45 +1431,14 @@ union bpf_attr { * Return * 0 on success, or a negative error in case of failure. * - * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr) + * int bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr) * Description - * Copy a NUL terminated string from an unsafe address - * *unsafe_ptr* to *dst*. The *size* should include the - * terminating NUL byte. In case the string length is smaller than - * *size*, the target is not padded with further NUL bytes. If the - * string length is larger than *size*, just *size*-1 bytes are - * copied and the last byte is set to NUL. + * Copy a NUL terminated string from an unsafe kernel address + * *unsafe_ptr* to *dst*. See bpf_probe_read_kernel_str() for + * more details. * - * On success, the length of the copied string is returned. This - * makes this helper useful in tracing programs for reading - * strings, and more importantly to get its length at runtime. 
See - * the following snippet: - * - * :: - * - * SEC("kprobe/sys_open") - * void bpf_sys_open(struct pt_regs *ctx) - * { - * char buf[PATHLEN]; // PATHLEN is defined to 256 - * int res = bpf_probe_read_str(buf, sizeof(buf), - * ctx->di); - * - * // Consume buf, for example push it to - * // userspace via bpf_perf_event_output(); we - * // can use res (the string length) as event - * // size, after checking its boundaries. - * } - * - * In comparison, using **bpf_probe_read()** helper here instead - * to read the string would require to estimate the length at - * compile time, and would often result in copying more memory - * than necessary. - * - * Another useful use case is when parsing individual process - * arguments or individual environment variables navigating - * *current*\ **->mm->arg_start** and *current*\ - * **->mm->env_start**: using this helper and the return value, - * one can quickly iterate at the right offset of the memory area. + * Generally, use bpf_probe_read_user_str() or bpf_probe_read_kernel_str() + * instead. * Return * On success, the strictly positive length of the string, * including the trailing NUL character. On error, a negative @@ -2777,6 +2749,72 @@ union bpf_attr { * restricted to raw_tracepoint bpf programs. * Return * 0 on success, or a negative error in case of failure. + * + * int bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr) + * Description + * Safely attempt to read *size* bytes from user space address + * *unsafe_ptr* and store the data in *dst*. + * Return + * 0 on success, or a negative error in case of failure. + * + * int bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) + * Description + * Safely attempt to read *size* bytes from kernel space address + * *unsafe_ptr* and store the data in *dst*. + * Return + * 0 on success, or a negative error in case of failure. + * + * int bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr) + * Description + * Copy a NUL terminated string from an unsafe user address + * *unsafe_ptr* to *dst*. The *size* should include the + * terminating NUL byte. In case the string length is smaller than + * *size*, the target is not padded with further NUL bytes. If the + * string length is larger than *size*, just *size*-1 bytes are + * copied and the last byte is set to NUL. + * + * On success, the length of the copied string is returned. This + * makes this helper useful in tracing programs for reading + * strings, and more importantly to get its length at runtime. See + * the following snippet: + * + * :: + * + * SEC("kprobe/sys_open") + * void bpf_sys_open(struct pt_regs *ctx) + * { + * char buf[PATHLEN]; // PATHLEN is defined to 256 + * int res = bpf_probe_read_user_str(buf, sizeof(buf), + * ctx->di); + * + * // Consume buf, for example push it to + * // userspace via bpf_perf_event_output(); we + * // can use res (the string length) as event + * // size, after checking its boundaries. + * } + * + * In comparison, using **bpf_probe_read_user()** helper here + * instead to read the string would require to estimate the length + * at compile time, and would often result in copying more memory + * than necessary. + * + * Another useful use case is when parsing individual process + * arguments or individual environment variables navigating + * *current*\ **->mm->arg_start** and *current*\ + * **->mm->env_start**: using this helper and the return value, + * one can quickly iterate at the right offset of the memory area. 
+ * Return + * On success, the strictly positive length of the string, + * including the trailing NUL character. On error, a negative + * value. + * + * int bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr) + * Description + * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr* + * to *dst*. Same semantics as with bpf_probe_read_user_str() apply. + * Return + * On success, the strictly positive length of the string, including + * the trailing NUL character. On error, a negative value. */ #define __BPF_FUNC_MAPPER(FN) \ FN(unspec), \ @@ -2890,7 +2928,11 @@ union bpf_attr { FN(sk_storage_delete), \ FN(send_signal), \ FN(tcp_gen_syncookie), \ - FN(skb_output), + FN(skb_output), \ + FN(probe_read_user), \ + FN(probe_read_kernel), \ + FN(probe_read_user_str), \ + FN(probe_read_kernel_str), /* integer value in 'imm' field of BPF_CALL instruction selects which helper * function eBPF program intends to call From 6e07a6341277a1dbdf5ed0c41921033c234c1635 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Sat, 2 Nov 2019 00:18:00 +0100 Subject: [PATCH 27/30] bpf: Switch BPF probe insns to bpf_probe_read_kernel Commit 2a02759ef5f8 ("bpf: Add support for BTF pointers to interpreter") explicitly states that the pointer to BTF object is a pointer to a kernel object or NULL. Therefore we should also switch to using the strict kernel probe helper which is restricted to kernel addresses only when architectures have non-overlapping address spaces. Signed-off-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/d2b90827837685424a4b8008dfe0460558abfada.1572649915.git.daniel@iogearbox.net --- kernel/bpf/core.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 8d3fbc86ca5e..df82d5a42b23 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -1306,11 +1306,12 @@ bool bpf_opcode_in_insntable(u8 code) } #ifndef CONFIG_BPF_JIT_ALWAYS_ON -u64 __weak bpf_probe_read(void * dst, u32 size, const void * unsafe_ptr) +u64 __weak bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr) { memset(dst, 0, size); return -EFAULT; } + /** * __bpf_prog_run - run eBPF program on a given context * @regs: is the array of MAX_BPF_EXT_REG eBPF pseudo-registers @@ -1566,9 +1567,9 @@ static u64 __no_fgcse ___bpf_prog_run(u64 *regs, const struct bpf_insn *insn, u6 LDST(W, u32) LDST(DW, u64) #undef LDST -#define LDX_PROBE(SIZEOP, SIZE) \ - LDX_PROBE_MEM_##SIZEOP: \ - bpf_probe_read(&DST, SIZE, (const void *)(long) SRC); \ +#define LDX_PROBE(SIZEOP, SIZE) \ + LDX_PROBE_MEM_##SIZEOP: \ + bpf_probe_read_kernel(&DST, SIZE, (const void *)(long) SRC); \ CONT; LDX_PROBE(B, 1) LDX_PROBE(H, 2) From 251e2d337a1a4f2572439ea29fd27f8699c5c368 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Sat, 2 Nov 2019 00:18:01 +0100 Subject: [PATCH 28/30] bpf, samples: Use bpf_probe_read_user where appropriate Use bpf_probe_read_user() helper instead of bpf_probe_read() for samples that attach to kprobes probing on user addresses. 
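For illustration only (not part of this patch): the pattern the converted samples follow is roughly the sketch below, where the kprobe argument is a user-space pointer and therefore must be read with bpf_probe_read_user() rather than bpf_probe_read(), which on architectures with non-overlapping address spaces (e.g. s390x) would probe kernel memory instead. The attach point, includes and helper declarations here are assumptions for the example; the real programs live in samples/bpf/.

  /* Hypothetical sketch: __sys_connect()'s second argument is a
   * user-space struct sockaddr pointer, so the _user variant of the
   * probe helper is required to read it.
   */
  #include <linux/ptrace.h>
  #include <linux/bpf.h>
  #include <netinet/in.h>
  #include "bpf_helpers.h"
  #include "bpf_tracing.h"

  SEC("kprobe/__sys_connect")
  int sketch_sys_connect(struct pt_regs *ctx)
  {
  	void *uaddr = (void *)PT_REGS_PARM2(ctx);
  	struct sockaddr_in sa = {};

  	/* user memory: must go through bpf_probe_read_user() */
  	if (bpf_probe_read_user(&sa, sizeof(sa), uaddr))
  		return 0;
  	/* sa now holds a snapshot of the user-supplied address */
  	return 0;
  }

  char _license[] SEC("license") = "GPL";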
Signed-off-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/5b0144b3f8e031ec5e2438bd7de8d7877e63bf2f.1572649915.git.daniel@iogearbox.net --- samples/bpf/map_perf_test_kern.c | 4 ++-- samples/bpf/test_map_in_map_kern.c | 4 ++-- samples/bpf/test_probe_write_user_kern.c | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/samples/bpf/map_perf_test_kern.c b/samples/bpf/map_perf_test_kern.c index 5c11aefbc489..281bcdaee58e 100644 --- a/samples/bpf/map_perf_test_kern.c +++ b/samples/bpf/map_perf_test_kern.c @@ -181,8 +181,8 @@ int stress_lru_hmap_alloc(struct pt_regs *ctx) if (addrlen != sizeof(*in6)) return 0; - ret = bpf_probe_read(test_params.dst6, sizeof(test_params.dst6), - &in6->sin6_addr); + ret = bpf_probe_read_user(test_params.dst6, sizeof(test_params.dst6), + &in6->sin6_addr); if (ret) goto done; diff --git a/samples/bpf/test_map_in_map_kern.c b/samples/bpf/test_map_in_map_kern.c index 4f80cbe74c72..32ee752f19df 100644 --- a/samples/bpf/test_map_in_map_kern.c +++ b/samples/bpf/test_map_in_map_kern.c @@ -118,7 +118,7 @@ int trace_sys_connect(struct pt_regs *ctx) if (addrlen != sizeof(*in6)) return 0; - ret = bpf_probe_read(dst6, sizeof(dst6), &in6->sin6_addr); + ret = bpf_probe_read_user(dst6, sizeof(dst6), &in6->sin6_addr); if (ret) { inline_ret = ret; goto done; @@ -129,7 +129,7 @@ int trace_sys_connect(struct pt_regs *ctx) test_case = dst6[7]; - ret = bpf_probe_read(&port, sizeof(port), &in6->sin6_port); + ret = bpf_probe_read_user(&port, sizeof(port), &in6->sin6_port); if (ret) { inline_ret = ret; goto done; diff --git a/samples/bpf/test_probe_write_user_kern.c b/samples/bpf/test_probe_write_user_kern.c index a543358218e6..b7c48f37132c 100644 --- a/samples/bpf/test_probe_write_user_kern.c +++ b/samples/bpf/test_probe_write_user_kern.c @@ -37,7 +37,7 @@ int bpf_prog1(struct pt_regs *ctx) if (sockaddr_len > sizeof(orig_addr)) return 0; - if (bpf_probe_read(&orig_addr, sizeof(orig_addr), sockaddr_arg) != 0) + if (bpf_probe_read_user(&orig_addr, sizeof(orig_addr), sockaddr_arg) != 0) return 0; mapped_addr = bpf_map_lookup_elem(&dnat_map, &orig_addr); From 50f9aa44cac7256551b2e0901831e432a6c52b7f Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Sat, 2 Nov 2019 00:18:02 +0100 Subject: [PATCH 29/30] bpf, testing: Convert prog tests to probe_read_{user, kernel}{, _str} helper Use probe read *_{kernel,user}{,_str}() helpers instead of bpf_probe_read() or bpf_probe_read_user_str() for program tests where appropriate. 
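A minimal sketch of the rule these conversions apply (helper names as added by this series; the function names, includes and surrounding context below are made up for illustration): pointers that originate in user space, such as the Python frame and TLS pointers walked in pyperf.h and strobemeta.h, are read with the _user variants, while kernel data keeps the _kernel variants.

  /* Both the pointer slot and the string it points to are user memory,
   * so both reads use the _user variants.
   */
  static __always_inline int copy_user_string(const void *uptr_slot,
  					    char *buf, int size)
  {
  	const char *ustr;

  	if (bpf_probe_read_user(&ustr, sizeof(ustr), uptr_slot))
  		return -1;
  	return bpf_probe_read_user_str(buf, size, ustr);
  }

  /* Kernel memory, e.g. fields reached from a struct sk_buff pointer,
   * stays with the _kernel variants instead.
   */
  static __always_inline int copy_kernel_string(const void *kstr,
  					      char *buf, int size)
  {
  	return bpf_probe_read_kernel_str(buf, size, kstr);
  }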
Signed-off-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/4a61d4b71ce3765587d8ef5cb93afa18515e5b3e.1572649915.git.daniel@iogearbox.net --- tools/testing/selftests/bpf/progs/kfree_skb.c | 4 +- tools/testing/selftests/bpf/progs/pyperf.h | 67 ++++++++++--------- .../testing/selftests/bpf/progs/strobemeta.h | 36 +++++----- .../selftests/bpf/progs/test_tcp_estats.c | 2 +- 4 files changed, 57 insertions(+), 52 deletions(-) diff --git a/tools/testing/selftests/bpf/progs/kfree_skb.c b/tools/testing/selftests/bpf/progs/kfree_skb.c index 89af8a921ee4..489319ea1d6a 100644 --- a/tools/testing/selftests/bpf/progs/kfree_skb.c +++ b/tools/testing/selftests/bpf/progs/kfree_skb.c @@ -79,11 +79,11 @@ int trace_kfree_skb(struct trace_kfree_skb *ctx) func = ptr->func; })); - bpf_probe_read(&pkt_type, sizeof(pkt_type), _(&skb->__pkt_type_offset)); + bpf_probe_read_kernel(&pkt_type, sizeof(pkt_type), _(&skb->__pkt_type_offset)); pkt_type &= 7; /* read eth proto */ - bpf_probe_read(&pkt_data, sizeof(pkt_data), data + 12); + bpf_probe_read_kernel(&pkt_data, sizeof(pkt_data), data + 12); bpf_printk("rcuhead.next %llx func %llx\n", ptr, func); bpf_printk("skb->len %d users %d pkt_type %x\n", diff --git a/tools/testing/selftests/bpf/progs/pyperf.h b/tools/testing/selftests/bpf/progs/pyperf.h index 003fe106fc70..71d383cc9b85 100644 --- a/tools/testing/selftests/bpf/progs/pyperf.h +++ b/tools/testing/selftests/bpf/progs/pyperf.h @@ -72,9 +72,9 @@ static __always_inline void *get_thread_state(void *tls_base, PidData *pidData) void* thread_state; int key; - bpf_probe_read(&key, sizeof(key), (void*)(long)pidData->tls_key_addr); - bpf_probe_read(&thread_state, sizeof(thread_state), - tls_base + 0x310 + key * 0x10 + 0x08); + bpf_probe_read_user(&key, sizeof(key), (void*)(long)pidData->tls_key_addr); + bpf_probe_read_user(&thread_state, sizeof(thread_state), + tls_base + 0x310 + key * 0x10 + 0x08); return thread_state; } @@ -82,31 +82,33 @@ static __always_inline bool get_frame_data(void *frame_ptr, PidData *pidData, FrameData *frame, Symbol *symbol) { // read data from PyFrameObject - bpf_probe_read(&frame->f_back, - sizeof(frame->f_back), - frame_ptr + pidData->offsets.PyFrameObject_back); - bpf_probe_read(&frame->f_code, - sizeof(frame->f_code), - frame_ptr + pidData->offsets.PyFrameObject_code); + bpf_probe_read_user(&frame->f_back, + sizeof(frame->f_back), + frame_ptr + pidData->offsets.PyFrameObject_back); + bpf_probe_read_user(&frame->f_code, + sizeof(frame->f_code), + frame_ptr + pidData->offsets.PyFrameObject_code); // read data from PyCodeObject if (!frame->f_code) return false; - bpf_probe_read(&frame->co_filename, - sizeof(frame->co_filename), - frame->f_code + pidData->offsets.PyCodeObject_filename); - bpf_probe_read(&frame->co_name, - sizeof(frame->co_name), - frame->f_code + pidData->offsets.PyCodeObject_name); + bpf_probe_read_user(&frame->co_filename, + sizeof(frame->co_filename), + frame->f_code + pidData->offsets.PyCodeObject_filename); + bpf_probe_read_user(&frame->co_name, + sizeof(frame->co_name), + frame->f_code + pidData->offsets.PyCodeObject_name); // read actual names into symbol if (frame->co_filename) - bpf_probe_read_str(&symbol->file, - sizeof(symbol->file), - frame->co_filename + pidData->offsets.String_data); + bpf_probe_read_user_str(&symbol->file, + sizeof(symbol->file), + frame->co_filename + + pidData->offsets.String_data); if (frame->co_name) - bpf_probe_read_str(&symbol->name, - sizeof(symbol->name), - frame->co_name + 
pidData->offsets.String_data); + bpf_probe_read_user_str(&symbol->name, + sizeof(symbol->name), + frame->co_name + + pidData->offsets.String_data); return true; } @@ -174,9 +176,9 @@ static __always_inline int __on_event(struct pt_regs *ctx) event->kernel_stack_id = bpf_get_stackid(ctx, &stackmap, 0); void* thread_state_current = (void*)0; - bpf_probe_read(&thread_state_current, - sizeof(thread_state_current), - (void*)(long)pidData->current_state_addr); + bpf_probe_read_user(&thread_state_current, + sizeof(thread_state_current), + (void*)(long)pidData->current_state_addr); struct task_struct* task = (struct task_struct*)bpf_get_current_task(); void* tls_base = (void*)task; @@ -188,11 +190,13 @@ static __always_inline int __on_event(struct pt_regs *ctx) if (pidData->use_tls) { uint64_t pthread_created; uint64_t pthread_self; - bpf_probe_read(&pthread_self, sizeof(pthread_self), tls_base + 0x10); + bpf_probe_read_user(&pthread_self, sizeof(pthread_self), + tls_base + 0x10); - bpf_probe_read(&pthread_created, - sizeof(pthread_created), - thread_state + pidData->offsets.PyThreadState_thread); + bpf_probe_read_user(&pthread_created, + sizeof(pthread_created), + thread_state + + pidData->offsets.PyThreadState_thread); event->pthread_match = pthread_created == pthread_self; } else { event->pthread_match = 1; @@ -204,9 +208,10 @@ static __always_inline int __on_event(struct pt_regs *ctx) Symbol sym = {}; int cur_cpu = bpf_get_smp_processor_id(); - bpf_probe_read(&frame_ptr, - sizeof(frame_ptr), - thread_state + pidData->offsets.PyThreadState_frame); + bpf_probe_read_user(&frame_ptr, + sizeof(frame_ptr), + thread_state + + pidData->offsets.PyThreadState_frame); int32_t* symbol_counter = bpf_map_lookup_elem(&symbolmap, &sym); if (symbol_counter == NULL) diff --git a/tools/testing/selftests/bpf/progs/strobemeta.h b/tools/testing/selftests/bpf/progs/strobemeta.h index 067eb625d01c..4bf16e0a1b0e 100644 --- a/tools/testing/selftests/bpf/progs/strobemeta.h +++ b/tools/testing/selftests/bpf/progs/strobemeta.h @@ -98,7 +98,7 @@ struct strobe_map_raw { /* * having volatile doesn't change anything on BPF side, but clang * emits warnings for passing `volatile const char *` into - * bpf_probe_read_str that expects just `const char *` + * bpf_probe_read_user_str that expects just `const char *` */ const char* tag; /* @@ -309,18 +309,18 @@ static __always_inline void *calc_location(struct strobe_value_loc *loc, dtv_t *dtv; void *tls_ptr; - bpf_probe_read(&tls_index, sizeof(struct tls_index), - (void *)loc->offset); + bpf_probe_read_user(&tls_index, sizeof(struct tls_index), + (void *)loc->offset); /* valid module index is always positive */ if (tls_index.module > 0) { /* dtv = ((struct tcbhead *)tls_base)->dtv[tls_index.module] */ - bpf_probe_read(&dtv, sizeof(dtv), - &((struct tcbhead *)tls_base)->dtv); + bpf_probe_read_user(&dtv, sizeof(dtv), + &((struct tcbhead *)tls_base)->dtv); dtv += tls_index.module; } else { dtv = NULL; } - bpf_probe_read(&tls_ptr, sizeof(void *), dtv); + bpf_probe_read_user(&tls_ptr, sizeof(void *), dtv); /* if pointer has (void *)-1 value, then TLS wasn't initialized yet */ return tls_ptr && tls_ptr != (void *)-1 ? 
tls_ptr + tls_index.offset @@ -336,7 +336,7 @@ static __always_inline void read_int_var(struct strobemeta_cfg *cfg, if (!location) return; - bpf_probe_read(value, sizeof(struct strobe_value_generic), location); + bpf_probe_read_user(value, sizeof(struct strobe_value_generic), location); data->int_vals[idx] = value->val; if (value->header.len) data->int_vals_set_mask |= (1 << idx); @@ -356,13 +356,13 @@ static __always_inline uint64_t read_str_var(struct strobemeta_cfg *cfg, if (!location) return 0; - bpf_probe_read(value, sizeof(struct strobe_value_generic), location); - len = bpf_probe_read_str(payload, STROBE_MAX_STR_LEN, value->ptr); + bpf_probe_read_user(value, sizeof(struct strobe_value_generic), location); + len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN, value->ptr); /* - * if bpf_probe_read_str returns error (<0), due to casting to + * if bpf_probe_read_user_str returns error (<0), due to casting to * unsinged int, it will become big number, so next check is * sufficient to check for errors AND prove to BPF verifier, that - * bpf_probe_read_str won't return anything bigger than + * bpf_probe_read_user_str won't return anything bigger than * STROBE_MAX_STR_LEN */ if (len > STROBE_MAX_STR_LEN) @@ -391,8 +391,8 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg, if (!location) return payload; - bpf_probe_read(value, sizeof(struct strobe_value_generic), location); - if (bpf_probe_read(&map, sizeof(struct strobe_map_raw), value->ptr)) + bpf_probe_read_user(value, sizeof(struct strobe_value_generic), location); + if (bpf_probe_read_user(&map, sizeof(struct strobe_map_raw), value->ptr)) return payload; descr->id = map.id; @@ -402,7 +402,7 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg, data->req_meta_valid = 1; } - len = bpf_probe_read_str(payload, STROBE_MAX_STR_LEN, map.tag); + len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN, map.tag); if (len <= STROBE_MAX_STR_LEN) { descr->tag_len = len; payload += len; @@ -418,15 +418,15 @@ static __always_inline void *read_map_var(struct strobemeta_cfg *cfg, break; descr->key_lens[i] = 0; - len = bpf_probe_read_str(payload, STROBE_MAX_STR_LEN, - map.entries[i].key); + len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN, + map.entries[i].key); if (len <= STROBE_MAX_STR_LEN) { descr->key_lens[i] = len; payload += len; } descr->val_lens[i] = 0; - len = bpf_probe_read_str(payload, STROBE_MAX_STR_LEN, - map.entries[i].val); + len = bpf_probe_read_user_str(payload, STROBE_MAX_STR_LEN, + map.entries[i].val); if (len <= STROBE_MAX_STR_LEN) { descr->val_lens[i] = len; payload += len; diff --git a/tools/testing/selftests/bpf/progs/test_tcp_estats.c b/tools/testing/selftests/bpf/progs/test_tcp_estats.c index c8c595da38d4..87b7d934ce73 100644 --- a/tools/testing/selftests/bpf/progs/test_tcp_estats.c +++ b/tools/testing/selftests/bpf/progs/test_tcp_estats.c @@ -38,7 +38,7 @@ #include #include "bpf_helpers.h" -#define _(P) ({typeof(P) val = 0; bpf_probe_read(&val, sizeof(val), &P); val;}) +#define _(P) ({typeof(P) val = 0; bpf_probe_read_kernel(&val, sizeof(val), &P); val;}) #define TCP_ESTATS_MAGIC 0xBAADBEEF /* This test case needs "sock" and "pt_regs" data structure. 
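As a usage note for the _() macro converted just above in test_tcp_estats.c (the struct layout below is assumed purely for the sketch): every field access on a kernel pointer is wrapped in bpf_probe_read_kernel(), so chained dereferences never touch kernel memory directly.

  /* Hypothetical kernel object layout, only for the sketch: */
  struct node { int key; struct node *next; };

  /* _(P) copies P via bpf_probe_read_kernel() into a local and yields
   * that local, so &P is computed but never dereferenced by the program.
   */
  static __always_inline int read_next_key(struct node *n)
  {
  	struct node *next = _(n->next);

  	return next ? _(next->key) : 0;
  }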
From fa553d9b57d4a98a160d1926b4e263e7a78c0cf3 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Sat, 2 Nov 2019 00:18:03 +0100 Subject: [PATCH 30/30] bpf, testing: Add selftest to read/write sockaddr from user space Tested on x86-64 and Ilya was also kind enough to give it a spin on s390x, both passing with probe_user:OK there. The test is using the newly added bpf_probe_read_user() to dump sockaddr from connect call into .bss BPF map and overrides the user buffer via bpf_probe_write_user(): # ./test_progs [...] #17 pkt_md_access:OK #18 probe_user:OK #19 prog_run_xattr:OK [...] Signed-off-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov Tested-by: Ilya Leoshkevich Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/90f449d8af25354e05080e82fc6e2d3179da30ea.1572649915.git.daniel@iogearbox.net --- .../selftests/bpf/prog_tests/probe_user.c | 78 +++++++++++++++++++ .../selftests/bpf/progs/test_probe_user.c | 26 +++++++ 2 files changed, 104 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/probe_user.c create mode 100644 tools/testing/selftests/bpf/progs/test_probe_user.c diff --git a/tools/testing/selftests/bpf/prog_tests/probe_user.c b/tools/testing/selftests/bpf/prog_tests/probe_user.c new file mode 100644 index 000000000000..8a3187dec048 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/probe_user.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +void test_probe_user(void) +{ +#define kprobe_name "__sys_connect" + const char *prog_name = "kprobe/" kprobe_name; + const char *obj_file = "./test_probe_user.o"; + DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, ); + int err, results_map_fd, sock_fd, duration = 0; + struct sockaddr curr, orig, tmp; + struct sockaddr_in *in = (struct sockaddr_in *)&curr; + struct bpf_link *kprobe_link = NULL; + struct bpf_program *kprobe_prog; + struct bpf_object *obj; + static const int zero = 0; + + obj = bpf_object__open_file(obj_file, &opts); + if (CHECK(IS_ERR(obj), "obj_open_file", "err %ld\n", PTR_ERR(obj))) + return; + + kprobe_prog = bpf_object__find_program_by_title(obj, prog_name); + if (CHECK(!kprobe_prog, "find_probe", + "prog '%s' not found\n", prog_name)) + goto cleanup; + + err = bpf_object__load(obj); + if (CHECK(err, "obj_load", "err %d\n", err)) + goto cleanup; + + results_map_fd = bpf_find_map(__func__, obj, "test_pro.bss"); + if (CHECK(results_map_fd < 0, "find_bss_map", + "err %d\n", results_map_fd)) + goto cleanup; + + kprobe_link = bpf_program__attach_kprobe(kprobe_prog, false, + kprobe_name); + if (CHECK(IS_ERR(kprobe_link), "attach_kprobe", + "err %ld\n", PTR_ERR(kprobe_link))) { + kprobe_link = NULL; + goto cleanup; + } + + memset(&curr, 0, sizeof(curr)); + in->sin_family = AF_INET; + in->sin_port = htons(5555); + in->sin_addr.s_addr = inet_addr("255.255.255.255"); + memcpy(&orig, &curr, sizeof(curr)); + + sock_fd = socket(AF_INET, SOCK_STREAM, 0); + if (CHECK(sock_fd < 0, "create_sock_fd", "err %d\n", sock_fd)) + goto cleanup; + + connect(sock_fd, &curr, sizeof(curr)); + close(sock_fd); + + err = bpf_map_lookup_elem(results_map_fd, &zero, &tmp); + if (CHECK(err, "get_kprobe_res", + "failed to get kprobe res: %d\n", err)) + goto cleanup; + + in = (struct sockaddr_in *)&tmp; + if (CHECK(memcmp(&tmp, &orig, sizeof(orig)), "check_kprobe_res", + "wrong kprobe res from probe read: %s:%u\n", + inet_ntoa(in->sin_addr), ntohs(in->sin_port))) + goto cleanup; + + memset(&tmp, 0xab, sizeof(tmp)); + + in = (struct sockaddr_in *)&curr; + if (CHECK(memcmp(&curr, &tmp, sizeof(tmp)), 
"check_kprobe_res", + "wrong kprobe res from probe write: %s:%u\n", + inet_ntoa(in->sin_addr), ntohs(in->sin_port))) + goto cleanup; +cleanup: + bpf_link__destroy(kprobe_link); + bpf_object__close(obj); +} diff --git a/tools/testing/selftests/bpf/progs/test_probe_user.c b/tools/testing/selftests/bpf/progs/test_probe_user.c new file mode 100644 index 000000000000..1871e2ece0c4 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_probe_user.c @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +#include + +#include "bpf_helpers.h" +#include "bpf_tracing.h" + +static struct sockaddr_in old; + +SEC("kprobe/__sys_connect") +int handle_sys_connect(struct pt_regs *ctx) +{ + void *ptr = (void *)PT_REGS_PARM2(ctx); + struct sockaddr_in new; + + bpf_probe_read_user(&old, sizeof(old), ptr); + __builtin_memset(&new, 0xab, sizeof(new)); + bpf_probe_write_user(ptr, &new, sizeof(new)); + + return 0; +} + +char _license[] SEC("license") = "GPL";