Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-11-24 13:20:52 +07:00)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says:

====================
pull-request: bpf 2019-08-24

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Fix verifier precision tracking with BPF-to-BPF calls, from Alexei.

2) Fix a use-after-free in prog symbol exposure, from Daniel.

3) Several s390x JIT fixes plus BE related fixes in BPF kselftests, from Ilya.

4) Fix memory leak by unpinning XDP umem pages in error path, from Ivan.

5) Fix a potential use-after-free on flow dissector detach, from Jakub.

6) Fix bpftool to close prog fd after showing metadata, from Quentin.

7) BPF kselftest config and TEST_PROGS_EXTENDED fixes, from Anders.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 211c462452
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -863,7 +863,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
                 break;
         case BPF_ALU64 | BPF_NEG: /* dst = -dst */
                 /* lcgr %dst,%dst */
-                EMIT4(0xb9130000, dst_reg, dst_reg);
+                EMIT4(0xb9030000, dst_reg, dst_reg);
                 break;
         /*
          * BPF_FROM_BE/LE
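A hedged aside (userspace C, not part of the patch): BPF_ALU64 | BPF_NEG must negate all 64 bits of dst, as the `lcgr %dst,%dst` comment says. The sketch below shows what goes wrong if the emitted instruction instead performs a 32-bit negate with sign extension, which corrupts the upper half of the register.

/*
 * Minimal demonstration, assuming only the semantics stated above;
 * this is not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

uint64_t neg64(uint64_t dst)          /* full 64-bit negate, lcgr-like */
{
        return (uint64_t)(-(int64_t)dst);
}

uint64_t neg32_sext(uint64_t dst)     /* 32-bit negate + sign extension */
{
        return (uint64_t)(int64_t)(-(int32_t)(uint32_t)dst);
}

int main(void)
{
        uint64_t dst = 0x100000001ULL;

        printf("expected dst = -dst : %#llx\n", (unsigned long long)neg64(dst));
        printf("32-bit negate gives : %#llx\n", (unsigned long long)neg32_sext(dst));
        return 0;
}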
@@ -1049,8 +1049,8 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
                 /* llgf %w1,map.max_entries(%b2) */
                 EMIT6_DISP_LH(0xe3000000, 0x0016, REG_W1, REG_0, BPF_REG_2,
                               offsetof(struct bpf_array, map.max_entries));
-                /* clgrj %b3,%w1,0xa,label0: if %b3 >= %w1 goto out */
-                EMIT6_PCREL_LABEL(0xec000000, 0x0065, BPF_REG_3,
+                /* clrj %b3,%w1,0xa,label0: if (u32)%b3 >= (u32)%w1 goto out */
+                EMIT6_PCREL_LABEL(0xec000000, 0x0077, BPF_REG_3,
                                   REG_W1, 0, 0xa);

                 /*
@@ -1076,8 +1076,10 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
                  *         goto out;
                  */

-                /* sllg %r1,%b3,3: %r1 = index * 8 */
-                EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, BPF_REG_3, REG_0, 3);
+                /* llgfr %r1,%b3: %r1 = (u32) index */
+                EMIT4(0xb9160000, REG_1, BPF_REG_3);
+                /* sllg %r1,%r1,3: %r1 *= 8 */
+                EMIT6_DISP_LH(0xeb000000, 0x000d, REG_1, REG_1, REG_0, 3);
                 /* lg %r1,prog(%b2,%r1) */
                 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_1, BPF_REG_2,
                               REG_1, offsetof(struct bpf_array, ptrs));
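A hedged sketch (userspace C, hypothetical types) of the tail-call semantics the two hunks above make the s390 JIT follow: the index is truncated to 32 bits before both the bounds check and the array lookup, matching the comments in the emitted code.

#include <stddef.h>
#include <stdint.h>

struct fake_bpf_array {               /* stand-in for struct bpf_array */
        uint32_t max_entries;
        void *ptrs[];
};

void *tail_call_target(struct fake_bpf_array *array, uint64_t index)
{
        uint32_t idx = (uint32_t)index;   /* llgfr: keep only the low 32 bits */

        if (idx >= array->max_entries)    /* clrj: unsigned 32-bit compare */
                return NULL;              /* "goto out" */

        return array->ptrs[idx];          /* sllg + lg: base + idx * 8 */
}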
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1707,20 +1707,26 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
         if (err)
                 goto free_used_maps;

-        err = bpf_prog_new_fd(prog);
-        if (err < 0) {
-                /* failed to allocate fd.
-                 * bpf_prog_put() is needed because the above
-                 * bpf_prog_alloc_id() has published the prog
-                 * to the userspace and the userspace may
-                 * have refcnt-ed it through BPF_PROG_GET_FD_BY_ID.
-                 */
-                bpf_prog_put(prog);
-                return err;
-        }
-
+        /* Upon success of bpf_prog_alloc_id(), the BPF prog is
+         * effectively publicly exposed. However, retrieving via
+         * bpf_prog_get_fd_by_id() will take another reference,
+         * therefore it cannot be gone underneath us.
+         *
+         * Only for the time /after/ successful bpf_prog_new_fd()
+         * and before returning to userspace, we might just hold
+         * one reference and any parallel close on that fd could
+         * rip everything out. Hence, below notifications must
+         * happen before bpf_prog_new_fd().
+         *
+         * Also, any failure handling from this point onwards must
+         * be using bpf_prog_put() given the program is exposed.
+         */
         bpf_prog_kallsyms_add(prog);
         perf_event_bpf_event(prog, PERF_BPF_EVENT_PROG_LOAD, 0);
+
+        err = bpf_prog_new_fd(prog);
+        if (err < 0)
+                bpf_prog_put(prog);
         return err;

 free_used_maps:
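A hedged restatement of the ordering rule this hunk enforces, using a hypothetical object rather than the real BPF API: once userspace can reach the object through an fd, a parallel close() may drop the last reference at any moment, so notifications that dereference the object must run before the fd is installed, and failure paths after exposure must drop a reference instead of freeing directly.

/* Sketch only; obj_install_fd/obj_put/obj_notify are made-up stand-ins
 * for bpf_prog_new_fd()/bpf_prog_put()/the kallsyms and perf hooks.
 */
struct obj;

int  obj_install_fd(struct obj *o);
void obj_put(struct obj *o);
void obj_notify(struct obj *o);

int obj_publish(struct obj *o)
{
        int fd;

        obj_notify(o);               /* safe: we still hold our own reference */

        fd = obj_install_fd(o);      /* from here on userspace may close() it */
        if (fd < 0)
                obj_put(o);          /* already visible by id, so put, not free */
        return fd;
}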
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -985,9 +985,6 @@ static void __mark_reg_unbounded(struct bpf_reg_state *reg)
         reg->smax_value = S64_MAX;
         reg->umin_value = 0;
         reg->umax_value = U64_MAX;
-
-        /* constant backtracking is enabled for root only for now */
-        reg->precise = capable(CAP_SYS_ADMIN) ? false : true;
 }

 /* Mark a register as having a completely unknown (scalar) value. */
@@ -1014,7 +1011,11 @@ static void mark_reg_unknown(struct bpf_verifier_env *env,
                 __mark_reg_not_init(regs + regno);
                 return;
         }
-        __mark_reg_unknown(regs + regno);
+        regs += regno;
+        __mark_reg_unknown(regs);
+        /* constant backtracking is enabled for root without bpf2bpf calls */
+        regs->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ?
+                        true : false;
 }

 static void __mark_reg_not_init(struct bpf_reg_state *reg)
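The policy in the hunk above can be read as a tiny predicate, sketched below under the assumption stated in the added comment: lazy precision tracking (marking a fresh scalar as not precise) is only used for privileged programs without bpf2bpf calls; with more than one subprogram, or for unprivileged users, scalars stay precise.

#include <stdbool.h>

/* Hypothetical helper, not a verifier function. */
bool scalar_starts_precise(unsigned int subprog_cnt, bool allow_ptr_leaks)
{
        return subprog_cnt > 1 || !allow_ptr_leaks;
}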
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -8757,13 +8757,13 @@ sk_reuseport_is_valid_access(int off, int size,
                 return size == size_default;

         /* Fields that allow narrowing */
-        case offsetof(struct sk_reuseport_md, eth_protocol):
+        case bpf_ctx_range(struct sk_reuseport_md, eth_protocol):
                 if (size < FIELD_SIZEOF(struct sk_buff, protocol))
                         return false;
                 /* fall through */
-        case offsetof(struct sk_reuseport_md, ip_protocol):
-        case offsetof(struct sk_reuseport_md, bind_inany):
-        case offsetof(struct sk_reuseport_md, len):
+        case bpf_ctx_range(struct sk_reuseport_md, ip_protocol):
+        case bpf_ctx_range(struct sk_reuseport_md, bind_inany):
+        case bpf_ctx_range(struct sk_reuseport_md, len):
                 bpf_ctx_record_field_size(info, size_default);
                 return bpf_ctx_narrow_access_ok(off, size, size_default);

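A hedged sketch of why the case labels change matters, assuming bpf_ctx_range() expands to a GCC "first byte ... last byte" case range over the field (hypothetical struct below, not the real sk_reuseport_md): with a plain offsetof() label, a narrow load that starts in the middle of a field misses the case and falls through to the reject path; with a byte range, every offset inside the field is matched and can be validated.

#include <stdbool.h>
#include <stddef.h>

struct md {                       /* hypothetical context struct */
        unsigned int eth_protocol;
        unsigned int ip_protocol;
};

bool access_allowed(size_t off, size_t size)
{
        switch (off) {
        case offsetof(struct md, ip_protocol) ...
             offsetof(struct md, ip_protocol) + sizeof(unsigned int) - 1:
                /* any 1/2/4-byte load that stays inside the field */
                return off + size <=
                       offsetof(struct md, ip_protocol) + sizeof(unsigned int);
        default:
                return false;
        }
}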
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -142,8 +142,8 @@ int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr)
                 mutex_unlock(&flow_dissector_mutex);
                 return -ENOENT;
         }
-        bpf_prog_put(attached);
         RCU_INIT_POINTER(net->flow_dissector_prog, NULL);
+        bpf_prog_put(attached);
         mutex_unlock(&flow_dissector_mutex);
         return 0;
 }
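A hedged, deliberately simplified userspace model of the ordering this hunk fixes (plain atomics instead of RCU, and without the mutex that serializes detach in the real code): unpublish the program pointer first, then drop the reference. In the old order a concurrent reader could still fetch a pointer whose last reference had already been dropped.

#include <stdatomic.h>
#include <stdlib.h>

struct prog {
        atomic_int refcnt;
};

static _Atomic(struct prog *) attached_prog;

void prog_put(struct prog *p)
{
        if (atomic_fetch_sub(&p->refcnt, 1) == 1)
                free(p);                        /* last reference gone */
}

void detach(void)
{
        struct prog *p = atomic_load(&attached_prog);

        if (!p)
                return;
        atomic_store(&attached_prog, NULL);     /* 1. readers can no longer find p */
        prog_put(p);                            /* 2. only now drop the reference */
}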
--- a/net/xdp/xdp_umem.c
+++ b/net/xdp/xdp_umem.c
@@ -365,7 +365,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
         umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL);
         if (!umem->pages) {
                 err = -ENOMEM;
-                goto out_account;
+                goto out_pin;
         }

         for (i = 0; i < umem->npgs; i++)
@@ -373,6 +373,8 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)

         return 0;

+out_pin:
+        xdp_umem_unpin_pages(umem);
 out_account:
         xdp_umem_unaccount_pages(umem);
         return err;
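A hedged sketch (hypothetical helpers, not the xdp_umem functions) of the error-unwind convention the fix restores: each step that can fail jumps to a label that undoes all previously completed steps in reverse order, so a failure after the pages were pinned must pass through out_pin before out_account.

#include <stdlib.h>

static int  account_pages(void)   { return 0; }   /* made-up stand-ins */
static void unaccount_pages(void) { }
static int  pin_pages(void)       { return 0; }
static void unpin_pages(void)     { }

static int setup(void)
{
        void **pages;
        int err;

        err = account_pages();               /* step 1 */
        if (err)
                return err;

        err = pin_pages();                   /* step 2 */
        if (err)
                goto out_account;

        pages = calloc(16, sizeof(*pages));  /* step 3: may fail */
        if (!pages) {
                err = -1;
                goto out_pin;                /* undo step 2, then step 1 */
        }

        free(pages);
        return 0;

out_pin:
        unpin_pages();
out_account:
        unaccount_pages();
        return err;
}

int main(void)
{
        return setup() ? EXIT_FAILURE : EXIT_SUCCESS;
}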
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -363,7 +363,9 @@ static int do_show(int argc, char **argv)
                 if (fd < 0)
                         return -1;

-                return show_prog(fd);
+                err = show_prog(fd);
+                close(fd);
+                return err;
         }

         if (argc)
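A hedged sketch of the pattern the bpftool fix applies (generic function, not bpftool code): returning the helper's result directly leaks the descriptor that was just opened; capture the result, close the fd, then return it.

#include <unistd.h>

int query_and_close(int fd, int (*query)(int))
{
        int err = query(fd);    /* use the descriptor ... */

        close(fd);              /* ... always release it ... */
        return err;             /* ... then report the result */
}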
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -34,6 +34,9 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test
 BPF_OBJ_FILES = $(patsubst %.c,%.o, $(notdir $(wildcard progs/*.c)))
 TEST_GEN_FILES = $(BPF_OBJ_FILES)

+BTF_C_FILES = $(wildcard progs/btf_dump_test_case_*.c)
+TEST_FILES = $(BTF_C_FILES)
+
 # Also test sub-register code-gen if LLVM has eBPF v3 processor support which
 # contains both ALU32 and JMP32 instructions.
 SUBREG_CODEGEN := $(shell echo "int cal(int a) { return a > 0; }" | \
@@ -68,7 +71,8 @@ TEST_PROGS := test_kmod.sh \
 TEST_PROGS_EXTENDED := with_addr.sh \
         with_tunnels.sh \
         tcp_client.py \
-        tcp_server.py
+        tcp_server.py \
+        test_xdp_vlan.sh

 # Compile but not part of 'make run_tests'
 TEST_GEN_PROGS_EXTENDED = test_libbpf_open test_sock_addr test_skb_cgroup_id_user \
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -34,3 +34,4 @@ CONFIG_NET_MPLS_GSO=m
 CONFIG_MPLS_ROUTING=m
 CONFIG_MPLS_IPTUNNEL=m
 CONFIG_IPV6_SIT=m
+CONFIG_BPF_JIT=y
--- a/tools/testing/selftests/bpf/prog_tests/btf_dump.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf_dump.c
@@ -97,6 +97,13 @@ int test_btf_dump_case(int n, struct btf_dump_test_case *test_case)
         }

         snprintf(test_file, sizeof(test_file), "progs/%s.c", test_case->name);
+        if (access(test_file, R_OK) == -1)
+                /*
+                 * When the test is run with O=, kselftest copies TEST_FILES
+                 * without preserving the directory structure.
+                 */
+                snprintf(test_file, sizeof(test_file), "%s.c",
+                         test_case->name);
         /*
          * Diff test output and expected test output, contained between
          * START-EXPECTED-OUTPUT and END-EXPECTED-OUTPUT lines in test case.
--- a/tools/testing/selftests/bpf/test_cgroup_storage.c
+++ b/tools/testing/selftests/bpf/test_cgroup_storage.c
@@ -20,9 +20,9 @@ int main(int argc, char **argv)
                 BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */
                 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
                              BPF_FUNC_get_local_storage),
-                BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
+                BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
                 BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1),
-                BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
+                BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),

                 BPF_LD_MAP_FD(BPF_REG_1, 0), /* map fd */
                 BPF_MOV64_IMM(BPF_REG_2, 0), /* flags, not used */
@@ -30,7 +30,7 @@ int main(int argc, char **argv)
                              BPF_FUNC_get_local_storage),
                 BPF_MOV64_IMM(BPF_REG_1, 1),
                 BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
-                BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
+                BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
                 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0x1),
                 BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
                 BPF_EXIT_INSN(),
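A hedged sketch of why the BPF_W to BPF_DW change is an endianness fix: the local-storage value is 64 bits wide, and on big-endian machines the first four bytes of a 64-bit value hold the high word, so a 32-bit load at offset 0 sees 0 for any small counter, while a 64-bit load is correct on either byte order.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        uint64_t counter = 1;   /* value the storage would hold */
        uint32_t first_word;

        /* model of "BPF_W load at offset 0" of a 64-bit slot */
        memcpy(&first_word, &counter, sizeof(first_word));

        printf("32-bit load sees: %u (0 on big-endian, 1 on little-endian)\n",
               first_word);
        printf("64-bit load sees: %llu (correct everywhere)\n",
               (unsigned long long)counter);
        return 0;
}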
--- a/tools/testing/selftests/bpf/test_sock.c
+++ b/tools/testing/selftests/bpf/test_sock.c
@@ -13,6 +13,7 @@
 #include <bpf/bpf.h>

 #include "cgroup_helpers.h"
+#include "bpf_endian.h"
 #include "bpf_rlimit.h"
 #include "bpf_util.h"

@@ -232,7 +233,8 @@ static struct sock_test tests[] = {
                 /* if (ip == expected && port == expected) */
                 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
                             offsetof(struct bpf_sock, src_ip6[3])),
-                BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x01000000, 4),
+                BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
+                            __bpf_constant_ntohl(0x00000001), 4),
                 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
                             offsetof(struct bpf_sock, src_port)),
                 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x2001, 2),
@@ -261,7 +263,8 @@ static struct sock_test tests[] = {
                 /* if (ip == expected && port == expected) */
                 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
                             offsetof(struct bpf_sock, src_ip4)),
-                BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x0100007F, 4),
+                BPF_JMP_IMM(BPF_JNE, BPF_REG_7,
+                            __bpf_constant_ntohl(0x7F000001), 4),
                 BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_6,
                             offsetof(struct bpf_sock, src_port)),
                 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0x1002, 2),
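A hedged sketch of the byte-order problem behind these hunks: src_ip4 is stored in network byte order and the test program compares the raw 32-bit load against a constant. The hard-coded 0x0100007F only equals 127.0.0.1 on little-endian hosts; converting the naturally written constant with ntohl() (the selftest uses the __bpf_constant_ntohl() wrapper from bpf_endian.h) yields the matching value on either byte order.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t src_ip4 = htonl(0x7F000001);   /* 127.0.0.1 in network order */
        uint32_t loaded = src_ip4;              /* raw 32-bit load, no byte swap */

        printf("0x0100007F matches:        %s (little-endian only)\n",
               loaded == 0x0100007F ? "yes" : "no");
        printf("ntohl(0x7F000001) matches: %s (any byte order)\n",
               loaded == ntohl(0x7F000001) ? "yes" : "no");
        return 0;
}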