Merge branch 'BPF-map-value-adjust-fix'
Daniel Borkmann says:

====================
BPF map value adjust fix

First patch in the series is the actual fix and the remaining patches
are just updates to selftests.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 5067f4cf23
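The class of bug being fixed: min/max bounds derived from an unsigned comparison and bounds derived from a signed comparison cannot be combined, because a single value can pass both checks while still being negative once it is added to a map value pointer. A minimal stand-alone C sketch of that arithmetic, using the same constants as the "bounds checks mixing signed and unsigned" selftest below (illustrative only; the variable names are not from the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Value loaded from the stack in the selftests below: -8 as u64. */
	uint64_t reg1 = (uint64_t)-8;

	/* Unsigned upper-bound check against -1 (0xffff...ffff) passes. */
	int passes_unsigned = !(reg1 > (uint64_t)-1);

	/* Signed upper-bound check against 1 also passes, since -8 <= 1. */
	int passes_signed = !((int64_t)reg1 > 1);

	if (passes_unsigned && passes_signed) {
		/* Trusting both bounds at once would suggest reg1 is a small
		 * non-negative offset, yet adding it to a map value pointer
		 * actually moves the pointer 8 bytes out of bounds.
		 */
		printf("offset actually applied: %lld\n",
		       (long long)(int64_t)reg1);
	}
	return 0;
}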
@@ -43,6 +43,7 @@ struct bpf_reg_state {
 	u32 min_align;
 	u32 aux_off;
 	u32 aux_off_align;
+	bool value_from_signed;
 };
 
 enum bpf_stack_slot_type {
@@ -504,6 +504,7 @@ static void reset_reg_range_values(struct bpf_reg_state *regs, u32 regno)
 {
 	regs[regno].min_value = BPF_REGISTER_MIN_RANGE;
 	regs[regno].max_value = BPF_REGISTER_MAX_RANGE;
+	regs[regno].value_from_signed = false;
 	regs[regno].min_align = 0;
 }
 
@@ -777,12 +778,13 @@ static int check_ctx_access(struct bpf_verifier_env *env, int insn_idx, int off,
 	return -EACCES;
 }
 
-static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
+static bool __is_pointer_value(bool allow_ptr_leaks,
+			       const struct bpf_reg_state *reg)
 {
-	if (env->allow_ptr_leaks)
+	if (allow_ptr_leaks)
 		return false;
 
-	switch (env->cur_state.regs[regno].type) {
+	switch (reg->type) {
 	case UNKNOWN_VALUE:
 	case CONST_IMM:
 		return false;
@@ -791,6 +793,11 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
 	}
 }
 
+static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
+{
+	return __is_pointer_value(env->allow_ptr_leaks, &env->cur_state.regs[regno]);
+}
+
 static int check_pkt_ptr_alignment(const struct bpf_reg_state *reg,
 				   int off, int size, bool strict)
 {
@@ -1832,10 +1839,24 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 	dst_align = dst_reg->min_align;
 
 	/* We don't know anything about what was done to this register, mark it
-	 * as unknown.
+	 * as unknown. Also, if both derived bounds came from signed/unsigned
+	 * mixed compares and one side is unbounded, we cannot really do anything
+	 * with them as boundaries cannot be trusted. Thus, arithmetic of two
+	 * regs of such kind will get invalidated bounds on the dst side.
 	 */
-	if (min_val == BPF_REGISTER_MIN_RANGE &&
-	    max_val == BPF_REGISTER_MAX_RANGE) {
+	if ((min_val == BPF_REGISTER_MIN_RANGE &&
+	     max_val == BPF_REGISTER_MAX_RANGE) ||
+	    (BPF_SRC(insn->code) == BPF_X &&
+	     ((min_val != BPF_REGISTER_MIN_RANGE &&
+	       max_val == BPF_REGISTER_MAX_RANGE) ||
+	      (min_val == BPF_REGISTER_MIN_RANGE &&
+	       max_val != BPF_REGISTER_MAX_RANGE) ||
+	      (dst_reg->min_value != BPF_REGISTER_MIN_RANGE &&
+	       dst_reg->max_value == BPF_REGISTER_MAX_RANGE) ||
+	      (dst_reg->min_value == BPF_REGISTER_MIN_RANGE &&
+	       dst_reg->max_value != BPF_REGISTER_MAX_RANGE)) &&
+	     regs[insn->dst_reg].value_from_signed !=
+	     regs[insn->src_reg].value_from_signed)) {
 		reset_reg_range_values(regs, insn->dst_reg);
 		return;
 	}
@@ -2023,6 +2044,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 			regs[insn->dst_reg].max_value = insn->imm;
 			regs[insn->dst_reg].min_value = insn->imm;
 			regs[insn->dst_reg].min_align = calc_align(insn->imm);
+			regs[insn->dst_reg].value_from_signed = false;
 		}
 
 	} else if (opcode > BPF_END) {
@@ -2198,40 +2220,63 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
 			    struct bpf_reg_state *false_reg, u64 val,
 			    u8 opcode)
 {
+	bool value_from_signed = true;
+	bool is_range = true;
+
 	switch (opcode) {
 	case BPF_JEQ:
 		/* If this is false then we know nothing Jon Snow, but if it is
 		 * true then we know for sure.
 		 */
 		true_reg->max_value = true_reg->min_value = val;
+		is_range = false;
 		break;
 	case BPF_JNE:
 		/* If this is true we know nothing Jon Snow, but if it is false
 		 * we know the value for sure;
 		 */
 		false_reg->max_value = false_reg->min_value = val;
+		is_range = false;
 		break;
 	case BPF_JGT:
-		/* Unsigned comparison, the minimum value is 0. */
-		false_reg->min_value = 0;
+		value_from_signed = false;
 		/* fallthrough */
 	case BPF_JSGT:
+		if (true_reg->value_from_signed != value_from_signed)
+			reset_reg_range_values(true_reg, 0);
+		if (false_reg->value_from_signed != value_from_signed)
+			reset_reg_range_values(false_reg, 0);
+		if (opcode == BPF_JGT) {
+			/* Unsigned comparison, the minimum value is 0. */
+			false_reg->min_value = 0;
+		}
 		/* If this is false then we know the maximum val is val,
 		 * otherwise we know the min val is val+1.
 		 */
 		false_reg->max_value = val;
+		false_reg->value_from_signed = value_from_signed;
 		true_reg->min_value = val + 1;
+		true_reg->value_from_signed = value_from_signed;
 		break;
 	case BPF_JGE:
-		/* Unsigned comparison, the minimum value is 0. */
-		false_reg->min_value = 0;
+		value_from_signed = false;
 		/* fallthrough */
 	case BPF_JSGE:
+		if (true_reg->value_from_signed != value_from_signed)
+			reset_reg_range_values(true_reg, 0);
+		if (false_reg->value_from_signed != value_from_signed)
+			reset_reg_range_values(false_reg, 0);
+		if (opcode == BPF_JGE) {
+			/* Unsigned comparison, the minimum value is 0. */
+			false_reg->min_value = 0;
+		}
 		/* If this is false then we know the maximum value is val - 1,
 		 * otherwise we know the mimimum value is val.
 		 */
 		false_reg->max_value = val - 1;
+		false_reg->value_from_signed = value_from_signed;
 		true_reg->min_value = val;
+		true_reg->value_from_signed = value_from_signed;
 		break;
 	default:
 		break;
@@ -2239,6 +2284,12 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
 
 	check_reg_overflow(false_reg);
 	check_reg_overflow(true_reg);
+	if (is_range) {
+		if (__is_pointer_value(false, false_reg))
+			reset_reg_range_values(false_reg, 0);
+		if (__is_pointer_value(false, true_reg))
+			reset_reg_range_values(true_reg, 0);
+	}
 }
 
 /* Same as above, but for the case that dst_reg is a CONST_IMM reg and src_reg
@@ -2248,41 +2299,64 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
 				struct bpf_reg_state *false_reg, u64 val,
 				u8 opcode)
 {
+	bool value_from_signed = true;
+	bool is_range = true;
+
 	switch (opcode) {
 	case BPF_JEQ:
 		/* If this is false then we know nothing Jon Snow, but if it is
 		 * true then we know for sure.
 		 */
 		true_reg->max_value = true_reg->min_value = val;
+		is_range = false;
 		break;
 	case BPF_JNE:
 		/* If this is true we know nothing Jon Snow, but if it is false
 		 * we know the value for sure;
 		 */
 		false_reg->max_value = false_reg->min_value = val;
+		is_range = false;
 		break;
 	case BPF_JGT:
-		/* Unsigned comparison, the minimum value is 0. */
-		true_reg->min_value = 0;
+		value_from_signed = false;
 		/* fallthrough */
 	case BPF_JSGT:
+		if (true_reg->value_from_signed != value_from_signed)
+			reset_reg_range_values(true_reg, 0);
+		if (false_reg->value_from_signed != value_from_signed)
+			reset_reg_range_values(false_reg, 0);
+		if (opcode == BPF_JGT) {
+			/* Unsigned comparison, the minimum value is 0. */
+			true_reg->min_value = 0;
+		}
 		/*
 		 * If this is false, then the val is <= the register, if it is
 		 * true the register <= to the val.
 		 */
 		false_reg->min_value = val;
+		false_reg->value_from_signed = value_from_signed;
 		true_reg->max_value = val - 1;
+		true_reg->value_from_signed = value_from_signed;
 		break;
 	case BPF_JGE:
-		/* Unsigned comparison, the minimum value is 0. */
-		true_reg->min_value = 0;
+		value_from_signed = false;
 		/* fallthrough */
 	case BPF_JSGE:
+		if (true_reg->value_from_signed != value_from_signed)
+			reset_reg_range_values(true_reg, 0);
+		if (false_reg->value_from_signed != value_from_signed)
+			reset_reg_range_values(false_reg, 0);
+		if (opcode == BPF_JGE) {
+			/* Unsigned comparison, the minimum value is 0. */
+			true_reg->min_value = 0;
+		}
 		/* If this is false then constant < register, if it is true then
 		 * the register < constant.
 		 */
 		false_reg->min_value = val + 1;
+		false_reg->value_from_signed = value_from_signed;
 		true_reg->max_value = val;
+		true_reg->value_from_signed = value_from_signed;
 		break;
 	default:
 		break;
@@ -2290,6 +2364,12 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
 
 	check_reg_overflow(false_reg);
 	check_reg_overflow(true_reg);
+	if (is_range) {
+		if (__is_pointer_value(false, false_reg))
+			reset_reg_range_values(false_reg, 0);
+		if (__is_pointer_value(false, true_reg))
+			reset_reg_range_values(true_reg, 0);
+	}
 }
 
 static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
@@ -120,7 +120,7 @@ int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
 int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
 		       size_t insns_cnt, int strict_alignment,
 		       const char *license, __u32 kern_version,
-		       char *log_buf, size_t log_buf_sz)
+		       char *log_buf, size_t log_buf_sz, int log_level)
 {
 	union bpf_attr attr;
 
@@ -131,7 +131,7 @@ int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
 	attr.license = ptr_to_u64(license);
 	attr.log_buf = ptr_to_u64(log_buf);
 	attr.log_size = log_buf_sz;
-	attr.log_level = 2;
+	attr.log_level = log_level;
 	log_buf[0] = 0;
 	attr.kern_version = kern_version;
 	attr.prog_flags = strict_alignment ? BPF_F_STRICT_ALIGNMENT : 0;
@@ -38,7 +38,7 @@ int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
 int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
 		       size_t insns_cnt, int strict_alignment,
 		       const char *license, __u32 kern_version,
-		       char *log_buf, size_t log_buf_sz);
+		       char *log_buf, size_t log_buf_sz, int log_level);
 
 int bpf_map_update_elem(int fd, const void *key, const void *value,
 			__u64 flags);
@@ -380,7 +380,7 @@ static int do_test_single(struct bpf_align_test *test)
 	prog_len = probe_filter_length(prog);
 	fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
 				     prog, prog_len, 1, "GPL", 0,
-				     bpf_vlog, sizeof(bpf_vlog));
+				     bpf_vlog, sizeof(bpf_vlog), 2);
 	if (fd_prog < 0) {
 		printf("Failed to load program.\n");
 		printf("%s", bpf_vlog);
@@ -4969,7 +4969,7 @@ static struct bpf_test tests[] = {
 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
 				    sizeof(struct test_val), 4),
 			BPF_MOV64_IMM(BPF_REG_4, 0),
-			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
 			BPF_MOV64_IMM(BPF_REG_3, 0),
 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
 			BPF_MOV64_IMM(BPF_REG_0, 0),
@@ -4995,7 +4995,7 @@ static struct bpf_test tests[] = {
 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
 				    sizeof(struct test_val) + 1, 4),
 			BPF_MOV64_IMM(BPF_REG_4, 0),
-			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
 			BPF_MOV64_IMM(BPF_REG_3, 0),
 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
 			BPF_MOV64_IMM(BPF_REG_0, 0),
@@ -5023,7 +5023,7 @@ static struct bpf_test tests[] = {
 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
 				    sizeof(struct test_val) - 20, 4),
 			BPF_MOV64_IMM(BPF_REG_4, 0),
-			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
 			BPF_MOV64_IMM(BPF_REG_3, 0),
 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
 			BPF_MOV64_IMM(BPF_REG_0, 0),
@@ -5050,7 +5050,7 @@ static struct bpf_test tests[] = {
 			BPF_JMP_IMM(BPF_JSGT, BPF_REG_2,
 				    sizeof(struct test_val) - 19, 4),
 			BPF_MOV64_IMM(BPF_REG_4, 0),
-			BPF_JMP_REG(BPF_JGE, BPF_REG_4, BPF_REG_2, 2),
+			BPF_JMP_REG(BPF_JSGE, BPF_REG_4, BPF_REG_2, 2),
 			BPF_MOV64_IMM(BPF_REG_3, 0),
 			BPF_EMIT_CALL(BPF_FUNC_probe_read),
 			BPF_MOV64_IMM(BPF_REG_0, 0),
@@ -5510,6 +5510,476 @@ static struct bpf_test tests[] = {
 		.errstr = "invalid bpf_context access",
 		.prog_type = BPF_PROG_TYPE_LWT_IN,
 	},
+	{
+		"bounds checks mixing signed and unsigned, positive bounds",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+			BPF_MOV64_IMM(BPF_REG_2, 2),
+			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 3),
+			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 4, 2),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr_unpriv = "R0 pointer arithmetic prohibited",
+		.errstr = "R0 min value is negative",
+		.result = REJECT,
+		.result_unpriv = REJECT,
+	},
+	{
+		"bounds checks mixing signed and unsigned",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+			BPF_MOV64_IMM(BPF_REG_2, -1),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
+			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr_unpriv = "R0 pointer arithmetic prohibited",
+		.errstr = "R0 min value is negative",
+		.result = REJECT,
+		.result_unpriv = REJECT,
+	},
+	{
+		"bounds checks mixing signed and unsigned, variant 2",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+			BPF_MOV64_IMM(BPF_REG_2, -1),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
+			BPF_MOV64_IMM(BPF_REG_8, 0),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_1),
+			BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
+			BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr_unpriv = "R0 pointer arithmetic prohibited",
+		.errstr = "R8 invalid mem access 'inv'",
+		.result = REJECT,
+		.result_unpriv = REJECT,
+	},
+	{
+		"bounds checks mixing signed and unsigned, variant 3",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+			BPF_MOV64_IMM(BPF_REG_2, -1),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 4),
+			BPF_MOV64_REG(BPF_REG_8, BPF_REG_1),
+			BPF_JMP_IMM(BPF_JSGT, BPF_REG_8, 1, 2),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
+			BPF_ST_MEM(BPF_B, BPF_REG_8, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr_unpriv = "R0 pointer arithmetic prohibited",
+		.errstr = "R8 invalid mem access 'inv'",
+		.result = REJECT,
+		.result_unpriv = REJECT,
+	},
+	{
+		"bounds checks mixing signed and unsigned, variant 4",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+			BPF_MOV64_IMM(BPF_REG_2, 1),
+			BPF_ALU64_REG(BPF_AND, BPF_REG_1, BPF_REG_2),
+			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr_unpriv = "R0 pointer arithmetic prohibited",
+		.errstr = "R0 min value is negative",
+		.result = REJECT,
+		.result_unpriv = REJECT,
+	},
+	{
+		"bounds checks mixing signed and unsigned, variant 5",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+			BPF_MOV64_IMM(BPF_REG_2, -1),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 5),
+			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 4),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 4),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr_unpriv = "R0 pointer arithmetic prohibited",
+		.errstr = "R0 invalid mem access",
+		.result = REJECT,
+		.result_unpriv = REJECT,
+	},
+	{
+		"bounds checks mixing signed and unsigned, variant 6",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, -512),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -16),
+			BPF_MOV64_IMM(BPF_REG_6, -1),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_6, 5),
+			BPF_JMP_IMM(BPF_JSGT, BPF_REG_4, 1, 4),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 1),
+			BPF_MOV64_IMM(BPF_REG_5, 0),
+			BPF_ST_MEM(BPF_H, BPF_REG_10, -512, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_skb_load_bytes),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.errstr_unpriv = "R4 min value is negative, either use unsigned",
+		.errstr = "R4 min value is negative, either use unsigned",
+		.result = REJECT,
+		.result_unpriv = REJECT,
+	},
+	{
+		"bounds checks mixing signed and unsigned, variant 7",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+			BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
+			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr_unpriv = "R0 pointer arithmetic prohibited",
+		.errstr = "R0 min value is negative",
+		.result = REJECT,
+		.result_unpriv = REJECT,
+	},
+	{
+		"bounds checks mixing signed and unsigned, variant 8",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+			BPF_MOV64_IMM(BPF_REG_2, 1024 * 1024 * 1024 + 1),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, 3),
+			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr_unpriv = "R0 pointer arithmetic prohibited",
+		.errstr = "R0 min value is negative",
+		.result = REJECT,
+		.result_unpriv = REJECT,
+	},
+	{
+		"bounds checks mixing signed and unsigned, variant 9",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+			BPF_MOV64_IMM(BPF_REG_2, -1),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr_unpriv = "R0 pointer arithmetic prohibited",
+		.errstr = "R0 min value is negative",
+		.result = REJECT,
+		.result_unpriv = REJECT,
+	},
+	{
+		"bounds checks mixing signed and unsigned, variant 10",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+			BPF_LD_IMM64(BPF_REG_2, -9223372036854775808ULL),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr_unpriv = "R0 pointer arithmetic prohibited",
+		.errstr = "R0 min value is negative",
+		.result = REJECT,
+		.result_unpriv = REJECT,
+	},
+	{
+		"bounds checks mixing signed and unsigned, variant 11",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr_unpriv = "R0 pointer arithmetic prohibited",
+		.errstr = "R0 min value is negative",
+		.result = REJECT,
+		.result_unpriv = REJECT,
+	},
+	{
+		"bounds checks mixing signed and unsigned, variant 12",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+			BPF_MOV64_IMM(BPF_REG_2, -1),
+			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
+			/* Dead branch. */
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr_unpriv = "R0 pointer arithmetic prohibited",
+		.errstr = "R0 min value is negative",
+		.result = REJECT,
+		.result_unpriv = REJECT,
+	},
+	{
+		"bounds checks mixing signed and unsigned, variant 13",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+			BPF_MOV64_IMM(BPF_REG_2, -6),
+			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr_unpriv = "R0 pointer arithmetic prohibited",
+		.errstr = "R0 min value is negative",
+		.result = REJECT,
+		.result_unpriv = REJECT,
+	},
+	{
+		"bounds checks mixing signed and unsigned, variant 14",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+			BPF_MOV64_IMM(BPF_REG_2, 2),
+			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
+			BPF_MOV64_IMM(BPF_REG_7, 1),
+			BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_1),
+			BPF_JMP_IMM(BPF_JSGT, BPF_REG_7, 4, 2),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_7),
+			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr_unpriv = "R0 pointer arithmetic prohibited",
+		.errstr = "R0 min value is negative",
+		.result = REJECT,
+		.result_unpriv = REJECT,
+	},
+	{
+		"bounds checks mixing signed and unsigned, variant 15",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
+				    offsetof(struct __sk_buff, mark)),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+			BPF_MOV64_IMM(BPF_REG_2, -1),
+			BPF_MOV64_IMM(BPF_REG_8, 2),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 42, 6),
+			BPF_JMP_REG(BPF_JSGT, BPF_REG_8, BPF_REG_1, 3),
+			BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 1, 2),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_2, -3),
+			BPF_JMP_IMM(BPF_JA, 0, 0, -7),
+		},
+		.fixup_map1 = { 4 },
+		.errstr_unpriv = "R0 pointer arithmetic prohibited",
+		.errstr = "R0 min value is negative",
+		.result = REJECT,
+		.result_unpriv = REJECT,
+	},
+	{
+		"bounds checks mixing signed and unsigned, variant 16",
+		.insns = {
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, -8),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -16),
+			BPF_MOV64_IMM(BPF_REG_2, -6),
+			BPF_JMP_REG(BPF_JGE, BPF_REG_2, BPF_REG_1, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+			BPF_JMP_IMM(BPF_JGT, BPF_REG_0, 1, 2),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			BPF_ST_MEM(BPF_B, BPF_REG_0, 0, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr_unpriv = "R0 pointer arithmetic prohibited",
+		.errstr = "R0 min value is negative",
+		.result = REJECT,
+		.result_unpriv = REJECT,
+	},
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)
@@ -5633,7 +6103,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
 
 	fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
 				     prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
-				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog));
+				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
 
 	expected_ret = unpriv && test->result_unpriv != UNDEF ?
 		       test->result_unpriv : test->result;