mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-28 11:18:45 +07:00
Merge branch 'bpf-smap-followups'
Daniel Borkmann says:

====================
Two BPF smap related followups

Fixing preemption imbalance and consolidating
prologue generation. Thanks!
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 755f17a1fc
kernel/bpf/sockmap.c
@@ -135,7 +135,8 @@ static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb)
 	/* Fall through and free skb otherwise */
 	case SK_DROP:
 	default:
-		preempt_enable();
+		if (rc != SK_REDIRECT)
+			preempt_enable();
 		kfree_skb(skb);
 	}
 }
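Why the guard: the hunk only makes sense if preemption is still disabled when the switch in smap_do_verdict() runs, and if the SK_REDIRECT arm re-enables it itself before possibly falling through to the free path, so the old unconditional preempt_enable() could run twice. The sketch below shows the resulting control flow; it is a minimal reconstruction, not the verbatim kernel code: the do_sk_redirect_map() call and smap_queue_to_peer() stand in for the surrounding (unchanged) redirect/enqueue logic, and smap_queue_to_peer() in particular is a hypothetical helper.

	rc = smap_verdict_func(psock, skb);	/* verdict prog; preemption assumed disabled from here on */
	switch (rc) {
	case SK_REDIRECT:
		sk = do_sk_redirect_map();	/* per-cpu redirect target, hence the preempt protection */
		preempt_enable();		/* redirect path re-enables before touching the peer socket */
		if (sk && smap_queue_to_peer(sk, skb))	/* hypothetical stand-in for the enqueue logic */
			break;
		/* Fall through and free skb otherwise */
	case SK_DROP:
	default:
		if (rc != SK_REDIRECT)		/* redirect path already re-enabled above; avoid double enable */
			preempt_enable();
		kfree_skb(skb);
	}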
net/core/filter.c
@@ -3455,8 +3455,8 @@ static bool sock_filter_is_valid_access(int off, int size,
 	return true;
 }
 
-static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write,
-			       const struct bpf_prog *prog)
+static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write,
+				const struct bpf_prog *prog, int drop_verdict)
 {
 	struct bpf_insn *insn = insn_buf;
 
@@ -3483,7 +3483,7 @@ static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write,
 	 * return TC_ACT_SHOT;
 	 */
 	*insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2);
-	*insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, TC_ACT_SHOT);
+	*insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, drop_verdict);
 	*insn++ = BPF_EXIT_INSN();
 
 	/* restore: */
@@ -3494,6 +3494,12 @@ static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write,
 	return insn - insn_buf;
 }
 
+static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write,
+			       const struct bpf_prog *prog)
+{
+	return bpf_unclone_prologue(insn_buf, direct_write, prog, TC_ACT_SHOT);
+}
+
 static bool tc_cls_act_is_valid_access(int off, int size,
 				       enum bpf_access_type type,
 				       struct bpf_insn_access_aux *info)
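Taken together with the unchanged instruction sequence between these hunks, the prologue emitted by bpf_unclone_prologue() for a direct-write program corresponds to roughly the following pseudo-C, reconstructed from the comments visible in the hunks; only the verdict returned on failure differs per caller:

	/* pseudo-C of the generated prologue (the ctx pointer is saved in R6 and restored) */
	if (skb->cloned) {
		/* might be a clone: pull data so direct writes are safe */
		if (bpf_skb_pull_data(skb, 0))
			return drop_verdict;	/* TC_ACT_SHOT for tc, SK_DROP for sk_skb */
	}
	/* start: first instruction of the original program */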
@@ -3600,40 +3606,7 @@ static bool sock_ops_is_valid_access(int off, int size,
 static int sk_skb_prologue(struct bpf_insn *insn_buf, bool direct_write,
 			   const struct bpf_prog *prog)
 {
-	struct bpf_insn *insn = insn_buf;
-
-	if (!direct_write)
-		return 0;
-
-	/* if (!skb->cloned)
-	 *       goto start;
-	 *
-	 * (Fast-path, otherwise approximation that we might be
-	 *  a clone, do the rest in helper.)
-	 */
-	*insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET());
-	*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK);
-	*insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7);
-
-	/* ret = bpf_skb_pull_data(skb, 0); */
-	*insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
-	*insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_2, BPF_REG_2);
-	*insn++ = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
-			       BPF_FUNC_skb_pull_data);
-	/* if (!ret)
-	 *      goto restore;
-	 * return SK_DROP;
-	 */
-	*insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2);
-	*insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, SK_DROP);
-	*insn++ = BPF_EXIT_INSN();
-
-	/* restore: */
-	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
-	/* start: */
-	*insn++ = prog->insnsi[0];
-
-	return insn - insn_buf;
+	return bpf_unclone_prologue(insn_buf, direct_write, prog, SK_DROP);
 }
 
 static bool sk_skb_is_valid_access(int off, int size,
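Both tc_cls_act_prologue() and sk_skb_prologue() serve as .gen_prologue callbacks in their program types' verifier ops (wired up elsewhere in filter.c, outside this diff), which is why they keep the same signature and stay as thin wrappers around the shared generator. Shown schematically below as an illustration only; the ops variable name is an assumption, not taken from this diff:

	const struct bpf_verifier_ops tc_cls_act_verifier_ops = {
		/* ... other callbacks ... */
		.gen_prologue	= tc_cls_act_prologue,	/* emit the unclone prologue before the program */
	};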