Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf
Pablo Neira Ayuso says:

====================
Netfilter fixes for net

The following patchset contains Netfilter fixes for your net tree:

1) Skip ip_sabotage_in() for packets going into the VRF driver,
   otherwise packets are dropped, from David Ahern.

2) Clang compilation warning uncovering a typo in the
   nft_validate_register_store() call from nft_osf, from Stefan Agner.

3) Duplicate sizeof in the ctnetlink netlink message length
   calculation, from zhong jiang.

4) Missing rb_erase() when the batch is full in the rbtree set
   garbage collector, from Taehee Yoo.

5) Calm down a compilation warning in nf_hook(), from Florian Westphal.

6) Missing check for a non-NULL sk in xt_socket before validating
   which netns it belongs to, from Flavio Leitner.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit c8424ddd97
include/linux/netfilter.h
@@ -215,6 +215,8 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
                 break;
         case NFPROTO_ARP:
+#ifdef CONFIG_NETFILTER_FAMILY_ARP
                 if (WARN_ON_ONCE(hook >= ARRAY_SIZE(net->nf.hooks_arp)))
                         break;
                 hook_head = rcu_dereference(net->nf.hooks_arp[hook]);
+#endif
                 break;
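Note on fix 5: the ARP hook lookup is now compiled only in configurations where the ARP hook array is actually built, which is what silences the array-bounds warning. The standalone sketch below (hypothetical types and config macro, not the kernel headers) shows the same pattern of keeping code that indexes a conditionally built array under the same #ifdef as the array definition:

#include <stdio.h>

#define HAVE_ARP_HOOKS 1        /* stand-in for CONFIG_NETFILTER_FAMILY_ARP */

struct fake_netns {
        void *hooks_ipv4[5];
#if HAVE_ARP_HOOKS
        void *hooks_arp[3];     /* only exists in some configurations */
#endif
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static void *lookup_arp_hook(struct fake_netns *ns, unsigned int hook)
{
        void *head = NULL;

#if HAVE_ARP_HOOKS
        /* bounds check mirrors the WARN_ON_ONCE() in nf_hook() */
        if (hook >= ARRAY_SIZE(ns->hooks_arp))
                return NULL;
        head = ns->hooks_arp[hook];
#else
        (void)ns;
        (void)hook;
#endif
        return head;
}

int main(void)
{
        static struct fake_netns ns;    /* zero-initialized */

        printf("%p\n", lookup_arp_hook(&ns, 1));
        return 0;
}

With the macro set to 0, the lookup is compiled away entirely, so no diagnostic can be emitted for the absent array.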
net/bridge/br_netfilter_hooks.c
@@ -835,7 +835,8 @@ static unsigned int ip_sabotage_in(void *priv,
                                    struct sk_buff *skb,
                                    const struct nf_hook_state *state)
 {
-        if (skb->nf_bridge && !skb->nf_bridge->in_prerouting) {
+        if (skb->nf_bridge && !skb->nf_bridge->in_prerouting &&
+            !netif_is_l3_master(skb->dev)) {
                 state->okfn(state->net, state->sk, skb);
                 return NF_STOLEN;
         }
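A toy model of the control flow behind fix 1 (plain C with illustrative field names, not kernel APIs): packets carrying bridge state are normally stolen and re-injected through the bridge path, but traffic that is passing through an L3 master (VRF) device has to be left alone, otherwise it loops back into the steal path and is dropped:

#include <stdbool.h>
#include <stdio.h>

struct pkt {
        bool has_bridge_state;
        bool in_prerouting;
        bool dev_is_l3_master;
};

enum verdict { ACCEPT, STOLEN };

static enum verdict sabotage_in(const struct pkt *p)
{
        /* the old code lacked the l3-master test, so VRF traffic was stolen too */
        if (p->has_bridge_state && !p->in_prerouting && !p->dev_is_l3_master)
                return STOLEN;

        return ACCEPT;
}

int main(void)
{
        struct pkt vrf   = { true, false, true  };
        struct pkt plain = { true, false, false };

        printf("vrf packet:   %s\n", sabotage_in(&vrf)   == STOLEN ? "stolen" : "accepted");
        printf("plain packet: %s\n", sabotage_in(&plain) == STOLEN ? "stolen" : "accepted");
        return 0;
}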
net/netfilter/nf_conntrack_proto_tcp.c
@@ -1213,8 +1213,8 @@ static const struct nla_policy tcp_nla_policy[CTA_PROTOINFO_TCP_MAX+1] = {
 #define TCP_NLATTR_SIZE ( \
         NLA_ALIGN(NLA_HDRLEN + 1) + \
         NLA_ALIGN(NLA_HDRLEN + 1) + \
-        NLA_ALIGN(NLA_HDRLEN + sizeof(sizeof(struct nf_ct_tcp_flags))) + \
-        NLA_ALIGN(NLA_HDRLEN + sizeof(sizeof(struct nf_ct_tcp_flags))))
+        NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)) + \
+        NLA_ALIGN(NLA_HDRLEN + sizeof(struct nf_ct_tcp_flags)))
 
 static int nlattr_to_tcp(struct nlattr *cda[], struct nf_conn *ct)
 {
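Fix 3 in isolation: sizeof(sizeof(x)) measures the size_t result of the inner sizeof, not x itself, so the reserved attribute length was wrong. A minimal demonstration; the struct below is a stand-in with the same two-byte layout as struct nf_ct_tcp_flags:

#include <stdio.h>

struct nf_ct_tcp_flags_demo {
        unsigned char flags;
        unsigned char mask;
};

int main(void)
{
        printf("sizeof(struct)         = %zu\n",
               sizeof(struct nf_ct_tcp_flags_demo));           /* 2 */
        printf("sizeof(sizeof(struct)) = %zu\n",
               sizeof(sizeof(struct nf_ct_tcp_flags_demo)));   /* sizeof(size_t), typically 8 */
        return 0;
}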
net/netfilter/nft_osf.c
@@ -49,7 +49,7 @@ static int nft_osf_init(const struct nft_ctx *ctx,
 
         priv->dreg = nft_parse_register(tb[NFTA_OSF_DREG]);
         err = nft_validate_register_store(ctx, priv->dreg, NULL,
-                                          NFTA_DATA_VALUE, NFT_OSF_MAXGENRELEN);
+                                          NFT_DATA_VALUE, NFT_OSF_MAXGENRELEN);
         if (err < 0)
                 return err;
 
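Fix 2 is a one-character constant mix-up: an NFTA_* netlink attribute constant was passed where an NFT_* data type was expected. C accepts the implicit conversion between unrelated enums, which is why only clang's -Wenum-conversion diagnostic caught it. A sketch of the same trap with hypothetical enums (not the nftables UAPI):

#include <stdio.h>

enum data_types      { DATA_VALUE, DATA_VERDICT };              /* cf. NFT_DATA_*  */
enum data_attributes { DATA_ATTR_UNSPEC, DATA_ATTR_VALUE };     /* cf. NFTA_DATA_* */

static const char *describe(enum data_types type)
{
        return type == DATA_VALUE ? "value" : "not a value";
}

int main(void)
{
        /* The mix-up: an attribute constant used where a type is expected.
         * It compiles, clang's -Wenum-conversion merely warns, and the
         * numeric value is wrong (1 instead of 0 here). */
        printf("%s\n", describe(DATA_ATTR_VALUE));
        return 0;
}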
net/netfilter/nft_set_rbtree.c
@@ -355,12 +355,11 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
 
 static void nft_rbtree_gc(struct work_struct *work)
 {
+        struct nft_rbtree_elem *rbe, *rbe_end = NULL, *rbe_prev = NULL;
         struct nft_set_gc_batch *gcb = NULL;
-        struct rb_node *node, *prev = NULL;
-        struct nft_rbtree_elem *rbe;
         struct nft_rbtree *priv;
+        struct rb_node *node;
         struct nft_set *set;
-        int i;
 
         priv = container_of(work, struct nft_rbtree, gc_work.work);
         set = nft_set_container_of(priv);
@@ -371,7 +370,7 @@ static void nft_rbtree_gc(struct work_struct *work)
                 rbe = rb_entry(node, struct nft_rbtree_elem, node);
 
                 if (nft_rbtree_interval_end(rbe)) {
-                        prev = node;
+                        rbe_end = rbe;
                         continue;
                 }
                 if (!nft_set_elem_expired(&rbe->ext))
@@ -379,29 +378,30 @@ static void nft_rbtree_gc(struct work_struct *work)
                 if (nft_set_elem_mark_busy(&rbe->ext))
                         continue;
 
+                if (rbe_prev) {
+                        rb_erase(&rbe_prev->node, &priv->root);
+                        rbe_prev = NULL;
+                }
                 gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
                 if (!gcb)
                         break;
 
                 atomic_dec(&set->nelems);
                 nft_set_gc_batch_add(gcb, rbe);
+                rbe_prev = rbe;
 
-                if (prev) {
-                        rbe = rb_entry(prev, struct nft_rbtree_elem, node);
+                if (rbe_end) {
                         atomic_dec(&set->nelems);
-                        nft_set_gc_batch_add(gcb, rbe);
-                        prev = NULL;
+                        nft_set_gc_batch_add(gcb, rbe_end);
+                        rb_erase(&rbe_end->node, &priv->root);
+                        rbe_end = NULL;
                 }
                 node = rb_next(node);
                 if (!node)
                         break;
         }
-        if (gcb) {
-                for (i = 0; i < gcb->head.cnt; i++) {
-                        rbe = gcb->elems[i];
-                        rb_erase(&rbe->node, &priv->root);
-                }
-        }
+        if (rbe_prev)
+                rb_erase(&rbe_prev->node, &priv->root);
         write_seqcount_end(&priv->count);
         write_unlock_bh(&priv->lock);
 
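Fix 4 in plain terms: every element handed to a GC batch must also be erased from the tree, including elements in batches that were already sent off when a batch filled up; the rewritten loop additionally defers rb_erase() by one element (rbe_prev/rbe_end) because rb_next() needs the current node still linked in. Below is a toy model of the "unlink everything you batch" invariant, using a plain C array instead of the kernel rbtree (all names are illustrative):

#include <stdio.h>
#include <stdlib.h>

#define NELEMS 8
#define BATCH  3

struct elem { int key; int expired; };

int main(void)
{
        struct elem *live[NELEMS];
        struct elem *batch[BATCH];
        int nbatch = 0;

        for (int i = 0; i < NELEMS; i++) {
                live[i] = malloc(sizeof(*live[i]));
                live[i]->key = i;
                live[i]->expired = (i % 2 == 0);
        }

        for (int i = 0; i < NELEMS; i++) {
                if (!live[i] || !live[i]->expired)
                        continue;

                if (nbatch == BATCH) {          /* batch full: release it... */
                        for (int j = 0; j < nbatch; j++)
                                free(batch[j]);
                        nbatch = 0;
                }
                batch[nbatch++] = live[i];
                live[i] = NULL;                 /* ...but always unlink what was queued */
        }

        for (int j = 0; j < nbatch; j++)        /* flush the final batch */
                free(batch[j]);

        for (int i = 0; i < NELEMS; i++)        /* only non-expired elements remain */
                if (live[i]) {
                        printf("key %d still live\n", live[i]->key);
                        free(live[i]);
                }
        return 0;
}

The bug class this models: if the container kept pointers to elements that were freed from an earlier, already-released batch, it would be left holding dangling references.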
net/netfilter/xt_socket.c
@@ -56,7 +56,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
         struct sk_buff *pskb = (struct sk_buff *)skb;
         struct sock *sk = skb->sk;
 
-        if (!net_eq(xt_net(par), sock_net(sk)))
+        if (sk && !net_eq(xt_net(par), sock_net(sk)))
                 sk = NULL;
 
         if (!sk)
@@ -117,7 +117,7 @@ socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
         struct sk_buff *pskb = (struct sk_buff *)skb;
         struct sock *sk = skb->sk;
 
-        if (!net_eq(xt_net(par), sock_net(sk)))
+        if (sk && !net_eq(xt_net(par), sock_net(sk)))
                 sk = NULL;
 
         if (!sk)
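Fix 6: skb->sk may legitimately be NULL (no socket attached by early demux), and sock_net() dereferences it unconditionally, so the netns comparison has to be guarded. A minimal sketch with hypothetical types (not the kernel structs):

#include <stdio.h>
#include <stddef.h>

struct toy_net  { int id; };
struct toy_sock { struct toy_net *net; };

static struct toy_net *sock_netns(const struct toy_sock *sk)
{
        return sk->net;         /* crashes if sk == NULL, like sock_net() would */
}

static const struct toy_sock *match_socket(const struct toy_sock *sk,
                                           const struct toy_net *cur)
{
        /* the fix: test sk before dereferencing it */
        if (sk && sock_netns(sk) != cur)
                sk = NULL;      /* socket belongs to another netns: ignore it */

        return sk;              /* may still be NULL: caller does a fresh lookup */
}

int main(void)
{
        struct toy_net netns = { .id = 1 };
        struct toy_sock sk = { .net = &netns };

        printf("with sk:    %s\n", match_socket(&sk, &netns) ? "matched" : "lookup");
        printf("without sk: %s\n", match_socket(NULL, &netns) ? "matched" : "lookup");
        return 0;
}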