Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2025-01-23 07:09:37 +07:00)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix leak in dpaa_eth error paths, from Dan Carpenter.

 2) Use after free when using IPV6_RECVPKTINFO, from Andrey Konovalov.

 3) fanout_release() cannot be invoked from atomic contexts, from Anoob
    Soman.

 4) Fix bogus attempt at lockdep annotation in IRDA.

 5) dev_fill_metadata_dst() can OOPS on a NULL dst cache pointer, from
    Paolo Abeni.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  irda: Fix lockdep annotations in hashbin_delete().
  vxlan: fix oops in dev_fill_metadata_dst
  dccp: fix freeing skb too early for IPV6_RECVPKTINFO
  dpaa_eth: small leak on error
  packet: Do not call fanout_release from atomic contexts
commit 3dd9c12726
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -1668,7 +1668,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,

 free_buffers:
         /* compensate sw bpool counter changes */
-        for (i--; i > 0; i--) {
+        for (i--; i >= 0; i--) {
                 dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
                 if (dpaa_bp) {
                         count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
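The dpaa_eth hunk is a one-character off-by-one fix: the error-path cleanup loop walked back from the last successfully handled index but stopped before index 0, so the first buffer was never returned to the pool. A minimal userspace sketch of the same pattern (hypothetical buffers and a forced failure, not the driver's API) shows why the bound has to be ">= 0":

#include <stdio.h>
#include <stdlib.h>

#define NBUFS 4

int main(void)
{
        void *bufs[NBUFS] = { NULL };
        int i;

        for (i = 0; i < NBUFS; i++) {
                bufs[i] = (i == 2) ? NULL : malloc(64); /* simulate a failure at i == 2 */
                if (!bufs[i])
                        goto free_buffers;
        }
        return 0;

free_buffers:
        /* Compensate for the buffers already taken: start at the last
         * successful index and include index 0.  "i > 0" would stop one
         * short and leak bufs[0]. */
        for (i--; i >= 0; i--) {
                printf("freeing buffer %d\n", i);
                free(bufs[i]);
        }
        return 1;
}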
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2439,7 +2439,8 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)

                 rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos,
                                      info->key.u.ipv4.dst,
-                                     &info->key.u.ipv4.src, dport, sport, NULL, info);
+                                     &info->key.u.ipv4.src, dport, sport,
+                                     &info->dst_cache, info);
                 if (IS_ERR(rt))
                         return PTR_ERR(rt);
                 ip_rt_put(rt);
@@ -2450,7 +2451,8 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)

                 ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos,
                                         info->key.label, &info->key.u.ipv6.dst,
-                                        &info->key.u.ipv6.src, dport, sport, NULL, info);
+                                        &info->key.u.ipv6.src, dport, sport,
+                                        &info->dst_cache, info);
                 if (IS_ERR(ndst))
                         return PTR_ERR(ndst);
                 dst_release(ndst);
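Both vxlan hunks make vxlan_fill_metadata_dst() hand the route helpers the dst_cache that already lives inside the tunnel info instead of NULL; per the changelog, those helpers can dereference the cache pointer when route caching is usable, so NULL oopses. A generic illustration of the hazard (hypothetical lookup_route() and struct names, not the vxlan API):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the structures involved. */
struct dst_cache { void *cached; };
struct tunnel_info { struct dst_cache dst_cache; };

/* Touches the cache unconditionally, so callers must never pass NULL. */
static void *lookup_route(struct dst_cache *cache)
{
        if (cache->cached)              /* this is the kind of dereference that oopsed */
                return cache->cached;
        cache->cached = malloc(1);      /* pretend this is a freshly looked-up route */
        return cache->cached;
}

int main(void)
{
        struct tunnel_info info = { .dst_cache = { .cached = NULL } };

        /* Pre-fix: lookup_route(NULL) would crash.
         * Post-fix: pass the cache embedded in the caller's info. */
        void *rt = lookup_route(&info.dst_cache);

        printf("first lookup %p, cached lookup %p\n", rt, lookup_route(&info.dst_cache));
        free(info.dst_cache.cached);
        return 0;
}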
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -606,7 +606,8 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                         if (inet_csk(sk)->icsk_af_ops->conn_request(sk,
                                                                     skb) < 0)
                                 return 1;
-                        goto discard;
+                        consume_skb(skb);
+                        return 0;
                 }
                 if (dh->dccph_type == DCCP_PKT_RESET)
                         goto discard;
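The dccp hunk replaces "goto discard" (which ends in __kfree_skb(), freeing the skb without regard for its reference count) with consume_skb(), which only frees once the last reference is dropped; the IPv6 conn_request path can keep an extra reference to the skb so that IPV6_RECVPKTINFO can deliver the packet info later. A toy refcounted buffer (hypothetical buf_* helpers, userspace C) illustrates the difference:

#include <stdio.h>
#include <stdlib.h>

struct buf {
        int refcnt;
        char data[16];
};

static struct buf *buf_alloc(void)
{
        struct buf *b = calloc(1, sizeof(*b));
        b->refcnt = 1;
        return b;
}

static struct buf *buf_get(struct buf *b)       /* like skb_get(): take a reference */
{
        b->refcnt++;
        return b;
}

static void buf_consume(struct buf *b)          /* like consume_skb(): honour the refcount */
{
        if (--b->refcnt == 0)
                free(b);
}

int main(void)
{
        struct buf *b = buf_alloc();
        struct buf *saved = buf_get(b);         /* the request handler keeps a copy for later */

        /* Pre-fix behaviour was the equivalent of free(b) here, leaving
         * 'saved' dangling.  Post-fix: drop only our own reference. */
        buf_consume(b);

        printf("refcount seen by the later consumer: %d\n", saved->refcnt);
        buf_consume(saved);                     /* the later consumer drops the last reference */
        return 0;
}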
--- a/net/irda/irqueue.c
+++ b/net/irda/irqueue.c
@@ -383,9 +383,6 @@ EXPORT_SYMBOL(hashbin_new);
  * for deallocating this structure if it's complex. If not the user can
  * just supply kfree, which should take care of the job.
  */
-#ifdef CONFIG_LOCKDEP
-static int hashbin_lock_depth = 0;
-#endif
 int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
 {
         irda_queue_t* queue;
@@ -396,22 +393,27 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
         IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;);

         /* Synchronize */
-        if ( hashbin->hb_type & HB_LOCK ) {
-                spin_lock_irqsave_nested(&hashbin->hb_spinlock, flags,
-                                         hashbin_lock_depth++);
-        }
+        if (hashbin->hb_type & HB_LOCK)
+                spin_lock_irqsave(&hashbin->hb_spinlock, flags);

         /*
          *  Free the entries in the hashbin, TODO: use hashbin_clear when
          *  it has been shown to work
          */
         for (i = 0; i < HASHBIN_SIZE; i ++ ) {
-                queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);
-                while (queue ) {
-                        if (free_func)
-                                (*free_func)(queue);
-                        queue = dequeue_first(
-                                (irda_queue_t**) &hashbin->hb_queue[i]);
+                while (1) {
+                        queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);
+
+                        if (!queue)
+                                break;
+
+                        if (free_func) {
+                                if (hashbin->hb_type & HB_LOCK)
+                                        spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
+                                free_func(queue);
+                                if (hashbin->hb_type & HB_LOCK)
+                                        spin_lock_irqsave(&hashbin->hb_spinlock, flags);
+                        }
                 }
         }

@@ -420,12 +422,8 @@ int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
         hashbin->magic = ~HB_MAGIC;

         /* Release lock */
-        if ( hashbin->hb_type & HB_LOCK) {
+        if (hashbin->hb_type & HB_LOCK)
                 spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
-#ifdef CONFIG_LOCKDEP
-        hashbin_lock_depth--;
-#endif
-        }

         /*
          * Free the hashbin structure
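The hashbin_delete() rewrite drops the bogus lockdep nesting depth and instead releases the hashbin spinlock around the caller-supplied free_func, since that callback may itself take locks of the same class. A rough userspace analogy of the "unlock around the callback" pattern, with a pthread mutex standing in for the kernel spinlock and hypothetical list/node names:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
        struct node *next;
        int val;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;

/* Destructor supplied by the user of the list; it may re-enter code that
 * takes list_lock, so it must be invoked with the lock dropped. */
static void free_node(void *obj)
{
        free(obj);
}

static void list_delete(void (*free_func)(void *))
{
        pthread_mutex_lock(&list_lock);
        for (;;) {
                struct node *n = head;

                if (!n)
                        break;
                head = n->next;

                if (free_func) {
                        /* Mirror of the hashbin_delete() fix: release the
                         * lock around the callback instead of annotating a
                         * fake nesting depth. */
                        pthread_mutex_unlock(&list_lock);
                        free_func(n);
                        pthread_mutex_lock(&list_lock);
                }
        }
        pthread_mutex_unlock(&list_lock);
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));
                n->val = i;
                n->next = head;
                head = n;
        }
        list_delete(free_node);
        printf("list emptied, head = %p\n", (void *)head);
        return 0;
}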
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1497,6 +1497,8 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po)
         f->arr[f->num_members] = sk;
         smp_wmb();
         f->num_members++;
+        if (f->num_members == 1)
+                dev_add_pack(&f->prot_hook);
         spin_unlock(&f->lock);
 }

@@ -1513,6 +1515,8 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
         BUG_ON(i >= f->num_members);
         f->arr[i] = f->arr[f->num_members - 1];
         f->num_members--;
+        if (f->num_members == 0)
+                __dev_remove_pack(&f->prot_hook);
         spin_unlock(&f->lock);
 }

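The __fanout_link()/__fanout_unlink() hunks tie the fanout's protocol hook to its membership count: install it when the first socket joins and remove it (via __dev_remove_pack(), which unlike dev_remove_pack() does not wait for readers) when the last one leaves, all under f->lock. A stripped-down sketch of that first/last-member pattern, with hypothetical names and a pthread mutex in place of the spinlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t members_lock = PTHREAD_MUTEX_INITIALIZER;
static int num_members;
static int hook_registered;

static void register_hook(void)   { hook_registered = 1; }      /* cheap, non-sleeping */
static void unregister_hook(void) { hook_registered = 0; }      /* cheap, non-sleeping */

static void member_link(void)
{
        pthread_mutex_lock(&members_lock);
        num_members++;
        if (num_members == 1)           /* first member: install the hook */
                register_hook();
        pthread_mutex_unlock(&members_lock);
}

static void member_unlink(void)
{
        pthread_mutex_lock(&members_lock);
        num_members--;
        if (num_members == 0)           /* last member: remove the hook */
                unregister_hook();
        pthread_mutex_unlock(&members_lock);
}

int main(void)
{
        member_link();
        member_link();
        member_unlink();
        member_unlink();
        printf("members=%d hook_registered=%d\n", num_members, hook_registered);
        return 0;
}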
@@ -1693,7 +1697,6 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
                 match->prot_hook.func = packet_rcv_fanout;
                 match->prot_hook.af_packet_priv = match;
                 match->prot_hook.id_match = match_fanout_group;
-                dev_add_pack(&match->prot_hook);
                 list_add(&match->list, &fanout_list);
         }
         err = -EINVAL;
@@ -1718,7 +1721,12 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
         return err;
 }

-static void fanout_release(struct sock *sk)
+/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
+ * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
+ * It is the responsibility of the caller to call fanout_release_data() and
+ * free the returned packet_fanout (after synchronize_net())
+ */
+static struct packet_fanout *fanout_release(struct sock *sk)
 {
         struct packet_sock *po = pkt_sk(sk);
         struct packet_fanout *f;
@@ -1728,17 +1736,17 @@ static void fanout_release(struct sock *sk)
         if (f) {
                 po->fanout = NULL;

-                if (atomic_dec_and_test(&f->sk_ref)) {
+                if (atomic_dec_and_test(&f->sk_ref))
                         list_del(&f->list);
-                        dev_remove_pack(&f->prot_hook);
-                        fanout_release_data(f);
-                        kfree(f);
-                }
+                else
+                        f = NULL;

                 if (po->rollover)
                         kfree_rcu(po->rollover, rcu);
         }
         mutex_unlock(&fanout_mutex);
+
+        return f;
 }

 static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
@@ -2912,6 +2920,7 @@ static int packet_release(struct socket *sock)
 {
         struct sock *sk = sock->sk;
         struct packet_sock *po;
+        struct packet_fanout *f;
         struct net *net;
         union tpacket_req_u req_u;

@@ -2951,9 +2960,14 @@ static int packet_release(struct socket *sock)
                 packet_set_ring(sk, &req_u, 1, 1);
         }

-        fanout_release(sk);
+        f = fanout_release(sk);

         synchronize_net();
+
+        if (f) {
+                fanout_release_data(f);
+                kfree(f);
+        }
         /*
          *      Now the socket is dead. No more input will appear.
          */
@@ -3905,7 +3919,6 @@ static int packet_notifier(struct notifier_block *this,
                                 }
                                 if (msg == NETDEV_UNREGISTER) {
                                         packet_cached_dev_reset(po);
-                                        fanout_release(sk);
                                         po->ifindex = -1;
                                         if (po->prot_hook.dev)
                                                 dev_put(po->prot_hook.dev);
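Taken together, the remaining af_packet hunks split the teardown in two: fanout_release() now only unlinks the fanout under fanout_mutex and returns it when the last reference is gone, while packet_release() waits for a grace period (synchronize_net()) before calling fanout_release_data() and kfree(); the notifier path, which the changelog says ran in atomic context, no longer calls fanout_release() at all. A stripped-down userspace sketch of the "unlink under the lock, free after the grace period" split (hypothetical names, a pthread mutex in place of the kernel primitives):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fanout {
        struct fanout *next;
        int refcnt;
};

static pthread_mutex_t fanout_lock = PTHREAD_MUTEX_INITIALIZER;
static struct fanout *fanout_list;

/* Unlink under the lock and hand ownership back to the caller; nothing
 * heavyweight runs while the lock is held. */
static struct fanout *fanout_release(struct fanout *f)
{
        pthread_mutex_lock(&fanout_lock);
        if (--f->refcnt == 0)
                fanout_list = f->next;  /* unlink (single-element list for brevity) */
        else
                f = NULL;               /* still in use: caller must not free */
        pthread_mutex_unlock(&fanout_lock);
        return f;
}

static void wait_for_readers(void)
{
        /* stand-in for synchronize_net(): wait until no one can still see f */
}

int main(void)
{
        struct fanout *f = calloc(1, sizeof(*f));
        f->refcnt = 1;
        fanout_list = f;

        struct fanout *dead = fanout_release(f);
        if (dead) {
                wait_for_readers();     /* grace period, outside the lock */
                free(dead);             /* like fanout_release_data() + kfree() */
        }
        printf("list is now %s\n", fanout_list ? "non-empty" : "empty");
        return 0;
}

The point of the split is that nothing that sleeps or waits for readers runs with the lock held; the caller does the slow part afterwards.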