tcp/dccp: fix possible race __inet_lookup_established()
Michal Kubecek and Firo Yang did a very nice analysis of crashes
happening in __inet_lookup_established().

Since a TCP socket can go from TCP_ESTABLISH to TCP_LISTEN
(via a close()/socket()/listen() cycle) without an RCU grace period,
I should not have changed listeners linkage in their hash table.

They must use the nulls protocol (Documentation/RCU/rculist_nulls.txt),
so that a lookup can detect that a socket in a hash list was moved to
another one.

Since we added code in commit d296ba60d8 ("soreuseport: Resolve merge
conflict for v4/v6 ordering fix"), we have to add a
hlist_nulls_add_tail_rcu() helper.

Fixes: 3b24d854cb ("tcp/dccp: do not touch listener sk_refcnt under synflood")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: Michal Kubecek <mkubecek@suse.cz>
Reported-by: Firo Yang <firo.yang@suse.com>
Reviewed-by: Michal Kubecek <mkubecek@suse.cz>
Link: https://lore.kernel.org/netdev/20191120083919.GH27852@unicorn.suse.cz/
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
commit 8dbd76e79a
parent 8f9cc1ee29
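For context, below is a minimal sketch of the reader-side lookup pattern that the nulls protocol enables, adapted from Documentation/RCU/rculist_nulls.txt. The struct obj, obj_table, obj_lookup() and OBJ_HASH_BITS names are illustrative placeholders rather than kernel identifiers; the point is the final get_nulls_value() check, which lets an RCU lookup notice that it drifted onto a different chain (for example because a socket moved from the established to the listening table) and restart.

#include <linux/atomic.h>
#include <linux/hash.h>
#include <linux/rculist_nulls.h>
#include <linux/rcupdate.h>

#define OBJ_HASH_BITS	8

/* Placeholder object; in the TCP case this role is played by struct sock. */
struct obj {
	struct hlist_nulls_node	node;
	atomic_t		refcnt;
	unsigned int		key;
};

/* Each slot must be initialized with INIT_HLIST_NULLS_HEAD(&obj_table[i], i)
 * so that every chain ends in a distinct nulls value identifying its slot.
 */
static struct hlist_nulls_head obj_table[1 << OBJ_HASH_BITS];

static struct obj *obj_lookup(unsigned int key)
{
	unsigned int slot = hash_32(key, OBJ_HASH_BITS);
	struct hlist_nulls_node *pos;
	struct obj *obj;

	rcu_read_lock();
begin:
	hlist_nulls_for_each_entry_rcu(obj, pos, &obj_table[slot], node) {
		if (obj->key != key)
			continue;
		/* The object may be concurrently freed and reused; only take
		 * a reference if it is still live.  A full implementation
		 * would also re-check the key after getting the reference.
		 */
		if (!atomic_inc_not_zero(&obj->refcnt))
			goto begin;
		rcu_read_unlock();
		return obj;
	}
	/* End of chain reached.  If the nulls value does not match the slot
	 * we started from, an object we traversed was moved to another
	 * chain (the ESTABLISH -> LISTEN case above): restart the lookup.
	 */
	if (get_nulls_value(pos) != slot)
		goto begin;
	rcu_read_unlock();
	return NULL;
}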
include/linux/rculist_nulls.h

@@ -100,6 +100,43 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
 		first->pprev = &n->next;
 }
 
+/**
+ * hlist_nulls_add_tail_rcu
+ * @n: the element to add to the hash list.
+ * @h: the list to add to.
+ *
+ * Description:
+ * Adds the specified element to the specified hlist_nulls,
+ * while permitting racing traversals.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
+ * or hlist_nulls_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs.  Regardless of the type of CPU, the
+ * list-traversal primitive must be guarded by rcu_read_lock().
+ */
+static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
+					    struct hlist_nulls_head *h)
+{
+	struct hlist_nulls_node *i, *last = NULL;
+
+	/* Note: write side code, so rcu accessors are not needed. */
+	for (i = h->first; !is_a_nulls(i); i = i->next)
+		last = i;
+
+	if (last) {
+		n->next = last->next;
+		n->pprev = &last->next;
+		rcu_assign_pointer(hlist_next_rcu(last), n);
+	} else {
+		hlist_nulls_add_head_rcu(n, h);
+	}
+}
+
 /**
  * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
  * @tpos: the type * to use as a loop cursor.
include/net/inet_hashtables.h

@@ -103,13 +103,19 @@ struct inet_bind_hashbucket {
 	struct hlist_head	chain;
 };
 
-/*
- * Sockets can be hashed in established or listening table
+/* Sockets can be hashed in established or listening table.
+ * We must use different 'nulls' end-of-chain value for all hash buckets :
+ * A socket might transition from ESTABLISH to LISTEN state without
+ * RCU grace period. A lookup in ehash table needs to handle this case.
  */
+#define LISTENING_NULLS_BASE (1U << 29)
 struct inet_listen_hashbucket {
 	spinlock_t		lock;
 	unsigned int		count;
-	struct hlist_head	head;
+	union {
+		struct hlist_head	head;
+		struct hlist_nulls_head	nulls_head;
+	};
 };
 
 /* This is for listening sockets, thus all sockets which possess wildcards. */
include/net/sock.h

@@ -722,6 +722,11 @@ static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_h
 	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
 }
 
+static inline void __sk_nulls_add_node_tail_rcu(struct sock *sk, struct hlist_nulls_head *list)
+{
+	hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
+}
+
 static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
 {
 	sock_hold(sk);
net/ipv4/inet_diag.c

@@ -911,11 +911,12 @@ void inet_diag_dump_icsk(struct inet_hashinfo *hashinfo, struct sk_buff *skb,
 
 	for (i = s_i; i < INET_LHTABLE_SIZE; i++) {
 		struct inet_listen_hashbucket *ilb;
+		struct hlist_nulls_node *node;
 
 		num = 0;
 		ilb = &hashinfo->listening_hash[i];
 		spin_lock(&ilb->lock);
-		sk_for_each(sk, &ilb->head) {
+		sk_nulls_for_each(sk, node, &ilb->nulls_head) {
 			struct inet_sock *inet = inet_sk(sk);
 
 			if (!net_eq(sock_net(sk), net))
net/ipv4/inet_hashtables.c

@@ -516,10 +516,11 @@ static int inet_reuseport_add_sock(struct sock *sk,
 				     struct inet_listen_hashbucket *ilb)
 {
 	struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
+	const struct hlist_nulls_node *node;
 	struct sock *sk2;
 	kuid_t uid = sock_i_uid(sk);
 
-	sk_for_each_rcu(sk2, &ilb->head) {
+	sk_nulls_for_each_rcu(sk2, node, &ilb->nulls_head) {
 		if (sk2 != sk &&
 		    sk2->sk_family == sk->sk_family &&
 		    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
@@ -555,9 +556,9 @@ int __inet_hash(struct sock *sk, struct sock *osk)
 	}
 	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
 		sk->sk_family == AF_INET6)
-		hlist_add_tail_rcu(&sk->sk_node, &ilb->head);
+		__sk_nulls_add_node_tail_rcu(sk, &ilb->nulls_head);
 	else
-		hlist_add_head_rcu(&sk->sk_node, &ilb->head);
+		__sk_nulls_add_node_rcu(sk, &ilb->nulls_head);
 	inet_hash2(hashinfo, sk);
 	ilb->count++;
 	sock_set_flag(sk, SOCK_RCU_FREE);
@@ -606,11 +607,9 @@ void inet_unhash(struct sock *sk)
 		reuseport_detach_sock(sk);
 	if (ilb) {
 		inet_unhash2(hashinfo, sk);
-		__sk_del_node_init(sk);
-		ilb->count--;
-	} else {
-		__sk_nulls_del_node_init_rcu(sk);
+		ilb->count--;
 	}
+	__sk_nulls_del_node_init_rcu(sk);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 unlock:
 	spin_unlock_bh(lock);
@@ -750,7 +749,8 @@ void inet_hashinfo_init(struct inet_hashinfo *h)
 
 	for (i = 0; i < INET_LHTABLE_SIZE; i++) {
 		spin_lock_init(&h->listening_hash[i].lock);
-		INIT_HLIST_HEAD(&h->listening_hash[i].head);
+		INIT_HLIST_NULLS_HEAD(&h->listening_hash[i].nulls_head,
+				      i + LISTENING_NULLS_BASE);
 		h->listening_hash[i].count = 0;
 	}
 
net/ipv4/tcp_ipv4.c

@@ -2147,13 +2147,14 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
 	struct tcp_iter_state *st = seq->private;
 	struct net *net = seq_file_net(seq);
 	struct inet_listen_hashbucket *ilb;
+	struct hlist_nulls_node *node;
 	struct sock *sk = cur;
 
 	if (!sk) {
 get_head:
 		ilb = &tcp_hashinfo.listening_hash[st->bucket];
 		spin_lock(&ilb->lock);
-		sk = sk_head(&ilb->head);
+		sk = sk_nulls_head(&ilb->nulls_head);
 		st->offset = 0;
 		goto get_sk;
 	}
@@ -2161,9 +2162,9 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
 	++st->num;
 	++st->offset;
 
-	sk = sk_next(sk);
+	sk = sk_nulls_next(sk);
 get_sk:
-	sk_for_each_from(sk) {
+	sk_nulls_for_each_from(sk, node) {
 		if (!net_eq(sock_net(sk), net))
 			continue;
 		if (sk->sk_family == afinfo->family)