d7efc6c11b
Alexander Potapenko reported use of uninitialized memory [1]

This happens when inserting a request socket into TCP ehash, in
__sk_nulls_add_node_rcu(), since sk_reuseport is not initialized.

Bug was added by commit d894ba18d4 ("soreuseport: fix ordering for
mixed v4/v6 sockets")

Note that d296ba60d8 ("soreuseport: Resolve merge conflict for v4/v6
ordering fix") missed the opportunity to get rid of
hlist_nulls_add_tail_rcu():

Both UDP sockets and TCP/DCCP listeners no longer use
__sk_nulls_add_node_rcu() for their hash insertion.

Since all other sockets have unique 4-tuple, the reuseport status has
no special meaning, so we can always use hlist_nulls_add_head_rcu()
for them and save a few cycles/instructions.

[1]
==================================================================
BUG: KMSAN: use of uninitialized memory in inet_ehash_insert+0xd40/0x1050
CPU: 0 PID: 0 Comm: swapper/0 Not tainted 4.13.0+ #3288
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011
Call Trace:
 <IRQ>
 __dump_stack lib/dump_stack.c:16
 dump_stack+0x185/0x1d0 lib/dump_stack.c:52
 kmsan_report+0x13f/0x1c0 mm/kmsan/kmsan.c:1016
 __msan_warning_32+0x69/0xb0 mm/kmsan/kmsan_instr.c:766
 __sk_nulls_add_node_rcu ./include/net/sock.h:684
 inet_ehash_insert+0xd40/0x1050 net/ipv4/inet_hashtables.c:413
 reqsk_queue_hash_req net/ipv4/inet_connection_sock.c:754
 inet_csk_reqsk_queue_hash_add+0x1cc/0x300 net/ipv4/inet_connection_sock.c:765
 tcp_conn_request+0x31e7/0x36f0 net/ipv4/tcp_input.c:6414
 tcp_v4_conn_request+0x16d/0x220 net/ipv4/tcp_ipv4.c:1314
 tcp_rcv_state_process+0x42a/0x7210 net/ipv4/tcp_input.c:5917
 tcp_v4_do_rcv+0xa6a/0xcd0 net/ipv4/tcp_ipv4.c:1483
 tcp_v4_rcv+0x3de0/0x4ab0 net/ipv4/tcp_ipv4.c:1763
 ip_local_deliver_finish+0x6bb/0xcb0 net/ipv4/ip_input.c:216
 NF_HOOK ./include/linux/netfilter.h:248
 ip_local_deliver+0x3fa/0x480 net/ipv4/ip_input.c:257
 dst_input ./include/net/dst.h:477
 ip_rcv_finish+0x6fb/0x1540 net/ipv4/ip_input.c:397
 NF_HOOK ./include/linux/netfilter.h:248
 ip_rcv+0x10f6/0x15c0 net/ipv4/ip_input.c:488
 __netif_receive_skb_core+0x36f6/0x3f60 net/core/dev.c:4298
 __netif_receive_skb net/core/dev.c:4336
 netif_receive_skb_internal+0x63c/0x19c0 net/core/dev.c:4497
 napi_skb_finish net/core/dev.c:4858
 napi_gro_receive+0x629/0xa50 net/core/dev.c:4889
 e1000_receive_skb drivers/net/ethernet/intel/e1000/e1000_main.c:4018
 e1000_clean_rx_irq+0x1492/0x1d30 drivers/net/ethernet/intel/e1000/e1000_main.c:4474
 e1000_clean+0x43aa/0x5970 drivers/net/ethernet/intel/e1000/e1000_main.c:3819
 napi_poll net/core/dev.c:5500
 net_rx_action+0x73c/0x1820 net/core/dev.c:5566
 __do_softirq+0x4b4/0x8dd kernel/softirq.c:284
 invoke_softirq kernel/softirq.c:364
 irq_exit+0x203/0x240 kernel/softirq.c:405
 exiting_irq+0xe/0x10 ./arch/x86/include/asm/apic.h:638
 do_IRQ+0x15e/0x1a0 arch/x86/kernel/irq.c:263
 common_interrupt+0x86/0x86

Fixes: d894ba18d4 ("soreuseport: fix ordering for mixed v4/v6 sockets")
Fixes: d296ba60d8 ("soreuseport: Resolve merge conflict for v4/v6 ordering fix")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: Alexander Potapenko <glider@google.com>
Acked-by: Craig Gallek <kraig@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
138 lines, 4.9 KiB, C
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_RCULIST_NULLS_H
#define _LINUX_RCULIST_NULLS_H

#ifdef __KERNEL__

/*
 * RCU-protected list version
 */
#include <linux/list_nulls.h>
#include <linux/rcupdate.h>

/**
 * hlist_nulls_del_init_rcu - deletes entry from hash list with re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_nulls_unhashed() on the node returns true after this. It is
 * useful for RCU-based read-lockfree traversal if the writer side
 * must know whether the list entry is still hashed or already unhashed.
 *
 * In particular, it means that we can not poison the forward pointers
 * that may still be used for walking the hash list and we can only
 * zero the pprev pointer so hlist_nulls_unhashed() will return true after
 * this.
 *
 * The caller must take whatever precautions are necessary (such as
 * holding appropriate locks) to avoid racing with another
 * list-mutation primitive, such as hlist_nulls_add_head_rcu() or
 * hlist_nulls_del_rcu(), running on this same list. However, it is
 * perfectly legal to run concurrently with the _rcu list-traversal
 * primitives, such as hlist_nulls_for_each_entry_rcu().
 */
static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
{
	if (!hlist_nulls_unhashed(n)) {
		__hlist_nulls_del(n);
		n->pprev = NULL;
	}
}
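
/*
 * Illustrative usage sketch, not part of this header: a writer unhashes an
 * object under its bucket lock; afterwards hlist_nulls_unhashed() is true and
 * the object can be re-inserted later without further cleanup.
 * "struct my_obj" and "my_obj_lock" are hypothetical names for this example.
 *
 *	struct my_obj {
 *		struct hlist_nulls_node node;
 *		struct rcu_head rcu;
 *		int key;
 *	};
 *
 *	static void my_obj_unhash(struct my_obj *obj)
 *	{
 *		spin_lock(&my_obj_lock);
 *		hlist_nulls_del_init_rcu(&obj->node);	// pprev becomes NULL
 *		spin_unlock(&my_obj_lock);
 *	}
 */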

#define hlist_nulls_first_rcu(head) \
	(*((struct hlist_nulls_node __rcu __force **)&(head)->first))

#define hlist_nulls_next_rcu(node) \
	(*((struct hlist_nulls_node __rcu __force **)&(node)->next))

/**
 * hlist_nulls_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_nulls_unhashed() on entry does not return true after this;
 * the entry is in an undefined state. It is useful for RCU-based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
 * or hlist_nulls_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_nulls_for_each_entry().
 */
static inline void hlist_nulls_del_rcu(struct hlist_nulls_node *n)
{
	__hlist_nulls_del(n);
	n->pprev = LIST_POISON2;
}
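
/*
 * Illustrative usage sketch, not part of this header: readers may still be
 * walking through an entry removed with hlist_nulls_del_rcu(), so its memory
 * must not be reused before a grace period elapses (here via kfree_rcu()),
 * unless the objects come from a SLAB_TYPESAFE_BY_RCU cache.  "struct my_obj"
 * reuses the hypothetical structure from the example above.
 *
 *	static void my_obj_unlink_and_free(struct my_obj *obj)
 *	{
 *		// caller holds the bucket lock
 *		hlist_nulls_del_rcu(&obj->node);
 *		kfree_rcu(obj, rcu);
 *	}
 */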

/**
 * hlist_nulls_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist_nulls,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
 * or hlist_nulls_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.  Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
					struct hlist_nulls_head *h)
{
	struct hlist_nulls_node *first = h->first;

	n->next = first;
	n->pprev = &h->first;
	rcu_assign_pointer(hlist_nulls_first_rcu(h), n);
	if (!is_a_nulls(first))
		first->pprev = &n->next;
}
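
/*
 * Illustrative usage sketch, not part of this header: insertion is a
 * writer-side operation serialized by a lock, while RCU readers may walk the
 * chain concurrently.  The rcu_assign_pointer() above orders the
 * initialization of the object's fields before the entry becomes visible.
 * "my_table" and "my_table_lock" are hypothetical.
 *
 *	static void my_obj_hash(struct my_obj *obj, int key, unsigned int slot)
 *	{
 *		obj->key = key;			// initialize before publishing
 *		spin_lock(&my_table_lock);
 *		hlist_nulls_add_head_rcu(&obj->node, &my_table[slot]);
 *		spin_unlock(&my_table_lock);
 *	}
 */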

/**
 * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_nulls_node to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_nulls_node within the struct.
 *
 * The barrier() is needed to make sure the compiler doesn't cache the
 * first element [1], as this loop can be restarted [2]
 * [1] Documentation/atomic_ops.txt around line 114
 * [2] Documentation/RCU/rculist_nulls.txt around line 146
 */
#define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member)		\
	for (({barrier();}),							\
	     pos = rcu_dereference_raw(hlist_nulls_first_rcu(head));		\
		(!is_a_nulls(pos)) &&						\
		({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
		pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)))
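
/*
 * Illustrative lookup sketch, not part of this header, following the pattern
 * in Documentation/RCU/rculist_nulls.txt: an RCU reader walks a chain and, if
 * the nulls value found at the end does not match the slot it started from,
 * the object it stood on was moved to another chain and the lookup restarts.
 * "struct my_obj", "my_table", "obj_get_ref()" and "obj_put_ref()" are
 * hypothetical.
 *
 *	struct my_obj *obj;
 *	struct hlist_nulls_node *pos;
 *
 *	rcu_read_lock();
 * begin:
 *	hlist_nulls_for_each_entry_rcu(obj, pos, &my_table[slot], node) {
 *		if (obj->key == key) {
 *			if (!obj_get_ref(obj))		// might be being freed
 *				goto begin;
 *			if (obj->key != key) {		// re-check under the ref
 *				obj_put_ref(obj);
 *				goto begin;
 *			}
 *			goto found;
 *		}
 *	}
 *	if (get_nulls_value(pos) != slot)	// ended on another chain: restart
 *		goto begin;
 *	obj = NULL;
 * found:
 *	rcu_read_unlock();
 */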

/**
 * hlist_nulls_for_each_entry_safe -
 *   iterate over list of given type safe against removal of list entry
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_nulls_node to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_nulls_node within the struct.
 */
#define hlist_nulls_for_each_entry_safe(tpos, pos, head, member)		\
	for (({barrier();}),							\
	     pos = rcu_dereference_raw(hlist_nulls_first_rcu(head));		\
		(!is_a_nulls(pos)) &&						\
		({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member);	\
		   pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)); 1; });)
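
/*
 * Illustrative usage sketch, not part of this header: tearing down one chain
 * on the writer side (bucket lock held).  The "safe" variant caches the next
 * pointer before the loop body runs, so entries may be unlinked and handed to
 * kfree_rcu() from inside the loop.  Names are hypothetical as above.
 *
 *	hlist_nulls_for_each_entry_safe(obj, pos, &my_table[slot], node) {
 *		hlist_nulls_del_rcu(&obj->node);
 *		kfree_rcu(obj, rcu);
 *	}
 */
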
#endif
#endif