mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-22 10:39:23 +07:00
369f61bee0
A team interface can be nested, and its lock variable can be nested too.
But this lock uses a static lockdep key, and there is no nested-locking
handling code such as mutex_lock_nested() and so on.
So lockdep would warn about a circular locking scenario that
cannot actually happen.
In order to fix this, this patch makes the team module use a dynamic lock key
instead of a static key.
Test commands:
ip link add team0 type team
ip link add team1 type team
ip link set team0 master team1
ip link set team0 nomaster
ip link set team1 master team0
ip link set team1 nomaster
Splat that looks like:
[ 40.364352] WARNING: possible recursive locking detected
[ 40.364964] 5.4.0-rc3+ #96 Not tainted
[ 40.365405] --------------------------------------------
[ 40.365973] ip/750 is trying to acquire lock:
[ 40.366542] ffff888060b34c40 (&team->lock){+.+.}, at: team_set_mac_address+0x151/0x290 [team]
[ 40.367689]
but task is already holding lock:
[ 40.368729] ffff888051201c40 (&team->lock){+.+.}, at: team_del_slave+0x29/0x60 [team]
[ 40.370280]
other info that might help us debug this:
[ 40.371159] Possible unsafe locking scenario:
[ 40.371942] CPU0
[ 40.372338] ----
[ 40.372673] lock(&team->lock);
[ 40.373115] lock(&team->lock);
[ 40.373549]
*** DEADLOCK ***
[ 40.374432] May be due to missing lock nesting notation
[ 40.375338] 2 locks held by ip/750:
[ 40.375851] #0: ffffffffabcc42b0 (rtnl_mutex){+.+.}, at: rtnetlink_rcv_msg+0x466/0x8a0
[ 40.376927] #1: ffff888051201c40 (&team->lock){+.+.}, at: team_del_slave+0x29/0x60 [team]
[ 40.377989]
stack backtrace:
[ 40.378650] CPU: 0 PID: 750 Comm: ip Not tainted 5.4.0-rc3+ #96
[ 40.379368] Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006
[ 40.380574] Call Trace:
[ 40.381208] dump_stack+0x7c/0xbb
[ 40.381959] __lock_acquire+0x269d/0x3de0
[ 40.382817] ? register_lock_class+0x14d0/0x14d0
[ 40.383784] ? check_chain_key+0x236/0x5d0
[ 40.384518] lock_acquire+0x164/0x3b0
[ 40.385074] ? team_set_mac_address+0x151/0x290 [team]
[ 40.385805] __mutex_lock+0x14d/0x14c0
[ 40.386371] ? team_set_mac_address+0x151/0x290 [team]
[ 40.387038] ? team_set_mac_address+0x151/0x290 [team]
[ 40.387632] ? mutex_lock_io_nested+0x1380/0x1380
[ 40.388245] ? team_del_slave+0x60/0x60 [team]
[ 40.388752] ? rcu_read_lock_sched_held+0x90/0xc0
[ 40.389304] ? rcu_read_lock_bh_held+0xa0/0xa0
[ 40.389819] ? lock_acquire+0x164/0x3b0
[ 40.390285] ? lockdep_rtnl_is_held+0x16/0x20
[ 40.390797] ? team_port_get_rtnl+0x90/0xe0 [team]
[ 40.391353] ? __module_text_address+0x13/0x140
[ 40.391886] ? team_set_mac_address+0x151/0x290 [team]
[ 40.392547] team_set_mac_address+0x151/0x290 [team]
[ 40.393111] dev_set_mac_address+0x1f0/0x3f0
[ ... ]
Fixes: 3d249d4ca7
("net: introduce ethernet teaming device")
Signed-off-by: Taehee Yoo <ap420073@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
319 lines
7.9 KiB
C
319 lines
7.9 KiB
C
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
|
/*
|
|
* include/linux/if_team.h - Network team device driver header
|
|
* Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
|
|
*/
|
|
#ifndef _LINUX_IF_TEAM_H_
|
|
#define _LINUX_IF_TEAM_H_
|
|
|
|
#include <linux/netpoll.h>
|
|
#include <net/sch_generic.h>
|
|
#include <linux/types.h>
|
|
#include <uapi/linux/if_team.h>
|
|
|
|
/* Per-CPU packet/byte statistics for a team device.  The 64-bit counters
 * are protected by @syncp (u64_stats_sync); the u32 drop counters are
 * updated without it.
 */
struct team_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	u64			rx_multicast;
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;	/* guards the u64 counters above */
	u32			rx_dropped;
	u32			tx_dropped;
	u32			rx_nohandler;
};
|
|
|
|
struct team;
|
|
|
|
struct team_port {
|
|
struct net_device *dev;
|
|
struct hlist_node hlist; /* node in enabled ports hash list */
|
|
struct list_head list; /* node in ordinary list */
|
|
struct team *team;
|
|
int index; /* index of enabled port. If disabled, it's set to -1 */
|
|
|
|
bool linkup; /* either state.linkup or user.linkup */
|
|
|
|
struct {
|
|
bool linkup;
|
|
u32 speed;
|
|
u8 duplex;
|
|
} state;
|
|
|
|
/* Values set by userspace */
|
|
struct {
|
|
bool linkup;
|
|
bool linkup_enabled;
|
|
} user;
|
|
|
|
/* Custom gennetlink interface related flags */
|
|
bool changed;
|
|
bool removed;
|
|
|
|
/*
|
|
* A place for storing original values of the device before it
|
|
* become a port.
|
|
*/
|
|
struct {
|
|
unsigned char dev_addr[MAX_ADDR_LEN];
|
|
unsigned int mtu;
|
|
} orig;
|
|
|
|
#ifdef CONFIG_NET_POLL_CONTROLLER
|
|
struct netpoll *np;
|
|
#endif
|
|
|
|
s32 priority; /* lower number ~ higher priority */
|
|
u16 queue_id;
|
|
struct list_head qom_list; /* node in queue override mapping list */
|
|
struct rcu_head rcu;
|
|
long mode_priv[0];
|
|
};
|
|
|
|
static inline struct team_port *team_port_get_rcu(const struct net_device *dev)
|
|
{
|
|
return rcu_dereference(dev->rx_handler_data);
|
|
}
|
|
|
|
static inline bool team_port_enabled(struct team_port *port)
|
|
{
|
|
return port->index != -1;
|
|
}
|
|
|
|
static inline bool team_port_txable(struct team_port *port)
|
|
{
|
|
return port->linkup && team_port_enabled(port);
|
|
}
|
|
|
|
static inline bool team_port_dev_txable(const struct net_device *port_dev)
|
|
{
|
|
struct team_port *port;
|
|
bool txable;
|
|
|
|
rcu_read_lock();
|
|
port = team_port_get_rcu(port_dev);
|
|
txable = port ? team_port_txable(port) : false;
|
|
rcu_read_unlock();
|
|
|
|
return txable;
|
|
}
|
|
|
|
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Transmit @skb through @port's netpoll instance, if one is attached.
 * Silently drops to a no-op when the port has no netpoll (@np is NULL).
 */
static inline void team_netpoll_send_skb(struct team_port *port,
					 struct sk_buff *skb)
{
	struct netpoll *np = port->np;

	if (np)
		netpoll_send_skb(np, skb);
}
#else
/* Stub when netpoll support is compiled out. */
static inline void team_netpoll_send_skb(struct team_port *port,
					 struct sk_buff *skb)
{
}
#endif
|
|
|
|
/* Callbacks a team mode (e.g. roundrobin, activebackup) implements.
 * All hooks are optional from this header's point of view; semantics of
 * each depend on the mode implementation.
 */
struct team_mode_ops {
	/* Mode setup/teardown on a team instance. */
	int (*init)(struct team *team);
	void (*exit)(struct team *team);
	/* RX hook; decides what the rx_handler does with @skb. */
	rx_handler_result_t (*receive)(struct team *team,
				       struct team_port *port,
				       struct sk_buff *skb);
	/* TX hook; returns whether the skb was consumed. */
	bool (*transmit)(struct team *team, struct sk_buff *skb);
	/* Port lifecycle notifications. */
	int (*port_enter)(struct team *team, struct team_port *port);
	void (*port_leave)(struct team *team, struct team_port *port);
	void (*port_change_dev_addr)(struct team *team, struct team_port *port);
	void (*port_enabled)(struct team *team, struct team_port *port);
	void (*port_disabled)(struct team *team, struct team_port *port);
};
|
|
|
|
extern int team_modeop_port_enter(struct team *team, struct team_port *port);
|
|
extern void team_modeop_port_change_dev_addr(struct team *team,
|
|
struct team_port *port);
|
|
|
|
/* Value types a team option can carry (see struct team_gsetter_ctx). */
enum team_option_type {
	TEAM_OPTION_TYPE_U32,
	TEAM_OPTION_TYPE_STRING,
	TEAM_OPTION_TYPE_BINARY,
	TEAM_OPTION_TYPE_BOOL,
	TEAM_OPTION_TYPE_S32,
};
|
|
|
|
/* Identifies one concrete instance of an option (array slot and,
 * for per-port options, the port it belongs to).
 */
struct team_option_inst_info {
	u32 array_index;
	struct team_port *port; /* != NULL if per-port */
};
|
|
|
|
/* Context passed to option getter/setter callbacks: the value (typed
 * union, selected by the option's team_option_type) plus the instance
 * it applies to.
 */
struct team_gsetter_ctx {
	union {
		u32 u32_val;
		const char *str_val;
		struct {
			const void *ptr;
			u32 len;
		} bin_val;
		bool bool_val;
		s32 s32_val;
	} data;
	struct team_option_inst_info *info;
};
|
|
|
|
/* Descriptor for one team option, registered via team_options_register().
 * The getter/setter operate through a team_gsetter_ctx.
 */
struct team_option {
	struct list_head list;
	const char *name;
	bool per_port;		/* one instance per port instead of per team */
	unsigned int array_size; /* != 0 means the option is array */
	enum team_option_type type;
	int (*init)(struct team *team, struct team_option_inst_info *info);
	int (*getter)(struct team *team, struct team_gsetter_ctx *ctx);
	int (*setter)(struct team *team, struct team_gsetter_ctx *ctx);
};
|
|
|
|
extern void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info);
|
|
extern void team_options_change_check(struct team *team);
|
|
|
|
/* Static description of a team mode, registered via team_mode_register(). */
struct team_mode {
	const char *kind;		/* mode name, e.g. matched by userspace */
	struct module *owner;		/* module providing this mode */
	size_t priv_size;		/* size of team->mode_priv area needed */
	size_t port_priv_size;		/* size of per-port mode_priv area */
	const struct team_mode_ops *ops;
	enum netdev_lag_tx_type lag_tx_type;
};
|
|
|
|
#define TEAM_PORT_HASHBITS 4
|
|
#define TEAM_PORT_HASHENTRIES (1 << TEAM_PORT_HASHBITS)
|
|
|
|
#define TEAM_MODE_PRIV_LONGS 4
|
|
#define TEAM_MODE_PRIV_SIZE (sizeof(long) * TEAM_MODE_PRIV_LONGS)
|
|
|
|
/* Main per-team-device state. */
struct team {
	struct net_device *dev; /* associated netdevice */
	struct team_pcpu_stats __percpu *pcpu_stats;

	struct mutex lock; /* used for overall locking, e.g. port lists write */

	/*
	 * List of enabled ports and their count
	 */
	int en_port_count;
	struct hlist_head en_port_hlist[TEAM_PORT_HASHENTRIES];

	struct list_head port_list; /* list of all ports */

	struct list_head option_list;
	struct list_head option_inst_list; /* list of option instances */

	const struct team_mode *mode;
	struct team_mode_ops ops;	/* copy of mode->ops for this instance */
	bool user_carrier_enabled;
	bool queue_override_enabled;
	struct list_head *qom_lists; /* array of queue override mapping lists */
	bool port_mtu_change_allowed;
	/* Delayed-work state for repeated peer notifications. */
	struct {
		unsigned int count;
		unsigned int interval; /* in ms */
		atomic_t count_pending;
		struct delayed_work dw;
	} notify_peers;
	/* Delayed-work state for repeated multicast rejoins. */
	struct {
		unsigned int count;
		unsigned int interval; /* in ms */
		atomic_t count_pending;
		struct delayed_work dw;
	} mcast_rejoin;
	/* Per-instance lockdep class key for @lock: team devices can be
	 * nested (team under team), and a single static key would make
	 * lockdep report a false recursive-locking splat for the nested
	 * case.
	 */
	struct lock_class_key team_lock_key;
	long mode_priv[TEAM_MODE_PRIV_LONGS];
};
|
|
|
|
static inline int team_dev_queue_xmit(struct team *team, struct team_port *port,
|
|
struct sk_buff *skb)
|
|
{
|
|
BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
|
|
sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
|
|
skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
|
|
|
|
skb->dev = port->dev;
|
|
if (unlikely(netpoll_tx_running(team->dev))) {
|
|
team_netpoll_send_skb(port, skb);
|
|
return 0;
|
|
}
|
|
return dev_queue_xmit(skb);
|
|
}
|
|
|
|
static inline struct hlist_head *team_port_index_hash(struct team *team,
|
|
int port_index)
|
|
{
|
|
return &team->en_port_hlist[port_index & (TEAM_PORT_HASHENTRIES - 1)];
|
|
}
|
|
|
|
static inline struct team_port *team_get_port_by_index(struct team *team,
|
|
int port_index)
|
|
{
|
|
struct team_port *port;
|
|
struct hlist_head *head = team_port_index_hash(team, port_index);
|
|
|
|
hlist_for_each_entry(port, head, hlist)
|
|
if (port->index == port_index)
|
|
return port;
|
|
return NULL;
|
|
}
|
|
|
|
static inline int team_num_to_port_index(struct team *team, unsigned int num)
|
|
{
|
|
int en_port_count = READ_ONCE(team->en_port_count);
|
|
|
|
if (unlikely(!en_port_count))
|
|
return 0;
|
|
return num % en_port_count;
|
|
}
|
|
|
|
static inline struct team_port *team_get_port_by_index_rcu(struct team *team,
|
|
int port_index)
|
|
{
|
|
struct team_port *port;
|
|
struct hlist_head *head = team_port_index_hash(team, port_index);
|
|
|
|
hlist_for_each_entry_rcu(port, head, hlist)
|
|
if (port->index == port_index)
|
|
return port;
|
|
return NULL;
|
|
}
|
|
|
|
/* Starting from @port, find the first txable port (link up and enabled)
 * in round-robin order over the RCU-protected port list: @port itself
 * first, then the entries after it, then wrap around from the list head
 * back up to (but not including) @port.  Returns NULL if nothing is
 * txable.  Caller must hold rcu_read_lock().
 */
static inline struct team_port *
team_get_first_port_txable_rcu(struct team *team, struct team_port *port)
{
	struct team_port *cur;

	/* common case: the suggested port can transmit */
	if (likely(team_port_txable(port)))
		return port;
	cur = port;
	/* scan the remainder of the list after @port */
	list_for_each_entry_continue_rcu(cur, &team->port_list, list)
		if (team_port_txable(cur))
			return cur;
	/* wrap around: scan from the head, stopping when we reach @port */
	list_for_each_entry_rcu(cur, &team->port_list, list) {
		if (cur == port)
			break;
		if (team_port_txable(cur))
			return cur;
	}
	return NULL;
}
|
|
|
|
extern int team_options_register(struct team *team,
|
|
const struct team_option *option,
|
|
size_t option_count);
|
|
extern void team_options_unregister(struct team *team,
|
|
const struct team_option *option,
|
|
size_t option_count);
|
|
extern int team_mode_register(const struct team_mode *mode);
|
|
extern void team_mode_unregister(const struct team_mode *mode);
|
|
|
|
#define TEAM_DEFAULT_NUM_TX_QUEUES 16
|
|
#define TEAM_DEFAULT_NUM_RX_QUEUES 16
|
|
|
|
#define MODULE_ALIAS_TEAM_MODE(kind) MODULE_ALIAS("team-mode-" kind)
|
|
|
|
#endif /* _LINUX_IF_TEAM_H_ */
|