Merge branch 'stacked_netdevice_locking'
Vlad Yasevich says:
====================
Fix lockdep issues with stacked devices
Recent commit dc8eaaa006 ("vlan: Fix lockdep warning when vlan dev handle notification")
attempted to solve lockdep issues with vlans where multiple
vlans were stacked. However, the code does not work correctly
when the vlan stack is interspersed with other devices in between
the vlans. Additionally, similar lockdep issues show up with other
devices.
This series provides a generic way to solve these issues for any
devices that can be stacked. It also addresses the concern for
vlan and macvlan devices. I am not sure whether it makes sense
to do so for other types like team, vxlan, and bond.
Thanks
-vlad
Since v2:
- Remove rcu variants from patch1, since that function is called
only under rtnl.
- Fix whitespace problems reported by checkpatch
Since v1:
- Fixed up a goofed-up rebase.
* is_vlan_dev() should be bool and that change belongs in patch3.
* patch4 should not have any vlan changes in it.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 6bd64ac0f9
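As background for the diff that follows: the series adds two generic pieces, dev_get_nest_level() and the ndo_get_lock_subclass hook, and converts vlan and macvlan to use them. Below is a minimal sketch of how some other stackable driver could opt in. The driver name "foo", struct foo_dev, and foo_set_nest_level() are hypothetical illustrations, not code from this series; only the two generic interfaces are real, and the forward declaration of the ops struct mirrors the pattern vlan_dev.c already uses.

/*
 * Illustration only -- not part of this commit.  A hypothetical stacked
 * driver "foo" opting in to the generic mechanism added by this series.
 */
#include <linux/netdevice.h>

struct foo_dev {
        int nest_level;         /* depth of this device in a foo stack */
};

static const struct net_device_ops foo_netdev_ops;     /* forward declaration */

/* type_check callback for dev_get_nest_level() */
static bool netif_is_foo(struct net_device *dev)
{
        return dev->netdev_ops == &foo_netdev_ops;
}

/* Reported to netif_addr_lock_nested() as the lockdep subclass. */
static int foo_get_lock_subclass(struct net_device *dev)
{
        return ((struct foo_dev *)netdev_priv(dev))->nest_level;
}

/* Call under RTNL when a foo device is created on top of lowerdev:
 * one level deeper than the deepest foo device anywhere below us,
 * even if devices of other types sit in between.
 */
static void foo_set_nest_level(struct net_device *dev,
                               struct net_device *lowerdev)
{
        struct foo_dev *foo = netdev_priv(dev);

        foo->nest_level = dev_get_nest_level(lowerdev, netif_is_foo) + 1;
}

static const struct net_device_ops foo_netdev_ops = {
        /* ...usual ndo_* hooks... */
        .ndo_get_lock_subclass  = foo_get_lock_subclass,
};

Whether team, bonding, or vxlan should be converted the same way is the open question raised in the cover letter above. The combined diff follows.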
@@ -517,6 +517,11 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
 #define MACVLAN_STATE_MASK \
         ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
 
+static int macvlan_get_nest_level(struct net_device *dev)
+{
+        return ((struct macvlan_dev *)netdev_priv(dev))->nest_level;
+}
+
 static void macvlan_set_lockdep_class_one(struct net_device *dev,
                                           struct netdev_queue *txq,
                                           void *_unused)
@@ -527,8 +532,9 @@ static void macvlan_set_lockdep_class_one(struct net_device *dev,
 
 static void macvlan_set_lockdep_class(struct net_device *dev)
 {
-        lockdep_set_class(&dev->addr_list_lock,
-                          &macvlan_netdev_addr_lock_key);
+        lockdep_set_class_and_subclass(&dev->addr_list_lock,
+                                       &macvlan_netdev_addr_lock_key,
+                                       macvlan_get_nest_level(dev));
         netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL);
 }
 
@@ -723,6 +729,7 @@ static const struct net_device_ops macvlan_netdev_ops = {
         .ndo_fdb_add = macvlan_fdb_add,
         .ndo_fdb_del = macvlan_fdb_del,
         .ndo_fdb_dump = ndo_dflt_fdb_dump,
+        .ndo_get_lock_subclass = macvlan_get_nest_level,
 };
 
 void macvlan_common_setup(struct net_device *dev)
@@ -851,6 +858,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
         vlan->dev = dev;
         vlan->port = port;
         vlan->set_features = MACVLAN_FEATURES;
+        vlan->nest_level = dev_get_nest_level(lowerdev, netif_is_macvlan) + 1;
 
         vlan->mode = MACVLAN_MODE_VEPA;
         if (data && data[IFLA_MACVLAN_MODE])
@@ -56,6 +56,7 @@ struct macvlan_dev {
         int numqueues;
         netdev_features_t tap_features;
         int minor;
+        int nest_level;
 };
 
 static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
@@ -73,7 +73,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb)
 /* found in socket.c */
 extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *));
 
-static inline int is_vlan_dev(struct net_device *dev)
+static inline bool is_vlan_dev(struct net_device *dev)
 {
         return dev->priv_flags & IFF_802_1Q_VLAN;
 }
@@ -159,6 +159,7 @@ struct vlan_dev_priv {
 #ifdef CONFIG_NET_POLL_CONTROLLER
         struct netpoll *netpoll;
 #endif
+        unsigned int nest_level;
 };
 
 static inline struct vlan_dev_priv *vlan_dev_priv(const struct net_device *dev)
@@ -1144,6 +1144,7 @@ struct net_device_ops {
         netdev_tx_t (*ndo_dfwd_start_xmit) (struct sk_buff *skb,
                                             struct net_device *dev,
                                             void *priv);
+        int (*ndo_get_lock_subclass)(struct net_device *dev);
 };
 
 /**
@@ -2950,7 +2951,12 @@ static inline void netif_addr_lock(struct net_device *dev)
 
 static inline void netif_addr_lock_nested(struct net_device *dev)
 {
-        spin_lock_nested(&dev->addr_list_lock, SINGLE_DEPTH_NESTING);
+        int subclass = SINGLE_DEPTH_NESTING;
+
+        if (dev->netdev_ops->ndo_get_lock_subclass)
+                subclass = dev->netdev_ops->ndo_get_lock_subclass(dev);
+
+        spin_lock_nested(&dev->addr_list_lock, subclass);
 }
 
 static inline void netif_addr_lock_bh(struct net_device *dev)
@@ -3077,6 +3083,14 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
              priv; \
              priv = netdev_lower_get_next_private_rcu(dev, &(iter)))
 
+void *netdev_lower_get_next(struct net_device *dev,
+                            struct list_head **iter);
+#define netdev_for_each_lower_dev(dev, ldev, iter) \
+        for (iter = &(dev)->adj_list.lower, \
+             ldev = netdev_lower_get_next(dev, &(iter)); \
+             ldev; \
+             ldev = netdev_lower_get_next(dev, &(iter)))
+
 void *netdev_adjacent_get_private(struct list_head *adj_list);
 void *netdev_lower_get_first_private_rcu(struct net_device *dev);
 struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
@@ -3092,6 +3106,8 @@ void netdev_upper_dev_unlink(struct net_device *dev,
 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname);
 void *netdev_lower_dev_get_private(struct net_device *dev,
                                    struct net_device *lower_dev);
+int dev_get_nest_level(struct net_device *dev,
+                       bool (*type_check)(struct net_device *dev));
 int skb_checksum_help(struct sk_buff *skb);
 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
                                   netdev_features_t features, bool tx_path);
@@ -169,6 +169,7 @@ int register_vlan_dev(struct net_device *dev)
         if (err < 0)
                 goto out_uninit_mvrp;
 
+        vlan->nest_level = dev_get_nest_level(real_dev, is_vlan_dev) + 1;
         err = register_netdevice(dev);
         if (err < 0)
                 goto out_uninit_mvrp;
@@ -493,48 +493,10 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
         }
 }
 
-static int vlan_calculate_locking_subclass(struct net_device *real_dev)
-{
-        int subclass = 0;
-
-        while (is_vlan_dev(real_dev)) {
-                subclass++;
-                real_dev = vlan_dev_priv(real_dev)->real_dev;
-        }
-
-        return subclass;
-}
-
-static void vlan_dev_mc_sync(struct net_device *to, struct net_device *from)
-{
-        int err = 0, subclass;
-
-        subclass = vlan_calculate_locking_subclass(to);
-
-        spin_lock_nested(&to->addr_list_lock, subclass);
-        err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
-        if (!err)
-                __dev_set_rx_mode(to);
-        spin_unlock(&to->addr_list_lock);
-}
-
-static void vlan_dev_uc_sync(struct net_device *to, struct net_device *from)
-{
-        int err = 0, subclass;
-
-        subclass = vlan_calculate_locking_subclass(to);
-
-        spin_lock_nested(&to->addr_list_lock, subclass);
-        err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
-        if (!err)
-                __dev_set_rx_mode(to);
-        spin_unlock(&to->addr_list_lock);
-}
-
 static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
 {
-        vlan_dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
-        vlan_dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+        dev_mc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
+        dev_uc_sync(vlan_dev_priv(vlan_dev)->real_dev, vlan_dev);
 }
 
 /*
@@ -562,6 +524,11 @@ static void vlan_dev_set_lockdep_class(struct net_device *dev, int subclass)
         netdev_for_each_tx_queue(dev, vlan_dev_set_lockdep_one, &subclass);
 }
 
+static int vlan_dev_get_lock_subclass(struct net_device *dev)
+{
+        return vlan_dev_priv(dev)->nest_level;
+}
+
 static const struct header_ops vlan_header_ops = {
         .create = vlan_dev_hard_header,
         .rebuild = vlan_dev_rebuild_header,
@@ -597,7 +564,6 @@ static const struct net_device_ops vlan_netdev_ops;
 static int vlan_dev_init(struct net_device *dev)
 {
         struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
-        int subclass = 0;
 
         netif_carrier_off(dev);
 
@@ -646,8 +612,7 @@ static int vlan_dev_init(struct net_device *dev)
 
         SET_NETDEV_DEVTYPE(dev, &vlan_type);
 
-        subclass = vlan_calculate_locking_subclass(dev);
-        vlan_dev_set_lockdep_class(dev, subclass);
+        vlan_dev_set_lockdep_class(dev, vlan_dev_get_lock_subclass(dev));
 
         vlan_dev_priv(dev)->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
         if (!vlan_dev_priv(dev)->vlan_pcpu_stats)
@@ -819,6 +784,7 @@ static const struct net_device_ops vlan_netdev_ops = {
         .ndo_netpoll_cleanup = vlan_dev_netpoll_cleanup,
 #endif
         .ndo_fix_features = vlan_dev_fix_features,
+        .ndo_get_lock_subclass = vlan_dev_get_lock_subclass,
 };
 
 void vlan_setup(struct net_device *dev)
@@ -4622,6 +4622,32 @@ void *netdev_lower_get_next_private_rcu(struct net_device *dev,
 }
 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
 
+/**
+ * netdev_lower_get_next - Get the next device from the lower neighbour
+ *                         list
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next netdev_adjacent from the dev's lower neighbour
+ * list, starting from iter position. The caller must hold RTNL lock or
+ * its own locking that guarantees that the neighbour lower
+ * list will remain unchainged.
+ */
+void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
+{
+        struct netdev_adjacent *lower;
+
+        lower = list_entry((*iter)->next, struct netdev_adjacent, list);
+
+        if (&lower->list == &dev->adj_list.lower)
+                return NULL;
+
+        *iter = &lower->list;
+
+        return lower->dev;
+}
+EXPORT_SYMBOL(netdev_lower_get_next);
+
 /**
  * netdev_lower_get_first_private_rcu - Get the first ->private from the
  *                                      lower neighbour list, RCU
@@ -5072,6 +5098,30 @@ void *netdev_lower_dev_get_private(struct net_device *dev,
 }
 EXPORT_SYMBOL(netdev_lower_dev_get_private);
 
+
+int dev_get_nest_level(struct net_device *dev,
+                       bool (*type_check)(struct net_device *dev))
+{
+        struct net_device *lower = NULL;
+        struct list_head *iter;
+        int max_nest = -1;
+        int nest;
+
+        ASSERT_RTNL();
+
+        netdev_for_each_lower_dev(dev, lower, iter) {
+                nest = dev_get_nest_level(lower, type_check);
+                if (max_nest < nest)
+                        max_nest = nest;
+        }
+
+        if (type_check(dev))
+                max_nest++;
+
+        return max_nest;
+}
+EXPORT_SYMBOL(dev_get_nest_level);
+
 static void dev_change_rx_flags(struct net_device *dev, int flags)
 {
         const struct net_device_ops *ops = dev->netdev_ops;
@@ -5237,7 +5287,6 @@ void __dev_set_rx_mode(struct net_device *dev)
         if (ops->ndo_set_rx_mode)
                 ops->ndo_set_rx_mode(dev);
 }
-EXPORT_SYMBOL(__dev_set_rx_mode);
 
 void dev_set_rx_mode(struct net_device *dev)
 {
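As a closing illustration (not part of the commit): a tiny self-contained userspace model of the recursion in dev_get_nest_level() above, showing why a stack in which vlans are interspersed with a macvlan still gets distinct nest levels. The toy types, device names, and toy_* functions are made up; only the walk itself mirrors the kernel function.

/* Toy userspace model (illustration only) of the dev_get_nest_level() walk.
 * It visits the entire lower-device graph, so devices of the checked type
 * are counted even when other device types sit in between. Not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct toy_dev {
        const char *name;
        bool is_vlan;                  /* stands in for is_vlan_dev() */
        struct toy_dev *lower[4];      /* stands in for adj_list.lower */
        int nlower;
};

static bool toy_is_vlan(struct toy_dev *dev)
{
        return dev->is_vlan;
}

/* Same logic as dev_get_nest_level() in the diff above. */
static int toy_get_nest_level(struct toy_dev *dev,
                              bool (*type_check)(struct toy_dev *))
{
        int max_nest = -1;

        for (int i = 0; i < dev->nlower; i++) {
                int nest = toy_get_nest_level(dev->lower[i], type_check);
                if (max_nest < nest)
                        max_nest = nest;
        }
        if (type_check(dev))
                max_nest++;
        return max_nest;
}

int main(void)
{
        /* Stack: eth0 <- vlan10 <- macvlan0 <- vlan20 (macvlan between two vlans) */
        struct toy_dev eth0     = { "eth0",     false, { 0 },         0 };
        struct toy_dev vlan10   = { "vlan10",   true,  { &eth0 },     1 };
        struct toy_dev macvlan0 = { "macvlan0", false, { &vlan10 },   1 };
        struct toy_dev vlan20   = { "vlan20",   true,  { &macvlan0 }, 1 };

        (void)vlan20;
        /* Mirrors register_vlan_dev(): nest_level = dev_get_nest_level(real_dev, is_vlan_dev) + 1 */
        printf("vlan10 nest_level = %d\n", toy_get_nest_level(&eth0, toy_is_vlan) + 1);     /* 0 */
        printf("vlan20 nest_level = %d\n", toy_get_nest_level(&macvlan0, toy_is_vlan) + 1); /* 1 */
        return 0;
}

The program prints nest_level 0 for vlan10 and 1 for vlan20, matching what register_vlan_dev() computes in the diff, and those values are what netif_addr_lock_nested() now passes to spin_lock_nested() via ndo_get_lock_subclass, so the two vlan addr_list_locks remain distinguishable to lockdep even with macvlan0 in between.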