netns: don't disable BHs when locking "nsid_lock"

When peernet2id() had to lock "nsid_lock" before iterating through the
nsid table, we had to disable BHs, because VXLAN can call peernet2id()
from the xmit path:
  vxlan_xmit() -> vxlan_fdb_miss() -> vxlan_fdb_notify()
    -> __vxlan_fdb_notify() -> vxlan_fdb_info() -> peernet2id().
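
As an illustrative sketch of the constraint this imposed (not code from
the tree): with a plain spin_lock(), a softirq running the path above on
the same CPU could try to re-acquire a lock that its interrupted
process-context owner already holds:

  /* Self-deadlock that spin_lock_bh() used to prevent:
   *
   *   process context (CPU0)          softirq (same CPU)
   *   ----------------------          ------------------
   *   spin_lock(&net->nsid_lock);
   *   ... BH preempts the owner ...   vxlan_xmit()
   *                                     -> ... -> peernet2id()
   *                                       -> spin_lock(&net->nsid_lock);
   *                                          (spins forever)
   */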

Now that peernet2id() uses RCU protection, "nsid_lock" isn't used in BH
context anymore. Therefore, we can safely use plain
spin_lock()/spin_unlock() and let BHs run when holding "nsid_lock".
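
For reference, after the RCU conversion in the parent commit, the reader
that motivated the _bh variant now looks roughly like this (a sketch, not
verbatim source):

  /* Lookup-only path: the idr is now walked under RCU, so no spinlock
   * (and hence no BH disabling) is needed on this path anymore.
   */
  int peernet2id(struct net *net, struct net *peer)
  {
  	int id;

  	rcu_read_lock();
  	id = __peernet2id(net, peer);
  	rcu_read_unlock();

  	return id;
  }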

Signed-off-by: Guillaume Nault <gnault@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -237,10 +237,10 @@ int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
 	if (refcount_read(&net->count) == 0)
 		return NETNSA_NSID_NOT_ASSIGNED;
 
-	spin_lock_bh(&net->nsid_lock);
+	spin_lock(&net->nsid_lock);
 	id = __peernet2id(net, peer);
 	if (id >= 0) {
-		spin_unlock_bh(&net->nsid_lock);
+		spin_unlock(&net->nsid_lock);
 		return id;
 	}
@@ -250,12 +250,12 @@ int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
 	 * just been idr_remove()'d from there in cleanup_net().
 	 */
 	if (!maybe_get_net(peer)) {
-		spin_unlock_bh(&net->nsid_lock);
+		spin_unlock(&net->nsid_lock);
 		return NETNSA_NSID_NOT_ASSIGNED;
 	}
 
 	id = alloc_netid(net, peer, -1);
-	spin_unlock_bh(&net->nsid_lock);
+	spin_unlock(&net->nsid_lock);
 	put_net(peer);
 	if (id < 0)
@@ -520,20 +520,20 @@ static void unhash_nsid(struct net *net, struct net *last)
 	for_each_net(tmp) {
 		int id;
 
-		spin_lock_bh(&tmp->nsid_lock);
+		spin_lock(&tmp->nsid_lock);
 		id = __peernet2id(tmp, net);
 		if (id >= 0)
 			idr_remove(&tmp->netns_ids, id);
-		spin_unlock_bh(&tmp->nsid_lock);
+		spin_unlock(&tmp->nsid_lock);
 		if (id >= 0)
 			rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL,
 					  GFP_KERNEL);
 		if (tmp == last)
 			break;
 	}
-	spin_lock_bh(&net->nsid_lock);
+	spin_lock(&net->nsid_lock);
 	idr_destroy(&net->netns_ids);
-	spin_unlock_bh(&net->nsid_lock);
+	spin_unlock(&net->nsid_lock);
 }
 
 static LLIST_HEAD(cleanup_list);
@@ -746,9 +746,9 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
 		return PTR_ERR(peer);
 	}
 
-	spin_lock_bh(&net->nsid_lock);
+	spin_lock(&net->nsid_lock);
 	if (__peernet2id(net, peer) >= 0) {
-		spin_unlock_bh(&net->nsid_lock);
+		spin_unlock(&net->nsid_lock);
 		err = -EEXIST;
 		NL_SET_BAD_ATTR(extack, nla);
 		NL_SET_ERR_MSG(extack,
@@ -757,7 +757,7 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
 	}
 
 	err = alloc_netid(net, peer, nsid);
-	spin_unlock_bh(&net->nsid_lock);
+	spin_unlock(&net->nsid_lock);
 	if (err >= 0) {
 		rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
 				  nlh, GFP_KERNEL);