net/mlx5e: Protect encap hash table with mutex
To remove the dependency on the rtnl lock, protect the encap hash table from concurrent modifications with a new "encap_tbl_lock" mutex. Use the mutex to protect internal encap entry state from concurrent modification as well. This is necessary because a flow can be attached to multiple encap entries simultaneously, which significantly complicates using a finer-grained per-entry lock.

Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Reviewed-by: Roi Dayan <roid@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent 948993f2be
commit 61086f3910
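The commit pairs each entry's reference count with the new table-wide encap_tbl_lock: the final reference is only dropped with the mutex held, so an entry is unhashed before any concurrent lookup can find it, and the actual teardown (mlx5e_encap_dealloc() in the diff below) runs outside the lock. The following is a minimal, hypothetical userspace sketch of that pattern, using a pthread mutex and C11 atomics in place of refcount_dec_and_mutex_lock() and the kernel hash table; the names (cache_entry, cache_lookup, cache_put, dec_and_lock) are illustrative and not mlx5 identifiers.

/* Hypothetical userspace sketch of the "refcount + table mutex" pattern
 * applied by this commit; not mlx5 code. Build: cc -c -pthread sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct cache_entry {
        atomic_int refcnt;
        int key;
        struct cache_entry *next;       /* single bucket stands in for the hash table */
};

static pthread_mutex_t tbl_lock = PTHREAD_MUTEX_INITIALIZER;
static struct cache_entry *tbl_head;

/* Rough analog of refcount_dec_and_mutex_lock(): returns true with the mutex
 * held only when the caller dropped the final reference. The final decrement
 * happens under the lock, so the count never reaches zero while the entry is
 * still findable in the table. */
static bool dec_and_lock(atomic_int *refcnt, pthread_mutex_t *lock)
{
        int old = atomic_load(refcnt);

        while (old > 1)                         /* fast path: not the last reference */
                if (atomic_compare_exchange_weak(refcnt, &old, old - 1))
                        return false;

        pthread_mutex_lock(lock);
        if (atomic_fetch_sub(refcnt, 1) != 1) { /* a lookup re-took a ref meanwhile */
                pthread_mutex_unlock(lock);
                return false;
        }
        return true;                            /* count is zero, lock is held */
}

/* Lookups take their reference under the same lock, so a successful lookup
 * cannot race with the final put unhashing the entry. */
static struct cache_entry *cache_lookup(int key)
{
        struct cache_entry *e;

        pthread_mutex_lock(&tbl_lock);
        for (e = tbl_head; e; e = e->next)
                if (e->key == key) {
                        atomic_fetch_add(&e->refcnt, 1);
                        break;
                }
        pthread_mutex_unlock(&tbl_lock);
        return e;
}

static void cache_put(struct cache_entry *e)
{
        struct cache_entry **pp;

        if (!dec_and_lock(&e->refcnt, &tbl_lock))
                return;

        /* Last reference: unlink while holding the table lock ... */
        for (pp = &tbl_head; *pp; pp = &(*pp)->next)
                if (*pp == e) {
                        *pp = e->next;
                        break;
                }
        pthread_mutex_unlock(&tbl_lock);

        /* ... then tear down outside the lock (mlx5e_encap_dealloc() analog). */
        free(e);
}

In the diff below, mlx5e_encap_put() follows exactly this shape: refcount_dec_and_mutex_lock() on encap_tbl_lock, hash_del_rcu() of the entry, mutex_unlock(), then mlx5e_encap_dealloc() outside the lock.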
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1478,33 +1478,51 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
         }
 }
 
-void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
+static void mlx5e_encap_dealloc(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
 {
-        if (!refcount_dec_and_test(&e->refcnt))
-                return;
-
         WARN_ON(!list_empty(&e->flows));
         mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
 
         if (e->flags & MLX5_ENCAP_ENTRY_VALID)
                 mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
 
-        hash_del_rcu(&e->encap_hlist);
         kfree(e->encap_header);
         kfree(e);
 }
 
+void mlx5e_encap_put(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e)
+{
+        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+        if (!refcount_dec_and_mutex_lock(&e->refcnt, &esw->offloads.encap_tbl_lock))
+                return;
+        hash_del_rcu(&e->encap_hlist);
+        mutex_unlock(&esw->offloads.encap_tbl_lock);
+
+        mlx5e_encap_dealloc(priv, e);
+}
+
 static void mlx5e_detach_encap(struct mlx5e_priv *priv,
                                struct mlx5e_tc_flow *flow, int out_index)
 {
+        struct mlx5e_encap_entry *e = flow->encaps[out_index].e;
+        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
         /* flow wasn't fully initialized */
-        if (!flow->encaps[out_index].e)
+        if (!e)
                 return;
 
+        mutex_lock(&esw->offloads.encap_tbl_lock);
         list_del(&flow->encaps[out_index].list);
-
-        mlx5e_encap_put(priv, flow->encaps[out_index].e);
         flow->encaps[out_index].e = NULL;
+        if (!refcount_dec_and_test(&e->refcnt)) {
+                mutex_unlock(&esw->offloads.encap_tbl_lock);
+                return;
+        }
+        hash_del_rcu(&e->encap_hlist);
+        mutex_unlock(&esw->offloads.encap_tbl_lock);
+
+        mlx5e_encap_dealloc(priv, e);
 }
 
 static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
@@ -2882,6 +2900,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
 
         hash_key = hash_encap_info(&key);
 
+        mutex_lock(&esw->offloads.encap_tbl_lock);
         e = mlx5e_encap_get(priv, &key, hash_key);
 
         /* must verify if encap is valid or not */
@@ -2889,8 +2908,10 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
                 goto attach_flow;
 
         e = kzalloc(sizeof(*e), GFP_KERNEL);
-        if (!e)
-                return -ENOMEM;
+        if (!e) {
+                err = -ENOMEM;
+                goto out_err;
+        }
 
         refcount_set(&e->refcnt, 1);
         e->tun_info = tun_info;
@@ -2922,10 +2943,12 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
         } else {
                 *encap_valid = false;
         }
+        mutex_unlock(&esw->offloads.encap_tbl_lock);
 
         return err;
 
 out_err:
+        mutex_unlock(&esw->offloads.encap_tbl_lock);
         kfree(e);
         return err;
 }
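The attach-side hunks above show the complementary half of the pattern: the same encap_tbl_lock covers the lookup, miss, allocate and insert window in mlx5e_attach_encap(), so two flows racing to attach the same tunnel cannot insert duplicate entries, and every exit path (including the new goto out_err allocation-failure path) drops the mutex exactly once. Below is a hypothetical, self-contained sketch of that lookup-or-create shape, restating the table definitions from the earlier sketch and using the same illustrative (non-mlx5) names.

/* Hypothetical sketch of lookup-or-create under a table mutex with a single
 * unlock point, mirroring the mutex_lock()/goto out_err shape of
 * mlx5e_attach_encap(); not mlx5 code. */
#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct cache_entry {
        atomic_int refcnt;
        int key;
        struct cache_entry *next;       /* single bucket stands in for the hash table */
};

static pthread_mutex_t tbl_lock = PTHREAD_MUTEX_INITIALIZER;
static struct cache_entry *tbl_head;

static struct cache_entry *cache_get_or_create(int key, int *err)
{
        struct cache_entry *e;

        *err = 0;
        pthread_mutex_lock(&tbl_lock);
        for (e = tbl_head; e; e = e->next)
                if (e->key == key) {
                        atomic_fetch_add(&e->refcnt, 1);   /* hit: just take a reference */
                        goto out;
                }

        e = calloc(1, sizeof(*e));                         /* miss: create under the lock */
        if (!e) {
                *err = -ENOMEM;
                goto out;                                  /* error path still unlocks once */
        }
        atomic_init(&e->refcnt, 1);
        e->key = key;
        e->next = tbl_head;                                /* insert before dropping the lock */
        tbl_head = e;
out:
        pthread_mutex_unlock(&tbl_lock);
        return e;
}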
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1999,6 +1999,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
         if (err)
                 goto abort;
 
+        mutex_init(&esw->offloads.encap_tbl_lock);
         hash_init(esw->offloads.encap_tbl);
         mutex_init(&esw->offloads.mod_hdr.lock);
         hash_init(esw->offloads.mod_hdr.hlist);
@@ -2039,6 +2040,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
         destroy_workqueue(esw->work_queue);
         esw_offloads_cleanup_reps(esw);
         mutex_destroy(&esw->offloads.mod_hdr.lock);
+        mutex_destroy(&esw->offloads.encap_tbl_lock);
         kfree(esw->vports);
         kfree(esw);
 }
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -181,6 +181,7 @@ struct mlx5_esw_offload {
         struct mlx5_eswitch_rep *vport_reps;
         struct list_head peer_flows;
         struct mutex peer_mutex;
+        struct mutex encap_tbl_lock; /* protects encap_tbl */
         DECLARE_HASHTABLE(encap_tbl, 8);
         struct mod_hdr_tbl mod_hdr;
         DECLARE_HASHTABLE(termtbl_tbl, 8);