mlx5-shared-4.16-1
mlx5 shared code for both rdma-next and net-next trees.

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJaRXPQAAoJEEg/ir3gV/o+H4wH/2CkV3tOLfRekNd4CFSoH78A
zH0Gjwa3P7aXybTmhXbMNCYLEoVEZ5pSlToOmjz1FrmxhH62JQ80WyKOcYtiHMBg
3x5tFZboLc9tMGwPhyBJBjyiH+Gh9ZMoD6hBFgSvIG/hNPUb1W48/Pc+R61gOrMw
6ADU+6mIf5cHNQ4c/V/SBlfiQjSXN4Y38knhTeZy8dLcZZVg1eMn+pj7W/haAyb6
t3IMEaUmlDYwQmtxTT2snK4VutEPfxYGv1gyKSkZXmY74aRvSzlgV7PqXM3qsV4W
8ZEhEHZJGi6NXC2hk5FQSSPWhQOhAmpjTHm8aImK0SIf68YajjzaZnT9S+eMmdY=
=uMjj
-----END PGP SIGNATURE-----

Merge tag 'mlx5-shared-4.16-1' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

Saeed Mahameed says:

====================
Mellanox, mlx5 E-Switch updates 2017-12-19

This series includes updates for the mlx5 E-Switch infrastructure, to be
merged into both the net-next and rdma-next trees.

Mark's patches provide E-Switch refactoring that generalizes the mlx5
E-Switch vf representor interfaces and data structures. The series is
mainly focused on moving ethernet (netdev) specific representor logic out
of the E-Switch (eswitch.c) into the mlx5e representor module (en_rep.c),
which provides better separation and allows future support for other
types of vf representors (e.g. RDMA).

Gal's patches at the end of this series provide a simple syntax fix and
two other patches that handle the vport ingress/egress ACL steering
namespaces, aligning them with the Firmware/Hardware specs.

V1->V2:
- Addressed coding style comments in patches #1 and #7
- The series is still based on rc4, as now I see net-next is also @rc4.

V2->V3:
- Fixed compilation warning, reported by Dave.

Please pull and let me know if there's any problem.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit d367341b25
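For orientation before the diff: a condensed, illustrative sketch (not code from this series) of the registration flow the refactor introduces. Each representor type fills a struct mlx5_eswitch_rep_if with its own load/unload callbacks and private data, then registers it per vport under a type index (REP_ETH here); the example_* names are hypothetical.

/* Hypothetical consumer of the generalized rep interface added below. */
static int example_rep_load(struct mlx5_core_dev *dev,
			    struct mlx5_eswitch_rep *rep)
{
	/* allocate per-representor state; the real ethernet code stores
	 * its mlx5e_rep_priv in rep->rep_if[REP_ETH].priv
	 */
	return 0;
}

static void example_rep_unload(struct mlx5_eswitch_rep *rep)
{
	/* undo whatever example_rep_load() set up */
}

static void example_register(struct mlx5_eswitch *esw, int vport)
{
	struct mlx5_eswitch_rep_if rep_if = {};

	rep_if.load = example_rep_load;
	rep_if.unload = example_rep_unload;
	mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH);
}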
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -190,6 +190,63 @@ int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
 	return 0;
 }
 
+static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
+				 struct mlx5_eswitch_rep *rep)
+{
+	struct mlx5e_rep_sq *rep_sq, *tmp;
+	struct mlx5e_rep_priv *rpriv;
+
+	if (esw->mode != SRIOV_OFFLOADS)
+		return;
+
+	rpriv = mlx5e_rep_to_rep_priv(rep);
+	list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
+		mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
+		list_del(&rep_sq->list);
+		kfree(rep_sq);
+	}
+}
+
+static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
+				 struct mlx5_eswitch_rep *rep,
+				 u16 *sqns_array, int sqns_num)
+{
+	struct mlx5_flow_handle *flow_rule;
+	struct mlx5e_rep_priv *rpriv;
+	struct mlx5e_rep_sq *rep_sq;
+	int err;
+	int i;
+
+	if (esw->mode != SRIOV_OFFLOADS)
+		return 0;
+
+	rpriv = mlx5e_rep_to_rep_priv(rep);
+	for (i = 0; i < sqns_num; i++) {
+		rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
+		if (!rep_sq) {
+			err = -ENOMEM;
+			goto out_err;
+		}
+
+		/* Add re-inject rule to the PF/representor sqs */
+		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
+								rep->vport,
+								sqns_array[i]);
+		if (IS_ERR(flow_rule)) {
+			err = PTR_ERR(flow_rule);
+			kfree(rep_sq);
+			goto out_err;
+		}
+		rep_sq->send_to_vport_rule = flow_rule;
+		list_add(&rep_sq->list, &rpriv->vport_sqs_list);
+	}
+	return 0;
+
+out_err:
+	mlx5e_sqs2vport_stop(esw, rep);
+	return err;
+}
+
 int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
 {
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
@@ -210,7 +267,7 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
 			sqs[num_sqs++] = c->sq[tc].sqn;
 	}
 
-	err = mlx5_eswitch_sqs2vport_start(esw, rep, sqs, num_sqs);
+	err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
 	kfree(sqs);
 
 out:
@@ -225,7 +282,7 @@ void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
 	struct mlx5_eswitch_rep *rep = rpriv->rep;
 
-	mlx5_eswitch_sqs2vport_stop(esw, rep);
+	mlx5e_sqs2vport_stop(esw, rep);
 }
 
 static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
@@ -238,7 +295,7 @@ static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
 #endif
 	unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms,
 						DELAY_PROBE_TIME);
-	struct net_device *netdev = rpriv->rep->netdev;
+	struct net_device *netdev = rpriv->netdev;
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 
 	rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
@@ -259,7 +316,7 @@ static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
 {
 	struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
 						    neigh_update.neigh_stats_work.work);
-	struct net_device *netdev = rpriv->rep->netdev;
+	struct net_device *netdev = rpriv->netdev;
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5e_neigh_hash_entry *nhe;
 
@@ -355,7 +412,7 @@ static int mlx5e_rep_netevent_event(struct notifier_block *nb,
 	struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
 						    neigh_update.netevent_nb);
 	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
-	struct net_device *netdev = rpriv->rep->netdev;
+	struct net_device *netdev = rpriv->netdev;
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5e_neigh_hash_entry *nhe = NULL;
 	struct mlx5e_neigh m_neigh = {};
@@ -483,7 +540,7 @@ static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
 static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
 {
 	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
-	struct mlx5e_priv *priv = netdev_priv(rpriv->rep->netdev);
+	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
 
 	unregister_netevent_notifier(&neigh_update->netevent_nb);
 
@@ -904,7 +961,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
 		err = PTR_ERR(flow_rule);
 		goto err_destroy_direct_tirs;
 	}
-	rep->vport_rx_rule = flow_rule;
+	rpriv->vport_rx_rule = flow_rule;
 
 	err = mlx5e_tc_init(priv);
 	if (err)
@@ -913,7 +970,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
 	return 0;
 
 err_del_flow_rule:
-	mlx5_del_flow_rules(rep->vport_rx_rule);
+	mlx5_del_flow_rules(rpriv->vport_rx_rule);
 err_destroy_direct_tirs:
 	mlx5e_destroy_direct_tirs(priv);
 err_destroy_direct_rqts:
@@ -924,10 +981,9 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
 static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
 {
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
-	struct mlx5_eswitch_rep *rep = rpriv->rep;
 
 	mlx5e_tc_cleanup(priv);
-	mlx5_del_flow_rules(rep->vport_rx_rule);
+	mlx5_del_flow_rules(rpriv->vport_rx_rule);
 	mlx5e_destroy_direct_tirs(priv);
 	mlx5e_destroy_direct_rqts(priv);
 }
@@ -967,10 +1023,10 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
 /* e-Switch vport representors */
 
 static int
-mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 {
-	struct mlx5e_priv *priv = netdev_priv(rep->netdev);
-	struct mlx5e_rep_priv *rpriv = priv->ppriv;
+	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+
 	int err;
@@ -992,10 +1048,10 @@ mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
 }
 
 static void
-mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_nic_rep_unload(struct mlx5_eswitch_rep *rep)
 {
-	struct mlx5e_priv *priv = netdev_priv(rep->netdev);
-	struct mlx5e_rep_priv *rpriv = priv->ppriv;
+	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
 
 	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
 		mlx5e_remove_sqs_fwd_rules(priv);
@@ -1008,8 +1064,9 @@ mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
 }
 
 static int
-mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
 {
+	struct mlx5e_rep_priv *uplink_rpriv;
 	struct mlx5e_rep_priv *rpriv;
 	struct net_device *netdev;
 	struct mlx5e_priv *upriv;
@@ -1019,7 +1076,7 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
 	if (!rpriv)
 		return -ENOMEM;
 
-	netdev = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rpriv);
+	netdev = mlx5e_create_netdev(dev, &mlx5e_rep_profile, rpriv);
 	if (!netdev) {
 		pr_warn("Failed to create representor netdev for vport %d\n",
 			rep->vport);
@@ -1027,8 +1084,10 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
 		return -EINVAL;
 	}
 
-	rep->netdev = netdev;
+	rpriv->netdev = netdev;
 	rpriv->rep = rep;
+	rep->rep_if[REP_ETH].priv = rpriv;
+	INIT_LIST_HEAD(&rpriv->vport_sqs_list);
 
 	err = mlx5e_attach_netdev(netdev_priv(netdev));
 	if (err) {
@@ -1044,7 +1103,8 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
 		goto err_detach_netdev;
 	}
 
-	upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(esw));
+	uplink_rpriv = mlx5_eswitch_get_uplink_priv(dev->priv.eswitch, REP_ETH);
+	upriv = netdev_priv(uplink_rpriv->netdev);
 	err = tc_setup_cb_egdev_register(netdev, mlx5e_setup_tc_block_cb,
 					 upriv);
 	if (err)
@@ -1076,16 +1136,19 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
 }
 
 static void
-mlx5e_vport_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
 {
-	struct net_device *netdev = rep->netdev;
+	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+	struct net_device *netdev = rpriv->netdev;
 	struct mlx5e_priv *priv = netdev_priv(netdev);
-	struct mlx5e_rep_priv *rpriv = priv->ppriv;
+	struct mlx5e_rep_priv *uplink_rpriv;
 	void *ppriv = priv->ppriv;
 	struct mlx5e_priv *upriv;
 
-	unregister_netdev(rep->netdev);
-	upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(esw));
+	unregister_netdev(netdev);
+	uplink_rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch,
						    REP_ETH);
+	upriv = netdev_priv(uplink_rpriv->netdev);
 	tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb,
 				     upriv);
 	mlx5e_rep_neigh_cleanup(rpriv);
@@ -1100,18 +1163,13 @@ static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv)
 	struct mlx5_eswitch *esw = mdev->priv.eswitch;
 	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
 	int vport;
-	u8 mac[ETH_ALEN];
-
-	mlx5_query_nic_vport_mac_address(mdev, 0, mac);
 
 	for (vport = 1; vport < total_vfs; vport++) {
-		struct mlx5_eswitch_rep rep;
+		struct mlx5_eswitch_rep_if rep_if = {};
 
-		rep.load = mlx5e_vport_rep_load;
-		rep.unload = mlx5e_vport_rep_unload;
-		rep.vport = vport;
-		ether_addr_copy(rep.hw_id, mac);
-		mlx5_eswitch_register_vport_rep(esw, vport, &rep);
+		rep_if.load = mlx5e_vport_rep_load;
+		rep_if.unload = mlx5e_vport_rep_unload;
+		mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH);
 	}
 }
 
@@ -1123,21 +1181,24 @@ static void mlx5e_rep_unregister_vf_vports(struct mlx5e_priv *priv)
 	int vport;
 
 	for (vport = 1; vport < total_vfs; vport++)
-		mlx5_eswitch_unregister_vport_rep(esw, vport);
+		mlx5_eswitch_unregister_vport_rep(esw, vport, REP_ETH);
 }
 
 void mlx5e_register_vport_reps(struct mlx5e_priv *priv)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
 	struct mlx5_eswitch *esw = mdev->priv.eswitch;
-	struct mlx5_eswitch_rep rep;
+	struct mlx5_eswitch_rep_if rep_if;
+	struct mlx5e_rep_priv *rpriv;
 
-	mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
-	rep.load = mlx5e_nic_rep_load;
-	rep.unload = mlx5e_nic_rep_unload;
-	rep.vport = FDB_UPLINK_VPORT;
-	rep.netdev = priv->netdev;
-	mlx5_eswitch_register_vport_rep(esw, 0, &rep); /* UPLINK PF vport*/
+	rpriv = priv->ppriv;
+	rpriv->netdev = priv->netdev;
+
+	rep_if.load = mlx5e_nic_rep_load;
+	rep_if.unload = mlx5e_nic_rep_unload;
+	rep_if.priv = rpriv;
+	INIT_LIST_HEAD(&rpriv->vport_sqs_list);
+	mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_ETH); /* UPLINK PF vport*/
 
 	mlx5e_rep_register_vf_vports(priv); /* VFs vports */
 }
@@ -1148,7 +1209,7 @@ void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv)
 	struct mlx5_eswitch *esw = mdev->priv.eswitch;
 
 	mlx5e_rep_unregister_vf_vports(priv); /* VFs vports */
-	mlx5_eswitch_unregister_vport_rep(esw, 0); /* UPLINK PF*/
+	mlx5_eswitch_unregister_vport_rep(esw, 0, REP_ETH); /* UPLINK PF*/
 }
 
 void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev)
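The recurring rpriv->netdev substitutions above all follow one accessor chain; here is a minimal sketch of it, assuming the mlx5e_rep_to_rep_priv() helper added in the en_rep.h hunk below (the rep_netdev() wrapper itself is illustrative only, not part of the patch):

static struct net_device *rep_netdev(struct mlx5_eswitch_rep *rep)
{
	/* rep -> rep_if[REP_ETH].priv -> mlx5e_rep_priv -> netdev;
	 * before this series the netdev lived directly in the rep
	 */
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);

	return rpriv->netdev;
}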
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -56,8 +56,17 @@ struct mlx5e_neigh_update_table {
 struct mlx5e_rep_priv {
 	struct mlx5_eswitch_rep *rep;
 	struct mlx5e_neigh_update_table neigh_update;
+	struct net_device	*netdev;
+	struct mlx5_flow_handle *vport_rx_rule;
+	struct list_head	vport_sqs_list;
 };
 
+static inline
+struct mlx5e_rep_priv *mlx5e_rep_to_rep_priv(struct mlx5_eswitch_rep *rep)
+{
+	return (struct mlx5e_rep_priv *)rep->rep_if[REP_ETH].priv;
+}
+
 struct mlx5e_neigh {
 	struct net_device *dev;
 	union {
@@ -124,6 +133,11 @@ struct mlx5e_encap_entry {
 	int encap_size;
 };
 
+struct mlx5e_rep_sq {
+	struct mlx5_flow_handle	*send_to_vport_rule;
+	struct list_head	list;
+};
+
 void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev);
 void mlx5e_register_vport_reps(struct mlx5e_priv *priv);
 void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv);
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -617,7 +617,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
 						  FLOW_DISSECTOR_KEY_ENC_PORTS,
 						  f->mask);
 		struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-		struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
+		struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+		struct net_device *up_dev = uplink_rpriv->netdev;
 		struct mlx5e_priv *up_priv = netdev_priv(up_dev);
 
 		/* Full udp dst port must be given */
@@ -1507,6 +1508,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 				   int *out_ttl)
 {
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+	struct mlx5e_rep_priv *uplink_rpriv;
 	struct rtable *rt;
 	struct neighbour *n = NULL;
 
@@ -1520,9 +1522,10 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
 #else
 	return -EOPNOTSUPP;
 #endif
+	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
 	/* if the egress device isn't on the same HW e-switch, we use the uplink */
 	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
-		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+		*out_dev = uplink_rpriv->netdev;
 	else
 		*out_dev = rt->dst.dev;
 
@@ -1543,6 +1546,7 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
 				   struct neighbour **out_n,
 				   int *out_ttl)
 {
+	struct mlx5e_rep_priv *uplink_rpriv;
 	struct neighbour *n = NULL;
 	struct dst_entry *dst;
 
@@ -1557,9 +1561,10 @@ static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
 
 	*out_ttl = ip6_dst_hoplimit(dst);
 
+	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
 	/* if the egress device isn't on the same HW e-switch, we use the uplink */
 	if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
-		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+		*out_dev = uplink_rpriv->netdev;
 	else
 		*out_dev = dst->dev;
 #else
@@ -1859,7 +1864,9 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
 			      struct mlx5e_tc_flow *flow)
 {
 	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-	struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
+	struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw,
+									   REP_ETH);
+	struct net_device *up_dev = uplink_rpriv->netdev;
 	unsigned short family = ip_tunnel_info_af(tun_info);
 	struct mlx5e_priv *up_priv = netdev_priv(up_dev);
 	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -867,9 +867,10 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
 	esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
 		  vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
 
-	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
+	root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+						    vport->vport);
 	if (!root_ns) {
-		esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
+		esw_warn(dev, "Failed to get E-Switch egress flow namespace for vport (%d)\n", vport->vport);
 		return -EOPNOTSUPP;
 	}
 
@@ -984,9 +985,10 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
 	esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
 		  vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
 
-	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
+	root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+						    vport->vport);
 	if (!root_ns) {
-		esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
+		esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport);
 		return -EOPNOTSUPP;
 	}
 
@@ -1290,7 +1292,7 @@ static int esw_create_tsar(struct mlx5_eswitch *esw)
 
 	err = mlx5_create_scheduling_element_cmd(dev,
 						 SCHEDULING_HIERARCHY_E_SWITCH,
-						 &tsar_ctx,
+						 tsar_ctx,
 						 &esw->qos.root_tsar_id);
 	if (err) {
 		esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
@@ -1333,20 +1335,20 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
 	if (vport->qos.enabled)
 		return -EEXIST;
 
-	MLX5_SET(scheduling_context, &sched_ctx, element_type,
+	MLX5_SET(scheduling_context, sched_ctx, element_type,
 		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
-	vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx,
+	vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
 				  element_attributes);
 	MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
-	MLX5_SET(scheduling_context, &sched_ctx, parent_element_id,
+	MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
 		 esw->qos.root_tsar_id);
-	MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
+	MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
 		 initial_max_rate);
-	MLX5_SET(scheduling_context, &sched_ctx, bw_share, initial_bw_share);
+	MLX5_SET(scheduling_context, sched_ctx, bw_share, initial_bw_share);
 
 	err = mlx5_create_scheduling_element_cmd(dev,
 						 SCHEDULING_HIERARCHY_E_SWITCH,
-						 &sched_ctx,
+						 sched_ctx,
 						 &vport->qos.esw_tsar_ix);
 	if (err) {
 		esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
@@ -1392,22 +1394,22 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
 	if (!vport->qos.enabled)
 		return -EIO;
 
-	MLX5_SET(scheduling_context, &sched_ctx, element_type,
+	MLX5_SET(scheduling_context, sched_ctx, element_type,
 		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
-	vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx,
+	vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
 				  element_attributes);
 	MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
-	MLX5_SET(scheduling_context, &sched_ctx, parent_element_id,
+	MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
 		 esw->qos.root_tsar_id);
-	MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
+	MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
 		 max_rate);
-	MLX5_SET(scheduling_context, &sched_ctx, bw_share, bw_share);
+	MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
 	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
 	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;
 
 	err = mlx5_modify_scheduling_element_cmd(dev,
 						 SCHEDULING_HIERARCHY_E_SWITCH,
-						 &sched_ctx,
+						 sched_ctx,
 						 vport->qos.esw_tsar_ix,
 						 bitmask);
 	if (err) {
@@ -1644,13 +1646,9 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 		goto abort;
 	}
 
-	esw->offloads.vport_reps =
-		kzalloc(total_vports * sizeof(struct mlx5_eswitch_rep),
-			GFP_KERNEL);
-	if (!esw->offloads.vport_reps) {
-		err = -ENOMEM;
+	err = esw_offloads_init_reps(esw);
+	if (err)
 		goto abort;
-	}
 
 	hash_init(esw->offloads.encap_tbl);
 	hash_init(esw->offloads.mod_hdr_tbl);
@@ -1681,8 +1679,8 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 abort:
 	if (esw->work_queue)
 		destroy_workqueue(esw->work_queue);
+	esw_offloads_cleanup_reps(esw);
 	kfree(esw->vports);
-	kfree(esw->offloads.vport_reps);
 	kfree(esw);
 	return err;
 }
@@ -1696,7 +1694,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
 
 	esw->dev->priv.eswitch = NULL;
 	destroy_workqueue(esw->work_queue);
-	kfree(esw->offloads.vport_reps);
+	esw_offloads_cleanup_reps(esw);
 	kfree(esw->vports);
 	kfree(esw);
 }
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -45,6 +45,11 @@ enum {
 	SRIOV_OFFLOADS
 };
 
+enum {
+	REP_ETH,
+	NUM_REP_TYPES,
+};
+
 #ifdef CONFIG_MLX5_ESWITCH
 
 #define MLX5_MAX_UC_PER_VPORT(dev) \
@@ -133,25 +138,21 @@ struct mlx5_eswitch_fdb {
 	};
 };
 
-struct mlx5_esw_sq {
-	struct mlx5_flow_handle	*send_to_vport_rule;
-	struct list_head	list;
+struct mlx5_eswitch_rep;
+struct mlx5_eswitch_rep_if {
+	int		       (*load)(struct mlx5_core_dev *dev,
+				       struct mlx5_eswitch_rep *rep);
+	void		       (*unload)(struct mlx5_eswitch_rep *rep);
+	void		       *priv;
+	bool		       valid;
 };
 
 struct mlx5_eswitch_rep {
-	int		       (*load)(struct mlx5_eswitch *esw,
-				       struct mlx5_eswitch_rep *rep);
-	void		       (*unload)(struct mlx5_eswitch *esw,
-					 struct mlx5_eswitch_rep *rep);
+	struct mlx5_eswitch_rep_if rep_if[NUM_REP_TYPES];
 	u16		       vport;
 	u8		       hw_id[ETH_ALEN];
-	struct net_device      *netdev;
-
-	struct mlx5_flow_handle *vport_rx_rule;
-	struct list_head       vport_sqs_list;
 	u16		       vlan;
 	u32		       vlan_refcount;
-	bool		       valid;
 };
 
 struct mlx5_esw_offload {
@@ -197,6 +198,8 @@ struct mlx5_eswitch {
 
 void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
 int esw_offloads_init(struct mlx5_eswitch *esw, int nvports);
+void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
+int esw_offloads_init_reps(struct mlx5_eswitch *esw);
 
 /* E-Switch API */
 int mlx5_eswitch_init(struct mlx5_core_dev *dev);
@@ -221,6 +224,10 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
 				 int vport,
 				 struct ifla_vf_stats *vf_stats);
+struct mlx5_flow_handle *
+mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport,
+				    u32 sqn);
+void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);
 
 struct mlx5_flow_spec;
 struct mlx5_esw_flow_attr;
@@ -257,12 +264,6 @@ struct mlx5_esw_flow_attr {
 	struct mlx5e_tc_flow_parse_attr *parse_attr;
 };
 
-int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
-				 struct mlx5_eswitch_rep *rep,
-				 u16 *sqns_array, int sqns_num);
-void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
-				 struct mlx5_eswitch_rep *rep);
-
 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode);
 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
 int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode);
@@ -272,10 +273,12 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap);
 int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap);
 void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
 				     int vport_index,
-				     struct mlx5_eswitch_rep *rep);
+				     struct mlx5_eswitch_rep_if *rep_if,
+				     u8 rep_type);
 void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
-				       int vport_index);
-struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw);
+				       int vport_index,
+				       u8 rep_type);
+void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
 
 int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
 				 struct mlx5_esw_flow_attr *attr);
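The rep_if[] array above is what leaves room for the non-ethernet representors the cover letter mentions; a hypothetical sketch of how a future RDMA type could slot in (REP_IB is an assumed name, not part of this patchset):

enum {
	REP_ETH,
	REP_IB,		/* assumed future rep type, shown for illustration */
	NUM_REP_TYPES,
};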
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -130,7 +130,7 @@ static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
 	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
 	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
 		rep = &esw->offloads.vport_reps[vf_vport];
-		if (!rep->valid)
+		if (!rep->rep_if[REP_ETH].valid)
 			continue;
 
 		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
@@ -302,7 +302,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
 	return err;
 }
 
-static struct mlx5_flow_handle *
+struct mlx5_flow_handle *
 mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
 {
 	struct mlx5_flow_act flow_act = {0};
@@ -339,57 +339,9 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn
 	return flow_rule;
 }
 
-void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
-				 struct mlx5_eswitch_rep *rep)
+void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
 {
-	struct mlx5_esw_sq *esw_sq, *tmp;
-
-	if (esw->mode != SRIOV_OFFLOADS)
-		return;
-
-	list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
-		mlx5_del_flow_rules(esw_sq->send_to_vport_rule);
-		list_del(&esw_sq->list);
-		kfree(esw_sq);
-	}
-}
-
-int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
-				 struct mlx5_eswitch_rep *rep,
-				 u16 *sqns_array, int sqns_num)
-{
-	struct mlx5_flow_handle *flow_rule;
-	struct mlx5_esw_sq *esw_sq;
-	int err;
-	int i;
-
-	if (esw->mode != SRIOV_OFFLOADS)
-		return 0;
-
-	for (i = 0; i < sqns_num; i++) {
-		esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
-		if (!esw_sq) {
-			err = -ENOMEM;
-			goto out_err;
-		}
-
-		/* Add re-inject rule to the PF/representor sqs */
-		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
-								rep->vport,
-								sqns_array[i]);
-		if (IS_ERR(flow_rule)) {
-			err = PTR_ERR(flow_rule);
-			kfree(esw_sq);
-			goto out_err;
-		}
-		esw_sq->send_to_vport_rule = flow_rule;
-		list_add(&esw_sq->list, &rep->vport_sqs_list);
-	}
-	return 0;
-
-out_err:
-	mlx5_eswitch_sqs2vport_stop(esw, rep);
-	return err;
+	mlx5_del_flow_rules(rule);
 }
 
 static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
@@ -732,10 +684,109 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
 	return err;
 }
 
-int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
+void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
+{
+	kfree(esw->offloads.vport_reps);
+}
+
+int esw_offloads_init_reps(struct mlx5_eswitch *esw)
+{
+	int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
+	struct mlx5_core_dev *dev = esw->dev;
+	struct mlx5_esw_offload *offloads;
+	struct mlx5_eswitch_rep *rep;
+	u8 hw_id[ETH_ALEN];
+	int vport;
+
+	esw->offloads.vport_reps = kcalloc(total_vfs,
+					   sizeof(struct mlx5_eswitch_rep),
+					   GFP_KERNEL);
+	if (!esw->offloads.vport_reps)
+		return -ENOMEM;
+
+	offloads = &esw->offloads;
+	mlx5_query_nic_vport_mac_address(dev, 0, hw_id);
+
+	for (vport = 0; vport < total_vfs; vport++) {
+		rep = &offloads->vport_reps[vport];
+
+		rep->vport = vport;
+		ether_addr_copy(rep->hw_id, hw_id);
+	}
+
+	offloads->vport_reps[0].vport = FDB_UPLINK_VPORT;
+
+	return 0;
+}
+
+static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports,
+					  u8 rep_type)
+{
+	struct mlx5_eswitch_rep *rep;
+	int vport;
+
+	for (vport = nvports - 1; vport >= 0; vport--) {
+		rep = &esw->offloads.vport_reps[vport];
+		if (!rep->rep_if[rep_type].valid)
+			continue;
+
+		rep->rep_if[rep_type].unload(rep);
+	}
+}
+
+static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports)
+{
+	u8 rep_type = NUM_REP_TYPES;
+
+	while (rep_type-- > 0)
+		esw_offloads_unload_reps_type(esw, nvports, rep_type);
+}
+
+static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
+				       u8 rep_type)
+{
+	struct mlx5_eswitch_rep *rep;
+	int vport;
+	int err;
+
+	for (vport = 0; vport < nvports; vport++) {
+		rep = &esw->offloads.vport_reps[vport];
+		if (!rep->rep_if[rep_type].valid)
+			continue;
+
+		err = rep->rep_if[rep_type].load(esw->dev, rep);
+		if (err)
+			goto err_reps;
+	}
+
+	return 0;
+
+err_reps:
+	esw_offloads_unload_reps_type(esw, vport, rep_type);
+	return err;
+}
+
+static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
+{
+	u8 rep_type = 0;
+	int err;
+
+	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
+		err = esw_offloads_load_reps_type(esw, nvports, rep_type);
+		if (err)
+			goto err_reps;
+	}
+
+	return err;
+
+err_reps:
+	while (rep_type-- > 0)
+		esw_offloads_unload_reps_type(esw, nvports, rep_type);
+	return err;
+}
+
+int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
 {
 	int err;
 
 	/* disable PF RoCE so missed packets don't go through RoCE steering */
@@ -755,25 +806,13 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
 	if (err)
 		goto create_fg_err;
 
-	for (vport = 0; vport < nvports; vport++) {
-		rep = &esw->offloads.vport_reps[vport];
-		if (!rep->valid)
-			continue;
-
-		err = rep->load(esw, rep);
-		if (err)
-			goto err_reps;
-	}
+	err = esw_offloads_load_reps(esw, nvports);
+	if (err)
+		goto err_reps;
 
 	return 0;
 
 err_reps:
-	for (vport--; vport >= 0; vport--) {
-		rep = &esw->offloads.vport_reps[vport];
-		if (!rep->valid)
-			continue;
-		rep->unload(esw, rep);
-	}
 	esw_destroy_vport_rx_group(esw);
 
 create_fg_err:
@@ -814,16 +853,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
 
 void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
 {
-	struct mlx5_eswitch_rep *rep;
-	int vport;
-
-	for (vport = nvports - 1; vport >= 0; vport--) {
-		rep = &esw->offloads.vport_reps[vport];
-		if (!rep->valid)
-			continue;
-		rep->unload(esw, rep);
-	}
-
+	esw_offloads_unload_reps(esw, nvports);
 	esw_destroy_vport_rx_group(esw);
 	esw_destroy_offloads_table(esw);
 	esw_destroy_offloads_fdb_tables(esw);
@@ -1120,27 +1150,23 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
 
 void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
 				     int vport_index,
-				     struct mlx5_eswitch_rep *__rep)
+				     struct mlx5_eswitch_rep_if *__rep_if,
+				     u8 rep_type)
 {
 	struct mlx5_esw_offload *offloads = &esw->offloads;
-	struct mlx5_eswitch_rep *rep;
+	struct mlx5_eswitch_rep_if *rep_if;
 
-	rep = &offloads->vport_reps[vport_index];
+	rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type];
 
-	memset(rep, 0, sizeof(*rep));
+	rep_if->load = __rep_if->load;
+	rep_if->unload = __rep_if->unload;
+	rep_if->priv = __rep_if->priv;
 
-	rep->load   = __rep->load;
-	rep->unload = __rep->unload;
-	rep->vport  = __rep->vport;
-	rep->netdev = __rep->netdev;
-	ether_addr_copy(rep->hw_id, __rep->hw_id);
-
-	INIT_LIST_HEAD(&rep->vport_sqs_list);
-	rep->valid = true;
+	rep_if->valid = true;
 }
 
 void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
-				       int vport_index)
+				       int vport_index, u8 rep_type)
 {
 	struct mlx5_esw_offload *offloads = &esw->offloads;
 	struct mlx5_eswitch_rep *rep;
@@ -1148,17 +1174,17 @@ void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
 	rep = &offloads->vport_reps[vport_index];
 
 	if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
-		rep->unload(esw, rep);
+		rep->rep_if[rep_type].unload(rep);
 
-	rep->valid = false;
+	rep->rep_if[rep_type].valid = false;
 }
 
-struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw)
+void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
 {
 #define UPLINK_REP_INDEX 0
 	struct mlx5_esw_offload *offloads = &esw->offloads;
 	struct mlx5_eswitch_rep *rep;
 
 	rep = &offloads->vport_reps[UPLINK_REP_INDEX];
-	return rep->netdev;
+	return rep->rep_if[rep_type].priv;
 }
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -2026,16 +2026,6 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
 			return &steering->fdb_root_ns->ns;
 		else
 			return NULL;
-	case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
-		if (steering->esw_egress_root_ns)
-			return &steering->esw_egress_root_ns->ns;
-		else
-			return NULL;
-	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
-		if (steering->esw_ingress_root_ns)
-			return &steering->esw_ingress_root_ns->ns;
-		else
-			return NULL;
 	case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
 		if (steering->sniffer_rx_root_ns)
 			return &steering->sniffer_rx_root_ns->ns;
@@ -2066,6 +2056,33 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
 }
 EXPORT_SYMBOL(mlx5_get_flow_namespace);
 
+struct mlx5_flow_namespace *mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
+							      enum mlx5_flow_namespace_type type,
+							      int vport)
+{
+	struct mlx5_flow_steering *steering = dev->priv.steering;
+
+	if (!steering || vport >= MLX5_TOTAL_VPORTS(dev))
+		return NULL;
+
+	switch (type) {
+	case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
+		if (steering->esw_egress_root_ns &&
+		    steering->esw_egress_root_ns[vport])
+			return &steering->esw_egress_root_ns[vport]->ns;
+		else
+			return NULL;
+	case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
+		if (steering->esw_ingress_root_ns &&
+		    steering->esw_ingress_root_ns[vport])
+			return &steering->esw_ingress_root_ns[vport]->ns;
+		else
+			return NULL;
+	default:
+		return NULL;
+	}
+}
+
 static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
 				      unsigned int prio, int num_levels)
 {
@@ -2343,13 +2360,41 @@ static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
 	clean_tree(&root_ns->ns.node);
 }
 
+static void cleanup_egress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+	struct mlx5_flow_steering *steering = dev->priv.steering;
+	int i;
+
+	if (!steering->esw_egress_root_ns)
+		return;
+
+	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
+		cleanup_root_ns(steering->esw_egress_root_ns[i]);
+
+	kfree(steering->esw_egress_root_ns);
+}
+
+static void cleanup_ingress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+	struct mlx5_flow_steering *steering = dev->priv.steering;
+	int i;
+
+	if (!steering->esw_ingress_root_ns)
+		return;
+
+	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++)
+		cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+
+	kfree(steering->esw_ingress_root_ns);
+}
+
 void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
 {
 	struct mlx5_flow_steering *steering = dev->priv.steering;
 
 	cleanup_root_ns(steering->root_ns);
-	cleanup_root_ns(steering->esw_egress_root_ns);
-	cleanup_root_ns(steering->esw_ingress_root_ns);
+	cleanup_egress_acls_root_ns(dev);
+	cleanup_ingress_acls_root_ns(dev);
 	cleanup_root_ns(steering->fdb_root_ns);
 	cleanup_root_ns(steering->sniffer_rx_root_ns);
 	cleanup_root_ns(steering->sniffer_tx_root_ns);
@@ -2418,34 +2463,86 @@ static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
 	return PTR_ERR(prio);
 }
 
-static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering)
+static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
 {
 	struct fs_prio *prio;
 
-	steering->esw_egress_root_ns = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
-	if (!steering->esw_egress_root_ns)
+	steering->esw_egress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
+	if (!steering->esw_egress_root_ns[vport])
 		return -ENOMEM;
 
 	/* create 1 prio*/
-	prio = fs_create_prio(&steering->esw_egress_root_ns->ns, 0,
-			      MLX5_TOTAL_VPORTS(steering->dev));
+	prio = fs_create_prio(&steering->esw_egress_root_ns[vport]->ns, 0, 1);
 	return PTR_ERR_OR_ZERO(prio);
 }
 
-static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering)
+static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering, int vport)
 {
 	struct fs_prio *prio;
 
-	steering->esw_ingress_root_ns = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
-	if (!steering->esw_ingress_root_ns)
+	steering->esw_ingress_root_ns[vport] = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
+	if (!steering->esw_ingress_root_ns[vport])
 		return -ENOMEM;
 
 	/* create 1 prio*/
-	prio = fs_create_prio(&steering->esw_ingress_root_ns->ns, 0,
-			      MLX5_TOTAL_VPORTS(steering->dev));
+	prio = fs_create_prio(&steering->esw_ingress_root_ns[vport]->ns, 0, 1);
 	return PTR_ERR_OR_ZERO(prio);
 }
 
+static int init_egress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+	struct mlx5_flow_steering *steering = dev->priv.steering;
+	int err;
+	int i;
+
+	steering->esw_egress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
+					       sizeof(*steering->esw_egress_root_ns),
+					       GFP_KERNEL);
+	if (!steering->esw_egress_root_ns)
+		return -ENOMEM;
+
+	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
+		err = init_egress_acl_root_ns(steering, i);
+		if (err)
+			goto cleanup_root_ns;
+	}
+
+	return 0;
+
+cleanup_root_ns:
+	for (i--; i >= 0; i--)
+		cleanup_root_ns(steering->esw_egress_root_ns[i]);
+	kfree(steering->esw_egress_root_ns);
+	return err;
+}
+
+static int init_ingress_acls_root_ns(struct mlx5_core_dev *dev)
+{
+	struct mlx5_flow_steering *steering = dev->priv.steering;
+	int err;
+	int i;
+
+	steering->esw_ingress_root_ns = kcalloc(MLX5_TOTAL_VPORTS(dev),
+						sizeof(*steering->esw_ingress_root_ns),
+						GFP_KERNEL);
+	if (!steering->esw_ingress_root_ns)
+		return -ENOMEM;
+
+	for (i = 0; i < MLX5_TOTAL_VPORTS(dev); i++) {
+		err = init_ingress_acl_root_ns(steering, i);
+		if (err)
+			goto cleanup_root_ns;
+	}
+
+	return 0;
+
+cleanup_root_ns:
+	for (i--; i >= 0; i--)
+		cleanup_root_ns(steering->esw_ingress_root_ns[i]);
+	kfree(steering->esw_ingress_root_ns);
+	return err;
+}
+
 int mlx5_init_fs(struct mlx5_core_dev *dev)
 {
 	struct mlx5_flow_steering *steering;
@@ -2488,12 +2585,12 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
 		goto err;
 	}
 	if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
-		err = init_egress_acl_root_ns(steering);
+		err = init_egress_acls_root_ns(dev);
 		if (err)
			goto err;
 	}
 	if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
-		err = init_ingress_acl_root_ns(steering);
+		err = init_ingress_acls_root_ns(dev);
 		if (err)
 			goto err;
 	}
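Consumers now resolve ACL steering namespaces per vport rather than once globally; a minimal sketch of the lookup pattern, mirroring esw_vport_enable_egress_acl() in the eswitch.c hunk above (get_vport_egress_ns() is an illustrative wrapper, not part of the patch):

static int get_vport_egress_ns(struct mlx5_core_dev *dev, int vport)
{
	struct mlx5_flow_namespace *root_ns;

	root_ns = mlx5_get_flow_vport_acl_namespace(dev,
						    MLX5_FLOW_NAMESPACE_ESW_EGRESS,
						    vport);
	if (!root_ns)	/* FW/HW lacks per-vport ACL table support */
		return -EOPNOTSUPP;

	/* ... create this vport's ACL flow tables under root_ns ... */
	return 0;
}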
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -71,8 +71,8 @@ struct mlx5_flow_steering {
 	struct kmem_cache		*ftes_cache;
 	struct mlx5_flow_root_namespace *root_ns;
 	struct mlx5_flow_root_namespace *fdb_root_ns;
-	struct mlx5_flow_root_namespace *esw_egress_root_ns;
-	struct mlx5_flow_root_namespace *esw_ingress_root_ns;
+	struct mlx5_flow_root_namespace **esw_egress_root_ns;
+	struct mlx5_flow_root_namespace **esw_ingress_root_ns;
 	struct mlx5_flow_root_namespace	*sniffer_tx_root_ns;
 	struct mlx5_flow_root_namespace	*sniffer_rx_root_ns;
 };
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -95,6 +95,10 @@ struct mlx5_flow_destination {
 struct mlx5_flow_namespace *
 mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
 			enum mlx5_flow_namespace_type type);
+struct mlx5_flow_namespace *
+mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
+				  enum mlx5_flow_namespace_type type,
+				  int vport);
 
 struct mlx5_flow_table *
 mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,