Merge tag 'mlx5-updates-2020-08-03' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2020-08-03

This patchset introduces some updates to the mlx5 driver.

1) Jakub converts mlx5 to the new udp tunnel infrastructure, starting
   with a hack that lets drivers request a static configuration of the
   default vxlan port, followed by a patch that converts mlx5 itself.

2) Parav implements the change_carrier ndo for VF eswitch
   representors, to speed up link state control of representor
   netdevices.

3) Alex Vesker makes a simple update to software steering to fix an
   issue with the push vlan action sequence.

4) Leon removes a redundant dump stack on an error flow.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 76769c38b4
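As a quick orientation before the diff: the new infrastructure works by the driver publishing a struct udp_tunnel_nic_info that describes its UDP port offload tables, and letting the core invoke set_port/unset_port callbacks as tunnels come and go. The sketch below is illustrative only and is not part of this commit; the example_* names and the table size are made up, while the structure fields, the flags and the udp_tunnel_nic_add_port/udp_tunnel_nic_del_port helpers are the real API exercised by the patches that follow.

/* Illustrative sketch only (not from this commit): roughly how a driver
 * plugs into udp_tunnel_nic. All example_* identifiers and the table size
 * are hypothetical.
 */
#include <net/udp_tunnel.h>

static int example_set_port(struct net_device *dev, unsigned int table,
                            unsigned int entry, struct udp_tunnel_info *ti)
{
        /* program ntohs(ti->port) into the device offload table 'table' */
        return 0;
}

static int example_unset_port(struct net_device *dev, unsigned int table,
                              unsigned int entry, struct udp_tunnel_info *ti)
{
        /* remove ntohs(ti->port) from the device offload table */
        return 0;
}

static const struct udp_tunnel_nic_info example_udp_tunnels = {
        .set_port       = example_set_port,
        .unset_port     = example_unset_port,
        /* callbacks may sleep; port 4789 is hard-wired to VXLAN in HW */
        .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
                          UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN,
        .tables         = {
                { .n_entries = 3, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
        },
};

/* At netdev setup time the driver then sets
 *      netdev->udp_tunnel_nic_info = &example_udp_tunnels;
 * and points .ndo_udp_tunnel_add/.ndo_udp_tunnel_del at
 * udp_tunnel_nic_add_port/udp_tunnel_nic_del_port.
 */

The UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN flag is the "hack" mentioned above: it tells the core that port 4789 is hard-coded in hardware, which is why the mlx5 patch below registers max_vxlan_udp_ports - 1 entries and never expects a callback for the IANA port.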
--- a/Documentation/networking/ethtool-netlink.rst
+++ b/Documentation/networking/ethtool-netlink.rst
@@ -1263,6 +1263,9 @@ Kernel response contents:
   | | | | ``ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE``  | u32    | tunnel type         |
   +-+-+-+---------------------------------------+--------+---------------------+

+For UDP tunnel table empty ``ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES`` indicates that
+the table contains static entries, hard-coded by the NIC.
+
 Request translation
 ===================

--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -45,6 +45,7 @@
 #include <linux/mlx5/transobj.h>
 #include <linux/mlx5/fs.h>
 #include <linux/rhashtable.h>
+#include <net/udp_tunnel.h>
 #include <net/switchdev.h>
 #include <net/xdp.h>
 #include <linux/dim.h>
@@ -792,6 +793,7 @@ struct mlx5e_priv {
         u16                        drop_rq_q_counter;
         struct notifier_block      events_nb;

+        struct udp_tunnel_nic_info nic_info;
 #ifdef CONFIG_MLX5_CORE_EN_DCB
         struct mlx5e_dcbx          dcbx;
 #endif
@@ -1012,6 +1014,7 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv);
 int mlx5e_set_dev_port_mtu_ctx(struct mlx5e_priv *priv, void *context);
 int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
                      mlx5e_fp_preactivate preactivate);
+void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv);

 /* ethtool helpers */
 void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
@@ -1080,8 +1083,6 @@ void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
 void mlx5e_rx_dim_work(struct work_struct *work);
 void mlx5e_tx_dim_work(struct work_struct *work);

-void mlx5e_add_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti);
-void mlx5e_del_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti);
 netdev_features_t mlx5e_features_check(struct sk_buff *skb,
                                        struct net_device *netdev,
                                        netdev_features_t features);
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -4191,83 +4191,6 @@ int mlx5e_get_vf_stats(struct net_device *dev,
 }
 #endif

-struct mlx5e_vxlan_work {
-        struct work_struct work;
-        struct mlx5e_priv  *priv;
-        u16                port;
-};
-
-static void mlx5e_vxlan_add_work(struct work_struct *work)
-{
-        struct mlx5e_vxlan_work *vxlan_work =
-                container_of(work, struct mlx5e_vxlan_work, work);
-        struct mlx5e_priv *priv = vxlan_work->priv;
-        u16 port = vxlan_work->port;
-
-        mutex_lock(&priv->state_lock);
-        mlx5_vxlan_add_port(priv->mdev->vxlan, port);
-        mutex_unlock(&priv->state_lock);
-
-        kfree(vxlan_work);
-}
-
-static void mlx5e_vxlan_del_work(struct work_struct *work)
-{
-        struct mlx5e_vxlan_work *vxlan_work =
-                container_of(work, struct mlx5e_vxlan_work, work);
-        struct mlx5e_priv *priv = vxlan_work->priv;
-        u16 port = vxlan_work->port;
-
-        mutex_lock(&priv->state_lock);
-        mlx5_vxlan_del_port(priv->mdev->vxlan, port);
-        mutex_unlock(&priv->state_lock);
-        kfree(vxlan_work);
-}
-
-static void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, u16 port, int add)
-{
-        struct mlx5e_vxlan_work *vxlan_work;
-
-        vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC);
-        if (!vxlan_work)
-                return;
-
-        if (add)
-                INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_work);
-        else
-                INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_work);
-
-        vxlan_work->priv = priv;
-        vxlan_work->port = port;
-        queue_work(priv->wq, &vxlan_work->work);
-}
-
-void mlx5e_add_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti)
-{
-        struct mlx5e_priv *priv = netdev_priv(netdev);
-
-        if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
-                return;
-
-        if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
-                return;
-
-        mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 1);
-}
-
-void mlx5e_del_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti)
-{
-        struct mlx5e_priv *priv = netdev_priv(netdev);
-
-        if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
-                return;
-
-        if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
-                return;
-
-        mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 0);
-}
-
 static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
                                                      struct sk_buff *skb,
                                                      netdev_features_t features)
@@ -4597,8 +4520,8 @@ const struct net_device_ops mlx5e_netdev_ops = {
         .ndo_change_mtu          = mlx5e_change_nic_mtu,
         .ndo_do_ioctl            = mlx5e_ioctl,
         .ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
-        .ndo_udp_tunnel_add      = mlx5e_add_vxlan_port,
-        .ndo_udp_tunnel_del      = mlx5e_del_vxlan_port,
+        .ndo_udp_tunnel_add      = udp_tunnel_nic_add_port,
+        .ndo_udp_tunnel_del      = udp_tunnel_nic_del_port,
         .ndo_features_check      = mlx5e_features_check,
         .ndo_tx_timeout          = mlx5e_tx_timeout,
         .ndo_bpf                 = mlx5e_xdp,
@@ -4869,6 +4792,39 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
         }
 }

+static int mlx5e_vxlan_set_port(struct net_device *netdev, unsigned int table,
+                                unsigned int entry, struct udp_tunnel_info *ti)
+{
+        struct mlx5e_priv *priv = netdev_priv(netdev);
+
+        return mlx5_vxlan_add_port(priv->mdev->vxlan, ntohs(ti->port));
+}
+
+static int mlx5e_vxlan_unset_port(struct net_device *netdev, unsigned int table,
+                                  unsigned int entry, struct udp_tunnel_info *ti)
+{
+        struct mlx5e_priv *priv = netdev_priv(netdev);
+
+        return mlx5_vxlan_del_port(priv->mdev->vxlan, ntohs(ti->port));
+}
+
+void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv)
+{
+        if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
+                return;
+
+        priv->nic_info.set_port = mlx5e_vxlan_set_port;
+        priv->nic_info.unset_port = mlx5e_vxlan_unset_port;
+        priv->nic_info.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
+                               UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN;
+        priv->nic_info.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN;
+        /* Don't count the space hard-coded to the IANA port */
+        priv->nic_info.tables[0].n_entries =
+                mlx5_vxlan_max_udp_ports(priv->mdev) - 1;
+
+        priv->netdev->udp_tunnel_nic_info = &priv->nic_info;
+}
+
 static void mlx5e_build_nic_netdev(struct net_device *netdev)
 {
         struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -4912,6 +4868,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
         netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;
         netdev->hw_features      |= NETIF_F_HW_VLAN_STAG_TX;

+        mlx5e_vxlan_set_netdev_info(priv);
+
         if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev) ||
             mlx5e_any_tunnel_proto_supported(mdev)) {
                 netdev->hw_enc_features |= NETIF_F_HW_CSUM;
@@ -5217,8 +5175,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
         rtnl_lock();
         if (netif_running(netdev))
                 mlx5e_open(netdev);
-        if (mlx5_vxlan_allowed(priv->mdev->vxlan))
-                udp_tunnel_get_rx_info(netdev);
+        udp_tunnel_nic_reset_ntf(priv->netdev);
         netif_device_attach(netdev);
         rtnl_unlock();
 }
@@ -5233,8 +5190,6 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
         rtnl_lock();
         if (netif_running(priv->netdev))
                 mlx5e_close(priv->netdev);
-        if (mlx5_vxlan_allowed(priv->mdev->vxlan))
-                udp_tunnel_drop_rx_info(priv->netdev);
         netif_device_detach(priv->netdev);
         rtnl_unlock();

--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -611,6 +611,29 @@ static struct devlink_port *mlx5e_rep_get_devlink_port(struct net_device *dev)
         return &rpriv->dl_port;
 }

+static int mlx5e_rep_change_carrier(struct net_device *dev, bool new_carrier)
+{
+        struct mlx5e_priv *priv = netdev_priv(dev);
+        struct mlx5e_rep_priv *rpriv = priv->ppriv;
+        struct mlx5_eswitch_rep *rep = rpriv->rep;
+        int err;
+
+        if (new_carrier) {
+                err = mlx5_modify_vport_admin_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
+                                                    rep->vport, 1, MLX5_VPORT_ADMIN_STATE_UP);
+                if (err)
+                        return err;
+                netif_carrier_on(dev);
+        } else {
+                err = mlx5_modify_vport_admin_state(priv->mdev, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
+                                                    rep->vport, 1, MLX5_VPORT_ADMIN_STATE_DOWN);
+                if (err)
+                        return err;
+                netif_carrier_off(dev);
+        }
+        return 0;
+}
+
 static const struct net_device_ops mlx5e_netdev_ops_rep = {
         .ndo_open                = mlx5e_rep_open,
         .ndo_stop                = mlx5e_rep_close,
@@ -621,6 +644,7 @@ static const struct net_device_ops mlx5e_netdev_ops_rep = {
         .ndo_has_offload_stats   = mlx5e_rep_has_offload_stats,
         .ndo_get_offload_stats   = mlx5e_rep_get_offload_stats,
         .ndo_change_mtu          = mlx5e_rep_change_mtu,
+        .ndo_change_carrier      = mlx5e_rep_change_carrier,
 };

 static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
@@ -634,8 +658,8 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
         .ndo_has_offload_stats   = mlx5e_rep_has_offload_stats,
         .ndo_get_offload_stats   = mlx5e_rep_get_offload_stats,
         .ndo_change_mtu          = mlx5e_uplink_rep_change_mtu,
-        .ndo_udp_tunnel_add      = mlx5e_add_vxlan_port,
-        .ndo_udp_tunnel_del      = mlx5e_del_vxlan_port,
+        .ndo_udp_tunnel_add      = udp_tunnel_nic_add_port,
+        .ndo_udp_tunnel_del      = udp_tunnel_nic_del_port,
         .ndo_features_check      = mlx5e_features_check,
         .ndo_set_vf_mac          = mlx5e_set_vf_mac,
         .ndo_set_vf_rate         = mlx5e_set_vf_rate,
@@ -706,6 +730,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
                 /* we want a persistent mac for the uplink rep */
                 mlx5_query_mac_address(mdev, netdev->dev_addr);
                 netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops;
+                mlx5e_vxlan_set_netdev_info(priv);
                 mlx5e_dcbnl_build_rep_netdev(netdev);
         } else {
                 netdev->netdev_ops = &mlx5e_netdev_ops_rep;
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -846,18 +846,15 @@ static int connect_fts_in_prio(struct mlx5_core_dev *dev,
 {
         struct mlx5_flow_root_namespace *root = find_root(&prio->node);
         struct mlx5_flow_table *iter;
-        int i = 0;
         int err;

         fs_for_each_ft(iter, prio) {
-                i++;
                 err = root->cmds->modify_flow_table(root, iter, ft);
                 if (err) {
-                        mlx5_core_warn(dev, "Failed to modify flow table %d\n",
-                                       iter->id);
+                        mlx5_core_err(dev,
+                                      "Failed to modify flow table id %d, type %d, err %d\n",
+                                      iter->id, iter->type, err);
                         /* The driver is out of sync with the FW */
-                        if (i > 1)
-                                WARN_ON(true);
                         return err;
                 }
         }
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c
@@ -42,21 +42,14 @@ struct mlx5_vxlan {
         struct mlx5_core_dev            *mdev;
         /* max_num_ports is usuallly 4, 16 buckets is more than enough */
         DECLARE_HASHTABLE(htable, 4);
-        int                             num_ports;
         struct mutex                    sync_lock; /* sync add/del port HW operations */
 };

 struct mlx5_vxlan_port {
         struct hlist_node hlist;
-        refcount_t refcount;
         u16 udp_port;
 };

-static inline u8 mlx5_vxlan_max_udp_ports(struct mlx5_core_dev *mdev)
-{
-        return MLX5_CAP_ETH(mdev, max_vxlan_udp_ports) ?: 4;
-}
-
 static int mlx5_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port)
 {
         u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {};
@@ -109,48 +102,24 @@ static struct mlx5_vxlan_port *vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port)
 int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port)
 {
         struct mlx5_vxlan_port *vxlanp;
-        int ret = 0;
-
-        mutex_lock(&vxlan->sync_lock);
-        vxlanp = vxlan_lookup_port(vxlan, port);
-        if (vxlanp) {
-                refcount_inc(&vxlanp->refcount);
-                goto unlock;
-        }
-
-        if (vxlan->num_ports >= mlx5_vxlan_max_udp_ports(vxlan->mdev)) {
-                mlx5_core_info(vxlan->mdev,
-                               "UDP port (%d) not offloaded, max number of UDP ports (%d) are already offloaded\n",
-                               port, mlx5_vxlan_max_udp_ports(vxlan->mdev));
-                ret = -ENOSPC;
-                goto unlock;
-        }
-
-        ret = mlx5_vxlan_core_add_port_cmd(vxlan->mdev, port);
-        if (ret)
-                goto unlock;
+        int ret;

         vxlanp = kzalloc(sizeof(*vxlanp), GFP_KERNEL);
-        if (!vxlanp) {
-                ret = -ENOMEM;
-                goto err_delete_port;
+        if (!vxlanp)
+                return -ENOMEM;
+        vxlanp->udp_port = port;
+
+        ret = mlx5_vxlan_core_add_port_cmd(vxlan->mdev, port);
+        if (ret) {
+                kfree(vxlanp);
+                return ret;
         }

-        vxlanp->udp_port = port;
-        refcount_set(&vxlanp->refcount, 1);
-
+        mutex_lock(&vxlan->sync_lock);
         hash_add_rcu(vxlan->htable, &vxlanp->hlist, port);
-
-        vxlan->num_ports++;
         mutex_unlock(&vxlan->sync_lock);
+
         return 0;
-
-err_delete_port:
-        mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
-
-unlock:
-        mutex_unlock(&vxlan->sync_lock);
-        return ret;
 }

 int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port)
@@ -161,18 +130,15 @@ int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port)
         mutex_lock(&vxlan->sync_lock);

         vxlanp = vxlan_lookup_port(vxlan, port);
-        if (!vxlanp) {
+        if (WARN_ON(!vxlanp)) {
                 ret = -ENOENT;
                 goto out_unlock;
         }

-        if (refcount_dec_and_test(&vxlanp->refcount)) {
-                hash_del_rcu(&vxlanp->hlist);
-                synchronize_rcu();
-                mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
-                kfree(vxlanp);
-                vxlan->num_ports--;
-        }
+        hash_del_rcu(&vxlanp->hlist);
+        synchronize_rcu();
+        mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
+        kfree(vxlanp);

 out_unlock:
         mutex_unlock(&vxlan->sync_lock);
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h
@@ -37,6 +37,11 @@
 struct mlx5_vxlan;
 struct mlx5_vxlan_port;

+static inline u8 mlx5_vxlan_max_udp_ports(struct mlx5_core_dev *mdev)
+{
+        return MLX5_CAP_ETH(mdev, max_vxlan_udp_ports) ?: 4;
+}
+
 static inline bool mlx5_vxlan_allowed(struct mlx5_vxlan *vxlan)
 {
         /* not allowed reason is encoded in vxlan pointer as error,
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -279,29 +279,9 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,

         /* The order of the actions are must to be keep, only the following
          * order is supported by SW steering:
-         * TX: push vlan -> modify header -> encap
+         * TX: modify header -> push vlan -> encap
          * RX: decap -> pop vlan -> modify header
          */
-        if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
-                tmp_action = create_action_push_vlan(domain, &fte->action.vlan[0]);
-                if (!tmp_action) {
-                        err = -ENOMEM;
-                        goto free_actions;
-                }
-                fs_dr_actions[fs_dr_num_actions++] = tmp_action;
-                actions[num_actions++] = tmp_action;
-        }
-
-        if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
-                tmp_action = create_action_push_vlan(domain, &fte->action.vlan[1]);
-                if (!tmp_action) {
-                        err = -ENOMEM;
-                        goto free_actions;
-                }
-                fs_dr_actions[fs_dr_num_actions++] = tmp_action;
-                actions[num_actions++] = tmp_action;
-        }
-
         if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
                 enum mlx5dr_action_reformat_type decap_type =
                         DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2;
@@ -354,6 +334,26 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
                         actions[num_actions++] =
                                 fte->action.modify_hdr->action.dr_action;

+        if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
+                tmp_action = create_action_push_vlan(domain, &fte->action.vlan[0]);
+                if (!tmp_action) {
+                        err = -ENOMEM;
+                        goto free_actions;
+                }
+                fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+                actions[num_actions++] = tmp_action;
+        }
+
+        if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
+                tmp_action = create_action_push_vlan(domain, &fte->action.vlan[1]);
+                if (!tmp_action) {
+                        err = -ENOMEM;
+                        goto free_actions;
+                }
+                fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+                actions[num_actions++] = tmp_action;
+        }
+
         if (delay_encap_set)
                 actions[num_actions++] =
                         fte->action.pkt_reformat->action.dr_action;
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -193,6 +193,11 @@ enum udp_tunnel_nic_info_flags {
         UDP_TUNNEL_NIC_INFO_OPEN_ONLY   = BIT(1),
         /* Device supports only IPv4 tunnels */
         UDP_TUNNEL_NIC_INFO_IPV4_ONLY   = BIT(2),
+        /* Device has hard-coded the IANA VXLAN port (4789) as VXLAN.
+         * This port must not be counted towards n_entries of any table.
+         * Driver will not receive any callback associated with port 4789.
+         */
+        UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN   = BIT(3),
 };

 /**
--- a/net/ethtool/tunnels.c
+++ b/net/ethtool/tunnels.c
@@ -2,6 +2,7 @@

 #include <linux/ethtool_netlink.h>
 #include <net/udp_tunnel.h>
+#include <net/vxlan.h>

 #include "bitset.h"
 #include "common.h"
@@ -18,6 +19,20 @@ static_assert(ETHTOOL_UDP_TUNNEL_TYPE_GENEVE == ilog2(UDP_TUNNEL_TYPE_GENEVE));
 static_assert(ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE ==
               ilog2(UDP_TUNNEL_TYPE_VXLAN_GPE));

+static ssize_t ethnl_udp_table_reply_size(unsigned int types, bool compact)
+{
+        ssize_t size;
+
+        size = ethnl_bitset32_size(&types, NULL, __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
+                                   udp_tunnel_type_names, compact);
+        if (size < 0)
+                return size;
+
+        return size +
+               nla_total_size(0) + /* _UDP_TABLE */
+               nla_total_size(sizeof(u32)); /* _UDP_TABLE_SIZE */
+}
+
 static ssize_t
 ethnl_tunnel_info_reply_size(const struct ethnl_req_info *req_base,
                              struct netlink_ext_ack *extack)
@@ -25,8 +40,8 @@ ethnl_tunnel_info_reply_size(const struct ethnl_req_info *req_base,
         bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
         const struct udp_tunnel_nic_info *info;
         unsigned int i;
-        ssize_t ret;
         size_t size;
+        int ret;

         info = req_base->dev->udp_tunnel_nic_info;
         if (!info) {
@@ -39,13 +54,10 @@ ethnl_tunnel_info_reply_size(const struct ethnl_req_info *req_base,

         for (i = 0; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) {
                 if (!info->tables[i].n_entries)
-                        return size;
+                        break;

-                size += nla_total_size(0);           /* _UDP_TABLE */
-                size += nla_total_size(sizeof(u32)); /* _UDP_TABLE_SIZE */
-                ret = ethnl_bitset32_size(&info->tables[i].tunnel_types, NULL,
-                                          __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
-                                          udp_tunnel_type_names, compact);
+                ret = ethnl_udp_table_reply_size(info->tables[i].tunnel_types,
+                                                 compact);
                 if (ret < 0)
                         return ret;
                 size += ret;
@@ -53,6 +65,17 @@ ethnl_tunnel_info_reply_size(const struct ethnl_req_info *req_base,
                 size += udp_tunnel_nic_dump_size(req_base->dev, i);
         }

+        if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN) {
+                ret = ethnl_udp_table_reply_size(0, compact);
+                if (ret < 0)
+                        return ret;
+                size += ret;
+
+                size += nla_total_size(0) +              /* _TABLE_ENTRY */
+                        nla_total_size(sizeof(__be16)) + /* _ENTRY_PORT */
+                        nla_total_size(sizeof(u32));     /* _ENTRY_TYPE */
+        }
+
         return size;
 }

@@ -62,7 +85,7 @@ ethnl_tunnel_info_fill_reply(const struct ethnl_req_info *req_base,
 {
         bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
         const struct udp_tunnel_nic_info *info;
-        struct nlattr *ports, *table;
+        struct nlattr *ports, *table, *entry;
         unsigned int i;

         info = req_base->dev->udp_tunnel_nic_info;
@@ -97,10 +120,40 @@ ethnl_tunnel_info_fill_reply(const struct ethnl_req_info *req_base,
                 nla_nest_end(skb, table);
         }

+        if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN) {
+                u32 zero = 0;
+
+                table = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE);
+                if (!table)
+                        goto err_cancel_ports;
+
+                if (nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE, 1))
+                        goto err_cancel_table;
+
+                if (ethnl_put_bitset32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES,
+                                       &zero, NULL,
+                                       __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
+                                       udp_tunnel_type_names, compact))
+                        goto err_cancel_table;
+
+                entry = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY);
+
+                if (nla_put_be16(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT,
+                                 htons(IANA_VXLAN_UDP_PORT)) ||
+                    nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE,
+                                ilog2(UDP_TUNNEL_TYPE_VXLAN)))
+                        goto err_cancel_entry;
+
+                nla_nest_end(skb, entry);
+                nla_nest_end(skb, table);
+        }
+
         nla_nest_end(skb, ports);

         return 0;

+err_cancel_entry:
+        nla_nest_cancel(skb, entry);
 err_cancel_table:
         nla_nest_cancel(skb, table);
 err_cancel_ports:
--- a/net/ipv4/udp_tunnel_nic.c
+++ b/net/ipv4/udp_tunnel_nic.c
@@ -7,6 +7,7 @@
 #include <linux/types.h>
 #include <linux/workqueue.h>
 #include <net/udp_tunnel.h>
+#include <net/vxlan.h>

 enum udp_tunnel_nic_table_entry_flags {
         UDP_TUNNEL_NIC_ENTRY_ADD        = BIT(0),
@@ -504,6 +505,12 @@ __udp_tunnel_nic_add_port(struct net_device *dev, struct udp_tunnel_info *ti)
                 return;
         if (!netif_running(dev) && info->flags & UDP_TUNNEL_NIC_INFO_OPEN_ONLY)
                 return;
+        if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN &&
+            ti->port == htons(IANA_VXLAN_UDP_PORT)) {
+                if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+                        netdev_warn(dev, "device assumes port 4789 will be used by vxlan tunnels\n");
+                return;
+        }

         if (!udp_tunnel_nic_is_capable(dev, utn, ti))
                 return;