mlx5-updates-2017-04-22

Sparse and compiler warning fixes from Stephen Hemminger.

From Roi Dayan and Or Gerlitz: add devlink and mlx5 support for
controlling the E-Switch encapsulation mode. This knob enables HW
support for applying encapsulation/decapsulation to VF traffic as
part of SRIOV e-switch offloading.

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJY+5cRAAoJEEg/ir3gV/o+5c8H/1/khPzy26B2lWyjPC8CRCQF
eSd0tiHLgIqbZTbnIHTR+NbZ/SUFaukoJi8OKn1fGFHCCajWvPP4xkENVKrUdi3q
kOgNZb/R1V0j6SdELyoMalFPjAscTgdmwYMnry+vcjOxJ+H2uUTnMKXwFf8IsBjz
EINy8oZ5jZcejmft0c2O5HN4Bt/7U5ttM3CroAdcvPT9lq2DFJL2uCABhTO/1DdY
b7uVa47FnkqxX19Ebn7fjp5r3diGYOmCPMjdC89C//rbkLB8FN61EkcSLpGY3YNm
djmCPQ+xaa3ielmBpOk3AMayFEtYW0nDMj9eWECVByadRQZ2qz9wTVXBp5CX9zg=
=E3Jt
-----END PGP SIGNATURE-----

Merge tag 'mlx5-updates-2017-04-22' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2017-04-22

Sparse and compiler warning fixes from Stephen Hemminger.

From Roi Dayan and Or Gerlitz: add devlink and mlx5 support for
controlling the E-Switch encapsulation mode. This knob enables HW
support for applying encapsulation/decapsulation to VF traffic as
part of SRIOV e-switch offloading.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit bc95cd8e8b
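In outline, the encapsulation knob threads a single u8 mode value through the stack: a new DEVLINK_ATTR_ESWITCH_ENCAP_MODE netlink attribute, a pair of eswitch_encap_mode_get/set callbacks in struct devlink_ops, and an mlx5 implementation that rebuilds the fast-path FDB table with tunnel support enabled. A minimal sketch of the driver-side wiring follows; the foo_* names and the bare-bones priv layout are hypothetical, only the devlink types and enum values come from the patches below.

#include <linux/errno.h>
#include <net/devlink.h>

struct foo_priv {
	u8 encap_mode;	/* one of DEVLINK_ESWITCH_ENCAP_MODE_* */
};

static int foo_encap_mode_get(struct devlink *devlink, u8 *p_encap_mode)
{
	struct foo_priv *priv = devlink_priv(devlink);

	*p_encap_mode = priv->encap_mode;
	return 0;
}

static int foo_encap_mode_set(struct devlink *devlink, u8 encap_mode)
{
	struct foo_priv *priv = devlink_priv(devlink);

	/* only the two modes defined by this series exist so far */
	if (encap_mode != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    encap_mode != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
		return -EOPNOTSUPP;

	priv->encap_mode = encap_mode;	/* a real driver reprograms HW here */
	return 0;
}

static const struct devlink_ops foo_devlink_ops = {
	.eswitch_encap_mode_get = foo_encap_mode_get,
	.eswitch_encap_mode_set = foo_encap_mode_set,
};

From userspace the knob is exercised through the iproute2 devlink tool's "devlink dev eswitch set/show" commands; the exact option spelling for encapsulation depends on the iproute2 version, so treat any particular invocation as an assumption rather than part of this series.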
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -39,6 +39,7 @@
 #include "en.h"
 #include "en_tc.h"
 #include "eswitch.h"
+#include "ipoib.h"
 
 static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp)
 {
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -33,6 +33,7 @@
 #include <linux/tcp.h>
 #include <linux/if_vlan.h>
 #include "en.h"
+#include "ipoib.h"
 
 #define MLX5E_SQ_NOPS_ROOM  MLX5_SEND_WQE_MAX_WQEBBS
 #define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1806,6 +1806,11 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 	esw->enabled_vports = 0;
 	esw->mode = SRIOV_NONE;
 	esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
+	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) &&
+	    MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
+		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_BASIC;
+	else
+		esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
 
 	dev->priv.eswitch = esw;
 	return 0;
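Note the boot-time default chosen above: when the firmware advertises both the encap and decap FDB capabilities, the e-switch initializes to DEVLINK_ESWITCH_ENCAP_MODE_BASIC, otherwise to NONE, and the devlink set callback added below refuses to change the mode on hardware that lacks either capability.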
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -210,6 +210,7 @@ struct mlx5_esw_offload {
 	DECLARE_HASHTABLE(encap_tbl, 8);
 	u8 inline_mode;
 	u64 num_flows;
+	u8 encap;
 };
 
 struct mlx5_eswitch {
@@ -322,6 +323,8 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
 int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode);
 int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
 int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode);
+int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap);
+int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap);
 void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
 				     int vport_index,
 				     struct mlx5_eswitch_rep *rep);
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -426,22 +426,68 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
 	return err;
 }
 
-#define MAX_PF_SQ 256
 #define ESW_OFFLOADS_NUM_GROUPS  4
 
-static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
+static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
 {
-	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
-	struct mlx5_flow_table_attr ft_attr = {};
-	int table_size, ix, esw_size, err = 0;
 	struct mlx5_core_dev *dev = esw->dev;
 	struct mlx5_flow_namespace *root_ns;
 	struct mlx5_flow_table *fdb = NULL;
-	struct mlx5_flow_group *g;
-	u32 *flow_group_in;
-	void *match_criteria;
+	int esw_size, err = 0;
 	u32 flags = 0;
 
+	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+	if (!root_ns) {
+		esw_warn(dev, "Failed to get FDB flow namespace\n");
+		err = -EOPNOTSUPP;
+		goto out;
+	}
+
+	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
+		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
+		  MLX5_CAP_GEN(dev, max_flow_counter), ESW_OFFLOADS_NUM_GROUPS);
+
+	esw_size = min_t(int, MLX5_CAP_GEN(dev, max_flow_counter) * ESW_OFFLOADS_NUM_GROUPS,
+			 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
+
+	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
+		flags |= MLX5_FLOW_TABLE_TUNNEL_EN;
+
+	fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
+						  esw_size,
+						  ESW_OFFLOADS_NUM_GROUPS, 0,
+						  flags);
+	if (IS_ERR(fdb)) {
+		err = PTR_ERR(fdb);
+		esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
+		goto out;
+	}
+	esw->fdb_table.fdb = fdb;
+
+out:
+	return err;
+}
+
+static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
+{
+	mlx5_destroy_flow_table(esw->fdb_table.fdb);
+}
+
+#define MAX_PF_SQ 256
+
+static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
+{
+	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+	struct mlx5_flow_table_attr ft_attr = {};
+	struct mlx5_core_dev *dev = esw->dev;
+	struct mlx5_flow_namespace *root_ns;
+	struct mlx5_flow_table *fdb = NULL;
+	int table_size, ix, err = 0;
+	struct mlx5_flow_group *g;
+	void *match_criteria;
+	u32 *flow_group_in;
+
+	esw_debug(esw->dev, "Create offloads FDB Tables\n");
 	flow_group_in = mlx5_vzalloc(inlen);
 	if (!flow_group_in)
 		return -ENOMEM;
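With the fast-path table creation carved out into esw_create_offloads_fast_fdb_table() above, the body of the old combined function collapses to a call into the new helper, as the next hunk shows: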
@@ -453,27 +499,9 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
 		goto ns_err;
 	}
 
-	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
-		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
-		  MLX5_CAP_GEN(dev, max_flow_counter), ESW_OFFLOADS_NUM_GROUPS);
-
-	esw_size = min_t(int, MLX5_CAP_GEN(dev, max_flow_counter) * ESW_OFFLOADS_NUM_GROUPS,
-			 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
-
-	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) &&
-	    MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
-		flags |= MLX5_FLOW_TABLE_TUNNEL_EN;
-
-	fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
-						  esw_size,
-						  ESW_OFFLOADS_NUM_GROUPS, 0,
-						  flags);
-	if (IS_ERR(fdb)) {
-		err = PTR_ERR(fdb);
-		esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
+	err = esw_create_offloads_fast_fdb_table(esw);
+	if (err)
 		goto fast_fdb_err;
-	}
-	esw->fdb_table.fdb = fdb;
 
 	table_size = nvports + MAX_PF_SQ + 1;
 
@@ -545,18 +573,18 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
 	return err;
 }
 
-static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw)
+static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
 {
 	if (!esw->fdb_table.fdb)
 		return;
 
-	esw_debug(esw->dev, "Destroy offloads FDB Table\n");
+	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
 	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule);
 	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
 	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
 
 	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
-	mlx5_destroy_flow_table(esw->fdb_table.fdb);
+	esw_destroy_offloads_fast_fdb_table(esw);
 }
 
 static int esw_create_offloads_table(struct mlx5_eswitch *esw)
@@ -716,7 +744,7 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
 	mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
 	mlx5_dev_list_unlock();
 
-	err = esw_create_offloads_fdb_table(esw, nvports);
+	err = esw_create_offloads_fdb_tables(esw, nvports);
 	if (err)
 		goto create_fdb_err;
 
@@ -753,7 +781,7 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
 	esw_destroy_offloads_table(esw);
 
 create_ft_err:
-	esw_destroy_offloads_fdb_table(esw);
+	esw_destroy_offloads_fdb_tables(esw);
 
 create_fdb_err:
 	/* enable back PF RoCE */
@@ -799,7 +827,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
 
 	esw_destroy_vport_rx_group(esw);
 	esw_destroy_offloads_table(esw);
-	esw_destroy_offloads_fdb_table(esw);
+	esw_destroy_offloads_fdb_tables(esw);
 }
 
 static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
@@ -1016,6 +1044,66 @@ int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
 	return 0;
 }
 
+int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
+{
+	struct mlx5_core_dev *dev = devlink_priv(devlink);
+	struct mlx5_eswitch *esw = dev->priv.eswitch;
+	int err;
+
+	if (!MLX5_CAP_GEN(dev, vport_group_manager))
+		return -EOPNOTSUPP;
+
+	if (esw->mode == SRIOV_NONE)
+		return -EOPNOTSUPP;
+
+	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
+	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) ||
+	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
+		return -EOPNOTSUPP;
+
+	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
+		return -EOPNOTSUPP;
+
+	if (esw->mode == SRIOV_LEGACY) {
+		esw->offloads.encap = encap;
+		return 0;
+	}
+
+	if (esw->offloads.encap == encap)
+		return 0;
+
+	if (esw->offloads.num_flows > 0) {
+		esw_warn(dev, "Can't set encapsulation when flows are configured\n");
+		return -EOPNOTSUPP;
+	}
+
+	esw_destroy_offloads_fast_fdb_table(esw);
+
+	esw->offloads.encap = encap;
+	err = esw_create_offloads_fast_fdb_table(esw);
+	if (err) {
+		esw_warn(esw->dev, "Failed re-creating fast FDB table, err %d\n", err);
+		esw->offloads.encap = !encap;
+		(void) esw_create_offloads_fast_fdb_table(esw);
+	}
+	return err;
+}
+
+int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
+{
+	struct mlx5_core_dev *dev = devlink_priv(devlink);
+	struct mlx5_eswitch *esw = dev->priv.eswitch;
+
+	if (!MLX5_CAP_GEN(dev, vport_group_manager))
+		return -EOPNOTSUPP;
+
+	if (esw->mode == SRIOV_NONE)
+		return -EOPNOTSUPP;
+
+	*encap = esw->offloads.encap;
+	return 0;
+}
+
 void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
 				     int vport_index,
 				     struct mlx5_eswitch_rep *__rep)
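The set callback above has to destroy and re-create the fast-path FDB table because MLX5_FLOW_TABLE_TUNNEL_EN is fixed at table creation time; on a failed re-create it flips back to the previous mode (with only two modes, !encap is the old value) and rebuilds best-effort. A standalone sketch of that rollback pattern, with entirely hypothetical names (resource, resource_create, resource_set_mode are illustrative, not mlx5 symbols):

#include <stdio.h>

struct resource {
	int mode;	/* 0 or 1, mirroring the two encap modes */
	int built;
};

static int resource_create(struct resource *r)
{
	/* a real build step could fail; this stand-in always succeeds */
	r->built = 1;
	return 0;
}

static void resource_destroy(struct resource *r)
{
	r->built = 0;
}

static int resource_set_mode(struct resource *r, int mode)
{
	int err;

	if (r->mode == mode)
		return 0;

	resource_destroy(r);
	r->mode = mode;
	err = resource_create(r);
	if (err) {
		r->mode = !mode;		/* roll back to the old mode */
		(void)resource_create(r);	/* best-effort restore */
	}
	return err;
}

int main(void)
{
	struct resource r = { .mode = 0, .built = 0 };

	(void)resource_create(&r);
	printf("set mode -> %d\n", resource_set_mode(&r, 1));
	return 0;
}

The interesting property is that the object is never left half-configured: either the new mode sticks, or the old one is restored before the error is returned.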
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib.c
@@ -178,7 +178,7 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv)
 	return 0;
 }
 
-void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
+static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
 {
 	struct mlx5i_priv *ipriv = priv->ppriv;
 
@@ -359,9 +359,10 @@ static int mlx5i_close(struct net_device *netdev)
 	return 0;
 }
 
+#ifdef notusedyet
 /* IPoIB RDMA netdev callbacks */
-int mlx5i_attach_mcast(struct net_device *netdev, struct ib_device *hca,
-		       union ib_gid *gid, u16 lid, int set_qkey)
+static int mlx5i_attach_mcast(struct net_device *netdev, struct ib_device *hca,
+			      union ib_gid *gid, u16 lid, int set_qkey)
 {
 	struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
 	struct mlx5_core_dev *mdev = epriv->mdev;
@@ -377,8 +378,8 @@ int mlx5i_attach_mcast(struct net_device *netdev, struct ib_device *hca,
 	return err;
 }
 
-int mlx5i_detach_mcast(struct net_device *netdev, struct ib_device *hca,
-		       union ib_gid *gid, u16 lid)
+static int mlx5i_detach_mcast(struct net_device *netdev, struct ib_device *hca,
+			      union ib_gid *gid, u16 lid)
 {
 	struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
 	struct mlx5_core_dev *mdev = epriv->mdev;
@@ -395,7 +396,7 @@ int mlx5i_detach_mcast(struct net_device *netdev, struct ib_device *hca,
 	return err;
 }
 
-int mlx5i_xmit(struct net_device *dev, struct sk_buff *skb,
+static int mlx5i_xmit(struct net_device *dev, struct sk_buff *skb,
 	       struct ib_ah *address, u32 dqpn, u32 dqkey)
 {
 	struct mlx5e_priv *epriv = mlx5i_epriv(dev);
@@ -404,6 +405,7 @@ int mlx5i_xmit(struct net_device *dev, struct sk_buff *skb,
 
 	return mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, dqkey);
 }
+#endif
 
 static int mlx5i_check_required_hca_cap(struct mlx5_core_dev *mdev)
 {
@@ -418,10 +420,10 @@ static int mlx5i_check_required_hca_cap(struct mlx5_core_dev *mdev)
 	return 0;
 }
 
-struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
-					  struct ib_device *ibdev,
-					  const char *name,
-					  void (*setup)(struct net_device *))
+static struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
+						 struct ib_device *ibdev,
+						 const char *name,
+						 void (*setup)(struct net_device *))
 {
 	const struct mlx5e_profile *profile = &mlx5i_nic_profile;
 	int nch = profile->max_nch(mdev);
@@ -480,7 +482,7 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
 }
 EXPORT_SYMBOL(mlx5_rdma_netdev_alloc);
 
-void mlx5_rdma_netdev_free(struct net_device *netdev)
+static void mlx5_rdma_netdev_free(struct net_device *netdev)
 {
 	struct mlx5e_priv *priv = mlx5i_epriv(netdev);
 	const struct mlx5e_profile *profile = priv->profile;
 
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1280,6 +1280,8 @@ static const struct devlink_ops mlx5_devlink_ops = {
 	.eswitch_mode_get = mlx5_devlink_eswitch_mode_get,
 	.eswitch_inline_mode_set = mlx5_devlink_eswitch_inline_mode_set,
 	.eswitch_inline_mode_get = mlx5_devlink_eswitch_inline_mode_get,
+	.eswitch_encap_mode_set = mlx5_devlink_eswitch_encap_mode_set,
+	.eswitch_encap_mode_get = mlx5_devlink_eswitch_encap_mode_get,
 #endif
 };
 
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -268,6 +268,8 @@ struct devlink_ops {
 	int (*eswitch_mode_set)(struct devlink *devlink, u16 mode);
 	int (*eswitch_inline_mode_get)(struct devlink *devlink, u8 *p_inline_mode);
 	int (*eswitch_inline_mode_set)(struct devlink *devlink, u8 inline_mode);
+	int (*eswitch_encap_mode_get)(struct devlink *devlink, u8 *p_encap_mode);
+	int (*eswitch_encap_mode_set)(struct devlink *devlink, u8 encap_mode);
 };
 
 static inline void *devlink_priv(struct devlink *devlink)
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -119,6 +119,11 @@ enum devlink_eswitch_inline_mode {
 	DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT,
 };
 
+enum devlink_eswitch_encap_mode {
+	DEVLINK_ESWITCH_ENCAP_MODE_NONE,
+	DEVLINK_ESWITCH_ENCAP_MODE_BASIC,
+};
+
 enum devlink_attr {
 	/* don't change the order or add anything between, this is ABI! */
 	DEVLINK_ATTR_UNSPEC,
@@ -195,6 +200,8 @@ enum devlink_attr {
 
 	DEVLINK_ATTR_PAD,
 
+	DEVLINK_ATTR_ESWITCH_ENCAP_MODE,	/* u8 */
+
 	/* add new attributes above here, update the policy in devlink.c */
 
 	__DEVLINK_ATTR_MAX,
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -1397,10 +1397,10 @@ static int devlink_nl_eswitch_fill(struct sk_buff *msg, struct devlink *devlink,
 			   u32 seq, int flags)
 {
 	const struct devlink_ops *ops = devlink->ops;
+	u8 inline_mode, encap_mode;
 	void *hdr;
 	int err = 0;
 	u16 mode;
-	u8 inline_mode;
 
 	hdr = genlmsg_put(msg, portid, seq, &devlink_nl_family, flags, cmd);
 	if (!hdr)
@@ -1429,6 +1429,15 @@ static int devlink_nl_eswitch_fill(struct sk_buff *msg, struct devlink *devlink,
 			goto nla_put_failure;
 	}
 
+	if (ops->eswitch_encap_mode_get) {
+		err = ops->eswitch_encap_mode_get(devlink, &encap_mode);
+		if (err)
+			goto nla_put_failure;
+		err = nla_put_u8(msg, DEVLINK_ATTR_ESWITCH_ENCAP_MODE, encap_mode);
+		if (err)
+			goto nla_put_failure;
+	}
+
 	genlmsg_end(msg, hdr);
 	return 0;
 
@@ -1468,9 +1477,9 @@ static int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb,
 {
 	struct devlink *devlink = info->user_ptr[0];
 	const struct devlink_ops *ops = devlink->ops;
-	u16 mode;
-	u8 inline_mode;
+	u8 inline_mode, encap_mode;
 	int err = 0;
+	u16 mode;
 
 	if (!ops)
 		return -EOPNOTSUPP;
@@ -1493,6 +1502,16 @@ static int devlink_nl_cmd_eswitch_set_doit(struct sk_buff *skb,
 		if (err)
 			return err;
 	}
 
+	if (info->attrs[DEVLINK_ATTR_ESWITCH_ENCAP_MODE]) {
+		if (!ops->eswitch_encap_mode_set)
+			return -EOPNOTSUPP;
+		encap_mode = nla_get_u8(info->attrs[DEVLINK_ATTR_ESWITCH_ENCAP_MODE]);
+		err = ops->eswitch_encap_mode_set(devlink, encap_mode);
+		if (err)
+			return err;
+	}
+
 	return 0;
 }
 
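The set path mirrors the existing inline-mode handling: the attribute's presence selects the operation, a driver without the callback gets -EOPNOTSUPP, and the nla_policy entry in the next hunk lets the netlink core validate the u8 before the handler runs.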
@@ -2190,6 +2209,7 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
 	[DEVLINK_ATTR_SB_TC_INDEX] = { .type = NLA_U16 },
 	[DEVLINK_ATTR_ESWITCH_MODE] = { .type = NLA_U16 },
 	[DEVLINK_ATTR_ESWITCH_INLINE_MODE] = { .type = NLA_U8 },
+	[DEVLINK_ATTR_ESWITCH_ENCAP_MODE] = { .type = NLA_U8 },
 	[DEVLINK_ATTR_DPIPE_TABLE_NAME] = { .type = NLA_NUL_STRING },
 	[DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED] = { .type = NLA_U8 },
 };