mlx5-fixes-2018-07-18
-----BEGIN PGP SIGNATURE-----

iQEcBAABAgAGBQJbT+cLAAoJEEg/ir3gV/o+I5QH/3LQemGzH33iNsg4khpPeNA+
Q4mGd2jqbwfL17FTSGpTsPje6rpwzR+j8W1fGTx1vzYmE79ZyDu4EwHS7YZJcGyz
q8P0HgrUe4NrJV8mlOpbIRbTuSwfqultw2qRpmCfLf5kK1nqSIPpUHIfBUMqwy0o
O7GJrytUI4Av+r5Px/6bjb5kBaVe5YBe0tg8nSrN2vtzHVQWm+5/uaNRW2SrCN+4
5SI2AsWyMwfGCC+IE8i9OlIFCy6Iu2vwcUabK+6EeGKP4Wb6rukyG01TkQPSd7gy
ozcAjvj+ppHmVFath1uzLCFU3RbKt6GbVRGaFQg5jO5vvK3uzFJnm59Vqw/WzNs=
=UXsy
-----END PGP SIGNATURE-----

Merge tag 'mlx5-fixes-2018-07-18' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
Mellanox, mlx5 fixes 2018-07-18

The following series provides fixes to mlx5 core and net device driver.

Please pull and let me know if there's any problem.

For -stable v4.7
    net/mlx5e: Don't allow aRFS for encapsulated packets
    net/mlx5e: Fix quota counting in aRFS expire flow

For -stable v4.15
    net/mlx5e: Only allow offloading decap egress (egdev) flows
    net/mlx5e: Refine ets validation function
    net/mlx5: Adjust clock overflow work period

For -stable v4.17
    net/mlx5: E-Switch, UBSAN fix undefined behavior in mlx5_eswitch_mode
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit a6fc8594a5
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -123,7 +123,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
 	int i;
 
 	buf->size = size;
-	buf->npages = 1 << get_order(size);
+	buf->npages = DIV_ROUND_UP(size, PAGE_SIZE);
 	buf->page_shift = PAGE_SHIFT;
 	buf->frags = kcalloc(buf->npages, sizeof(struct mlx5_buf_list),
 			     GFP_KERNEL);
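A fragmented buffer is backed by individually allocated pages, so rounding the page count up to a power of two with get_order() over-allocates for any size that is not a power-of-two number of pages, while DIV_ROUND_UP() counts exactly the pages needed. A standalone sketch of the difference, with user-space stand-ins for the kernel's PAGE_SIZE, get_order() and DIV_ROUND_UP() (hypothetical values, not driver code):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Simplified stand-in for the kernel's get_order(): smallest order
 * such that (PAGE_SIZE << order) >= size. */
static unsigned int get_order(unsigned long size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long size = 5 * PAGE_SIZE; /* e.g. a 20 KiB work queue */

	printf("1 << get_order(size): %lu pages\n", 1UL << get_order(size));
	printf("DIV_ROUND_UP(size, PAGE_SIZE): %lu pages\n",
	       DIV_ROUND_UP(size, PAGE_SIZE));
	return 0;
}

For a 5-page buffer the old formula allocates 8 fragments, a 60% overshoot that the exact count eliminates.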
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -381,14 +381,14 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
 	HLIST_HEAD(del_list);
 	spin_lock_bh(&priv->fs.arfs.arfs_lock);
 	mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
-		if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
-			break;
 		if (!work_pending(&arfs_rule->arfs_work) &&
 		    rps_may_expire_flow(priv->netdev,
 					arfs_rule->rxq, arfs_rule->flow_id,
 					arfs_rule->filter_id)) {
 			hlist_del_init(&arfs_rule->hlist);
 			hlist_add_head(&arfs_rule->hlist, &del_list);
+			if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
+				break;
 		}
 	}
 	spin_unlock_bh(&priv->fs.arfs.arfs_lock);
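Before this hunk, quota counted every rule scanned, so a pass over a table full of fresh rules could hit MLX5E_ARFS_EXPIRY_QUOTA and bail out without reclaiming anything; counting only the rules actually moved to del_list bounds the work done rather than the work considered. A minimal sketch of the corrected pattern (hypothetical expire_pass() helper, not driver code):

#include <stdbool.h>
#include <stdio.h>

#define EXPIRY_QUOTA 4

struct rule { bool expirable; };

/* Returns the number of rules reclaimed in one pass: the quota bounds
 * reclaim work, so merely scanning fresh rules costs nothing. */
static int expire_pass(const struct rule *rules, int n)
{
	int quota = 0, reclaimed = 0;

	for (int i = 0; i < n; i++) {
		if (!rules[i].expirable)
			continue;           /* scanned, not counted */
		reclaimed++;                /* "move to del_list"   */
		if (quota++ > EXPIRY_QUOTA)
			break;
	}
	return reclaimed;
}

int main(void)
{
	/* 14 fresh rules followed by 2 expirable ones: a quota charged
	 * per scanned rule would reclaim none of them. */
	struct rule rules[16] = { [14] = { true }, [15] = { true } };

	printf("reclaimed %d\n", expire_pass(rules, 16)); /* prints 2 */
	return 0;
}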
@@ -711,6 +711,9 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
 	    skb->protocol != htons(ETH_P_IPV6))
 		return -EPROTONOSUPPORT;
 
+	if (skb->encapsulation)
+		return -EPROTONOSUPPORT;
+
 	arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
 	if (!arfs_t)
 		return -EPROTONOSUPPORT;
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -275,7 +275,8 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
 }
 
 static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
-				    struct ieee_ets *ets)
+				    struct ieee_ets *ets,
+				    bool zero_sum_allowed)
 {
 	bool have_ets_tc = false;
 	int bw_sum = 0;
@@ -300,8 +301,9 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
 	}
 
 	if (have_ets_tc && bw_sum != 100) {
-		netdev_err(netdev,
-			   "Failed to validate ETS: BW sum is illegal\n");
+		if (bw_sum || (!bw_sum && !zero_sum_allowed))
+			netdev_err(netdev,
+				   "Failed to validate ETS: BW sum is illegal\n");
 		return -EINVAL;
 	}
 	return 0;
@@ -316,7 +318,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
 	if (!MLX5_CAP_GEN(priv->mdev, ets))
 		return -EOPNOTSUPP;
 
-	err = mlx5e_dbcnl_validate_ets(netdev, ets);
+	err = mlx5e_dbcnl_validate_ets(netdev, ets, false);
 	if (err)
 		return err;
 
@@ -642,12 +644,9 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
 				  ets.prio_tc[i]);
 	}
 
-	err = mlx5e_dbcnl_validate_ets(netdev, &ets);
-	if (err) {
-		netdev_err(netdev,
-			   "%s, Failed to validate ETS: %d\n", __func__, err);
+	err = mlx5e_dbcnl_validate_ets(netdev, &ets, true);
+	if (err)
 		goto out;
-	}
 
 	err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
 	if (err) {
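The new zero_sum_allowed parameter splits the callers: mlx5e_dcbnl_ieee_setets() stays strict (a zero bandwidth sum with ETS TCs present is rejected and logged), while the CEE setall path passes true because configurations it replays can legitimately carry an all-zero sum, which should fail quietly instead of spamming the log. A condensed sketch of the resulting decision table (hypothetical validate_bw_sum() helper, not the driver's function):

#include <stdbool.h>
#include <stdio.h>

/* Returns 0 when the ETS bandwidth-share sum is acceptable; logs only
 * when the zero-sum escape hatch does not apply. */
static int validate_bw_sum(int bw_sum, bool have_ets_tc, bool zero_sum_allowed)
{
	if (have_ets_tc && bw_sum != 100) {
		if (bw_sum || (!bw_sum && !zero_sum_allowed))
			fprintf(stderr, "BW sum %d is illegal\n", bw_sum);
		return -1;
	}
	return 0;
}

int main(void)
{
	validate_bw_sum(90, true, false);  /* logged, rejected          */
	validate_bw_sum(0, true, false);   /* logged, rejected (strict) */
	validate_bw_sum(0, true, true);    /* rejected quietly (setall) */
	validate_bw_sum(100, true, false); /* accepted                  */
	return 0;
}

Note that every non-100 sum still returns an error; the flag only controls whether the failure is reported as a caller mistake.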
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1957,6 +1957,10 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
 	else
 		actions = flow->nic_attr->action;
 
+	if (flow->flags & MLX5E_TC_FLOW_EGRESS &&
+	    !(actions & MLX5_FLOW_CONTEXT_ACTION_DECAP))
+		return false;
+
 	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
 		return modify_header_match_supported(&parse_attr->spec, exts);
 
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -2216,6 +2216,6 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
 
 u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
 {
-	return esw->mode;
+	return ESW_ALLOWED(esw) ? esw->mode : SRIOV_NONE;
 }
 EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
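mlx5_eswitch_mode() is an exported helper that can be called for devices whose E-Switch was never set up; evaluating esw->mode through an unguarded handle is the undefined behavior UBSAN flagged, so the fix routes the access through the ESW_ALLOWED() check and returns SRIOV_NONE as the explicit answer for such devices. The general shape of the fix as a self-contained sketch (hypothetical enum and struct, not the driver's types):

#include <stdio.h>

enum esw_mode { MODE_NONE = 0, MODE_LEGACY, MODE_OFFLOADS };

struct eswitch { enum esw_mode mode; };

/* Guarded accessor: never touches a handle that may be absent, and
 * maps "no eswitch" to a well-defined sentinel. */
static enum esw_mode eswitch_mode(const struct eswitch *esw)
{
	return esw ? esw->mode : MODE_NONE;
}

int main(void)
{
	struct eswitch sw = { .mode = MODE_LEGACY };

	printf("%d %d\n", eswitch_mode(&sw), eswitch_mode(NULL)); /* 1 0 */
	return 0;
}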
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1887,7 +1887,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
 	if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
 		if (!fwd_next_prio_supported(ft))
 			return ERR_PTR(-EOPNOTSUPP);
-		if (dest)
+		if (dest_num)
 			return ERR_PTR(-EINVAL);
 		mutex_lock(&root->chain_lock);
 		next_ft = find_next_chained_ft(prio);
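FWD_NEXT_PRIO rules may not name explicit destinations, but a caller can validly hand over a non-NULL dest array together with dest_num == 0; testing the pointer rather than the count wrongly rejected those calls. A tiny sketch of why the count is the right predicate (hypothetical add_fwd_next_prio_rule() stand-in):

#include <stdio.h>

struct dest { int id; };

/* Validates a forward-to-next-priority rule: any *counted*
 * destination is an error, a merely non-NULL array is not. */
static int add_fwd_next_prio_rule(const struct dest *dest, int dest_num)
{
	(void)dest;        /* the array may be non-NULL even when empty */
	if (dest_num)      /* was: if (dest) */
		return -1;
	return 0;
}

int main(void)
{
	struct dest scratch[4]; /* caller's reusable buffer, zero entries used */

	printf("%d\n", add_fwd_next_prio_rule(scratch, 0)); /*  0: accepted */
	printf("%d\n", add_fwd_next_prio_rule(scratch, 1)); /* -1: rejected */
	return 0;
}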
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -488,6 +488,7 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev,
 void mlx5_init_clock(struct mlx5_core_dev *mdev)
 {
 	struct mlx5_clock *clock = &mdev->clock;
+	u64 overflow_cycles;
 	u64 ns;
 	u64 frac = 0;
 	u32 dev_freq;
@@ -511,10 +512,17 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
 
 	/* Calculate period in seconds to call the overflow watchdog - to make
 	 * sure counter is checked at least once every wrap around.
+	 * The period is calculated as the minimum between max HW cycles count
+	 * (The clock source mask) and max amount of cycles that can be
+	 * multiplied by clock multiplier where the result doesn't exceed
+	 * 64bits.
 	 */
-	ns = cyclecounter_cyc2ns(&clock->cycles, clock->cycles.mask,
+	overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
+	overflow_cycles = min(overflow_cycles, clock->cycles.mask >> 1);
+
+	ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles,
 				 frac, &frac);
-	do_div(ns, NSEC_PER_SEC / 2 / HZ);
+	do_div(ns, NSEC_PER_SEC / HZ);
 	clock->overflow_period = ns;
 
 	mdev->clock_info_page = alloc_page(GFP_KERNEL);
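The overflow worker must run before the cycle counter wraps, but the period may also not exceed the range where cycles * mult still fits in 64 bits, which on devices with a large mult is the tighter bound; the new code takes the minimum of both limits (each expressed as half its range) and converts the result into jiffies. A standalone sketch of the computation with hypothetical mult/shift/mask values, modeling cyc2ns as (cycles * mult) >> shift:

#include <stdint.h>
#include <stdio.h>

#define HZ           250ULL /* hypothetical kernel tick rate */
#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t mult  = 1ULL << 22;       /* hypothetical: 1 cycle == 1 ns */
	uint32_t shift = 22;
	uint64_t mask  = (1ULL << 48) - 1; /* hypothetical 48-bit counter   */

	/* Largest cycle count whose cyc2ns product fits in 64 bits ... */
	uint64_t overflow_cycles = (~0ULL >> 1) / mult;
	/* ... clamped to half the counter range, so a wrap is never missed. */
	if (overflow_cycles > mask >> 1)
		overflow_cycles = mask >> 1;

	uint64_t ns      = (overflow_cycles * mult) >> shift;
	uint64_t jiffies = ns / (NSEC_PER_SEC / HZ);

	printf("check every %llu jiffies (~%llu s)\n",
	       (unsigned long long)jiffies,
	       (unsigned long long)(ns / NSEC_PER_SEC));
	return 0;
}

With these numbers the mult bound wins: the worker fires every ~2199 seconds, far inside the ~78-hour wrap of the 48-bit counter.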
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -113,35 +113,45 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 	return err;
 }
 
-static void mlx5e_qp_set_frag_buf(struct mlx5_frag_buf *buf,
-				  struct mlx5_wq_qp *qp)
+static void mlx5_qp_set_frag_buf(struct mlx5_frag_buf *buf,
+				 struct mlx5_wq_qp *qp)
 {
+	struct mlx5_frag_buf_ctrl *sq_fbc;
 	struct mlx5_frag_buf *rqb, *sqb;
 
-	rqb = &qp->rq.fbc.frag_buf;
+	rqb  = &qp->rq.fbc.frag_buf;
 	*rqb = *buf;
 	rqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq);
-	rqb->npages = 1 << get_order(rqb->size);
+	rqb->npages = DIV_ROUND_UP(rqb->size, PAGE_SIZE);
 
-	sqb = &qp->sq.fbc.frag_buf;
-	*sqb = *buf;
-	sqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq);
-	sqb->npages = 1 << get_order(sqb->size);
+	sq_fbc = &qp->sq.fbc;
+	sqb = &sq_fbc->frag_buf;
+	*sqb = *buf;
+	sqb->size = mlx5_wq_cyc_get_byte_size(&qp->sq);
+	sqb->npages = DIV_ROUND_UP(sqb->size, PAGE_SIZE);
 	sqb->frags += rqb->npages; /* first part is for the rq */
+	if (sq_fbc->strides_offset)
+		sqb->frags--;
 }
 
 int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		      void *qpc, struct mlx5_wq_qp *wq,
 		      struct mlx5_wq_ctrl *wq_ctrl)
 {
+	u32 sq_strides_offset;
 	int err;
 
 	mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
 		      MLX5_GET(qpc, qpc, log_rq_size),
 		      &wq->rq.fbc);
-	mlx5_fill_fbc(ilog2(MLX5_SEND_WQE_BB),
-		      MLX5_GET(qpc, qpc, log_sq_size),
-		      &wq->sq.fbc);
+
+	sq_strides_offset =
+		((wq->rq.fbc.frag_sz_m1 + 1) % PAGE_SIZE) / MLX5_SEND_WQE_BB;
+
+	mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB),
+			     MLX5_GET(qpc, qpc, log_sq_size),
+			     sq_strides_offset,
+			     &wq->sq.fbc);
 
 	err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
 	if (err) {
@@ -156,7 +166,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		goto err_db_free;
 	}
 
-	mlx5e_qp_set_frag_buf(&wq_ctrl->buf, wq);
+	mlx5_qp_set_frag_buf(&wq_ctrl->buf, wq);
 
 	wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR];
 	wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR];
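With exact page counting, the SQ half of a fragmented QP buffer can begin inside the RQ's last page; sqb->frags must then start one entry early, at the shared page, and strides_offset records where within that page the SQ's strides begin. A simplified byte-based sketch of the bookkeeping (hypothetical sizes; the driver itself derives the offset from frag_sz_m1 in send-WQE strides):

#include <stdio.h>

#define PAGE_SIZE 4096u
#define WQE_BB      64u /* send-WQE basic block, in bytes */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int rq_bytes  = 6144;                               /* 1.5 pages */
	unsigned int rq_npages = DIV_ROUND_UP(rq_bytes, PAGE_SIZE);  /* 2         */

	/* How far, in send strides, the SQ begins inside the page it
	 * shares with the tail of the RQ. */
	unsigned int strides_offset = (rq_bytes % PAGE_SIZE) / WQE_BB; /* 32 */

	/* The SQ's fragment list starts at the shared page, one entry
	 * before the RQ page count would suggest. */
	unsigned int sq_first_frag = rq_npages - (strides_offset ? 1 : 0);

	printf("SQ fragments start at entry %u, %u strides into the page\n",
	       sq_first_frag, strides_offset);
	return 0;
}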
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -358,6 +358,7 @@ struct mlx5_frag_buf_ctrl {
 	struct mlx5_frag_buf	frag_buf;
 	u32			sz_m1;
 	u32			frag_sz_m1;
+	u32			strides_offset;
 	u8			log_sz;
 	u8			log_stride;
 	u8			log_frag_strides;
@@ -983,14 +984,22 @@ static inline u32 mlx5_base_mkey(const u32 key)
 	return key & 0xffffff00u;
 }
 
-static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
-				 struct mlx5_frag_buf_ctrl *fbc)
+static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz,
+					u32 strides_offset,
+					struct mlx5_frag_buf_ctrl *fbc)
 {
 	fbc->log_stride = log_stride;
 	fbc->log_sz = log_sz;
 	fbc->sz_m1 = (1 << fbc->log_sz) - 1;
 	fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride;
 	fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1;
+	fbc->strides_offset = strides_offset;
+}
+
+static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
+				 struct mlx5_frag_buf_ctrl *fbc)
+{
+	mlx5_fill_fbc_offset(log_stride, log_sz, 0, fbc);
 }
 
 static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
@@ -1004,7 +1013,10 @@ static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
 static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
 					  u32 ix)
 {
-	unsigned int frag = (ix >> fbc->log_frag_strides);
+	unsigned int frag;
+
+	ix += fbc->strides_offset;
+	frag = ix >> fbc->log_frag_strides;
 
 	return fbc->frag_buf.frags[frag].buf +
 	       ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
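mlx5_frag_buf_get_wqe() previously derived the fragment index from the WQE index alone, which goes wrong once a queue starts mid-fragment; adding strides_offset first makes both the fragment number and the stride within it come out relative to the queue's true start. A worked sketch of the index math (hypothetical constants: 64 strides per fragment):

#include <stdio.h>

#define LOG_FRAG_STRIDES 6 /* 64 strides per fragment */
#define FRAG_SZ_M1 ((1u << LOG_FRAG_STRIDES) - 1)

static void locate(unsigned int ix, unsigned int strides_offset)
{
	ix += strides_offset; /* shift into the shared first page */
	printf("frag %u, stride-in-frag %u\n",
	       ix >> LOG_FRAG_STRIDES, ix & FRAG_SZ_M1);
}

int main(void)
{
	/* A queue that begins 16 strides into a page already half-used
	 * by another queue: */
	locate(0, 16);  /* frag 0, stride 16                          */
	locate(60, 16); /* frag 1, stride 12: crosses to the next page */
	return 0;
}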