net/mlx5e: Expand XPS cpumask to cover all online cpus
Currently we have one cpu in the XPS cpumask per tx queue. This is good enough for the default configuration, where there is a tx queue per cpu. However, once the configuration changes to use fewer tx queues, part of the cpus are not XPS-mapped, so the select-queue decision falls back to hash calculation and balancing is not guaranteed.

Expand the XPS cpumask to enable using all cpus even when the number of tx queues is smaller than the number of cpus.

Signed-off-by: Moshe Shemesh <moshe@mellanox.com>
Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
This commit is contained in:
parent 79d356ef2c
commit 149e566fef
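For illustration, here is a minimal userspace sketch (not part of the commit) of the stride walk the new mlx5e_alloc_xps_cpumask() helper performs below: channel ix claims completion vectors ix, ix + num_channels, ix + 2*num_channels, and so on. It assumes, for simplicity, that completion vector i is affinitized to cpu i, the typical default spread; in the driver the cpu actually comes from cpumask_first() of the IRQ's affinity mask.

/*
 * Illustration only, not part of the commit: simulate how each
 * channel's XPS cpumask is built when there are fewer tx queues
 * (channels) than cpus. ASSUMPTION: completion vector i is
 * affinitized to cpu i (the usual default one-to-one spread).
 */
#include <stdio.h>

int main(void)
{
	int num_comp_vectors = 8;	/* e.g. one IRQ per online cpu */
	int num_channels = 4;		/* fewer tx queues than cpus */

	for (int ix = 0; ix < num_channels; ix++) {
		printf("channel %d -> cpus:", ix);
		/* same stride walk as mlx5e_alloc_xps_cpumask() */
		for (int irq = ix; irq < num_comp_vectors; irq += num_channels)
			printf(" %d", irq);	/* cpu == irq by assumption */
		printf("\n");
	}
	return 0;
}

With 8 cpus and 4 channels this prints "channel 0 -> cpus: 0 4", "channel 1 -> cpus: 1 5", and so on: every online cpu ends up XPS-mapped to exactly one tx queue, so transmit selection never falls back to hashing.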
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -638,6 +638,7 @@ struct mlx5e_channel {
 	struct hwtstamp_config    *tstamp;
 	int                        ix;
 	int                        cpu;
+	cpumask_var_t              xps_cpumask;
 };
 
 struct mlx5e_channels {
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1949,6 +1949,29 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
 	return err;
 }
 
+static int mlx5e_alloc_xps_cpumask(struct mlx5e_channel *c,
+				   struct mlx5e_params *params)
+{
+	int num_comp_vectors = mlx5_comp_vectors_count(c->mdev);
+	int irq;
+
+	if (!zalloc_cpumask_var(&c->xps_cpumask, GFP_KERNEL))
+		return -ENOMEM;
+
+	for (irq = c->ix; irq < num_comp_vectors; irq += params->num_channels) {
+		int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(c->mdev, irq));
+
+		cpumask_set_cpu(cpu, c->xps_cpumask);
+	}
+
+	return 0;
+}
+
+static void mlx5e_free_xps_cpumask(struct mlx5e_channel *c)
+{
+	free_cpumask_var(c->xps_cpumask);
+}
+
 static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 			      struct mlx5e_params *params,
 			      struct mlx5e_channel_param *cparam,
@@ -1981,9 +2004,12 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 	c->num_tc   = params->num_tc;
 	c->xdp      = !!params->xdp_prog;
 	c->stats    = &priv->channel_stats[ix].ch;
-
 	c->irq_desc = irq_to_desc(irq);
 
+	err = mlx5e_alloc_xps_cpumask(c, params);
+	if (err)
+		goto err_free_channel;
+
 	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
 
 	err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq);
@@ -2066,6 +2092,9 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 
 err_napi_del:
 	netif_napi_del(&c->napi);
+	mlx5e_free_xps_cpumask(c);
+
+err_free_channel:
 	kvfree(c);
 
 	return err;
@@ -2078,7 +2107,7 @@ static void mlx5e_activate_channel(struct mlx5e_channel *c)
 	for (tc = 0; tc < c->num_tc; tc++)
 		mlx5e_activate_txqsq(&c->sq[tc]);
 	mlx5e_activate_rq(&c->rq);
-	netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix);
+	netif_set_xps_queue(c->netdev, c->xps_cpumask, c->ix);
 }
 
 static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
@@ -2106,6 +2135,7 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
 	mlx5e_close_tx_cqs(c);
 	mlx5e_close_cq(&c->icosq.cq);
 	netif_napi_del(&c->napi);
+	mlx5e_free_xps_cpumask(c);
 
 	kvfree(c);
 }