mlxsw: spectrum_cnt: Move sub_pools under per-instance pool struct

Currently, a single global static array of sub-pools is shared by all
driver instances. Make the sub-pools per-instance, as multiple
instances of the mlxsw driver can have different sub-pool values.

Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: Ido Schimmel <idosch@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author: Jiri Pirko, 2020-03-18 15:48:51 +02:00 (committed by David S. Miller)
parent 4d21ed2e3d
commit c33fbe949f
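Editor's note: the diff below folds the formerly global sub-pool array into the per-instance pool as a flexible array member, sized with struct_size() and seeded by memcpy() from the static template. The following is a minimal standalone sketch of that pattern in plain userspace C; the struct layout, helper names and values here are illustrative only, not the driver's actual code.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct sub_pool {
		unsigned int bank_count;
		unsigned int entry_size;
	};

	struct counter_pool {
		unsigned int sub_pools_count;
		struct sub_pool sub_pools[];	/* flexible array member */
	};

	/* Static template shared by all instances; never written to. */
	static const struct sub_pool sub_pool_template[] = {
		{ .bank_count = 6 },
		{ .bank_count = 2 },
	};

	static struct counter_pool *counter_pool_create(void)
	{
		unsigned int count = sizeof(sub_pool_template) / sizeof(sub_pool_template[0]);
		struct counter_pool *pool;

		/* Userspace stand-in for kzalloc(struct_size(pool, sub_pools, count), ...). */
		pool = calloc(1, sizeof(*pool) + count * sizeof(pool->sub_pools[0]));
		if (!pool)
			return NULL;

		/* Each instance gets its own writable copy of the template. */
		memcpy(pool->sub_pools, sub_pool_template,
		       count * sizeof(pool->sub_pools[0]));
		pool->sub_pools_count = count;
		return pool;
	}

	int main(void)
	{
		struct counter_pool *pool = counter_pool_create();

		if (!pool)
			return 1;
		/* Per-instance state can now diverge, e.g. entry sizes read from HW. */
		pool->sub_pools[0].entry_size = 2;
		printf("sub-pools: %u, first bank_count: %u\n",
		       pool->sub_pools_count, pool->sub_pools[0].bank_count);
		free(pool);
		return 0;
	}

Here calloc(1, sizeof(*pool) + count * sizeof(pool->sub_pools[0])) plays the role the kernel's struct_size() helper plays in the patch: one allocation covering the fixed header plus count trailing array elements.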


@@ -18,10 +18,11 @@ struct mlxsw_sp_counter_pool {
 	unsigned int pool_size;
 	unsigned long *usage; /* Usage bitmap */
 	spinlock_t counter_pool_lock; /* Protects counter pool allocations */
-	struct mlxsw_sp_counter_sub_pool *sub_pools;
+	unsigned int sub_pools_count;
+	struct mlxsw_sp_counter_sub_pool sub_pools[];
 };
 
-static struct mlxsw_sp_counter_sub_pool mlxsw_sp_counter_sub_pools[] = {
+static const struct mlxsw_sp_counter_sub_pool mlxsw_sp_counter_sub_pools[] = {
 	[MLXSW_SP_COUNTER_SUB_POOL_FLOW] = {
 		.bank_count = 6,
 	},
@@ -32,6 +33,7 @@ static struct mlxsw_sp_counter_sub_pool mlxsw_sp_counter_sub_pools[] = {
 
 static int mlxsw_sp_counter_pool_validate(struct mlxsw_sp *mlxsw_sp)
 {
+	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
 	unsigned int total_bank_config = 0;
 	unsigned int pool_size;
 	unsigned int bank_size;
@@ -40,8 +42,8 @@ static int mlxsw_sp_counter_pool_validate(struct mlxsw_sp *mlxsw_sp)
 	pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
 	bank_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_BANK_SIZE);
 	/* Check config is valid, no bank over subscription */
-	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++)
-		total_bank_config += mlxsw_sp_counter_sub_pools[i].bank_count;
+	for (i = 0; i < pool->sub_pools_count; i++)
+		total_bank_config += pool->sub_pools[i].bank_count;
 	if (total_bank_config > pool_size / bank_size + 1)
 		return -EINVAL;
 	return 0;
@@ -49,16 +51,17 @@ static int mlxsw_sp_counter_pool_validate(struct mlxsw_sp *mlxsw_sp)
 
 static int mlxsw_sp_counter_sub_pools_prepare(struct mlxsw_sp *mlxsw_sp)
 {
+	struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;
 	struct mlxsw_sp_counter_sub_pool *sub_pool;
 
 	/* Prepare generic flow pool*/
-	sub_pool = &mlxsw_sp_counter_sub_pools[MLXSW_SP_COUNTER_SUB_POOL_FLOW];
+	sub_pool = &pool->sub_pools[MLXSW_SP_COUNTER_SUB_POOL_FLOW];
 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_SIZE_PACKETS_BYTES))
 		return -EIO;
 	sub_pool->entry_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
 						  COUNTER_SIZE_PACKETS_BYTES);
 	/* Prepare erif pool*/
-	sub_pool = &mlxsw_sp_counter_sub_pools[MLXSW_SP_COUNTER_SUB_POOL_RIF];
+	sub_pool = &pool->sub_pools[MLXSW_SP_COUNTER_SUB_POOL_RIF];
 	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_SIZE_ROUTER_BASIC))
 		return -EIO;
 	sub_pool->entry_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
@@ -68,6 +71,7 @@ static int mlxsw_sp_counter_sub_pools_prepare(struct mlxsw_sp *mlxsw_sp)
 
 int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
 {
+	unsigned int sub_pools_count = ARRAY_SIZE(mlxsw_sp_counter_sub_pools);
 	struct mlxsw_sp_counter_sub_pool *sub_pool;
 	struct mlxsw_sp_counter_pool *pool;
 	unsigned int base_index;
@@ -80,18 +84,23 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
 	    !MLXSW_CORE_RES_VALID(mlxsw_sp->core, COUNTER_BANK_SIZE))
 		return -EIO;
 
+	pool = kzalloc(struct_size(pool, sub_pools, sub_pools_count),
+		       GFP_KERNEL);
+	if (!pool)
+		return -ENOMEM;
+	mlxsw_sp->counter_pool = pool;
+	memcpy(pool->sub_pools, mlxsw_sp_counter_sub_pools,
+	       sub_pools_count * sizeof(*sub_pool));
+	pool->sub_pools_count = sub_pools_count;
+	spin_lock_init(&pool->counter_pool_lock);
+
 	err = mlxsw_sp_counter_pool_validate(mlxsw_sp);
 	if (err)
-		return err;
+		goto err_pool_validate;
 
 	err = mlxsw_sp_counter_sub_pools_prepare(mlxsw_sp);
 	if (err)
-		return err;
-
-	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
-	if (!pool)
-		return -ENOMEM;
-	spin_lock_init(&pool->counter_pool_lock);
+		goto err_sub_pools_prepare;
 
 	pool->pool_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, COUNTER_POOL_SIZE);
 	map_size = BITS_TO_LONGS(pool->pool_size) * sizeof(unsigned long);
@@ -104,12 +113,11 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
 		goto err_usage_alloc;
 	}
 
-	pool->sub_pools = mlxsw_sp_counter_sub_pools;
 	/* Allocation is based on bank count which should be
 	 * specified for each sub pool statically.
 	 */
 	base_index = 0;
-	for (i = 0; i < ARRAY_SIZE(mlxsw_sp_counter_sub_pools); i++) {
+	for (i = 0; i < pool->sub_pools_count; i++) {
 		sub_pool = &pool->sub_pools[i];
 		sub_pool->size = sub_pool->bank_count * bank_size;
 		sub_pool->base_index = base_index;
@@ -119,10 +127,11 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
 		sub_pool->size = pool->pool_size - sub_pool->base_index;
 	}
 
-	mlxsw_sp->counter_pool = pool;
 	return 0;
 
 err_usage_alloc:
+err_sub_pools_prepare:
+err_pool_validate:
 	kfree(pool);
 	return err;
 }
@@ -147,7 +156,7 @@ int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp,
 	unsigned int stop_index;
 	int i, err;
 
-	sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
+	sub_pool = &pool->sub_pools[sub_pool_id];
 	stop_index = sub_pool->base_index + sub_pool->size;
 	entry_index = sub_pool->base_index;
 
@@ -186,7 +195,7 @@ void mlxsw_sp_counter_free(struct mlxsw_sp *mlxsw_sp,
 	if (WARN_ON(counter_index >= pool->pool_size))
 		return;
 
-	sub_pool = &mlxsw_sp_counter_sub_pools[sub_pool_id];
+	sub_pool = &pool->sub_pools[sub_pool_id];
 	spin_lock(&pool->counter_pool_lock);
 	for (i = 0; i < sub_pool->entry_size; i++)
 		__clear_bit(counter_index + i, pool->usage);
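
Editor's note: because the pool is now allocated before it is validated and prepared, the early "return err" statements in mlxsw_sp_counter_pool_init() become gotos that free the allocation, hence the stacked err_sub_pools_prepare/err_pool_validate labels above. Below is a compressed, hypothetical userspace illustration of that unwind pattern; all names are made up and only the control flow mirrors the change.

	#include <stdlib.h>

	struct pool {
		unsigned int size;
	};

	/* Hypothetical stand-ins for the validate/prepare steps in the diff. */
	static int pool_validate(const struct pool *p)
	{
		return p->size ? 0 : -1;
	}

	static int pool_prepare(struct pool *p)
	{
		(void)p;
		return 0;
	}

	static int pool_init(struct pool **out)
	{
		struct pool *p;
		int err;

		p = calloc(1, sizeof(*p));	/* allocated first ... */
		if (!p)
			return -1;
		p->size = 16;

		err = pool_validate(p);		/* ... so later failures must free it */
		if (err)
			goto err_pool_validate;

		err = pool_prepare(p);
		if (err)
			goto err_pool_prepare;

		*out = p;
		return 0;

	err_pool_prepare:
	err_pool_validate:
		free(p);
		return err;
	}

	int main(void)
	{
		struct pool *p = NULL;
		int err = pool_init(&p);

		free(p);
		return err ? 1 : 0;
	}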