// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/dcbnl.h>
#include <linux/if_ether.h>
#include <linux/list.h>
#include <linux/netlink.h>

#include "spectrum.h"
#include "core.h"
#include "port.h"
#include "reg.h"

/* Cached per-pool configuration (SBPR register). */
struct mlxsw_sp_sb_pr {
	enum mlxsw_reg_sbpr_mode mode;
	u32 size;
	u8 freeze_mode:1,
	   freeze_size:1;
};

/* Current and maximum ("watermark") occupancy. */
struct mlxsw_cp_sb_occ {
	u32 cur;
	u32 max;
};

/* Cached per-port, per-PG/TC quota configuration (SBCM register). */
struct mlxsw_sp_sb_cm {
	u32 min_buff;
	u32 max_buff;
	u16 pool_index;
	struct mlxsw_cp_sb_occ occ;
	u8 freeze_pool:1,
	   freeze_thresh:1;
};

/* Sentinel sizes: "infinite" makes a pool as large as the whole shared
 * buffer, "rest" gives it whatever the explicitly sized pools leave over.
 */
#define MLXSW_SP_SB_INFI -1U
#define MLXSW_SP_SB_REST -2U

/* Cached per-port, per-pool quota configuration (SBPM register). */
struct mlxsw_sp_sb_pm {
	u32 min_buff;
	u32 max_buff;
	struct mlxsw_cp_sb_occ occ;
};

struct mlxsw_sp_sb_mm {
	u32 min_buff;
	u32 max_buff;
	u16 pool_index;
};

/* mlxsw uses devlink pool indices throughout, and only translates them to
 * the firmware's pool numbers and ingress/egress directions when actually
 * packing the registers. The descriptor table below, indexed by pool index,
 * supports that translation; the mapping is not neatly mathematical, notably
 * because the egress MC pool is included so that it can be referenced from
 * mlxsw_sp_sb_cms_egress.
 */
struct mlxsw_sp_sb_pool_des {
	enum mlxsw_reg_sbxx_dir dir;
	u8 pool;
};

#define MLXSW_SP_SB_POOL_ING 0
#define MLXSW_SP_SB_POOL_EGR 4
#define MLXSW_SP_SB_POOL_EGR_MC 8
#define MLXSW_SP_SB_POOL_ING_CPU 9
#define MLXSW_SP_SB_POOL_EGR_CPU 10

static const struct mlxsw_sp_sb_pool_des mlxsw_sp1_sb_pool_dess[] = {
	{MLXSW_REG_SBXX_DIR_INGRESS, 0},
	{MLXSW_REG_SBXX_DIR_INGRESS, 1},
	{MLXSW_REG_SBXX_DIR_INGRESS, 2},
	{MLXSW_REG_SBXX_DIR_INGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 0},
	{MLXSW_REG_SBXX_DIR_EGRESS, 1},
	{MLXSW_REG_SBXX_DIR_EGRESS, 2},
	{MLXSW_REG_SBXX_DIR_EGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 15},
	{MLXSW_REG_SBXX_DIR_INGRESS, 4},
	{MLXSW_REG_SBXX_DIR_EGRESS, 4},
};
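
/* A worked example of the translation, using the Spectrum-1 table above:
 * devlink pool index MLXSW_SP_SB_POOL_EGR (4) resolves through
 * mlxsw_sp1_sb_pool_dess[4] to {MLXSW_REG_SBXX_DIR_EGRESS, 0}, i.e.
 * firmware egress pool 0, while MLXSW_SP_SB_POOL_EGR_MC (8) resolves to
 * {MLXSW_REG_SBXX_DIR_EGRESS, 15}, the multicast pool.
 */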

static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = {
	{MLXSW_REG_SBXX_DIR_INGRESS, 0},
	{MLXSW_REG_SBXX_DIR_INGRESS, 1},
	{MLXSW_REG_SBXX_DIR_INGRESS, 2},
	{MLXSW_REG_SBXX_DIR_INGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 0},
	{MLXSW_REG_SBXX_DIR_EGRESS, 1},
	{MLXSW_REG_SBXX_DIR_EGRESS, 2},
	{MLXSW_REG_SBXX_DIR_EGRESS, 3},
	{MLXSW_REG_SBXX_DIR_EGRESS, 15},
	{MLXSW_REG_SBXX_DIR_INGRESS, 4},
	{MLXSW_REG_SBXX_DIR_EGRESS, 4},
};

#define MLXSW_SP_SB_ING_TC_COUNT 8
#define MLXSW_SP_SB_EG_TC_COUNT 16

struct mlxsw_sp_sb_port {
	struct mlxsw_sp_sb_cm ing_cms[MLXSW_SP_SB_ING_TC_COUNT];
	struct mlxsw_sp_sb_cm eg_cms[MLXSW_SP_SB_EG_TC_COUNT];
	struct mlxsw_sp_sb_pm *pms;
};

struct mlxsw_sp_sb {
	struct mlxsw_sp_sb_pr *prs;
	struct mlxsw_sp_sb_port *ports;
	u32 cell_size;
	u32 max_headroom_cells;
	u64 sb_size;
};

struct mlxsw_sp_sb_vals {
	unsigned int pool_count;
	const struct mlxsw_sp_sb_pool_des *pool_dess;
	const struct mlxsw_sp_sb_pm *pms;
	const struct mlxsw_sp_sb_pm *pms_cpu;
	const struct mlxsw_sp_sb_pr *prs;
	const struct mlxsw_sp_sb_mm *mms;
	const struct mlxsw_sp_sb_cm *cms_ingress;
	const struct mlxsw_sp_sb_cm *cms_egress;
	const struct mlxsw_sp_sb_cm *cms_cpu;
	unsigned int mms_count;
	unsigned int cms_ingress_count;
	unsigned int cms_egress_count;
	unsigned int cms_cpu_count;
};
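
/* A partial sketch of how a per-ASIC instance of this structure is wired up
 * from the tables in this file (the CM, PM and MM tables are omitted, and
 * the instance name is assumed for illustration):
 *
 *	const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals = {
 *		.pool_count	= ARRAY_SIZE(mlxsw_sp1_sb_pool_dess),
 *		.pool_dess	= mlxsw_sp1_sb_pool_dess,
 *		.prs		= mlxsw_sp1_sb_prs,
 *	};
 */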

u32 mlxsw_sp_cells_bytes(const struct mlxsw_sp *mlxsw_sp, u32 cells)
{
	return mlxsw_sp->sb->cell_size * cells;
}

u32 mlxsw_sp_bytes_cells(const struct mlxsw_sp *mlxsw_sp, u32 bytes)
{
	return DIV_ROUND_UP(bytes, mlxsw_sp->sb->cell_size);
}
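
/* The shared buffer is allocated in fixed-size cells, so byte counts round
 * up. For illustration only (the real cell size is read from the device at
 * init): with a hypothetical 96-byte cell, mlxsw_sp_bytes_cells(sp, 1500)
 * yields DIV_ROUND_UP(1500, 96) = 16 cells, and converting back with
 * mlxsw_sp_cells_bytes(sp, 16) gives 1536 bytes, not the original 1500.
 */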

u32 mlxsw_sp_sb_max_headroom_cells(const struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp->sb->max_headroom_cells;
}

static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
						 u16 pool_index)
{
	return &mlxsw_sp->sb->prs[pool_index];
}

static bool mlxsw_sp_sb_cm_exists(u8 pg_buff, enum mlxsw_reg_sbxx_dir dir)
{
	if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
		return pg_buff < MLXSW_SP_SB_ING_TC_COUNT;
	else
		return pg_buff < MLXSW_SP_SB_EG_TC_COUNT;
}
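
/* Only 8 ingress CM contexts (one per PG) are cached per port, against 16
 * egress ones (one per traffic class), so callers check
 * mlxsw_sp_sb_cm_exists() before touching the cache below.
 */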

static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
						 u8 local_port, u8 pg_buff,
						 enum mlxsw_reg_sbxx_dir dir)
{
	struct mlxsw_sp_sb_port *sb_port = &mlxsw_sp->sb->ports[local_port];

	WARN_ON(!mlxsw_sp_sb_cm_exists(pg_buff, dir));
	if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
		return &sb_port->ing_cms[pg_buff];
	else
		return &sb_port->eg_cms[pg_buff];
}

static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
						 u8 local_port, u16 pool_index)
{
	return &mlxsw_sp->sb->ports[local_port].pms[pool_index];
}

static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
				enum mlxsw_reg_sbpr_mode mode,
				u32 size, bool infi_size)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpr_pl[MLXSW_REG_SBPR_LEN];
	struct mlxsw_sp_sb_pr *pr;
	int err;

	mlxsw_reg_sbpr_pack(sbpr_pl, des->pool, des->dir, mode,
			    size, infi_size);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
	if (err)
		return err;

	if (infi_size)
		size = mlxsw_sp_bytes_cells(mlxsw_sp, mlxsw_sp->sb->sb_size);
	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
	pr->mode = mode;
	pr->size = size;
	return 0;
}
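
/* mlxsw_sp_sb_pr_write() above, like the CM and PM writers below, packs the
 * register with the translated (dir, pool) pair, writes it, and only updates
 * the driver-side cache once the write has succeeded. A hypothetical call,
 *
 *	err = mlxsw_sp_sb_pr_write(mlxsw_sp, MLXSW_SP_SB_POOL_EGR,
 *				   MLXSW_REG_SBPR_MODE_DYNAMIC, 0, true);
 *
 * would configure the default egress pool as "infinite" and cache its size
 * as the whole shared buffer converted to cells, rather than the 0 passed in.
 */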

static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u8 pg_buff, u32 min_buff, u32 max_buff,
				bool infi_max, u16 pool_index)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbcm_pl[MLXSW_REG_SBCM_LEN];
	struct mlxsw_sp_sb_cm *cm;
	int err;

	mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, des->dir,
			    min_buff, max_buff, infi_max, des->pool);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
	if (err)
		return err;

	if (mlxsw_sp_sb_cm_exists(pg_buff, des->dir)) {
		if (infi_max)
			max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
							mlxsw_sp->sb->sb_size);

		cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff,
					des->dir);
		cm->min_buff = min_buff;
		cm->max_buff = max_buff;
		cm->pool_index = pool_index;
	}
	return 0;
}

static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				u16 pool_index, u32 min_buff, u32 max_buff)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;
	int err;

	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir, false,
			    min_buff, max_buff);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
	if (err)
		return err;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
	pm->min_buff = min_buff;
	pm->max_buff = max_buff;
	return 0;
}

static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u16 pool_index, struct list_head *bulk_list)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];

	if (local_port == MLXSW_PORT_CPU_PORT &&
	    des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
		return 0;

	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
			    true, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list, NULL, 0);
}

static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbpm_pl, size_t sbpm_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;

	mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
}

static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				    u16 pool_index, struct list_head *bulk_list)
{
	const struct mlxsw_sp_sb_pool_des *des =
		&mlxsw_sp->sb_vals->pool_dess[pool_index];
	char sbpm_pl[MLXSW_REG_SBPM_LEN];
	struct mlxsw_sp_sb_pm *pm;

	if (local_port == MLXSW_PORT_CPU_PORT &&
	    des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
		return 0;

	pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool_index);
	mlxsw_reg_sbpm_pack(sbpm_pl, local_port, des->pool, des->dir,
			    false, 0, 0);
	return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
				     bulk_list,
				     mlxsw_sp_sb_pm_occ_query_cb,
				     (unsigned long) pm);
}
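
/* Occupancy is read asynchronously: the two helpers above queue a query on
 * bulk_list, and the result lands in the cached struct mlxsw_sp_sb_pm via
 * mlxsw_sp_sb_pm_occ_query_cb() once the transaction completes. A caller
 * would look roughly like this (a sketch, error handling omitted):
 *
 *	LIST_HEAD(bulk_list);
 *
 *	err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, pool_index,
 *				       &bulk_list);
 *	...
 *	mlxsw_reg_trans_bulk_wait(&bulk_list);
 *
 * after which pm->occ.cur and pm->occ.max can be read from the cache.
 */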

/* 1/4 of a headroom necessary for 100Gbps port and 100m cable. */
#define MLXSW_SP_PB_HEADROOM 25632
#define MLXSW_SP_PB_UNUSED 8

static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	const u32 pbs[] = {
		[0] = MLXSW_SP_PB_HEADROOM * mlxsw_sp_port->mapping.width,
		[9] = MLXSW_PORT_MAX_MTU,
	};
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char pbmc_pl[MLXSW_REG_PBMC_LEN];
	int i;

	mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
			    0xffff, 0xffff / 2);
	for (i = 0; i < ARRAY_SIZE(pbs); i++) {
		u16 size = mlxsw_sp_bytes_cells(mlxsw_sp, pbs[i]);

		if (i == MLXSW_SP_PB_UNUSED)
			continue;
		mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, size);
	}
	mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
					 MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
}
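
/* Worked example of the headroom sizing above: MLXSW_SP_PB_HEADROOM is 1/4
 * of the headroom a 100Gbps port with a 100m cable needs, and buffer 0
 * scales with the port's lane count, so a 4-lane port gets
 * 4 * 25632 = 102528 bytes, which mlxsw_sp_bytes_cells() then rounds up to
 * whole cells.
 */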

/* During transmission, the skb's priority is used to map the skb to a
 * traffic class, and by default Linux maps all priorities to traffic
 * class 0. In the device, the skb's priority is modelled as the switch
 * priority, assigned according to the packet's PCP value and ingress port;
 * at ingress, the packet is directed to a priority group (PG) buffer in the
 * port's headroom according to its switch priority. Since grouping certain
 * priorities into a traffic class at egress implies they should be grouped
 * the same at ingress, a PG is treated as the ingress counterpart of an
 * egress traffic class; accordingly, map all switch priorities to PG0, in
 * line with the Linux defaults for traffic class mapping.
 */
static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	char pptb_pl[MLXSW_REG_PPTB_LEN];
	int i;

	mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
		mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
	return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
			       pptb_pl);
}

static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
	if (err)
		return err;
	return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
}

static int mlxsw_sp_sb_port_init(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_sb_port *sb_port)
{
	struct mlxsw_sp_sb_pm *pms;

	pms = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*pms),
		      GFP_KERNEL);
	if (!pms)
		return -ENOMEM;
	sb_port->pms = pms;
	return 0;
}

static void mlxsw_sp_sb_port_fini(struct mlxsw_sp_sb_port *sb_port)
{
	kfree(sb_port->pms);
}

static int mlxsw_sp_sb_ports_init(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_sb_pr *prs;
	int i;
	int err;

	mlxsw_sp->sb->ports = kcalloc(max_ports,
				      sizeof(struct mlxsw_sp_sb_port),
				      GFP_KERNEL);
	if (!mlxsw_sp->sb->ports)
		return -ENOMEM;

	prs = kcalloc(mlxsw_sp->sb_vals->pool_count, sizeof(*prs),
		      GFP_KERNEL);
	if (!prs) {
		err = -ENOMEM;
		goto err_alloc_prs;
	}
	mlxsw_sp->sb->prs = prs;

	for (i = 0; i < max_ports; i++) {
		err = mlxsw_sp_sb_port_init(mlxsw_sp, &mlxsw_sp->sb->ports[i]);
		if (err)
			goto err_sb_port_init;
	}

	return 0;

err_sb_port_init:
	for (i--; i >= 0; i--)
		mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
	kfree(mlxsw_sp->sb->prs);
err_alloc_prs:
	kfree(mlxsw_sp->sb->ports);
	return err;
}

static void mlxsw_sp_sb_ports_fini(struct mlxsw_sp *mlxsw_sp)
{
	int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	int i;

	for (i = max_ports - 1; i >= 0; i--)
		mlxsw_sp_sb_port_fini(&mlxsw_sp->sb->ports[i]);
	kfree(mlxsw_sp->sb->prs);
	kfree(mlxsw_sp->sb->ports);
}

#define MLXSW_SP_SB_PR(_mode, _size)	\
	{				\
		.mode = _mode,		\
		.size = _size,		\
	}

#define MLXSW_SP_SB_PR_EXT(_mode, _size, _freeze_mode, _freeze_size)	\
	{								\
		.mode = _mode,						\
		.size = _size,						\
		.freeze_mode = _freeze_mode,				\
		.freeze_size = _freeze_size,				\
	}

#define MLXSW_SP1_SB_PR_CPU_SIZE (256 * 1000)

/* Order according to mlxsw_sp1_sb_pool_dess */
static const struct mlxsw_sp_sb_pr mlxsw_sp1_sb_prs[] = {
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST,
			   true, false),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
			   true, true),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP1_SB_PR_CPU_SIZE, true, false),
};
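
/* Read together with mlxsw_sp1_sb_pool_dess: entry 0 is the default ingress
 * pool and entry 4 the default egress pool, both sized MLXSW_SP_SB_REST so
 * they split whatever the explicitly sized pools leave over; entry 8 is the
 * "infinite" egress MC pool; entries 9 and 10 are the CPU port pools.
 */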

#define MLXSW_SP2_SB_PR_CPU_SIZE	(256 * 1000)

/* Order according to mlxsw_sp2_sb_pool_dess */
static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC, MLXSW_SP_SB_REST,
			   true, false),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI,
			   true, true),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP2_SB_PR_CPU_SIZE, true, false),
	MLXSW_SP_SB_PR_EXT(MLXSW_REG_SBPR_MODE_DYNAMIC,
			   MLXSW_SP2_SB_PR_CPU_SIZE, true, false),
};
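
/* Program all pool registers. Pools sized MLXSW_SP_SB_REST share the
 * buffer space that remains in their direction once the explicitly
 * sized pools are accounted for; MLXSW_SP_SB_INFI pools are programmed
 * as infinite.
 */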
static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
				const struct mlxsw_sp_sb_pr *prs,
				const struct mlxsw_sp_sb_pool_des *pool_dess,
				size_t prs_len)
{
	/* Round down, unlike mlxsw_sp_bytes_cells(). */
	u32 sb_cells = div_u64(mlxsw_sp->sb->sb_size, mlxsw_sp->sb->cell_size);
	u32 rest_cells[2] = {sb_cells, sb_cells};
	int i;
	int err;

	/* Calculate how much space to give to the "REST" pools in either
	 * direction.
	 */
	for (i = 0; i < prs_len; i++) {
		enum mlxsw_reg_sbxx_dir dir = pool_dess[i].dir;
		u32 size = prs[i].size;
		u32 size_cells;

		if (size == MLXSW_SP_SB_INFI || size == MLXSW_SP_SB_REST)
			continue;

		size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
		if (WARN_ON_ONCE(size_cells > rest_cells[dir]))
			continue;

		rest_cells[dir] -= size_cells;
	}

	for (i = 0; i < prs_len; i++) {
		u32 size = prs[i].size;
		u32 size_cells;

		if (size == MLXSW_SP_SB_INFI) {
			err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
						   0, true);
		} else if (size == MLXSW_SP_SB_REST) {
			size_cells = rest_cells[pool_dess[i].dir];
			err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
						   size_cells, false);
		} else {
			size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size);
			err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, prs[i].mode,
						   size_cells, false);
		}
		if (err)
			return err;
	}
	return 0;
}
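
/* Initializer for a CM (per-{port, PG/TC} quota) entry: minimum
 * guaranteed buffer, maximum quota and the backing pool. The _ING and
 * _EGR variants bind to the default ingress and egress pools; _EGR_MC
 * binds to the multicast pool and freezes both the binding and the
 * threshold.
 */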
#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = _pool,			\
	}

#define MLXSW_SP_SB_CM_ING(_min_buff, _max_buff)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = MLXSW_SP_SB_POOL_ING,	\
	}

#define MLXSW_SP_SB_CM_EGR(_min_buff, _max_buff)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = MLXSW_SP_SB_POOL_EGR,	\
	}

#define MLXSW_SP_SB_CM_EGR_MC(_min_buff, _max_buff)	\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = MLXSW_SP_SB_POOL_EGR_MC,	\
		.freeze_pool = true,			\
		.freeze_thresh = true,			\
	}
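
/* Default ingress quotas, indexed by PG. Index 8 is a placeholder for a
 * PG that does not exist (see __mlxsw_sp_sb_cms_init()); the final
 * entry is bound to the dedicated CPU ingress pool.
 */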
static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_ingress[] = {
	MLXSW_SP_SB_CM_ING(10000, 8),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
	MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU),
};

static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_ingress[] = {
	MLXSW_SP_SB_CM_ING(0, 7),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_CM_ING(0, 0), /* dummy, this PG does not exist */
	MLXSW_SP_SB_CM(10000, 8, MLXSW_SP_SB_POOL_ING_CPU),
};

static const struct mlxsw_sp_sb_cm mlxsw_sp1_sb_cms_egress[] = {
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR(1500, 9),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR(1, 0xff),
};

static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = {
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR(0, 7),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR_MC(0, MLXSW_SP_SB_INFI),
	MLXSW_SP_SB_CM_EGR(1, 0xff),
};
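
/* Egress quotas of the CPU port, one entry per traffic class. */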
#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, MLXSW_SP_SB_POOL_EGR_CPU)

static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_SB_CM(1000, 8, MLXSW_SP_SB_POOL_EGR_CPU),
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
	MLXSW_SP_CPU_PORT_SB_CM,
};

static bool
mlxsw_sp_sb_pool_is_static(struct mlxsw_sp *mlxsw_sp, u16 pool_index)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);

	return pr->mode == MLXSW_REG_SBPR_MODE_STATIC;
}
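
/* Program the CM quotas of one port in one direction. min_buff is given
 * in bytes and converted to cells. max_buff is a byte size only when
 * the backing pool is static; for dynamic pools it is a threshold value
 * and is passed through as-is.
 */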
static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				  enum mlxsw_reg_sbxx_dir dir,
				  const struct mlxsw_sp_sb_cm *cms,
				  size_t cms_len)
{
	const struct mlxsw_sp_sb_vals *sb_vals = mlxsw_sp->sb_vals;
	int i;
	int err;

	for (i = 0; i < cms_len; i++) {
		const struct mlxsw_sp_sb_cm *cm;
		u32 min_buff;
		u32 max_buff;

		if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
			continue; /* PG number 8 does not exist, skip it */
		cm = &cms[i];
		if (WARN_ON(sb_vals->pool_dess[cm->pool_index].dir != dir))
			continue;

		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, cm->min_buff);
		max_buff = cm->max_buff;
		if (max_buff == MLXSW_SP_SB_INFI) {
			err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
						   min_buff, 0,
						   true, cm->pool_index);
		} else {
			if (mlxsw_sp_sb_pool_is_static(mlxsw_sp,
						       cm->pool_index))
				max_buff = mlxsw_sp_bytes_cells(mlxsw_sp,
								max_buff);
			err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i,
						   min_buff, max_buff,
						   false, cm->pool_index);
		}
		if (err)
			return err;
	}
	return 0;
}

static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err;

	err = __mlxsw_sp_sb_cms_init(mlxsw_sp,
				     mlxsw_sp_port->local_port,
				     MLXSW_REG_SBXX_DIR_INGRESS,
				     mlxsw_sp->sb_vals->cms_ingress,
				     mlxsw_sp->sb_vals->cms_ingress_count);
	if (err)
		return err;
	return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
				      mlxsw_sp_port->local_port,
				      MLXSW_REG_SBXX_DIR_EGRESS,
				      mlxsw_sp->sb_vals->cms_egress,
				      mlxsw_sp->sb_vals->cms_egress_count);
}

static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
{
	return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
				      mlxsw_sp->sb_vals->cms_cpu,
				      mlxsw_sp->sb_vals->cms_cpu_count);
}
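
/* Initializer for a PM (per-{port, pool} quota) entry. */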
#define MLXSW_SP_SB_PM(_min_buff, _max_buff)	\
	{					\
		.min_buff = _min_buff,		\
		.max_buff = _max_buff,		\
	}

/* Order according to mlxsw_sp1_sb_pool_dess */
static const struct mlxsw_sp_sb_pm mlxsw_sp1_sb_pms[] = {
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
	MLXSW_SP_SB_PM(10000, 90000),
	MLXSW_SP_SB_PM(0, 8),	/* 50% occupancy */
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};

/* Order according to mlxsw_sp2_sb_pool_dess */
static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = {
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 7),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(10000, 90000),
	MLXSW_SP_SB_PM(0, 8),	/* 50% occupancy */
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};

/* Order according to mlxsw_sp*_sb_pool_dess */
static const struct mlxsw_sp_sb_pm mlxsw_sp_cpu_port_sb_pms[] = {
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, 90000),
	MLXSW_SP_SB_PM(0, 0),
	MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
};
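
/* Program the per-pool quotas of one port. The same min_buff/max_buff
 * conventions apply as for CM entries. Ingress pools can be skipped,
 * which the CPU port makes use of.
 */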
static int mlxsw_sp_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				const struct mlxsw_sp_sb_pm *pms,
				bool skip_ingress)
{
	int i, err;

	for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
		const struct mlxsw_sp_sb_pm *pm = &pms[i];
		const struct mlxsw_sp_sb_pool_des *des;
		u32 max_buff;
		u32 min_buff;

		des = &mlxsw_sp->sb_vals->pool_dess[i];
		if (skip_ingress && des->dir == MLXSW_REG_SBXX_DIR_INGRESS)
			continue;

		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, pm->min_buff);
		max_buff = pm->max_buff;
		if (mlxsw_sp_sb_pool_is_static(mlxsw_sp, i))
			max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, max_buff);
		err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, min_buff,
					   max_buff);
		if (err)
			return err;
	}
	return 0;
}

static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	return mlxsw_sp_sb_pms_init(mlxsw_sp, mlxsw_sp_port->local_port,
				    mlxsw_sp->sb_vals->pms, false);
}

static int mlxsw_sp_cpu_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp_sb_pms_init(mlxsw_sp, 0, mlxsw_sp->sb_vals->pms_cpu,
				    true);
}
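
/* Initializer for an MM (multicast quota, SBMM) entry; all entries are
 * bound to the default egress pool.
 */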
#define MLXSW_SP_SB_MM(_min_buff, _max_buff)		\
	{						\
		.min_buff = _min_buff,			\
		.max_buff = _max_buff,			\
		.pool_index = MLXSW_SP_SB_POOL_EGR,	\
	}

static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
	MLXSW_SP_SB_MM(0, 6),
};

static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
{
	char sbmm_pl[MLXSW_REG_SBMM_LEN];
	int i;
	int err;

	for (i = 0; i < mlxsw_sp->sb_vals->mms_count; i++) {
		const struct mlxsw_sp_sb_pool_des *des;
		const struct mlxsw_sp_sb_mm *mc;
		u32 min_buff;

		mc = &mlxsw_sp->sb_vals->mms[i];
		des = &mlxsw_sp->sb_vals->pool_dess[mc->pool_index];
		/* All pools used by sb_mm's are initialized using dynamic
		 * thresholds, therefore 'max_buff' isn't specified in cells.
		 */
		min_buff = mlxsw_sp_bytes_cells(mlxsw_sp, mc->min_buff);
		mlxsw_reg_sbmm_pack(sbmm_pl, i, min_buff, mc->max_buff,
				    des->pool);
		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
		if (err)
			return err;
	}
	return 0;
}
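
/* Count how many pool descriptors describe ingress pools and how many
 * describe egress pools, for registration with devlink.
 */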
static void mlxsw_sp_pool_count(struct mlxsw_sp *mlxsw_sp,
				u16 *p_ingress_len, u16 *p_egress_len)
{
	int i;

	for (i = 0; i < mlxsw_sp->sb_vals->pool_count; ++i) {
		if (mlxsw_sp->sb_vals->pool_dess[i].dir ==
		    MLXSW_REG_SBXX_DIR_INGRESS)
			(*p_ingress_len)++;
		else
			(*p_egress_len)++;
	}

	WARN(*p_egress_len == 0, "No egress pools\n");
}
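
/* Per-ASIC value sets. Spectrum-1 and Spectrum-2 share the MM and CPU
 * port tables but differ in pool descriptors, pool registers and port
 * quotas.
 */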
const struct mlxsw_sp_sb_vals mlxsw_sp1_sb_vals = {
	.pool_count = ARRAY_SIZE(mlxsw_sp1_sb_pool_dess),
	.pool_dess = mlxsw_sp1_sb_pool_dess,
	.pms = mlxsw_sp1_sb_pms,
	.pms_cpu = mlxsw_sp_cpu_port_sb_pms,
	.prs = mlxsw_sp1_sb_prs,
	.mms = mlxsw_sp_sb_mms,
	.cms_ingress = mlxsw_sp1_sb_cms_ingress,
	.cms_egress = mlxsw_sp1_sb_cms_egress,
	.cms_cpu = mlxsw_sp_cpu_port_sb_cms,
	.mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
	.cms_ingress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_ingress),
	.cms_egress_count = ARRAY_SIZE(mlxsw_sp1_sb_cms_egress),
	.cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
};

const struct mlxsw_sp_sb_vals mlxsw_sp2_sb_vals = {
	.pool_count = ARRAY_SIZE(mlxsw_sp2_sb_pool_dess),
	.pool_dess = mlxsw_sp2_sb_pool_dess,
	.pms = mlxsw_sp2_sb_pms,
	.pms_cpu = mlxsw_sp_cpu_port_sb_pms,
	.prs = mlxsw_sp2_sb_prs,
	.mms = mlxsw_sp_sb_mms,
	.cms_ingress = mlxsw_sp2_sb_cms_ingress,
	.cms_egress = mlxsw_sp2_sb_cms_egress,
	.cms_cpu = mlxsw_sp_cpu_port_sb_cms,
	.mms_count = ARRAY_SIZE(mlxsw_sp_sb_mms),
	.cms_ingress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_ingress),
	.cms_egress_count = ARRAY_SIZE(mlxsw_sp2_sb_cms_egress),
	.cms_cpu_count = ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms),
};
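
/* Validate and cache the cell size and the shared buffer and headroom
 * sizes, allocate the per-port state, program the pools and default
 * quotas, and register the shared buffer with devlink.
 */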
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
	u32 max_headroom_size;
	u16 ing_pool_count = 0;
	u16 eg_pool_count = 0;
	int err;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, CELL_SIZE))
		return -EIO;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, GUARANTEED_SHARED_BUFFER))
		return -EIO;

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_HEADROOM_SIZE))
		return -EIO;

	mlxsw_sp->sb = kzalloc(sizeof(*mlxsw_sp->sb), GFP_KERNEL);
	if (!mlxsw_sp->sb)
		return -ENOMEM;
	mlxsw_sp->sb->cell_size = MLXSW_CORE_RES_GET(mlxsw_sp->core, CELL_SIZE);
	mlxsw_sp->sb->sb_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						   GUARANTEED_SHARED_BUFFER);
	max_headroom_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
					       MAX_HEADROOM_SIZE);
	/* Round down, because this limit must not be overstepped. */
	mlxsw_sp->sb->max_headroom_cells = max_headroom_size /
						mlxsw_sp->sb->cell_size;

	err = mlxsw_sp_sb_ports_init(mlxsw_sp);
	if (err)
		goto err_sb_ports_init;
	err = mlxsw_sp_sb_prs_init(mlxsw_sp, mlxsw_sp->sb_vals->prs,
				   mlxsw_sp->sb_vals->pool_dess,
				   mlxsw_sp->sb_vals->pool_count);
	if (err)
		goto err_sb_prs_init;
	err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
	if (err)
		goto err_sb_cpu_port_sb_cms_init;
	err = mlxsw_sp_cpu_port_sb_pms_init(mlxsw_sp);
	if (err)
		goto err_sb_cpu_port_pms_init;
	err = mlxsw_sp_sb_mms_init(mlxsw_sp);
	if (err)
		goto err_sb_mms_init;
	mlxsw_sp_pool_count(mlxsw_sp, &ing_pool_count, &eg_pool_count);
	err = devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
				  mlxsw_sp->sb->sb_size,
				  ing_pool_count,
				  eg_pool_count,
				  MLXSW_SP_SB_ING_TC_COUNT,
				  MLXSW_SP_SB_EG_TC_COUNT);
	if (err)
		goto err_devlink_sb_register;

	return 0;

err_devlink_sb_register:
err_sb_mms_init:
err_sb_cpu_port_pms_init:
err_sb_cpu_port_sb_cms_init:
err_sb_prs_init:
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
err_sb_ports_init:
	kfree(mlxsw_sp->sb);
	return err;
}

void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
{
	devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
	mlxsw_sp_sb_ports_fini(mlxsw_sp);
	kfree(mlxsw_sp->sb);
}

int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err;

	err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
	if (err)
		return err;
	err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
	if (err)
		return err;
	err = mlxsw_sp_port_sb_pms_init(mlxsw_sp_port);

	return err;
}
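
/* devlink shared buffer ops. Pool sizes cross this boundary in bytes;
 * per-pool thresholds are bytes for static pools and dynamic threshold
 * values for dynamic ones (see mlxsw_sp_sb_threshold_out() and
 * mlxsw_sp_sb_threshold_in() below).
 */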
int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index,
			 struct devlink_sb_pool_info *pool_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	enum mlxsw_reg_sbxx_dir dir;
	struct mlxsw_sp_sb_pr *pr;

	dir = mlxsw_sp->sb_vals->pool_dess[pool_index].dir;
	pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);
	pool_info->pool_type = (enum devlink_sb_pool_type) dir;
	pool_info->size = mlxsw_sp_cells_bytes(mlxsw_sp, pr->size);
	pool_info->threshold_type = (enum devlink_sb_threshold_type) pr->mode;
	pool_info->cell_size = mlxsw_sp->sb->cell_size;
	return 0;
}

int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
			 unsigned int sb_index, u16 pool_index, u32 size,
			 enum devlink_sb_threshold_type threshold_type,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	u32 pool_size = mlxsw_sp_bytes_cells(mlxsw_sp, size);
	const struct mlxsw_sp_sb_pr *pr;
	enum mlxsw_reg_sbpr_mode mode;

	mode = (enum mlxsw_reg_sbpr_mode) threshold_type;
	pr = &mlxsw_sp->sb_vals->prs[pool_index];

	if (size > MLXSW_CORE_RES_GET(mlxsw_sp->core,
				      GUARANTEED_SHARED_BUFFER)) {
		NL_SET_ERR_MSG_MOD(extack, "Exceeded shared buffer size");
		return -EINVAL;
	}

	if (pr->freeze_mode && pr->mode != mode) {
		NL_SET_ERR_MSG_MOD(extack, "Changing this pool's threshold type is forbidden");
		return -EINVAL;
	}

	if (pr->freeze_size && pr->size != size) {
		NL_SET_ERR_MSG_MOD(extack, "Changing this pool's size is forbidden");
		return -EINVAL;
	}

	return mlxsw_sp_sb_pr_write(mlxsw_sp, pool_index, mode,
				    pool_size, false);
}
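
/* Dynamic thresholds are exposed through devlink in the 3..16 range,
 * offset from the 1..14 range programmed to the hardware.
 */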
#define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */

static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
				     u32 max_buff)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);

	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
		return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
	return mlxsw_sp_cells_bytes(mlxsw_sp, max_buff);
}

static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u16 pool_index,
				    u32 threshold, u32 *p_max_buff,
				    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool_index);

	if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
		int val;

		val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
		if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
		    val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX) {
			NL_SET_ERR_MSG_MOD(extack, "Invalid dynamic threshold value");
			return -EINVAL;
		}
		*p_max_buff = val;
	} else {
		*p_max_buff = mlxsw_sp_bytes_cells(mlxsw_sp, threshold);
	}
	return 0;
}

int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
			      unsigned int sb_index, u16 pool_index,
			      u32 *p_threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
						       pool_index);

	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool_index,
						 pm->max_buff);
	return 0;
}
|
|
|
|
|
|
|
|
int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
|
|
|
|
unsigned int sb_index, u16 pool_index,
|
2019-04-22 19:08:41 +07:00
|
|
|
u32 threshold, struct netlink_ext_ack *extack)
|
2016-04-14 23:19:24 +07:00
|
|
|
{
|
|
|
|
struct mlxsw_sp_port *mlxsw_sp_port =
|
|
|
|
mlxsw_core_port_driver_priv(mlxsw_core_port);
|
|
|
|
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
|
|
|
|
u8 local_port = mlxsw_sp_port->local_port;
|
|
|
|
u32 max_buff;
|
|
|
|
int err;
|
|
|
|
|
2019-09-16 22:04:20 +07:00
|
|
|
if (local_port == MLXSW_PORT_CPU_PORT) {
|
|
|
|
NL_SET_ERR_MSG_MOD(extack, "Changing CPU port's threshold is forbidden");
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
mlxsw: spectrum_buffers: Use devlink pool indices throughout
Currently, mlxsw assumes that each ingress pool has its egress
counterpart, and that pool index for purposes of caching matches the
index with which the hardware should be configured. As we want to expose
the MC pool, both of these assumptions break.
Instead, maintain the pool index as long as possible. Unify ingress and
egress caches and use the pool index as cache index as well. Only
translate to FW pool numbering when actually packing the registers. This
simplifies things considerably, as the pool index is the only quantity
necessary to uniquely identify a pool, and the pool/direction split is
not necessary until firmware is talked to.
To support the mapping between pool indices and pool numbers and
directions, which is not neatly mathematical anymore, introduce a pool
descriptor table, indexed by pool index, to facilitate the translation.
Include the MC pool in the descriptor table as well, so that it can be
referenced from mlxsw_sp_sb_cms_egress.
Signed-off-by: Petr Machata <petrm@mellanox.com>
Reviewed-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: Ido Schimmel <idosch@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-09-20 13:21:25 +07:00
|
|
|
err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
|
2019-04-22 19:08:41 +07:00
|
|
|
threshold, &max_buff, extack);
|
2016-04-14 23:19:24 +07:00
|
|
|
if (err)
|
|
|
|
return err;
|
|
|
|
|
	return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool_index,
				    0, max_buff);
}

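/* devlink sb_tc_pool_bind_get() callback: report the pool a TC is
 * bound to and its threshold, taken from the cached per-{port, PG/TC}
 * quota (CM) entry.
 */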
int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
						       pg_buff, dir);

	*p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool_index,
						 cm->max_buff);
	*p_pool_index = cm->pool_index;
	return 0;
}

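/* devlink sb_tc_pool_bind_set() callback: bind a TC to a pool and set
 * its threshold. The CPU port, bindings that cross the pool's
 * direction, and frozen bindings or thresholds are all rejected.
 */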
int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
				 unsigned int sb_index, u16 tc_index,
				 enum devlink_sb_pool_type pool_type,
				 u16 pool_index, u32 threshold,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	const struct mlxsw_sp_sb_cm *cm;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	u32 max_buff;
	int err;

	if (local_port == MLXSW_PORT_CPU_PORT) {
		NL_SET_ERR_MSG_MOD(extack, "Changing CPU port's binding is forbidden");
		return -EINVAL;
	}

	if (dir != mlxsw_sp->sb_vals->pool_dess[pool_index].dir) {
		NL_SET_ERR_MSG_MOD(extack, "Binding egress TC to ingress pool and vice versa is forbidden");
		return -EINVAL;
	}

	if (dir == MLXSW_REG_SBXX_DIR_INGRESS)
		cm = &mlxsw_sp->sb_vals->cms_ingress[tc_index];
	else
		cm = &mlxsw_sp->sb_vals->cms_egress[tc_index];

	if (cm->freeze_pool && cm->pool_index != pool_index) {
		NL_SET_ERR_MSG_MOD(extack, "Binding this TC to a different pool is forbidden");
		return -EINVAL;
	}

	if (cm->freeze_thresh && cm->max_buff != threshold) {
		NL_SET_ERR_MSG_MOD(extack, "Changing this TC's threshold is forbidden");
		return -EINVAL;
	}

	err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool_index,
				       threshold, &max_buff, extack);
	if (err)
		return err;

	return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff,
				    0, max_buff, false, pool_index);
}

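/* Each port contributes one SBSR occupancy record per ingress TC and
 * one per egress TC, so at most this many ports fit into a single
 * query. (Illustrative only: with 256 records and 8 + 8 TCs, a batch
 * would cover 16 ports.)
 */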
#define MASKED_COUNT_MAX \
	(MLXSW_REG_SBSR_REC_MAX_COUNT / \
	 (MLXSW_SP_SB_ING_TC_COUNT + MLXSW_SP_SB_EG_TC_COUNT))

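/* Context passed through cb_priv to the SBSR completion callback: the
 * number of ports masked into the query and the first local port of
 * the batch.
 */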
struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
	u8 masked_count;
	u8 local_port_1;
};

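/* Completion handler for an SBSR occupancy query. It walks the same
 * port range that was packed into the request and unpacks one record
 * per ingress TC, then one per egress TC, into the cached CM entries.
 */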
static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
					char *sbsr_pl, size_t sbsr_pl_len,
					unsigned long cb_priv)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	u8 masked_count;
	u8 local_port;
	int rec_index = 0;
	struct mlxsw_sp_sb_cm *cm;
	int i;

	memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));

	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		if (local_port == MLXSW_PORT_CPU_PORT) {
			/* Ingress quotas are not supported for the CPU port */
			masked_count++;
			continue;
		}
		for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_INGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
	masked_count = 0;
	for (local_port = cb_ctx.local_port_1;
	     local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++) {
			cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
						MLXSW_REG_SBXX_DIR_EGRESS);
			mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
						  &cm->occ.cur, &cm->occ.max);
		}
		if (++masked_count == cb_ctx.masked_count)
			break;
	}
}

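/* devlink sb_occ_snapshot() callback. Ports are queried in batches of
 * up to MASKED_COUNT_MAX; each batch issues one SBSR transaction plus
 * per-pool PM occupancy queries, all collected on bulk_list and waited
 * for before returning.
 */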
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
			     unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
	unsigned long cb_priv;
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	u8 masked_count;
	u8 local_port_1;
	u8 local_port;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

	local_port = MLXSW_PORT_CPU_PORT;
next_batch:
	local_port_1 = local_port;
	masked_count = 0;
	mlxsw_reg_sbsr_pack(sbsr_pl, false);
	for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
	for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		if (local_port != MLXSW_PORT_CPU_PORT) {
			/* Ingress quotas are not supported for the CPU port */
			mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
							     local_port, 1);
		}
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
			err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
						       &bulk_list);
			if (err)
				goto out;
		}
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

do_query:
	cb_ctx.masked_count = masked_count;
	cb_ctx.local_port_1 = local_port_1;
	memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
				    cb_priv);
	if (err)
		goto out;
	if (local_port < mlxsw_core_max_ports(mlxsw_core)) {
		local_port++;
		goto next_batch;
	}

out:
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}

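/* devlink sb_occ_max_clear() callback: the same batched SBSR walk as
 * the snapshot above, but packed with the clear flag set to reset the
 * maximum-occupancy watermarks, and with no unpacking callback since
 * no records are consumed.
 */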
int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
			      unsigned int sb_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	LIST_HEAD(bulk_list);
	char *sbsr_pl;
	unsigned int masked_count;
	u8 local_port;
	int i;
	int err;
	int err2;

	sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
	if (!sbsr_pl)
		return -ENOMEM;

	local_port = MLXSW_PORT_CPU_PORT;
next_batch:
	masked_count = 0;
	mlxsw_reg_sbsr_pack(sbsr_pl, true);
	for (i = 0; i < MLXSW_SP_SB_ING_TC_COUNT; i++)
		mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
	for (i = 0; i < MLXSW_SP_SB_EG_TC_COUNT; i++)
		mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
	for (; local_port < mlxsw_core_max_ports(mlxsw_core); local_port++) {
		if (!mlxsw_sp->ports[local_port])
			continue;
		if (local_port != MLXSW_PORT_CPU_PORT) {
			/* Ingress quotas are not supported for the CPU port */
			mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
							     local_port, 1);
		}
		mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
		for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
			err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
						       &bulk_list);
			if (err)
				goto out;
		}
		if (++masked_count == MASKED_COUNT_MAX)
			goto do_query;
	}

do_query:
	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
				    &bulk_list, NULL, 0);
	if (err)
		goto out;
	if (local_port < mlxsw_core_max_ports(mlxsw_core)) {
		local_port++;
		goto next_batch;
	}

out:
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	if (!err)
		err = err2;
	kfree(sbsr_pl);
	return err;
}

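/* devlink sb_occ_port_pool_get() callback: report a port's cached
 * per-pool occupancy, converted from cells to bytes.
 */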
int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
				  unsigned int sb_index, u16 pool_index,
				  u32 *p_cur, u32 *p_max)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
						       pool_index);

	*p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.cur);
	*p_max = mlxsw_sp_cells_bytes(mlxsw_sp, pm->occ.max);
	return 0;
}

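/* devlink sb_occ_tc_port_bind_get() callback: report a TC's cached
 * occupancy, converted from cells to bytes.
 */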
int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u32 *p_cur, u32 *p_max)
{
	struct mlxsw_sp_port *mlxsw_sp_port =
			mlxsw_core_port_driver_priv(mlxsw_core_port);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	u8 local_port = mlxsw_sp_port->local_port;
	u8 pg_buff = tc_index;
	enum mlxsw_reg_sbxx_dir dir = (enum mlxsw_reg_sbxx_dir) pool_type;
	struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
						       pg_buff, dir);

	*p_cur = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.cur);
	*p_max = mlxsw_sp_cells_bytes(mlxsw_sp, cm->occ.max);
	return 0;
}