Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2025-01-19 04:16:10 +07:00)
Merge branch 'mlxsw-Introduce-support-for-CQEv1-2'
Ido Schimmel says:

====================
mlxsw: Introduce support for CQEv1/2

Jiri says:

Current SwitchX2 and Spectrum FWs support CQEv0 and that is what we
implement in mlxsw. Spectrum FW also supports CQE v1 and v2. However,
Spectrum-2 won't support CQEv0. Prepare for it and setup the CQE
versions to use according to what is queried from FW.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in: commit 0e913f28ba
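The series hinges on one decision made at probe time: after querying FW resources, mlxsw_pci_init() picks the newest CQE version the FW reports (CQEv2, else CQEv1) and falls back to CQEv0 either when the FW explicitly reports it or when the FW is too old to report the CQE resources at all (see the mlxsw_pci_init() hunk below). The standalone sketch that follows models only that fallback order; struct fw_caps, its flags, and main() are hypothetical stand-ins for the driver's MLXSW_CORE_RES_VALID()/MLXSW_CORE_RES_GET() lookups, not kernel code.

/* Standalone sketch (userspace C) of the CQE version selection the diff
 * below adds to mlxsw_pci_init(). struct fw_caps and its fields are
 * hypothetical stand-ins for the queried FW resources.
 */
#include <stdbool.h>
#include <stdio.h>

enum cqe_version { CQE_V0, CQE_V1, CQE_V2 };

struct fw_caps {
	bool v0_valid, v0;	/* resource reported by FW, and its value */
	bool v1_valid, v1;
	bool v2_valid, v2;
};

/* Prefer the highest CQE version the FW reports; fall back to CQEv0 when
 * the FW predates the CQE resources entirely. Return -1 for an
 * inconsistent combination, mirroring the driver's error path.
 */
static int pick_max_cqe_ver(const struct fw_caps *caps)
{
	if (caps->v2_valid && caps->v2)
		return CQE_V2;
	if (caps->v1_valid && caps->v1)
		return CQE_V1;
	if ((caps->v0_valid && caps->v0) || !caps->v0_valid)
		return CQE_V0;
	return -1;	/* CQE_V0 reported but unsupported: reject */
}

int main(void)
{
	struct fw_caps spectrum = { .v0_valid = true, .v0 = true,
				    .v1_valid = true, .v1 = true };
	struct fw_caps old_fw = { 0 };	/* FW that knows no CQE resources */

	printf("spectrum: CQE_V%d\n", pick_max_cqe_ver(&spectrum)); /* 1 */
	printf("old fw:   CQE_V%d\n", pick_max_cqe_ver(&old_fw));   /* 0 */
	return 0;
}

In the driver, the chosen version then flows into the config_profile mailbox (cqe_version is only set when something newer than CQEv0 was picked) and into each CQ's sw2hw_cq context, as the hunks below show.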
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -424,10 +424,15 @@ MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_rdq_sz, 0x04, 24, 8);
 MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_num_rdqs, 0x04, 0, 8);
 
 /* cmd_mbox_query_aq_cap_log_max_cq_sz
- * Log (base 2) of max CQEs allowed on CQ.
+ * Log (base 2) of the Maximum CQEs allowed in a CQ for CQEv0 and CQEv1.
  */
 MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_cq_sz, 0x08, 24, 8);
 
+/* cmd_mbox_query_aq_cap_log_max_cqv2_sz
+ * Log (base 2) of the Maximum CQEs allowed in a CQ for CQEv2.
+ */
+MLXSW_ITEM32(cmd_mbox, query_aq_cap, log_max_cqv2_sz, 0x08, 16, 8);
+
 /* cmd_mbox_query_aq_cap_max_num_cqs
  * Maximum number of CQs.
  */
@@ -662,6 +667,12 @@ MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_single_size, 0x0C, 25, 1);
  */
 MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_double_size, 0x0C, 26, 1);
 
+/* cmd_mbox_config_set_cqe_version
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_cqe_version, 0x08, 0, 1);
+
 /* cmd_mbox_config_profile_max_vepa_channels
  * Maximum number of VEPA channels per port (0 through 16)
  * 0 - multi-channel VEPA is disabled
@@ -841,6 +852,14 @@ MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_type,
 MLXSW_ITEM32_INDEXED(cmd_mbox, config_profile, swid_config_properties,
 		     0x60, 0, 8, 0x08, 0x00, false);
 
+/* cmd_mbox_config_profile_cqe_version
+ * CQE version:
+ * 0: CQE version is 0
+ * 1: CQE version is either 1 or 2
+ * CQE ver 1 or 2 is configured by Completion Queue Context field cqe_ver.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, cqe_version, 0xB0, 0, 8);
+
 /* ACCESS_REG - Access EMAD Supported Register
  * ----------------------------------
  * OpMod == 0 (N/A), INMmod == 0 (N/A)
@@ -1032,11 +1051,15 @@ static inline int mlxsw_cmd_sw2hw_cq(struct mlxsw_core *mlxsw_core,
 				 0, cq_number, in_mbox, MLXSW_CMD_MBOX_SIZE);
 }
 
-/* cmd_mbox_sw2hw_cq_cv
+enum mlxsw_cmd_mbox_sw2hw_cq_cqe_ver {
+	MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1,
+	MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2,
+};
+
+/* cmd_mbox_sw2hw_cq_cqe_ver
  * CQE Version.
- * 0 - CQE Version 0, 1 - CQE Version 1
  */
-MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cv, 0x00, 28, 4);
+MLXSW_ITEM32(cmd_mbox, sw2hw_cq, cqe_ver, 0x00, 28, 4);
 
 /* cmd_mbox_sw2hw_cq_c_eqn
  * Event Queue this CQ reports completion events to.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -117,6 +117,7 @@ struct mlxsw_pci_queue {
 		struct {
 			u32 comp_sdq_count;
 			u32 comp_rdq_count;
+			enum mlxsw_pci_cqe_v v;
 		} cq;
 		struct {
 			u32 ev_cmd_count;
@@ -155,6 +156,8 @@ struct mlxsw_pci {
 	} cmd;
 	struct mlxsw_bus_info bus_info;
 	const struct pci_device_id *id;
+	enum mlxsw_pci_cqe_v max_cqe_ver; /* Maximal supported CQE version */
+	u8 num_sdq_cqs; /* Number of CQs used for SDQs */
 };
 
 static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
@@ -202,24 +205,6 @@ static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
 	return owner_bit != !!(q->consumer_counter & q->count);
 }
 
-static char *
-mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue *q,
-			    u32 (*get_elem_owner_func)(const char *))
-{
-	struct mlxsw_pci_queue_elem_info *elem_info;
-	char *elem;
-	bool owner_bit;
-
-	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
-	elem = elem_info->elem;
-	owner_bit = get_elem_owner_func(elem);
-	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
-		return NULL;
-	q->consumer_counter++;
-	rmb(); /* make sure we read owned bit before the rest of elem */
-	return elem;
-}
-
 static struct mlxsw_pci_queue_type_group *
 mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
 			       enum mlxsw_pci_queue_type q_type)
@@ -494,6 +479,17 @@ static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
 	}
 }
 
+static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci,
+				  struct mlxsw_pci_queue *q)
+{
+	q->u.cq.v = mlxsw_pci->max_cqe_ver;
+
+	/* For SDQ it is pointless to use CQEv2, so use CQEv1 instead */
+	if (q->u.cq.v == MLXSW_PCI_CQE_V2 &&
+	    q->num < mlxsw_pci->num_sdq_cqs)
+		q->u.cq.v = MLXSW_PCI_CQE_V1;
+}
+
 static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
 			     struct mlxsw_pci_queue *q)
 {
@@ -505,10 +501,16 @@ static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
 	for (i = 0; i < q->count; i++) {
 		char *elem = mlxsw_pci_queue_elem_get(q, i);
 
-		mlxsw_pci_cqe_owner_set(elem, 1);
+		mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1);
 	}
 
-	mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */
+	if (q->u.cq.v == MLXSW_PCI_CQE_V1)
+		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
+				MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1);
+	else if (q->u.cq.v == MLXSW_PCI_CQE_V2)
+		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
+				MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2);
+
 	mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
 	mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
 	mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
@@ -559,7 +561,7 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
 static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
 				     struct mlxsw_pci_queue *q,
 				     u16 consumer_counter_limit,
-				     char *cqe)
+				     enum mlxsw_pci_cqe_v cqe_v, char *cqe)
 {
 	struct pci_dev *pdev = mlxsw_pci->pdev;
 	struct mlxsw_pci_queue_elem_info *elem_info;
@@ -579,10 +581,11 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
 	if (q->consumer_counter++ != consumer_counter_limit)
 		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
 
-	if (mlxsw_pci_cqe_lag_get(cqe)) {
+	if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
 		rx_info.is_lag = true;
-		rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe);
-		rx_info.lag_port_index = mlxsw_pci_cqe_lag_port_index_get(cqe);
+		rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
+		rx_info.lag_port_index =
+			mlxsw_pci_cqe_lag_subport_get(cqe_v, cqe);
 	} else {
 		rx_info.is_lag = false;
 		rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
@@ -591,7 +594,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
 	rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);
 
 	byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
-	if (mlxsw_pci_cqe_crc_get(cqe))
+	if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
 		byte_count -= ETH_FCS_LEN;
 	skb_put(skb, byte_count);
 	mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
@@ -608,7 +611,18 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
 
 static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
 {
-	return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_cqe_owner_get);
+	struct mlxsw_pci_queue_elem_info *elem_info;
+	char *elem;
+	bool owner_bit;
+
+	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+	elem = elem_info->elem;
+	owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem);
+	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
+		return NULL;
+	q->consumer_counter++;
+	rmb(); /* make sure we read owned bit before the rest of elem */
+	return elem;
 }
 
 static void mlxsw_pci_cq_tasklet(unsigned long data)
@@ -621,8 +635,8 @@ static void mlxsw_pci_cq_tasklet(unsigned long data)
 
 	while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
 		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
-		u8 sendq = mlxsw_pci_cqe_sr_get(cqe);
-		u8 dqn = mlxsw_pci_cqe_dqn_get(cqe);
+		u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
+		u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
 
 		if (sendq) {
 			struct mlxsw_pci_queue *sdq;
@@ -636,7 +650,7 @@ static void mlxsw_pci_cq_tasklet(unsigned long data)
 
 			rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
 			mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
-						 wqe_counter, cqe);
+						 wqe_counter, q->u.cq.v, cqe);
 			q->u.cq.comp_rdq_count++;
 		}
 		if (++items == credits)
@@ -648,6 +662,18 @@ static void mlxsw_pci_cq_tasklet(unsigned long data)
 	}
 }
 
+static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
+{
+	return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT :
+					       MLXSW_PCI_CQE01_COUNT;
+}
+
+static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
+{
+	return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE :
+					       MLXSW_PCI_CQE01_SIZE;
+}
+
 static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
 			     struct mlxsw_pci_queue *q)
 {
@@ -696,7 +722,18 @@ static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
 
 static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
 {
-	return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_eqe_owner_get);
+	struct mlxsw_pci_queue_elem_info *elem_info;
+	char *elem;
+	bool owner_bit;
+
+	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+	elem = elem_info->elem;
+	owner_bit = mlxsw_pci_eqe_owner_get(elem);
+	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
+		return NULL;
+	q->consumer_counter++;
+	rmb(); /* make sure we read owned bit before the rest of elem */
+	return elem;
 }
 
 static void mlxsw_pci_eq_tasklet(unsigned long data)
@@ -749,11 +786,15 @@ static void mlxsw_pci_eq_tasklet(unsigned long data)
 struct mlxsw_pci_queue_ops {
 	const char *name;
 	enum mlxsw_pci_queue_type type;
+	void (*pre_init)(struct mlxsw_pci *mlxsw_pci,
+			 struct mlxsw_pci_queue *q);
 	int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
 		    struct mlxsw_pci_queue *q);
 	void (*fini)(struct mlxsw_pci *mlxsw_pci,
 		     struct mlxsw_pci_queue *q);
 	void (*tasklet)(unsigned long data);
+	u16 (*elem_count_f)(const struct mlxsw_pci_queue *q);
+	u8 (*elem_size_f)(const struct mlxsw_pci_queue *q);
 	u16 elem_count;
 	u8 elem_size;
 };
@@ -776,11 +817,12 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
 
 static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
 	.type		= MLXSW_PCI_QUEUE_TYPE_CQ,
+	.pre_init	= mlxsw_pci_cq_pre_init,
 	.init		= mlxsw_pci_cq_init,
 	.fini		= mlxsw_pci_cq_fini,
 	.tasklet	= mlxsw_pci_cq_tasklet,
-	.elem_count	= MLXSW_PCI_CQE_COUNT,
-	.elem_size	= MLXSW_PCI_CQE_SIZE
+	.elem_count_f	= mlxsw_pci_cq_elem_count,
+	.elem_size_f	= mlxsw_pci_cq_elem_size
 };
 
 static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
@@ -800,10 +842,15 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
 	int i;
 	int err;
 
-	spin_lock_init(&q->lock);
 	q->num = q_num;
-	q->count = q_ops->elem_count;
-	q->elem_size = q_ops->elem_size;
+	if (q_ops->pre_init)
+		q_ops->pre_init(mlxsw_pci, q);
+
+	spin_lock_init(&q->lock);
+	q->count = q_ops->elem_count_f ? q_ops->elem_count_f(q) :
+					 q_ops->elem_count;
+	q->elem_size = q_ops->elem_size_f ? q_ops->elem_size_f(q) :
+					    q_ops->elem_size;
 	q->type = q_ops->type;
 	q->pci = mlxsw_pci;
 
@@ -832,7 +879,7 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
 
 		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
 		elem_info->elem =
-			__mlxsw_pci_queue_elem_get(q, q_ops->elem_size, i);
+			__mlxsw_pci_queue_elem_get(q, q->elem_size, i);
 	}
 
 	mlxsw_cmd_mbox_zero(mbox);
@@ -912,6 +959,7 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
 	u8 rdq_log2sz;
 	u8 num_cqs;
 	u8 cq_log2sz;
+	u8 cqv2_log2sz;
 	u8 num_eqs;
 	u8 eq_log2sz;
 	int err;
@@ -927,6 +975,7 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
 	rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
 	num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
 	cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
+	cqv2_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cqv2_sz_get(mbox);
 	num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
 	eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);
 
@@ -938,12 +987,16 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
 
 	if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
 	    (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
-	    (1 << cq_log2sz != MLXSW_PCI_CQE_COUNT) ||
+	    (1 << cq_log2sz != MLXSW_PCI_CQE01_COUNT) ||
+	    (mlxsw_pci->max_cqe_ver == MLXSW_PCI_CQE_V2 &&
+	     (1 << cqv2_log2sz != MLXSW_PCI_CQE2_COUNT)) ||
 	    (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
 		dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
 		return -EINVAL;
 	}
 
+	mlxsw_pci->num_sdq_cqs = num_sdqs;
+
 	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
 					 num_eqs);
 	if (err) {
@@ -1184,6 +1237,11 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
 			mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
 							     &profile->swid_config[i]);
 
+	if (mlxsw_pci->max_cqe_ver > MLXSW_PCI_CQE_V0) {
+		mlxsw_cmd_mbox_config_profile_set_cqe_version_set(mbox, 1);
+		mlxsw_cmd_mbox_config_profile_cqe_version_set(mbox, 1);
+	}
+
 	return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
 }
 
@@ -1378,6 +1436,21 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
 	if (err)
 		goto err_query_resources;
 
+	if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V2) &&
+	    MLXSW_CORE_RES_GET(mlxsw_core, CQE_V2))
+		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V2;
+	else if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V1) &&
+		 MLXSW_CORE_RES_GET(mlxsw_core, CQE_V1))
+		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V1;
+	else if ((MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0) &&
+		  MLXSW_CORE_RES_GET(mlxsw_core, CQE_V0)) ||
+		 !MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0)) {
+		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V0;
+	} else {
+		dev_err(&pdev->dev, "Invalid supported CQE version combination reported\n");
+		goto err_cqe_v_check;
+	}
+
 	err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, res);
 	if (err)
 		goto err_config_profile;
@@ -1400,6 +1473,7 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
 	mlxsw_pci_aqs_fini(mlxsw_pci);
 err_aqs_init:
 err_config_profile:
+err_cqe_v_check:
 err_query_resources:
 err_boardinfo:
 	mlxsw_pci_fw_area_fini(mlxsw_pci);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -82,10 +82,12 @@
 #define MLXSW_PCI_AQ_PAGES	8
 #define MLXSW_PCI_AQ_SIZE	(MLXSW_PCI_PAGE_SIZE * MLXSW_PCI_AQ_PAGES)
 #define MLXSW_PCI_WQE_SIZE	32 /* 32 bytes per element */
-#define MLXSW_PCI_CQE_SIZE	16 /* 16 bytes per element */
+#define MLXSW_PCI_CQE01_SIZE	16 /* 16 bytes per element */
+#define MLXSW_PCI_CQE2_SIZE	32 /* 32 bytes per element */
 #define MLXSW_PCI_EQE_SIZE	16 /* 16 bytes per element */
 #define MLXSW_PCI_WQE_COUNT	(MLXSW_PCI_AQ_SIZE / MLXSW_PCI_WQE_SIZE)
-#define MLXSW_PCI_CQE_COUNT	(MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE_SIZE)
+#define MLXSW_PCI_CQE01_COUNT	(MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE01_SIZE)
+#define MLXSW_PCI_CQE2_COUNT	(MLXSW_PCI_AQ_SIZE / MLXSW_PCI_CQE2_SIZE)
 #define MLXSW_PCI_EQE_COUNT	(MLXSW_PCI_AQ_SIZE / MLXSW_PCI_EQE_SIZE)
 #define MLXSW_PCI_EQE_UPDATE_COUNT	0x80
 
@@ -126,10 +128,48 @@ MLXSW_ITEM16_INDEXED(pci, wqe, byte_count, 0x02, 0, 14, 0x02, 0x00, false);
  */
 MLXSW_ITEM64_INDEXED(pci, wqe, address, 0x08, 0, 64, 0x8, 0x0, false);
 
+enum mlxsw_pci_cqe_v {
+	MLXSW_PCI_CQE_V0,
+	MLXSW_PCI_CQE_V1,
+	MLXSW_PCI_CQE_V2,
+};
+
+#define mlxsw_pci_cqe_item_helpers(name, v0, v1, v2)			\
+static inline u32 mlxsw_pci_cqe_##name##_get(enum mlxsw_pci_cqe_v v, char *cqe) \
+{									\
+	switch (v) {							\
+	default:							\
+	case MLXSW_PCI_CQE_V0:						\
+		return mlxsw_pci_cqe##v0##_##name##_get(cqe);		\
+	case MLXSW_PCI_CQE_V1:						\
+		return mlxsw_pci_cqe##v1##_##name##_get(cqe);		\
+	case MLXSW_PCI_CQE_V2:						\
+		return mlxsw_pci_cqe##v2##_##name##_get(cqe);		\
+	}								\
+}									\
+static inline void mlxsw_pci_cqe_##name##_set(enum mlxsw_pci_cqe_v v,	\
+					      char *cqe, u32 val)	\
+{									\
+	switch (v) {							\
+	default:							\
+	case MLXSW_PCI_CQE_V0:						\
+		mlxsw_pci_cqe##v0##_##name##_set(cqe, val);		\
+		break;							\
+	case MLXSW_PCI_CQE_V1:						\
+		mlxsw_pci_cqe##v1##_##name##_set(cqe, val);		\
+		break;							\
+	case MLXSW_PCI_CQE_V2:						\
+		mlxsw_pci_cqe##v2##_##name##_set(cqe, val);		\
+		break;							\
+	}								\
+}
+
 /* pci_cqe_lag
  * Packet arrives from a port which is a LAG
  */
-MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1);
+MLXSW_ITEM32(pci, cqe0, lag, 0x00, 23, 1);
+MLXSW_ITEM32(pci, cqe12, lag, 0x00, 24, 1);
+mlxsw_pci_cqe_item_helpers(lag, 0, 12, 12);
 
 /* pci_cqe_system_port/lag_id
  * When lag=0: System port on which the packet was received
@@ -138,8 +178,12 @@ MLXSW_ITEM32(pci, cqe, lag, 0x00, 23, 1);
  * bits [3:0] sub_port on which the packet was received
  */
 MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16);
-MLXSW_ITEM32(pci, cqe, lag_id, 0x00, 4, 12);
-MLXSW_ITEM32(pci, cqe, lag_port_index, 0x00, 0, 4);
+MLXSW_ITEM32(pci, cqe0, lag_id, 0x00, 4, 12);
+MLXSW_ITEM32(pci, cqe12, lag_id, 0x00, 0, 16);
+mlxsw_pci_cqe_item_helpers(lag_id, 0, 12, 12);
+MLXSW_ITEM32(pci, cqe0, lag_subport, 0x00, 0, 4);
+MLXSW_ITEM32(pci, cqe12, lag_subport, 0x00, 16, 8);
+mlxsw_pci_cqe_item_helpers(lag_subport, 0, 12, 12);
 
 /* pci_cqe_wqe_counter
  * WQE count of the WQEs completed on the associated dqn
@@ -162,28 +206,38 @@ MLXSW_ITEM32(pci, cqe, trap_id, 0x08, 0, 9);
  * Length include CRC. Indicates the length field includes
  * the packet's CRC.
  */
-MLXSW_ITEM32(pci, cqe, crc, 0x0C, 8, 1);
+MLXSW_ITEM32(pci, cqe0, crc, 0x0C, 8, 1);
+MLXSW_ITEM32(pci, cqe12, crc, 0x0C, 9, 1);
+mlxsw_pci_cqe_item_helpers(crc, 0, 12, 12);
 
 /* pci_cqe_e
  * CQE with Error.
  */
-MLXSW_ITEM32(pci, cqe, e, 0x0C, 7, 1);
+MLXSW_ITEM32(pci, cqe0, e, 0x0C, 7, 1);
+MLXSW_ITEM32(pci, cqe12, e, 0x00, 27, 1);
+mlxsw_pci_cqe_item_helpers(e, 0, 12, 12);
 
 /* pci_cqe_sr
  * 1 - Send Queue
  * 0 - Receive Queue
  */
-MLXSW_ITEM32(pci, cqe, sr, 0x0C, 6, 1);
+MLXSW_ITEM32(pci, cqe0, sr, 0x0C, 6, 1);
+MLXSW_ITEM32(pci, cqe12, sr, 0x00, 26, 1);
+mlxsw_pci_cqe_item_helpers(sr, 0, 12, 12);
 
 /* pci_cqe_dqn
  * Descriptor Queue (DQ) Number.
  */
-MLXSW_ITEM32(pci, cqe, dqn, 0x0C, 1, 5);
+MLXSW_ITEM32(pci, cqe0, dqn, 0x0C, 1, 5);
+MLXSW_ITEM32(pci, cqe12, dqn, 0x0C, 1, 6);
+mlxsw_pci_cqe_item_helpers(dqn, 0, 12, 12);
 
 /* pci_cqe_owner
  * Ownership bit.
  */
-MLXSW_ITEM32(pci, cqe, owner, 0x0C, 0, 1);
+MLXSW_ITEM32(pci, cqe01, owner, 0x0C, 0, 1);
+MLXSW_ITEM32(pci, cqe2, owner, 0x1C, 0, 1);
+mlxsw_pci_cqe_item_helpers(owner, 01, 01, 2);
 
 /* pci_eqe_event_type
  * Event type.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -43,6 +43,9 @@ enum mlxsw_res_id {
 	MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE,
 	MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE,
 	MLXSW_RES_ID_MAX_TRAP_GROUPS,
+	MLXSW_RES_ID_CQE_V0,
+	MLXSW_RES_ID_CQE_V1,
+	MLXSW_RES_ID_CQE_V2,
 	MLXSW_RES_ID_COUNTER_POOL_SIZE,
 	MLXSW_RES_ID_MAX_SPAN,
 	MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES,
@@ -81,6 +84,9 @@ static u16 mlxsw_res_ids[] = {
 	[MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002,
 	[MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003,
 	[MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201,
+	[MLXSW_RES_ID_CQE_V0] = 0x2210,
+	[MLXSW_RES_ID_CQE_V1] = 0x2211,
+	[MLXSW_RES_ID_CQE_V2] = 0x2212,
 	[MLXSW_RES_ID_COUNTER_POOL_SIZE] = 0x2410,
 	[MLXSW_RES_ID_MAX_SPAN] = 0x2420,
 	[MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES] = 0x2443,