Merge branch 'qed-sriov'

Yuval Mintz says:

====================
qed*: Add SR-IOV support

This series adds SR-IOV support to the qed/qede drivers, adding a new PCI
device ID for a VF that is shared between all the various PFs that
support IOV.

This is quite a massive series - the first 7 patches add the infrastructure
for supporting VFs in qed - mainly adding support for a HW-based VF<->PF
channel, as well as diverging all existing configuration flows based on the
PF/VF decision. I.e., while PF-originated requests go directly to HW/FW,
VF requests first have to traverse to the PF, which performs the
configuration on their behalf.

The 8th patch adds support for the VF device in qede.

The remaining 6 patches each add some user-facing API support related to
VFs that can be exercised via the PF - forcing MAC/VLAN, changing speed, etc.

Dave,

Sorry in advance for the length of the series. Most of the bulk here is in
the infrastructure patches that have to go together [or at least, it makes
little sense to try splitting them up].

Please consider applying this to `net-next'.

Thanks,
Yuval

Changes from previous revision:
------------------------------
 - V2 - Replace aligned_u64 with regular u64; This was possible as the
        shared structures [between PF and VF] were already sufficiently
        padded as-is in the API, making this redundant.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2016-05-12 00:04:08 -04:00
commit 488992916c
33 changed files with 8124 additions and 450 deletions
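The PF/VF split described in the cover letter recurs throughout the diff below: every configuration entry point checks IS_VF() and, when running on a VF, forwards the request over the HW-based channel so that the PF performs the actual HW/FW configuration. A condensed sketch of that shape, based on qed_sp_vport_stop() as it appears further down in the diff (the PF body is elided here):

int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
{
	if (IS_VF(p_hwfn->cdev))
		/* VF: ask the PF to do it over the VF->PF channel */
		return qed_vf_pf_vport_stop(p_hwfn);

	/* PF: build and post the VPORT_STOP ramrod to FW directly
	 * (see the full function in the diff below)
	 */
	return 0;
}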


@ -98,6 +98,16 @@ config QED
---help---
This enables the support for ...
config QED_SRIOV
bool "QLogic QED 25/40/100Gb SR-IOV support"
depends on QED && PCI_IOV
default y
---help---
This configuration parameter enables Single Root Input Output
Virtualization support for QED devices.
This allows for virtual function acceleration in virtualized
environments.
config QEDE
tristate "QLogic QED 25/40/100Gb Ethernet NIC"
depends on QED


@ -3,3 +3,4 @@ obj-$(CONFIG_QED) := qed.o
qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \
qed_selftest.o
qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o


@ -152,6 +152,7 @@ enum QED_RESOURCES {
enum QED_FEATURE {
QED_PF_L2_QUE,
QED_VF,
QED_MAX_FEATURES,
};
@ -310,6 +311,8 @@ struct qed_hwfn {
bool first_on_engine;
bool hw_init_done;
u8 num_funcs_on_engine;
/* BAR access */
void __iomem *regview;
void __iomem *doorbells;
@ -360,6 +363,8 @@ struct qed_hwfn {
/* True if the driver requests for the link */
bool b_drv_link_init;
struct qed_vf_iov *vf_iov_info;
struct qed_pf_iov *pf_iov_info;
struct qed_mcp_info *mcp_info;
struct qed_hw_cid_data *p_tx_cids;
@ -376,6 +381,12 @@ struct qed_hwfn {
struct qed_simd_fp_handler simd_proto_handler[64];
#ifdef CONFIG_QED_SRIOV
struct workqueue_struct *iov_wq;
struct delayed_work iov_task;
unsigned long iov_task_flags;
#endif
struct z_stream_s *stream;
};
@ -484,7 +495,13 @@ struct qed_dev {
u8 num_hwfns;
struct qed_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
/* SRIOV */
struct qed_hw_sriov_info *p_iov_info;
#define IS_QED_SRIOV(cdev) (!!(cdev)->p_iov_info)
unsigned long tunn_mode;
bool b_is_vf;
u32 drv_type;
struct qed_eth_stats *reset_stats;
@ -514,6 +531,8 @@ struct qed_dev {
const struct firmware *firmware;
};
#define NUM_OF_VFS(dev) MAX_NUM_VFS_BB
#define NUM_OF_L2_QUEUES(dev) MAX_NUM_L2_QUEUES_BB
#define NUM_OF_SBS(dev) MAX_SB_PER_PATH_BB
#define NUM_OF_ENG_PFS(dev) MAX_NUM_PFS_BB
@ -535,8 +554,10 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
#define PURE_LB_TC 8
int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate);
void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
/* Other Linux specific common definitions */


@ -24,11 +24,13 @@
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"
/* Max number of connection types in HW (DQ/CDU etc.) */
#define MAX_CONN_TYPES PROTOCOLID_COMMON
#define NUM_TASK_TYPES 2
#define NUM_TASK_PF_SEGMENTS 4
#define NUM_TASK_VF_SEGMENTS 1
/* QM constants */
#define QM_PQ_ELEMENT_SIZE 4 /* in bytes */
@ -63,10 +65,12 @@ union conn_context {
struct qed_conn_type_cfg {
u32 cid_count;
u32 cid_start;
u32 cids_per_vf;
};
/* ILT Client configuration, Per connection type (protocol) resources. */
#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
#define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2)
#define CDUC_BLK (0)
enum ilt_clients {
@ -97,6 +101,10 @@ struct qed_ilt_client_cfg {
/* ILT client blocks for PF */
struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
u32 pf_total_lines;
/* ILT client blocks for VFs */
struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
u32 vf_total_lines;
};
/* Per Path -
@ -123,6 +131,11 @@ struct qed_cxt_mngr {
/* computed ILT structure */
struct qed_ilt_client_cfg clients[ILT_CLI_MAX];
/* total number of VFs for this hwfn -
* ALL VFs are symmetric in terms of HW resources
*/
u32 vf_count;
/* Acquired CIDs */
struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
@ -131,37 +144,60 @@ struct qed_cxt_mngr {
u32 pf_start_line;
};
static u32 qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr)
/* counts the iids for the CDU/CDUC ILT client configuration */
struct qed_cdu_iids {
u32 pf_cids;
u32 per_vf_cids;
};
static void qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr,
struct qed_cdu_iids *iids)
{
u32 type, pf_cids = 0;
u32 type;
for (type = 0; type < MAX_CONN_TYPES; type++)
pf_cids += p_mngr->conn_cfg[type].cid_count;
return pf_cids;
for (type = 0; type < MAX_CONN_TYPES; type++) {
iids->pf_cids += p_mngr->conn_cfg[type].cid_count;
iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
}
}
static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
struct qed_qm_iids *iids)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
int type;
u32 vf_cids = 0, type;
for (type = 0; type < MAX_CONN_TYPES; type++)
for (type = 0; type < MAX_CONN_TYPES; type++) {
iids->cids += p_mngr->conn_cfg[type].cid_count;
vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
}
DP_VERBOSE(p_hwfn, QED_MSG_ILT, "iids: CIDS %08x\n", iids->cids);
iids->vf_cids += vf_cids * p_mngr->vf_count;
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
"iids: CIDS %08x vf_cids %08x\n",
iids->cids, iids->vf_cids);
}
/* set the iids count per protocol */
static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
enum protocol_type type,
u32 cid_count)
u32 cid_count, u32 vf_cid_cnt)
{
struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];
p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN);
}
u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
enum protocol_type type,
u32 *vf_cid)
{
if (vf_cid)
*vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;
return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
}
static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
@ -210,10 +246,12 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct qed_ilt_client_cfg *p_cli;
struct qed_ilt_cli_blk *p_blk;
u32 curr_line, total, pf_cids;
struct qed_cdu_iids cdu_iids;
struct qed_qm_iids qm_iids;
u32 curr_line, total, i;
memset(&qm_iids, 0, sizeof(qm_iids));
memset(&cdu_iids, 0, sizeof(cdu_iids));
p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);
@ -224,14 +262,16 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
/* CDUC */
p_cli = &p_mngr->clients[ILT_CLI_CDUC];
curr_line = p_mngr->pf_start_line;
/* CDUC PF */
p_cli->pf_total_lines = 0;
/* get the counters for the CDUC and QM clients */
pf_cids = qed_cxt_cdu_iids(p_mngr);
qed_cxt_cdu_iids(p_mngr, &cdu_iids);
p_blk = &p_cli->pf_blks[CDUC_BLK];
total = pf_cids * CONN_CXT_SIZE(p_hwfn);
total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);
qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
total, CONN_CXT_SIZE(p_hwfn));
@ -239,17 +279,36 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
p_cli->pf_total_lines = curr_line - p_blk->start_line;
/* CDUC VF */
p_blk = &p_cli->vf_blks[CDUC_BLK];
total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);
qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
total, CONN_CXT_SIZE(p_hwfn));
qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
p_cli->vf_total_lines = curr_line - p_blk->start_line;
for (i = 1; i < p_mngr->vf_count; i++)
qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
ILT_CLI_CDUC);
/* QM */
p_cli = &p_mngr->clients[ILT_CLI_QM];
p_blk = &p_cli->pf_blks[0];
qed_cxt_qm_iids(p_hwfn, &qm_iids);
total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids, 0, 0,
p_hwfn->qm_info.num_pqs, 0);
total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
qm_iids.vf_cids, 0,
p_hwfn->qm_info.num_pqs,
p_hwfn->qm_info.num_vf_pqs);
DP_VERBOSE(p_hwfn, QED_MSG_ILT,
"QM ILT Info, (cids=%d, num_pqs=%d, memory_size=%d)\n",
qm_iids.cids, p_hwfn->qm_info.num_pqs, total);
DP_VERBOSE(p_hwfn,
QED_MSG_ILT,
"QM ILT Info, (cids=%d, vf_cids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n",
qm_iids.cids,
qm_iids.vf_cids,
p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);
qed_ilt_cli_blk_fill(p_cli, p_blk,
curr_line, total * 0x1000,
@ -358,7 +417,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct qed_ilt_client_cfg *clients = p_mngr->clients;
struct qed_ilt_cli_blk *p_blk;
u32 size, i, j;
u32 size, i, j, k;
int rc;
size = qed_cxt_ilt_shadow_size(clients);
@ -383,6 +442,16 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
if (rc != 0)
goto ilt_shadow_fail;
}
for (k = 0; k < p_mngr->vf_count; k++) {
for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
u32 lines = clients[i].vf_total_lines * k;
p_blk = &clients[i].vf_blks[j];
rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines);
if (rc != 0)
goto ilt_shadow_fail;
}
}
}
return 0;
@ -467,6 +536,9 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
for (i = 0; i < ILT_CLI_MAX; i++)
p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
if (p_hwfn->cdev->p_iov_info)
p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;
/* Set the cxt manager pointer prior to further allocations */
p_hwfn->p_cxt_mngr = p_mngr;
@ -579,8 +651,10 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
params.is_first_pf = p_hwfn->first_on_engine;
params.num_pf_cids = iids.cids;
params.num_vf_cids = iids.vf_cids;
params.start_pq = qm_info->start_pq;
params.num_pf_pqs = qm_info->num_pqs;
params.num_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs;
params.num_vf_pqs = qm_info->num_vf_pqs;
params.start_vport = qm_info->start_vport;
params.num_vports = qm_info->num_vports;
params.pf_wfq = qm_info->pf_wfq;
@ -610,26 +684,55 @@ static int qed_cm_init_pf(struct qed_hwfn *p_hwfn)
static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
u32 dq_pf_max_cid = 0;
u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;
dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid);
dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid);
dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid);
dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid);
dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
/* 5 - PF */
dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid);
dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);
/* Connection types 6 & 7 are not in use, yet they must be configured
* as the highest possible connection. Not configuring them means the
* defaults will be used, and with a large number of cids a bug may
* occur if the defaults are smaller than dq_pf_max_cid /
* dq_vf_max_cid.
*/
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
}
static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
@ -653,6 +756,38 @@ static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
}
}
static void qed_ilt_vf_bounds_init(struct qed_hwfn *p_hwfn)
{
struct qed_ilt_client_cfg *p_cli;
u32 blk_factor;
/* For simplicity we set the 'block' to be an ILT page */
if (p_hwfn->cdev->p_iov_info) {
struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
STORE_RT_REG(p_hwfn,
PSWRQ2_REG_VF_BASE_RT_OFFSET,
p_iov->first_vf_in_pf);
STORE_RT_REG(p_hwfn,
PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
p_iov->first_vf_in_pf + p_iov->total_vfs);
}
p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
if (p_cli->active) {
STORE_RT_REG(p_hwfn,
PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET,
blk_factor);
STORE_RT_REG(p_hwfn,
PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
p_cli->pf_total_lines);
STORE_RT_REG(p_hwfn,
PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
p_cli->vf_total_lines);
}
}
/* ILT (PSWRQ2) PF */
static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
{
@ -662,6 +797,7 @@ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
u32 line, rt_offst, i;
qed_ilt_bounds_init(p_hwfn);
qed_ilt_vf_bounds_init(p_hwfn);
p_mngr = p_hwfn->p_cxt_mngr;
p_shdw = p_mngr->ilt_shadow;
@ -839,10 +975,10 @@ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
/* Set the number of required CORE connections */
u32 core_cids = 1; /* SPQ */
qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids);
qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
p_params->num_cons);
p_params->num_cons, 1);
return 0;
}


@ -51,6 +51,9 @@ enum qed_cxt_elem_type {
QED_ELEM_TASK
};
u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
enum protocol_type type, u32 *vf_cid);
/**
* @brief qed_cxt_set_pf_params - Set the PF params for cxt init
*


@ -30,6 +30,8 @@
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
/* API common to all protocols */
enum BAR_ID {
@ -40,10 +42,14 @@ enum BAR_ID {
static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
enum BAR_ID bar_id)
{
u32 bar_reg = (bar_id == BAR_ID_0 ?
PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
u32 bar_reg = (bar_id == BAR_ID_0 ?
PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
u32 val;
if (IS_VF(p_hwfn->cdev))
return 1 << 17;
val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
if (val)
return 1 << (val + 15);
@ -113,6 +119,9 @@ void qed_resc_free(struct qed_dev *cdev)
{
int i;
if (IS_VF(cdev))
return;
kfree(cdev->fw_data);
cdev->fw_data = NULL;
@ -136,20 +145,26 @@ void qed_resc_free(struct qed_dev *cdev)
qed_eq_free(p_hwfn, p_hwfn->p_eq);
qed_consq_free(p_hwfn, p_hwfn->p_consq);
qed_int_free(p_hwfn);
qed_iov_free(p_hwfn);
qed_dmae_info_free(p_hwfn);
}
}
static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
{
u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct init_qm_port_params *p_qm_port;
u8 num_vports, i, vport_id, num_ports;
u16 num_pqs, multi_cos_tcs = 1;
u16 num_vfs = 0;
#ifdef CONFIG_QED_SRIOV
if (p_hwfn->cdev->p_iov_info)
num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
#endif
memset(qm_info, 0, sizeof(*qm_info));
num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */
num_pqs = multi_cos_tcs + num_vfs + 1; /* The '1' is for pure-LB */
num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
/* Sanity checking that setup requires legal number of resources */
@ -185,8 +200,9 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
/* First init per-TC PQs */
for (i = 0; i < multi_cos_tcs; i++) {
struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];
for (i = 0; i < multi_cos_tcs; i++, curr_queue++) {
struct init_qm_pq_params *params =
&qm_info->qm_pq_params[curr_queue];
params->vport_id = vport_id;
params->tc_id = p_hwfn->hw_info.non_offload_tc;
@ -194,13 +210,26 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
}
/* Then init pure-LB PQ */
qm_info->pure_lb_pq = i;
qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
qm_info->qm_pq_params[i].wrr_group = 1;
i++;
qm_info->pure_lb_pq = curr_queue;
qm_info->qm_pq_params[curr_queue].vport_id =
(u8) RESC_START(p_hwfn, QED_VPORT);
qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC;
qm_info->qm_pq_params[curr_queue].wrr_group = 1;
curr_queue++;
qm_info->offload_pq = 0;
/* Then init per-VF PQs */
vf_offset = curr_queue;
for (i = 0; i < num_vfs; i++) {
/* First vport is used by the PF */
qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1;
qm_info->qm_pq_params[curr_queue].tc_id =
p_hwfn->hw_info.non_offload_tc;
qm_info->qm_pq_params[curr_queue].wrr_group = 1;
curr_queue++;
}
qm_info->vf_queues_offset = vf_offset;
qm_info->num_pqs = num_pqs;
qm_info->num_vports = num_vports;
@ -218,7 +247,8 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);
qm_info->num_vf_pqs = num_vfs;
qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);
for (i = 0; i < qm_info->num_vports; i++)
qm_info->qm_vport_params[i].vport_wfq = 1;
@ -242,6 +272,9 @@ int qed_resc_alloc(struct qed_dev *cdev)
struct qed_eq *p_eq;
int i, rc = 0;
if (IS_VF(cdev))
return rc;
cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
if (!cdev->fw_data)
return -ENOMEM;
@ -316,6 +349,10 @@ int qed_resc_alloc(struct qed_dev *cdev)
if (rc)
goto alloc_err;
rc = qed_iov_alloc(p_hwfn);
if (rc)
goto alloc_err;
/* EQ */
p_eq = qed_eq_alloc(p_hwfn, 256);
if (!p_eq) {
@ -358,6 +395,9 @@ void qed_resc_setup(struct qed_dev *cdev)
{
int i;
if (IS_VF(cdev))
return;
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@ -373,14 +413,15 @@ void qed_resc_setup(struct qed_dev *cdev)
p_hwfn->mcp_info->mfw_mb_length);
qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
}
}
#define FINAL_CLEANUP_POLL_CNT (100)
#define FINAL_CLEANUP_POLL_TIME (10)
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 id)
struct qed_ptt *p_ptt, u16 id, bool is_vf)
{
u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
int rc = -EBUSY;
@ -388,6 +429,9 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
addr = GTT_BAR0_MAP_REG_USDM_RAM +
USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
if (is_vf)
id += 0x10;
command |= X_FINAL_CLEANUP_AGG_INT <<
SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
@ -500,7 +544,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct qed_qm_common_rt_init_params params;
struct qed_dev *cdev = p_hwfn->cdev;
u32 concrete_fid;
int rc = 0;
u8 vf_id;
qed_init_cau_rt_data(cdev);
@ -550,6 +596,14 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, 0x20b4,
qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);
for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) {
concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
}
/* pretend to original PF */
qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
return rc;
}
@ -612,7 +666,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
/* Cleanup chip from previous driver if such remains exist */
rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id);
rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
if (rc != 0)
return rc;
@ -634,7 +688,8 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
/* send function start command */
rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode);
rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode,
allow_npar_tx_switch);
if (rc)
DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
}
@ -690,13 +745,20 @@ int qed_hw_init(struct qed_dev *cdev,
u32 load_code, param;
int rc, mfw_rc, i;
rc = qed_init_fw_data(cdev, bin_fw_data);
if (rc != 0)
return rc;
if (IS_PF(cdev)) {
rc = qed_init_fw_data(cdev, bin_fw_data);
if (rc != 0)
return rc;
}
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
if (IS_VF(cdev)) {
p_hwfn->b_int_enabled = 1;
continue;
}
/* Enable DMAE in PXP */
rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
@ -821,6 +883,11 @@ int qed_hw_stop(struct qed_dev *cdev)
DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
if (IS_VF(cdev)) {
qed_vf_pf_int_cleanup(p_hwfn);
continue;
}
/* mark the hw as uninitialized... */
p_hwfn->hw_init_done = false;
@ -852,15 +919,16 @@ int qed_hw_stop(struct qed_dev *cdev)
usleep_range(1000, 2000);
}
/* Disable DMAE in PXP - in CMT, this should only be done for
* first hw-function, and only after all transactions have
* stopped for all active hw-functions.
*/
t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
cdev->hwfns[0].p_main_ptt,
false);
if (t_rc != 0)
rc = t_rc;
if (IS_PF(cdev)) {
/* Disable DMAE in PXP - in CMT, this should only be done for
* first hw-function, and only after all transactions have
* stopped for all active hw-functions.
*/
t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
cdev->hwfns[0].p_main_ptt, false);
if (t_rc != 0)
rc = t_rc;
}
return rc;
}
@ -871,7 +939,12 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev)
for_each_hwfn(cdev, j) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
if (IS_VF(cdev)) {
qed_vf_pf_int_cleanup(p_hwfn);
continue;
}
DP_VERBOSE(p_hwfn,
NETIF_MSG_IFDOWN,
@ -895,6 +968,9 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev)
void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
{
if (IS_VF(p_hwfn->cdev))
return;
/* Re-open incoming traffic */
qed_wr(p_hwfn, p_hwfn->p_main_ptt,
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
@ -924,6 +1000,13 @@ int qed_hw_reset(struct qed_dev *cdev)
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
if (IS_VF(cdev)) {
rc = qed_vf_pf_reset(p_hwfn);
if (rc)
return rc;
continue;
}
DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
/* Check for incorrect states */
@ -1019,13 +1102,19 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
{
u32 *resc_start = p_hwfn->hw_info.resc_start;
u8 num_funcs = p_hwfn->num_funcs_on_engine;
u32 *resc_num = p_hwfn->hw_info.resc_num;
struct qed_sb_cnt_info sb_cnt_info;
int num_funcs, i;
num_funcs = MAX_NUM_PFS_BB;
int i, max_vf_vlan_filters;
memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
#ifdef CONFIG_QED_SRIOV
max_vf_vlan_filters = QED_ETH_MAX_VF_NUM_VLAN_FILTERS;
#else
max_vf_vlan_filters = 0;
#endif
qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
resc_num[QED_SB] = min_t(u32,
@ -1230,6 +1319,51 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}
static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 reg_function_hide, tmp, eng_mask;
u8 num_funcs;
num_funcs = MAX_NUM_PFS_BB;
/* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
* in the other bits are selected.
* Bits 1-15 are for functions 1-15, respectively, and their value is
* '0' only for enabled functions (function 0 always exists and is
* enabled).
* In case of CMT, only the "even" functions are enabled, and thus the
* number of functions for both hwfns is learnt from the same bits.
*/
reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
if (reg_function_hide & 0x1) {
if (QED_PATH_ID(p_hwfn) && p_hwfn->cdev->num_hwfns == 1) {
num_funcs = 0;
eng_mask = 0xaaaa;
} else {
num_funcs = 1;
eng_mask = 0x5554;
}
/* Get the number of the enabled functions on the engine */
tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
while (tmp) {
if (tmp & 0x1)
num_funcs++;
tmp >>= 0x1;
}
}
p_hwfn->num_funcs_on_engine = num_funcs;
DP_VERBOSE(p_hwfn,
NETIF_MSG_PROBE,
"PF [rel_id %d, abs_id %d] within the %d enabled functions on the engine\n",
p_hwfn->rel_pf_id,
p_hwfn->abs_pf_id,
p_hwfn->num_funcs_on_engine);
}
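To make the counting above concrete, a small self-contained sketch with a hypothetical FUNCTION_HIDE value: 0xffab corresponds to bit 0 set plus PFs 0, 2, 4 and 6 enabled on a non-CMT, path-0 device (so eng_mask is 0x5554):

static u8 example_num_funcs(void)
{
	u32 hide = 0xffab;			/* hypothetical register value */
	u32 tmp = (hide ^ 0xffffffff) & 0x5554;	/* = 0x0054: bits 2, 4, 6 */
	u8 num_funcs = 1;			/* PF 0 always exists */

	while (tmp) {
		if (tmp & 0x1)
			num_funcs++;
		tmp >>= 0x1;
	}

	return num_funcs;			/* 1 + 3 = 4 */
}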
static int
qed_get_hw_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@ -1238,6 +1372,13 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
u32 port_mode;
int rc;
/* Since all information is common, only first hwfns should do this */
if (IS_LEAD_HWFN(p_hwfn)) {
rc = qed_iov_hw_info(p_hwfn);
if (rc)
return rc;
}
/* Read the port mode */
port_mode = qed_rd(p_hwfn, p_ptt,
CNIG_REG_NW_PORT_MODE_BB_B0);
@ -1281,6 +1422,8 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
p_hwfn->hw_info.personality = protocol;
}
qed_get_num_funcs(p_hwfn, p_ptt);
qed_hw_get_resc(p_hwfn);
return rc;
@ -1346,6 +1489,9 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
p_hwfn->regview = p_regview;
p_hwfn->doorbells = p_doorbells;
if (IS_VF(p_hwfn->cdev))
return qed_vf_hw_prepare(p_hwfn);
/* Validate that chip access is feasible */
if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
DP_ERR(p_hwfn,
@ -1397,6 +1543,8 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
return rc;
err2:
if (IS_LEAD_HWFN(p_hwfn))
qed_iov_free_hw_info(p_hwfn->cdev);
qed_mcp_free(p_hwfn);
err1:
qed_hw_hwfn_free(p_hwfn);
@ -1411,7 +1559,8 @@ int qed_hw_prepare(struct qed_dev *cdev,
int rc;
/* Store the precompiled init data ptrs */
qed_init_iro_array(cdev);
if (IS_PF(cdev))
qed_init_iro_array(cdev);
/* Initialize the first hwfn - will learn number of hwfns */
rc = qed_hw_prepare_single(p_hwfn,
@ -1443,9 +1592,11 @@ int qed_hw_prepare(struct qed_dev *cdev,
* initialized hwfn 0.
*/
if (rc) {
qed_init_free(p_hwfn);
qed_mcp_free(p_hwfn);
qed_hw_hwfn_free(p_hwfn);
if (IS_PF(cdev)) {
qed_init_free(p_hwfn);
qed_mcp_free(p_hwfn);
qed_hw_hwfn_free(p_hwfn);
}
}
}
@ -1459,10 +1610,17 @@ void qed_hw_remove(struct qed_dev *cdev)
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
if (IS_VF(cdev)) {
qed_vf_pf_release(p_hwfn);
continue;
}
qed_init_free(p_hwfn);
qed_hw_hwfn_free(p_hwfn);
qed_mcp_free(p_hwfn);
}
qed_iov_free_hw_info(cdev);
}
int qed_chain_alloc(struct qed_dev *cdev,
@ -1732,6 +1890,32 @@ static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
return 0;
}
static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 vp_id, u32 rate)
{
struct qed_mcp_link_state *p_link;
int rc = 0;
p_link = &p_hwfn->cdev->hwfns[0].mcp_info->link_output;
if (!p_link->min_pf_rate) {
p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
p_hwfn->qm_info.wfq_data[vp_id].configured = true;
return rc;
}
rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);
if (rc == 0)
qed_configure_wfq_for_all_vports(p_hwfn, p_ptt,
p_link->min_pf_rate);
else
DP_NOTICE(p_hwfn,
"Validation failed while configuring min rate\n");
return rc;
}
static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 min_pf_rate)
@ -1766,6 +1950,42 @@ static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
return rc;
}
/* Main API for qed clients to configure vport min rate.
* vp_id - vport id in PF Range[0 - (total_num_vports_per_pf - 1)]
* rate - Speed in Mbps needs to be assigned to a given vport.
*/
int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
{
int i, rc = -EINVAL;
/* Currently not supported; Might change in future */
if (cdev->num_hwfns > 1) {
DP_NOTICE(cdev,
"WFQ configuration is not supported for this device\n");
return rc;
}
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
struct qed_ptt *p_ptt;
p_ptt = qed_ptt_acquire(p_hwfn);
if (!p_ptt)
return -EBUSY;
rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);
if (!rc) {
qed_ptt_release(p_hwfn, p_ptt);
return rc;
}
qed_ptt_release(p_hwfn, p_ptt);
}
return rc;
}
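For illustration, a hypothetical qed client call (not part of this commit) requesting a 2500 Mbps minimum rate for vport 3 would look roughly like:

	rc = qed_configure_vport_wfq(cdev, 3, 2500);
	if (rc)
		DP_NOTICE(cdev, "Failed to configure min rate for vport 3\n");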
/* API to configure WFQ from mcp link change */
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
{
@ -1912,3 +2132,17 @@ int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
return rc;
}
void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_mcp_link_state *p_link;
p_link = &p_hwfn->mcp_info->link_output;
if (p_link->min_pf_rate)
qed_disable_wfq_for_all_vports(p_hwfn, p_ptt,
p_link->min_pf_rate);
memset(p_hwfn->qm_info.wfq_data, 0,
sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
}


@ -182,11 +182,15 @@ enum qed_dmae_address_type_t {
* used mostly to write a zeroed buffer to destination address
* using DMA
*/
#define QED_DMAE_FLAG_RW_REPL_SRC 0x00000001
#define QED_DMAE_FLAG_COMPLETION_DST 0x00000008
#define QED_DMAE_FLAG_RW_REPL_SRC 0x00000001
#define QED_DMAE_FLAG_VF_SRC 0x00000002
#define QED_DMAE_FLAG_VF_DST 0x00000004
#define QED_DMAE_FLAG_COMPLETION_DST 0x00000008
struct qed_dmae_params {
u32 flags; /* consists of QED_DMAE_FLAG_* values */
u32 flags; /* consists of QED_DMAE_FLAG_* values */
u8 src_vfid;
u8 dst_vfid;
};
/**
@ -208,6 +212,23 @@ qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
u32 size_in_dwords,
u32 flags);
/**
* @brief qed_dmae_host2host - copy data from a source address
* to a destination address (for SRIOV) using the given ptt
*
* @param p_hwfn
* @param p_ptt
* @param source_addr
* @param dest_addr
* @param size_in_dwords
* @param params
*/
int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
dma_addr_t source_addr,
dma_addr_t dest_addr,
u32 size_in_dwords, struct qed_dmae_params *p_params);
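As a sketch of intended usage, a PF copying a reply into a VF buffer over DMAE might look roughly like this (addresses, length and vfid are placeholders, not taken from this commit):

	struct qed_dmae_params params;

	memset(&params, 0, sizeof(params));
	params.flags = QED_DMAE_FLAG_VF_DST;	/* destination belongs to a VF */
	params.dst_vfid = vfid;

	rc = qed_dmae_host2host(p_hwfn, p_ptt, pf_reply_phys, vf_reply_phys,
				reply_len_bytes / 4, &params);	/* size is in dwords */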
/**
* @brief qed_chain_alloc - Allocate and initialize a chain
*
@ -282,11 +303,11 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
* @param p_hwfn
* @param p_ptt
* @param id - For PF, engine-relative. For VF, PF-relative.
* @param is_vf - true iff cleanup is made for a VF.
*
* @return int
*/
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 id);
struct qed_ptt *p_ptt, u16 id, bool is_vf);
#endif


@ -29,9 +29,9 @@ struct qed_ptt;
enum common_event_opcode {
COMMON_EVENT_PF_START,
COMMON_EVENT_PF_STOP,
COMMON_EVENT_RESERVED,
COMMON_EVENT_RESERVED2,
COMMON_EVENT_RESERVED3,
COMMON_EVENT_VF_START,
COMMON_EVENT_VF_STOP,
COMMON_EVENT_VF_PF_CHANNEL,
COMMON_EVENT_RESERVED4,
COMMON_EVENT_RESERVED5,
COMMON_EVENT_RESERVED6,
@ -44,8 +44,8 @@ enum common_ramrod_cmd_id {
COMMON_RAMROD_UNUSED,
COMMON_RAMROD_PF_START /* PF Function Start Ramrod */,
COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
COMMON_RAMROD_RESERVED,
COMMON_RAMROD_RESERVED2,
COMMON_RAMROD_VF_START,
COMMON_RAMROD_VF_STOP,
COMMON_RAMROD_PF_UPDATE,
COMMON_RAMROD_EMPTY,
MAX_COMMON_RAMROD_CMD_ID
@ -573,6 +573,14 @@ union event_ring_element {
struct event_ring_next_addr next_addr;
};
struct mstorm_non_trigger_vf_zone {
struct eth_mstorm_per_queue_stat eth_queue_stat;
};
struct mstorm_vf_zone {
struct mstorm_non_trigger_vf_zone non_trigger;
};
enum personality_type {
BAD_PERSONALITY_TYP,
PERSONALITY_RESERVED,
@ -671,6 +679,16 @@ enum ports_mode {
MAX_PORTS_MODE
};
struct pstorm_non_trigger_vf_zone {
struct eth_pstorm_per_queue_stat eth_queue_stat;
struct regpair reserved[2];
};
struct pstorm_vf_zone {
struct pstorm_non_trigger_vf_zone non_trigger;
struct regpair reserved[7];
};
/* Ramrod Header of SPQE */
struct ramrod_header {
__le32 cid /* Slowpath Connection CID */;
@ -700,6 +718,36 @@ struct tstorm_per_port_stat {
struct regpair preroce_irregular_pkt;
};
struct ustorm_non_trigger_vf_zone {
struct eth_ustorm_per_queue_stat eth_queue_stat;
struct regpair vf_pf_msg_addr;
};
struct ustorm_trigger_vf_zone {
u8 vf_pf_msg_valid;
u8 reserved[7];
};
struct ustorm_vf_zone {
struct ustorm_non_trigger_vf_zone non_trigger;
struct ustorm_trigger_vf_zone trigger;
};
struct vf_start_ramrod_data {
u8 vf_id;
u8 enable_flr_ack;
__le16 opaque_fid;
u8 personality;
u8 reserved[3];
};
struct vf_stop_ramrod_data {
u8 vf_id;
u8 reserved0;
__le16 reserved1;
__le32 reserved2;
};
struct atten_status_block {
__le32 atten_bits;
__le32 atten_ack;
@ -1026,7 +1074,7 @@ enum init_phases {
PHASE_ENGINE,
PHASE_PORT,
PHASE_PF,
PHASE_RESERVED,
PHASE_VF,
PHASE_QM_PF,
MAX_INIT_PHASES
};


@ -23,6 +23,7 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"
#define QED_BAR_ACQUIRE_TIMEOUT 1000
@ -236,8 +237,12 @@ static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
quota = min_t(size_t, n - done,
PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);
qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
hw_offset = qed_ptt_get_bar_addr(p_ptt);
if (IS_PF(p_hwfn->cdev)) {
qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
hw_offset = qed_ptt_get_bar_addr(p_ptt);
} else {
hw_offset = hw_addr + done;
}
dw_count = quota / 4;
host_addr = (u32 *)((u8 *)addr + done);
@ -338,14 +343,25 @@ void qed_port_unpretend(struct qed_hwfn *p_hwfn,
*(u32 *)&p_ptt->pxp.pretend);
}
u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid)
{
u32 concrete_fid = 0;
SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);
return concrete_fid;
}
/* DMAE */
static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
const u8 is_src_type_grc,
const u8 is_dst_type_grc,
struct qed_dmae_params *p_params)
{
u16 opcode_b = 0;
u32 opcode = 0;
u16 opcodeB = 0;
/* Whether the source is the PCIe or the GRC.
* 0- The source is the PCIe
@ -387,14 +403,24 @@ static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK <<
DMAE_CMD_DST_ADDR_RESET_SHIFT);
opcodeB |= (DMAE_CMD_SRC_VF_ID_MASK <<
DMAE_CMD_SRC_VF_ID_SHIFT);
/* SRC/DST VFID: all 1's - pf, otherwise VF id */
if (p_params->flags & QED_DMAE_FLAG_VF_SRC) {
opcode |= 1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT;
opcode_b |= p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT;
} else {
opcode_b |= DMAE_CMD_SRC_VF_ID_MASK <<
DMAE_CMD_SRC_VF_ID_SHIFT;
}
opcodeB |= (DMAE_CMD_DST_VF_ID_MASK <<
DMAE_CMD_DST_VF_ID_SHIFT);
if (p_params->flags & QED_DMAE_FLAG_VF_DST) {
opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
} else {
opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
}
p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcodeB);
p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcode_b);
}
u32 qed_dmae_idx_to_go_cmd(u8 idx)
@ -742,6 +768,28 @@ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
return rc;
}
int
qed_dmae_host2host(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
dma_addr_t source_addr,
dma_addr_t dest_addr,
u32 size_in_dwords, struct qed_dmae_params *p_params)
{
int rc;
mutex_lock(&(p_hwfn->dmae_info.mutex));
rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
dest_addr,
QED_DMAE_ADDRESS_HOST_PHYS,
QED_DMAE_ADDRESS_HOST_PHYS,
size_in_dwords, p_params);
mutex_unlock(&(p_hwfn->dmae_info.mutex));
return rc;
}
u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
enum protocol_type proto,
union qed_qm_pq_params *p_params)
@ -765,6 +813,9 @@ u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
break;
case PROTOCOLID_ETH:
pq_id = p_params->eth.tc;
if (p_params->eth.is_vf)
pq_id += p_hwfn->qm_info.vf_queues_offset +
p_params->eth.vf_id;
break;
default:
pq_id = 0;


@ -220,6 +220,16 @@ void qed_port_pretend(struct qed_hwfn *p_hwfn,
void qed_port_unpretend(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
* @brief qed_vfid_to_concrete - build a concrete FID for a
* given VF ID
*
* @param p_hwfn
* @param p_ptt
* @param vfid
*/
u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid);
/**
* @brief qed_dmae_idx_to_go_cmd - map the idx to dmae cmd
* this is declared here since other files will require it.

View File

@ -18,6 +18,7 @@
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"
#define QED_INIT_MAX_POLL_COUNT 100
#define QED_INIT_POLL_PERIOD_US 500
@ -128,6 +129,9 @@ int qed_init_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_rt_data *rt_data = &p_hwfn->rt_data;
if (IS_VF(p_hwfn->cdev))
return 0;
rt_data->b_valid = kzalloc(sizeof(bool) * RUNTIME_ARRAY_SIZE,
GFP_KERNEL);
if (!rt_data->b_valid)


@ -26,6 +26,8 @@
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
struct qed_pi_info {
qed_int_comp_cb_t comp_cb;
@ -2513,6 +2515,9 @@ void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
u32 sb_offset;
u32 pi_offset;
if (IS_VF(p_hwfn->cdev))
return;
sb_offset = igu_sb_id * PIS_PER_SB;
memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
@ -2542,8 +2547,9 @@ void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
sb_info->sb_ack = 0;
memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
sb_info->igu_sb_id, 0, 0);
if (IS_PF(p_hwfn->cdev))
qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
sb_info->igu_sb_id, 0, 0);
}
/**
@ -2563,8 +2569,10 @@ static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
/* Assuming continuous set of IGU SBs dedicated for given PF */
if (sb_id == QED_SP_SB_ID)
igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
else
else if (IS_PF(p_hwfn->cdev))
igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
else
igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n",
(sb_id == QED_SP_SB_ID) ? "DSB" : "non-DSB", igu_sb_id);
@ -2594,9 +2602,16 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn,
/* The igu address will hold the absolute address that needs to be
* written to for a specific status block
*/
sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
GTT_BAR0_MAP_REG_IGU_CMD +
(sb_info->igu_sb_id << 3);
if (IS_PF(p_hwfn->cdev)) {
sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
GTT_BAR0_MAP_REG_IGU_CMD +
(sb_info->igu_sb_id << 3);
} else {
sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
PXP_VF_BAR0_START_IGU +
((IGU_CMD_INT_ACK_BASE +
sb_info->igu_sb_id) << 3);
}
sb_info->flags |= QED_SB_INFO_INIT;
@ -2783,6 +2798,9 @@ void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
{
p_hwfn->b_int_enabled = 0;
if (IS_VF(p_hwfn->cdev))
return;
qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
}
@ -2935,9 +2953,9 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
struct qed_igu_info *p_igu_info;
u32 val, min_vf = 0, max_vf = 0;
u16 sb_id, last_iov_sb_id = 0;
struct qed_igu_block *blk;
u32 val;
u16 sb_id;
u16 prev_sb_id = 0xFF;
p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
@ -2947,12 +2965,19 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
p_igu_info = p_hwfn->hw_info.p_igu_info;
/* Initialize base sb / sb cnt for PFs */
/* Initialize base sb / sb cnt for PFs and VFs */
p_igu_info->igu_base_sb = 0xffff;
p_igu_info->igu_sb_cnt = 0;
p_igu_info->igu_dsb_id = 0xffff;
p_igu_info->igu_base_sb_iov = 0xffff;
if (p_hwfn->cdev->p_iov_info) {
struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
min_vf = p_iov->first_vf_in_pf;
max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
}
for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
sb_id++) {
blk = &p_igu_info->igu_map.igu_blocks[sb_id];
@ -2986,14 +3011,43 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
(p_igu_info->igu_sb_cnt)++;
}
}
} else {
if ((blk->function_id >= min_vf) &&
(blk->function_id < max_vf)) {
/* Available for VFs of this PF */
if (p_igu_info->igu_base_sb_iov == 0xffff) {
p_igu_info->igu_base_sb_iov = sb_id;
} else if (last_iov_sb_id != sb_id - 1) {
if (!val) {
DP_VERBOSE(p_hwfn->cdev,
NETIF_MSG_INTR,
"First uninitialized IGU CAM entry at index 0x%04x\n",
sb_id);
} else {
DP_NOTICE(p_hwfn->cdev,
"Consecutive igu vectors for HWFN %x vfs is broken [jumps from %04x to %04x]\n",
p_hwfn->rel_pf_id,
last_iov_sb_id,
sb_id);
}
break;
}
blk->status |= QED_IGU_STATUS_FREE;
p_hwfn->hw_info.p_igu_info->free_blks++;
last_iov_sb_id = sb_id;
}
}
}
p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
"IGU igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n",
p_igu_info->igu_base_sb,
p_igu_info->igu_sb_cnt,
p_igu_info->igu_dsb_id);
DP_VERBOSE(
p_hwfn,
NETIF_MSG_INTR,
"IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] igu_dsb_id=0x%x\n",
p_igu_info->igu_base_sb,
p_igu_info->igu_base_sb_iov,
p_igu_info->igu_sb_cnt,
p_igu_info->igu_sb_cnt_iov,
p_igu_info->igu_dsb_id);
if (p_igu_info->igu_base_sb == 0xffff ||
p_igu_info->igu_dsb_id == 0xffff ||
@ -3116,6 +3170,23 @@ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
p_sb_cnt_info->sb_free_blk = info->free_blks;
}
u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
/* Determine origin of SB id */
if ((sb_id >= p_info->igu_base_sb) &&
(sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) {
return sb_id - p_info->igu_base_sb;
} else if ((sb_id >= p_info->igu_base_sb_iov) &&
(sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) {
return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt;
} else {
DP_NOTICE(p_hwfn, "SB %d not in range for function\n", sb_id);
return 0;
}
}
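A worked example of the mapping above, with hypothetical IGU values (igu_base_sb = 4, igu_sb_cnt = 8, igu_base_sb_iov = 16, igu_sb_cnt_iov = 32):

/* sb_id 6  is in the PF range  -> queue 6 - 4 = 2
 * sb_id 18 is in the IOV range -> queue (18 - 16) + 8 = 10
 */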
void qed_int_disable_post_isr_release(struct qed_dev *cdev)
{
int i;


@ -20,6 +20,12 @@
#define IGU_PF_CONF_ATTN_BIT_EN (0x1 << 3) /* attention enable */
#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
#define IGU_PF_CONF_SIMD_MODE (0x1 << 5) /* simd all ones mode */
/* Fields of IGU VF CONFIGURATION REGISTER */
#define IGU_VF_CONF_FUNC_EN (0x1 << 0) /* function enable */
#define IGU_VF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */
#define IGU_VF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
#define IGU_VF_CONF_PARENT_MASK (0xF) /* Parent PF */
#define IGU_VF_CONF_PARENT_SHIFT 5 /* Parent PF */
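A hypothetical composition of a VF IGU configuration value from these fields (a sketch only; parent_pf is a placeholder for the VF's parent PF id):

	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN;

	igu_vf_conf |= (parent_pf & IGU_VF_CONF_PARENT_MASK) << IGU_VF_CONF_PARENT_SHIFT;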
/* Igu control commands
*/
@ -364,6 +370,16 @@ void qed_int_free(struct qed_hwfn *p_hwfn);
void qed_int_setup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
* @brief - Returns an Rx queue index appropriate for usage with given SB.
*
* @param p_hwfn
* @param sb_id - absolute index of SB
*
* @return index of Rx queue
*/
u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
/**
* @brief - Enable Interrupt & Attention for hw function
*

View File

@ -31,124 +31,25 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_l2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
struct qed_rss_params {
u8 update_rss_config;
u8 rss_enable;
u8 rss_eng_id;
u8 update_rss_capabilities;
u8 update_rss_ind_table;
u8 update_rss_key;
u8 rss_caps;
u8 rss_table_size_log;
u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE];
u32 rss_key[QED_RSS_KEY_SIZE];
};
enum qed_filter_opcode {
QED_FILTER_ADD,
QED_FILTER_REMOVE,
QED_FILTER_MOVE,
QED_FILTER_REPLACE, /* Delete all MACs and add new one instead */
QED_FILTER_FLUSH, /* Removes all filters */
};
enum qed_filter_ucast_type {
QED_FILTER_MAC,
QED_FILTER_VLAN,
QED_FILTER_MAC_VLAN,
QED_FILTER_INNER_MAC,
QED_FILTER_INNER_VLAN,
QED_FILTER_INNER_PAIR,
QED_FILTER_INNER_MAC_VNI_PAIR,
QED_FILTER_MAC_VNI_PAIR,
QED_FILTER_VNI,
};
struct qed_filter_ucast {
enum qed_filter_opcode opcode;
enum qed_filter_ucast_type type;
u8 is_rx_filter;
u8 is_tx_filter;
u8 vport_to_add_to;
u8 vport_to_remove_from;
unsigned char mac[ETH_ALEN];
u8 assert_on_error;
u16 vlan;
u32 vni;
};
struct qed_filter_mcast {
/* MOVE is not supported for multicast */
enum qed_filter_opcode opcode;
u8 vport_to_add_to;
u8 vport_to_remove_from;
u8 num_mc_addrs;
#define QED_MAX_MC_ADDRS 64
unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
};
struct qed_filter_accept_flags {
u8 update_rx_mode_config;
u8 update_tx_mode_config;
u8 rx_accept_filter;
u8 tx_accept_filter;
#define QED_ACCEPT_NONE 0x01
#define QED_ACCEPT_UCAST_MATCHED 0x02
#define QED_ACCEPT_UCAST_UNMATCHED 0x04
#define QED_ACCEPT_MCAST_MATCHED 0x08
#define QED_ACCEPT_MCAST_UNMATCHED 0x10
#define QED_ACCEPT_BCAST 0x20
};
struct qed_sp_vport_update_params {
u16 opaque_fid;
u8 vport_id;
u8 update_vport_active_rx_flg;
u8 vport_active_rx_flg;
u8 update_vport_active_tx_flg;
u8 vport_active_tx_flg;
u8 update_approx_mcast_flg;
u8 update_accept_any_vlan_flg;
u8 accept_any_vlan;
unsigned long bins[8];
struct qed_rss_params *rss_params;
struct qed_filter_accept_flags accept_flags;
};
enum qed_tpa_mode {
QED_TPA_MODE_NONE,
QED_TPA_MODE_UNUSED,
QED_TPA_MODE_GRO,
QED_TPA_MODE_MAX
};
struct qed_sp_vport_start_params {
enum qed_tpa_mode tpa_mode;
bool remove_inner_vlan;
bool drop_ttl0;
u8 max_buffers_per_cqe;
u32 concrete_fid;
u16 opaque_fid;
u8 vport_id;
u16 mtu;
};
#define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41
static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_start_params *p_params)
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_start_params *p_params)
{
struct vport_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
u8 abs_vport_id = 0;
int rc = -EINVAL;
u16 rx_mode = 0;
u8 abs_vport_id = 0;
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc != 0)
@ -198,6 +99,8 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
break;
}
p_ramrod->tx_switching_en = p_params->tx_switching;
/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
p_params->concrete_fid);
@ -205,6 +108,21 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_start_params *p_params)
{
if (IS_VF(p_hwfn->cdev)) {
return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
p_params->mtu,
p_params->remove_inner_vlan,
p_params->tpa_mode,
p_params->max_buffers_per_cqe,
p_params->only_untagged);
}
return qed_sp_eth_vport_start(p_hwfn, p_params);
}
static int
qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
struct vport_update_ramrod_data *p_ramrod,
@ -349,6 +267,38 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
}
}
static void
qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
struct vport_update_ramrod_data *p_ramrod,
struct qed_sge_tpa_params *p_params)
{
struct eth_vport_tpa_param *p_tpa;
if (!p_params) {
p_ramrod->common.update_tpa_param_flg = 0;
p_ramrod->common.update_tpa_en_flg = 0;
p_ramrod->common.update_tpa_param_flg = 0;
return;
}
p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
p_tpa = &p_ramrod->tpa_param;
p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;
p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
p_tpa->tpa_max_size = p_params->tpa_max_size;
p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
}
static void
qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
struct vport_update_ramrod_data *p_ramrod,
@ -370,20 +320,24 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
}
}
static int
qed_sp_vport_update(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_update_params *p_params,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data)
int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_update_params *p_params,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data)
{
struct qed_rss_params *p_rss_params = p_params->rss_params;
struct vport_update_ramrod_data_cmn *p_cmn;
struct qed_sp_init_data init_data;
struct vport_update_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
u8 abs_vport_id = 0;
u8 abs_vport_id = 0, val;
int rc = -EINVAL;
if (IS_VF(p_hwfn->cdev)) {
rc = qed_vf_pf_vport_update(p_hwfn, p_params);
return rc;
}
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc != 0)
return rc;
@ -412,6 +366,27 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn,
p_cmn->accept_any_vlan = p_params->accept_any_vlan;
p_cmn->update_accept_any_vlan_flg =
p_params->update_accept_any_vlan_flg;
p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
val = p_params->update_inner_vlan_removal_flg;
p_cmn->update_inner_vlan_removal_en_flg = val;
p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
val = p_params->update_default_vlan_enable_flg;
p_cmn->update_default_vlan_en_flg = val;
p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan);
p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;
p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;
p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;
p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
val = p_params->update_anti_spoofing_en_flg;
p_ramrod->common.update_anti_spoofing_en_flg = val;
rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
if (rc) {
/* Return spq entry which is taken in qed_sp_init_request()*/
@ -423,12 +398,11 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn,
qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params);
return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
u8 vport_id)
int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
{
struct vport_stop_ramrod_data *p_ramrod;
struct qed_sp_init_data init_data;
@ -436,6 +410,9 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
u8 abs_vport_id = 0;
int rc;
if (IS_VF(p_hwfn->cdev))
return qed_vf_pf_vport_stop(p_hwfn);
rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
if (rc != 0)
return rc;
@ -457,13 +434,26 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int
qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn,
struct qed_filter_accept_flags *p_accept_flags)
{
struct qed_sp_vport_update_params s_params;
memset(&s_params, 0, sizeof(s_params));
memcpy(&s_params.accept_flags, p_accept_flags,
sizeof(struct qed_filter_accept_flags));
return qed_vf_pf_vport_update(p_hwfn, &s_params);
}
static int qed_filter_accept_cmd(struct qed_dev *cdev,
u8 vport,
struct qed_filter_accept_flags accept_flags,
u8 update_accept_any_vlan,
u8 accept_any_vlan,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data)
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data)
{
struct qed_sp_vport_update_params vport_update_params;
int i, rc;
@ -480,6 +470,13 @@ static int qed_filter_accept_cmd(struct qed_dev *cdev,
vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
if (IS_VF(cdev)) {
rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags);
if (rc)
return rc;
continue;
}
rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
comp_mode, p_comp_data);
if (rc != 0) {
@ -514,16 +511,14 @@ static int qed_sp_release_queue_cid(
return 0;
}
static int
qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
u32 cid,
struct qed_queue_start_common_params *params,
u8 stats_id,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
u16 cqe_pbl_size)
int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
u32 cid,
struct qed_queue_start_common_params *params,
u8 stats_id,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
{
struct rx_queue_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
@ -592,8 +587,7 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
u16 cqe_pbl_size,
void __iomem **pp_prod)
u16 cqe_pbl_size, void __iomem **pp_prod)
{
struct qed_hw_cid_data *p_rx_cid;
u64 init_prod_val = 0;
@ -601,6 +595,16 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
u8 abs_stats_id = 0;
int rc;
if (IS_VF(p_hwfn->cdev)) {
return qed_vf_pf_rxq_start(p_hwfn,
params->queue_id,
params->sb,
params->sb_idx,
bd_max_bytes,
bd_chain_phys_addr,
cqe_pbl_addr, cqe_pbl_size, pp_prod);
}
rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue);
if (rc != 0)
return rc;
@ -643,10 +647,59 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
return rc;
}
static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
u16 rx_queue_id,
bool eq_completion_only,
bool cqe_completion)
int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
u16 rx_queue_id,
u8 num_rxqs,
u8 complete_cqe_flg,
u8 complete_event_flg,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data)
{
struct rx_queue_update_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
struct qed_hw_cid_data *p_rx_cid;
u16 qid, abs_rx_q_id = 0;
int rc = -EINVAL;
u8 i;
memset(&init_data, 0, sizeof(init_data));
init_data.comp_mode = comp_mode;
init_data.p_comp_data = p_comp_data;
for (i = 0; i < num_rxqs; i++) {
qid = rx_queue_id + i;
p_rx_cid = &p_hwfn->p_rx_cids[qid];
/* Get SPQ entry */
init_data.cid = p_rx_cid->cid;
init_data.opaque_fid = p_rx_cid->opaque_fid;
rc = qed_sp_init_request(p_hwfn, &p_ent,
ETH_RAMROD_RX_QUEUE_UPDATE,
PROTOCOLID_ETH, &init_data);
if (rc)
return rc;
p_ramrod = &p_ent->ramrod.rx_queue_update;
qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
p_ramrod->complete_cqe_flg = complete_cqe_flg;
p_ramrod->complete_event_flg = complete_event_flg;
rc = qed_spq_post(p_hwfn, p_ent, NULL);
if (rc)
return rc;
}
return rc;
}
int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
u16 rx_queue_id,
bool eq_completion_only, bool cqe_completion)
{
struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
@ -655,6 +708,9 @@ static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
u16 abs_rx_q_id = 0;
int rc = -EINVAL;
if (IS_VF(p_hwfn->cdev))
return qed_vf_pf_rxq_stop(p_hwfn, rx_queue_id, cqe_completion);
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = p_rx_cid->cid;
@ -690,15 +746,14 @@ static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
return qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
}
static int
qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
u32 cid,
struct qed_queue_start_common_params *p_params,
u8 stats_id,
dma_addr_t pbl_addr,
u16 pbl_size,
union qed_qm_pq_params *p_pq_params)
int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
u32 cid,
struct qed_queue_start_common_params *p_params,
u8 stats_id,
dma_addr_t pbl_addr,
u16 pbl_size,
union qed_qm_pq_params *p_pq_params)
{
struct tx_queue_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
@ -752,14 +807,21 @@ qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
struct qed_queue_start_common_params *p_params,
dma_addr_t pbl_addr,
u16 pbl_size,
void __iomem **pp_doorbell)
u16 pbl_size, void __iomem **pp_doorbell)
{
struct qed_hw_cid_data *p_tx_cid;
union qed_qm_pq_params pq_params;
u8 abs_stats_id = 0;
int rc;
if (IS_VF(p_hwfn->cdev)) {
return qed_vf_pf_txq_start(p_hwfn,
p_params->queue_id,
p_params->sb,
p_params->sb_idx,
pbl_addr, pbl_size, pp_doorbell);
}
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
if (rc)
return rc;
@ -800,14 +862,16 @@ qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
return rc;
}
static int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn,
u16 tx_queue_id)
int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id)
{
struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL;
if (IS_VF(p_hwfn->cdev))
return qed_vf_pf_txq_stop(p_hwfn, tx_queue_id);
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = p_tx_cid->cid;
@ -1003,11 +1067,11 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
return 0;
}
static int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
struct qed_filter_ucast *p_filter_cmd,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data)
int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
struct qed_filter_ucast *p_filter_cmd,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data)
{
struct vport_filter_update_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
@ -1105,7 +1169,7 @@ static inline u32 qed_crc32c_le(u32 seed,
return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
}
static u8 qed_mcast_bin_from_mac(u8 *mac)
u8 qed_mcast_bin_from_mac(u8 *mac)
{
u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
mac, ETH_ALEN);
@ -1188,11 +1252,10 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int
qed_filter_mcast_cmd(struct qed_dev *cdev,
struct qed_filter_mcast *p_filter_cmd,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data)
static int qed_filter_mcast_cmd(struct qed_dev *cdev,
struct qed_filter_mcast *p_filter_cmd,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data)
{
int rc = 0;
int i;
@ -1208,8 +1271,10 @@ qed_filter_mcast_cmd(struct qed_dev *cdev,
u16 opaque_fid;
if (rc != 0)
break;
if (IS_VF(cdev)) {
qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
continue;
}
opaque_fid = p_hwfn->hw_info.opaque_fid;
@ -1234,8 +1299,10 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev,
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
u16 opaque_fid;
if (rc != 0)
break;
if (IS_VF(cdev)) {
rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
continue;
}
opaque_fid = p_hwfn->hw_info.opaque_fid;
@ -1244,6 +1311,8 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev,
p_filter_cmd,
comp_mode,
p_comp_data);
if (rc != 0)
break;
}
return rc;
@ -1252,12 +1321,19 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev,
/* Statistics related code */
static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
u32 *p_addr,
u32 *p_len,
u16 statistics_bin)
u32 *p_len, u16 statistics_bin)
{
*p_addr = BAR0_MAP_REG_PSDM_RAM +
PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
*p_len = sizeof(struct eth_pstorm_per_queue_stat);
if (IS_PF(p_hwfn->cdev)) {
*p_addr = BAR0_MAP_REG_PSDM_RAM +
PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
*p_len = sizeof(struct eth_pstorm_per_queue_stat);
} else {
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
*p_addr = p_resp->pfdev_info.stats_info.pstats.address;
*p_len = p_resp->pfdev_info.stats_info.pstats.len;
}
}
static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
@ -1272,32 +1348,15 @@ static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
statistics_bin);
memset(&pstats, 0, sizeof(pstats));
qed_memcpy_from(p_hwfn, p_ptt, &pstats,
pstats_addr, pstats_len);
qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
p_stats->tx_ucast_bytes +=
HILO_64_REGPAIR(pstats.sent_ucast_bytes);
p_stats->tx_mcast_bytes +=
HILO_64_REGPAIR(pstats.sent_mcast_bytes);
p_stats->tx_bcast_bytes +=
HILO_64_REGPAIR(pstats.sent_bcast_bytes);
p_stats->tx_ucast_pkts +=
HILO_64_REGPAIR(pstats.sent_ucast_pkts);
p_stats->tx_mcast_pkts +=
HILO_64_REGPAIR(pstats.sent_mcast_pkts);
p_stats->tx_bcast_pkts +=
HILO_64_REGPAIR(pstats.sent_bcast_pkts);
p_stats->tx_err_drop_pkts +=
HILO_64_REGPAIR(pstats.error_drop_pkts);
}
static void __qed_get_vport_tstats_addrlen(struct qed_hwfn *p_hwfn,
u32 *p_addr,
u32 *p_len)
{
*p_addr = BAR0_MAP_REG_TSDM_RAM +
TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
*p_len = sizeof(struct tstorm_per_port_stat);
p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
}
static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
@ -1305,14 +1364,23 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
struct qed_eth_stats *p_stats,
u16 statistics_bin)
{
u32 tstats_addr = 0, tstats_len = 0;
struct tstorm_per_port_stat tstats;
u32 tstats_addr, tstats_len;
__qed_get_vport_tstats_addrlen(p_hwfn, &tstats_addr, &tstats_len);
if (IS_PF(p_hwfn->cdev)) {
tstats_addr = BAR0_MAP_REG_TSDM_RAM +
TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
tstats_len = sizeof(struct tstorm_per_port_stat);
} else {
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
}
memset(&tstats, 0, sizeof(tstats));
qed_memcpy_from(p_hwfn, p_ptt, &tstats,
tstats_addr, tstats_len);
qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
p_stats->mftag_filter_discards +=
HILO_64_REGPAIR(tstats.mftag_filter_discard);
@ -1322,12 +1390,19 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
u32 *p_addr,
u32 *p_len,
u16 statistics_bin)
u32 *p_len, u16 statistics_bin)
{
*p_addr = BAR0_MAP_REG_USDM_RAM +
USTORM_QUEUE_STAT_OFFSET(statistics_bin);
*p_len = sizeof(struct eth_ustorm_per_queue_stat);
if (IS_PF(p_hwfn->cdev)) {
*p_addr = BAR0_MAP_REG_USDM_RAM +
USTORM_QUEUE_STAT_OFFSET(statistics_bin);
*p_len = sizeof(struct eth_ustorm_per_queue_stat);
} else {
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
*p_addr = p_resp->pfdev_info.stats_info.ustats.address;
*p_len = p_resp->pfdev_info.stats_info.ustats.len;
}
}
static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
@ -1342,31 +1417,31 @@ static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
statistics_bin);
memset(&ustats, 0, sizeof(ustats));
qed_memcpy_from(p_hwfn, p_ptt, &ustats,
ustats_addr, ustats_len);
qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
p_stats->rx_ucast_bytes +=
HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
p_stats->rx_mcast_bytes +=
HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
p_stats->rx_bcast_bytes +=
HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
p_stats->rx_ucast_pkts +=
HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
p_stats->rx_mcast_pkts +=
HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
p_stats->rx_bcast_pkts +=
HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}
static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
u32 *p_addr,
u32 *p_len,
u16 statistics_bin)
u32 *p_len, u16 statistics_bin)
{
*p_addr = BAR0_MAP_REG_MSDM_RAM +
MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
*p_len = sizeof(struct eth_mstorm_per_queue_stat);
if (IS_PF(p_hwfn->cdev)) {
*p_addr = BAR0_MAP_REG_MSDM_RAM +
MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
*p_len = sizeof(struct eth_mstorm_per_queue_stat);
} else {
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
*p_addr = p_resp->pfdev_info.stats_info.mstats.address;
*p_len = p_resp->pfdev_info.stats_info.mstats.len;
}
}
static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
@ -1381,21 +1456,17 @@ static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
statistics_bin);
memset(&mstats, 0, sizeof(mstats));
qed_memcpy_from(p_hwfn, p_ptt, &mstats,
mstats_addr, mstats_len);
qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
p_stats->no_buff_discards +=
HILO_64_REGPAIR(mstats.no_buff_discard);
p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
p_stats->packet_too_big_discard +=
HILO_64_REGPAIR(mstats.packet_too_big_discard);
p_stats->ttl0_discard +=
HILO_64_REGPAIR(mstats.ttl0_discard);
p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
p_stats->tpa_coalesced_pkts +=
HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
p_stats->tpa_coalesced_events +=
HILO_64_REGPAIR(mstats.tpa_coalesced_events);
p_stats->tpa_aborts_num +=
HILO_64_REGPAIR(mstats.tpa_aborts_num);
p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
p_stats->tpa_coalesced_bytes +=
HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}
@ -1468,44 +1539,49 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_eth_stats *stats,
u16 statistics_bin)
u16 statistics_bin, bool b_get_port_stats)
{
__qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
__qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
__qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
__qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
if (p_hwfn->mcp_info)
if (b_get_port_stats && p_hwfn->mcp_info)
__qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
}
static void _qed_get_vport_stats(struct qed_dev *cdev,
struct qed_eth_stats *stats)
{
u8 fw_vport = 0;
int i;
u8 fw_vport = 0;
int i;
memset(stats, 0, sizeof(*stats));
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
struct qed_ptt *p_ptt;
struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
: NULL;
/* The main vport index is relative first */
if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
DP_ERR(p_hwfn, "No vport available!\n");
continue;
if (IS_PF(cdev)) {
/* The main vport index is relative first */
if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
DP_ERR(p_hwfn, "No vport available!\n");
goto out;
}
}
p_ptt = qed_ptt_acquire(p_hwfn);
if (!p_ptt) {
if (IS_PF(cdev) && !p_ptt) {
DP_ERR(p_hwfn, "Failed to acquire ptt\n");
continue;
}
__qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport);
__qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
IS_PF(cdev) ? true : false);
qed_ptt_release(p_hwfn, p_ptt);
out:
if (IS_PF(cdev) && p_ptt)
qed_ptt_release(p_hwfn, p_ptt);
}
}
@ -1539,10 +1615,11 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
struct eth_mstorm_per_queue_stat mstats;
struct eth_ustorm_per_queue_stat ustats;
struct eth_pstorm_per_queue_stat pstats;
struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
: NULL;
u32 addr = 0, len = 0;
if (!p_ptt) {
if (IS_PF(cdev) && !p_ptt) {
DP_ERR(p_hwfn, "Failed to acquire ptt\n");
continue;
}
@ -1559,7 +1636,8 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
__qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
qed_ptt_release(p_hwfn, p_ptt);
if (IS_PF(cdev))
qed_ptt_release(p_hwfn, p_ptt);
}
/* PORT statistics are not necessarily reset, so we need to
@ -1580,32 +1658,61 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
info->num_tc = 1;
if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
for_each_hwfn(cdev, i)
info->num_queues += FEAT_NUM(&cdev->hwfns[i],
QED_PF_L2_QUE);
if (cdev->int_params.fp_msix_cnt)
info->num_queues = min_t(u8, info->num_queues,
cdev->int_params.fp_msix_cnt);
if (IS_PF(cdev)) {
if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
for_each_hwfn(cdev, i)
info->num_queues +=
FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
if (cdev->int_params.fp_msix_cnt)
info->num_queues =
min_t(u8, info->num_queues,
cdev->int_params.fp_msix_cnt);
} else {
info->num_queues = cdev->num_hwfns;
}
info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
ether_addr_copy(info->port_mac,
cdev->hwfns[0].hw_info.hw_mac_addr);
} else {
info->num_queues = cdev->num_hwfns;
qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), &info->num_queues);
if (cdev->num_hwfns > 1) {
u8 queues = 0;
qed_vf_get_num_rxqs(&cdev->hwfns[1], &queues);
info->num_queues += queues;
}
qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
&info->num_vlan_filters);
qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
}
info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
ether_addr_copy(info->port_mac,
cdev->hwfns[0].hw_info.hw_mac_addr);
qed_fill_dev_info(cdev, &info->common);
if (IS_VF(cdev))
memset(info->common.hw_mac, 0, ETH_ALEN);
return 0;
}
static void qed_register_eth_ops(struct qed_dev *cdev,
struct qed_eth_cb_ops *ops,
void *cookie)
struct qed_eth_cb_ops *ops, void *cookie)
{
cdev->protocol_ops.eth = ops;
cdev->ops_cookie = cookie;
cdev->protocol_ops.eth = ops;
cdev->ops_cookie = cookie;
/* For VF, we start bulletin reading */
if (IS_VF(cdev))
qed_vf_start_iov_wq(cdev);
}
static bool qed_check_mac(struct qed_dev *cdev, u8 *mac)
{
if (IS_PF(cdev))
return true;
return qed_vf_check_mac(&cdev->hwfns[0], mac);
}
static int qed_start_vport(struct qed_dev *cdev,
@ -1620,6 +1727,7 @@ static int qed_start_vport(struct qed_dev *cdev,
start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
QED_TPA_MODE_NONE;
start.remove_inner_vlan = params->remove_inner_vlan;
start.only_untagged = true; /* untagged only */
start.drop_ttl0 = params->drop_ttl0;
start.opaque_fid = p_hwfn->hw_info.opaque_fid;
start.concrete_fid = p_hwfn->hw_info.concrete_fid;
@ -1686,6 +1794,8 @@ static int qed_update_vport(struct qed_dev *cdev,
params->update_vport_active_flg;
sp_params.vport_active_rx_flg = params->vport_active_flg;
sp_params.vport_active_tx_flg = params->vport_active_flg;
sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
sp_params.tx_switching_flg = params->tx_switching_flg;
sp_params.accept_any_vlan = params->accept_any_vlan;
sp_params.update_accept_any_vlan_flg =
params->update_accept_any_vlan_flg;
@ -1890,6 +2000,9 @@ static int qed_tunn_configure(struct qed_dev *cdev,
struct qed_tunn_update_params tunn_info;
int i, rc;
if (IS_VF(cdev))
return 0;
memset(&tunn_info, 0, sizeof(tunn_info));
if (tunn_params->update_vxlan_port == 1) {
tunn_info.update_vxlan_udp_port = 1;
@ -2041,10 +2154,18 @@ static int qed_fp_cqe_completion(struct qed_dev *dev,
cqe);
}
#ifdef CONFIG_QED_SRIOV
extern const struct qed_iov_hv_ops qed_iov_ops_pass;
#endif
static const struct qed_eth_ops qed_eth_ops_pass = {
.common = &qed_common_ops_pass,
#ifdef CONFIG_QED_SRIOV
.iov = &qed_iov_ops_pass,
#endif
.fill_dev_info = &qed_fill_eth_dev_info,
.register_ops = &qed_register_eth_ops,
.check_mac = &qed_check_mac,
.vport_start = &qed_start_vport,
.vport_stop = &qed_stop_vport,
.vport_update = &qed_update_vport,


@ -0,0 +1,239 @@
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef _QED_L2_H
#define _QED_L2_H
#include <linux/types.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/qed/qed_eth_if.h>
#include "qed.h"
#include "qed_hw.h"
#include "qed_sp.h"
struct qed_sge_tpa_params {
u8 max_buffers_per_cqe;
u8 update_tpa_en_flg;
u8 tpa_ipv4_en_flg;
u8 tpa_ipv6_en_flg;
u8 tpa_ipv4_tunn_en_flg;
u8 tpa_ipv6_tunn_en_flg;
u8 update_tpa_param_flg;
u8 tpa_pkt_split_flg;
u8 tpa_hdr_data_split_flg;
u8 tpa_gro_consistent_flg;
u8 tpa_max_aggs_num;
u16 tpa_max_size;
u16 tpa_min_size_to_start;
u16 tpa_min_size_to_cont;
};
enum qed_filter_opcode {
QED_FILTER_ADD,
QED_FILTER_REMOVE,
QED_FILTER_MOVE,
QED_FILTER_REPLACE, /* Delete all MACs and add new one instead */
QED_FILTER_FLUSH, /* Removes all filters */
};
enum qed_filter_ucast_type {
QED_FILTER_MAC,
QED_FILTER_VLAN,
QED_FILTER_MAC_VLAN,
QED_FILTER_INNER_MAC,
QED_FILTER_INNER_VLAN,
QED_FILTER_INNER_PAIR,
QED_FILTER_INNER_MAC_VNI_PAIR,
QED_FILTER_MAC_VNI_PAIR,
QED_FILTER_VNI,
};
struct qed_filter_ucast {
enum qed_filter_opcode opcode;
enum qed_filter_ucast_type type;
u8 is_rx_filter;
u8 is_tx_filter;
u8 vport_to_add_to;
u8 vport_to_remove_from;
unsigned char mac[ETH_ALEN];
u8 assert_on_error;
u16 vlan;
u32 vni;
};
struct qed_filter_mcast {
/* MOVE is not supported for multicast */
enum qed_filter_opcode opcode;
u8 vport_to_add_to;
u8 vport_to_remove_from;
u8 num_mc_addrs;
#define QED_MAX_MC_ADDRS 64
unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
};
int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
u16 rx_queue_id,
bool eq_completion_only, bool cqe_completion);
int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id);
enum qed_tpa_mode {
QED_TPA_MODE_NONE,
QED_TPA_MODE_UNUSED,
QED_TPA_MODE_GRO,
QED_TPA_MODE_MAX
};
struct qed_sp_vport_start_params {
enum qed_tpa_mode tpa_mode;
bool remove_inner_vlan;
bool tx_switching;
bool only_untagged;
bool drop_ttl0;
u8 max_buffers_per_cqe;
u32 concrete_fid;
u16 opaque_fid;
u8 vport_id;
u16 mtu;
};
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_start_params *p_params);
struct qed_rss_params {
u8 update_rss_config;
u8 rss_enable;
u8 rss_eng_id;
u8 update_rss_capabilities;
u8 update_rss_ind_table;
u8 update_rss_key;
u8 rss_caps;
u8 rss_table_size_log;
u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE];
u32 rss_key[QED_RSS_KEY_SIZE];
};
struct qed_filter_accept_flags {
u8 update_rx_mode_config;
u8 update_tx_mode_config;
u8 rx_accept_filter;
u8 tx_accept_filter;
#define QED_ACCEPT_NONE 0x01
#define QED_ACCEPT_UCAST_MATCHED 0x02
#define QED_ACCEPT_UCAST_UNMATCHED 0x04
#define QED_ACCEPT_MCAST_MATCHED 0x08
#define QED_ACCEPT_MCAST_UNMATCHED 0x10
#define QED_ACCEPT_BCAST 0x20
};
struct qed_sp_vport_update_params {
u16 opaque_fid;
u8 vport_id;
u8 update_vport_active_rx_flg;
u8 vport_active_rx_flg;
u8 update_vport_active_tx_flg;
u8 vport_active_tx_flg;
u8 update_inner_vlan_removal_flg;
u8 inner_vlan_removal_flg;
u8 silent_vlan_removal_flg;
u8 update_default_vlan_enable_flg;
u8 default_vlan_enable_flg;
u8 update_default_vlan_flg;
u16 default_vlan;
u8 update_tx_switching_flg;
u8 tx_switching_flg;
u8 update_approx_mcast_flg;
u8 update_anti_spoofing_en_flg;
u8 anti_spoofing_en;
u8 update_accept_any_vlan_flg;
u8 accept_any_vlan;
unsigned long bins[8];
struct qed_rss_params *rss_params;
struct qed_filter_accept_flags accept_flags;
struct qed_sge_tpa_params *sge_tpa_params;
};
int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_update_params *p_params,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data);
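/* A minimal usage sketch, assuming a vport that has already been started:
 * enable its Rx/Tx paths and accept-any-vlan through a single vport-update
 * ramrod. The wrapper name and the field choices are illustrative only.
 */
static int example_activate_vport(struct qed_hwfn *p_hwfn, u8 vport_id)
{
	struct qed_sp_vport_update_params params;

	memset(&params, 0, sizeof(params));
	params.opaque_fid = p_hwfn->hw_info.opaque_fid;
	params.vport_id = vport_id;
	params.update_vport_active_rx_flg = 1;
	params.vport_active_rx_flg = 1;
	params.update_vport_active_tx_flg = 1;
	params.vport_active_tx_flg = 1;
	params.update_accept_any_vlan_flg = 1;
	params.accept_any_vlan = 1;

	/* QED_SPQ_MODE_EBLOCK - wait for the ramrod's EQ completion */
	return qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
}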
/**
* @brief qed_sp_vport_stop -
*
* This ramrod closes a VPort after all its RX and TX queues are terminated.
* An Assert is generated if any queues are left open.
*
* @param p_hwfn
* @param opaque_fid
* @param vport_id VPort ID
*
* @return int
*/
int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id);
int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
struct qed_filter_ucast *p_filter_cmd,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data);
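/* A minimal sketch of a PF-side unicast filter configuration, assuming the
 * vport is active: add an Rx+Tx MAC filter on vport 0 and wait for the
 * ramrod. The wrapper name and the QED_SPQ_MODE_EBLOCK choice are
 * illustrative.
 */
static int example_add_mac_filter(struct qed_hwfn *p_hwfn, u8 *mac)
{
	struct qed_filter_ucast ucast;

	memset(&ucast, 0, sizeof(ucast));
	ucast.opcode = QED_FILTER_ADD;
	ucast.type = QED_FILTER_MAC;
	ucast.is_rx_filter = 1;
	ucast.is_tx_filter = 1;
	ucast.vport_to_add_to = 0;
	ether_addr_copy(ucast.mac, mac);

	return qed_sp_eth_filter_ucast(p_hwfn, p_hwfn->hw_info.opaque_fid,
				       &ucast, QED_SPQ_MODE_EBLOCK, NULL);
}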
/**
* @brief qed_sp_rx_eth_queues_update -
*
* This ramrod updates an RX queue. It is used for setting the active state
* of the queue and updating the TPA and SGE parameters.
*
* @note At the moment - only used by non-linux VFs.
*
* @param p_hwfn
* @param rx_queue_id RX Queue ID
 * @param num_rxqs           Allows updating multiple rx
 *                           queues, from rx_queue_id to
 *                           (rx_queue_id + num_rxqs - 1)
* @param complete_cqe_flg Post completion to the CQE Ring if set
* @param complete_event_flg Post completion to the Event Ring if set
*
* @return int
*/
int
qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
u16 rx_queue_id,
u8 num_rxqs,
u8 complete_cqe_flg,
u8 complete_event_flg,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data);
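/* A minimal sketch of the PF-side call described above, assuming two
 * consecutive Rx queues whose updates should complete on the event ring
 * rather than the CQE ring. Queue ids and flag values are illustrative.
 */
static int example_update_rxqs(struct qed_hwfn *p_hwfn)
{
	return qed_sp_eth_rx_queues_update(p_hwfn,
					   0,	/* first relative rx queue */
					   2,	/* number of queues to update */
					   0,	/* no CQE-ring completion */
					   1,	/* event-ring completion */
					   QED_SPQ_MODE_EBLOCK, NULL);
}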
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_start_params *p_params);
int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
u32 cid,
struct qed_queue_start_common_params *params,
u8 stats_id,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
u32 cid,
struct qed_queue_start_common_params *p_params,
u8 stats_id,
dma_addr_t pbl_addr,
u16 pbl_size,
union qed_qm_pq_params *p_pq_params);
u8 qed_mcast_bin_from_mac(u8 *mac);
#endif /* _QED_L2_H */


@ -24,6 +24,7 @@
#include <linux/qed/qed_if.h>
#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_mcp.h"
@ -125,7 +126,7 @@ static int qed_init_pci(struct qed_dev *cdev,
goto err1;
}
if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
DP_NOTICE(cdev, "No memory region found in bar #2\n");
rc = -EIO;
goto err1;
@ -175,12 +176,14 @@ static int qed_init_pci(struct qed_dev *cdev,
goto err2;
}
cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
cdev->db_size = pci_resource_len(cdev->pdev, 2);
cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
if (!cdev->doorbells) {
DP_NOTICE(cdev, "Cannot map doorbell space\n");
return -ENOMEM;
if (IS_PF(cdev)) {
cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
cdev->db_size = pci_resource_len(cdev->pdev, 2);
cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
if (!cdev->doorbells) {
DP_NOTICE(cdev, "Cannot map doorbell space\n");
return -ENOMEM;
}
}
return 0;
@ -207,20 +210,33 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
dev_info->fw_major = FW_MAJOR_VERSION;
dev_info->fw_minor = FW_MINOR_VERSION;
dev_info->fw_rev = FW_REVISION_VERSION;
dev_info->fw_eng = FW_ENGINEERING_VERSION;
dev_info->mf_mode = cdev->mf_mode;
if (IS_PF(cdev)) {
dev_info->fw_major = FW_MAJOR_VERSION;
dev_info->fw_minor = FW_MINOR_VERSION;
dev_info->fw_rev = FW_REVISION_VERSION;
dev_info->fw_eng = FW_ENGINEERING_VERSION;
dev_info->mf_mode = cdev->mf_mode;
dev_info->tx_switching = true;
} else {
qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
&dev_info->fw_minor, &dev_info->fw_rev,
&dev_info->fw_eng);
}
qed_mcp_get_mfw_ver(cdev, &dev_info->mfw_rev);
if (IS_PF(cdev)) {
ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
if (ptt) {
qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
&dev_info->mfw_rev, NULL);
ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
if (ptt) {
qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
&dev_info->flash_size);
qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
&dev_info->flash_size);
qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
}
} else {
qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
&dev_info->mfw_rev, NULL);
}
return 0;
@ -257,9 +273,7 @@ static int qed_set_power_state(struct qed_dev *cdev,
/* probing */
static struct qed_dev *qed_probe(struct pci_dev *pdev,
enum qed_protocol protocol,
u32 dp_module,
u8 dp_level)
struct qed_probe_params *params)
{
struct qed_dev *cdev;
int rc;
@ -268,9 +282,12 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev,
if (!cdev)
goto err0;
cdev->protocol = protocol;
cdev->protocol = params->protocol;
qed_init_dp(cdev, dp_module, dp_level);
if (params->is_vf)
cdev->b_is_vf = true;
qed_init_dp(cdev, params->dp_module, params->dp_level);
rc = qed_init_pci(cdev, pdev);
if (rc) {
@ -664,6 +681,35 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
return 0;
}
static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
int rc;
memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;
qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
&cdev->int_params.in.num_vectors);
if (cdev->num_hwfns > 1) {
u8 vectors = 0;
qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
cdev->int_params.in.num_vectors += vectors;
}
/* We want a minimum of one fastpath vector per vf hwfn */
cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;
rc = qed_set_int_mode(cdev, true);
if (rc)
return rc;
cdev->int_params.fp_msix_base = 0;
cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;
return 0;
}
u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
@ -749,34 +795,43 @@ static int qed_slowpath_start(struct qed_dev *cdev,
struct qed_mcp_drv_version drv_version;
const u8 *data = NULL;
struct qed_hwfn *hwfn;
int rc;
int rc = -EINVAL;
rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
&cdev->pdev->dev);
if (rc) {
DP_NOTICE(cdev,
"Failed to find fw file - /lib/firmware/%s\n",
QED_FW_FILE_NAME);
if (qed_iov_wq_start(cdev))
goto err;
if (IS_PF(cdev)) {
rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
&cdev->pdev->dev);
if (rc) {
DP_NOTICE(cdev,
"Failed to find fw file - /lib/firmware/%s\n",
QED_FW_FILE_NAME);
goto err;
}
}
rc = qed_nic_setup(cdev);
if (rc)
goto err;
rc = qed_slowpath_setup_int(cdev, params->int_mode);
if (IS_PF(cdev))
rc = qed_slowpath_setup_int(cdev, params->int_mode);
else
rc = qed_slowpath_vf_setup_int(cdev);
if (rc)
goto err1;
/* Allocate stream for unzipping */
rc = qed_alloc_stream_mem(cdev);
if (rc) {
DP_NOTICE(cdev, "Failed to allocate stream memory\n");
goto err2;
}
if (IS_PF(cdev)) {
/* Allocate stream for unzipping */
rc = qed_alloc_stream_mem(cdev);
if (rc) {
DP_NOTICE(cdev, "Failed to allocate stream memory\n");
goto err2;
}
/* Start the slowpath */
data = cdev->firmware->data;
data = cdev->firmware->data;
}
memset(&tunn_info, 0, sizeof(tunn_info));
tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN |
@ -789,6 +844,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;
/* Start the slowpath */
rc = qed_hw_init(cdev, &tunn_info, true,
cdev->int_params.out.int_mode,
true, data);
@ -798,18 +854,20 @@ static int qed_slowpath_start(struct qed_dev *cdev,
DP_INFO(cdev,
"HW initialization and function start completed successfully\n");
hwfn = QED_LEADING_HWFN(cdev);
drv_version.version = (params->drv_major << 24) |
(params->drv_minor << 16) |
(params->drv_rev << 8) |
(params->drv_eng);
strlcpy(drv_version.name, params->name,
MCP_DRV_VER_STR_SIZE - 4);
rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
&drv_version);
if (rc) {
DP_NOTICE(cdev, "Failed sending drv version command\n");
return rc;
if (IS_PF(cdev)) {
hwfn = QED_LEADING_HWFN(cdev);
drv_version.version = (params->drv_major << 24) |
(params->drv_minor << 16) |
(params->drv_rev << 8) |
(params->drv_eng);
strlcpy(drv_version.name, params->name,
MCP_DRV_VER_STR_SIZE - 4);
rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
&drv_version);
if (rc) {
DP_NOTICE(cdev, "Failed sending drv version command\n");
return rc;
}
}
qed_reset_vport_stats(cdev);
@ -818,13 +876,17 @@ static int qed_slowpath_start(struct qed_dev *cdev,
err2:
qed_hw_timers_stop_all(cdev);
qed_slowpath_irq_free(cdev);
if (IS_PF(cdev))
qed_slowpath_irq_free(cdev);
qed_free_stream_mem(cdev);
qed_disable_msix(cdev);
err1:
qed_resc_free(cdev);
err:
release_firmware(cdev->firmware);
if (IS_PF(cdev))
release_firmware(cdev->firmware);
qed_iov_wq_stop(cdev, false);
return rc;
}
@ -834,15 +896,21 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
if (!cdev)
return -ENODEV;
qed_free_stream_mem(cdev);
if (IS_PF(cdev)) {
qed_free_stream_mem(cdev);
qed_sriov_disable(cdev, true);
qed_nic_stop(cdev);
qed_slowpath_irq_free(cdev);
qed_nic_stop(cdev);
qed_slowpath_irq_free(cdev);
}
qed_disable_msix(cdev);
qed_nic_reset(cdev);
release_firmware(cdev->firmware);
qed_iov_wq_stop(cdev, true);
if (IS_PF(cdev))
release_firmware(cdev->firmware);
return 0;
}
@ -932,6 +1000,9 @@ static int qed_set_link(struct qed_dev *cdev,
if (!cdev)
return -ENODEV;
if (IS_VF(cdev))
return 0;
/* The link should be set only once per PF */
hwfn = &cdev->hwfns[0];
@ -1043,10 +1114,16 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
memset(if_link, 0, sizeof(*if_link));
/* Prepare source inputs */
memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn),
sizeof(link_caps));
if (IS_PF(hwfn->cdev)) {
memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn),
sizeof(link_caps));
} else {
qed_vf_get_link_params(hwfn, &params);
qed_vf_get_link_state(hwfn, &link);
qed_vf_get_link_caps(hwfn, &link_caps);
}
/* Set the link parameters to pass to protocol driver */
if (link.link_up)
@ -1148,7 +1225,12 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
static void qed_get_current_link(struct qed_dev *cdev,
struct qed_link_output *if_link)
{
int i;
qed_fill_link(&cdev->hwfns[0], if_link);
for_each_hwfn(cdev, i)
qed_inform_vf_link_state(&cdev->hwfns[i]);
}
void qed_link_update(struct qed_hwfn *hwfn)
@ -1158,6 +1240,7 @@ void qed_link_update(struct qed_hwfn *hwfn)
struct qed_link_output if_link;
qed_fill_link(hwfn, &if_link);
qed_inform_vf_link_state(hwfn);
if (IS_LEAD_HWFN(hwfn) && cookie)
op->link_update(cookie, &if_link);
@ -1169,6 +1252,9 @@ static int qed_drain(struct qed_dev *cdev)
struct qed_ptt *ptt;
int i, rc;
if (IS_VF(cdev))
return 0;
for_each_hwfn(cdev, i) {
hwfn = &cdev->hwfns[i];
ptt = qed_ptt_acquire(hwfn);


@ -19,6 +19,8 @@
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"
#define CHIP_MCP_RESP_ITER_US 10
#define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
@ -440,6 +442,75 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
return 0;
}
static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
PUBLIC_PATH);
u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
u32 path_addr = SECTION_ADDR(mfw_path_offsize,
QED_PATH_ID(p_hwfn));
u32 disabled_vfs[VF_MAX_STATIC / 32];
int i;
DP_VERBOSE(p_hwfn,
QED_MSG_SP,
"Reading Disabled VF information from [offset %08x], path_addr %08x\n",
mfw_path_offsize, path_addr);
for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
path_addr +
offsetof(struct public_path,
mcp_vf_disabled) +
sizeof(u32) * i);
DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
"FLR-ed VFs [%08x,...,%08x] - %08x\n",
i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
}
if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
}
int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *vfs_to_ack)
{
u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
PUBLIC_FUNC);
u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
u32 func_addr = SECTION_ADDR(mfw_func_offsize,
MCP_PF_ID(p_hwfn));
struct qed_mcp_mb_params mb_params;
union drv_union_data union_data;
int rc;
int i;
for (i = 0; i < (VF_MAX_STATIC / 32); i++)
DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
"Acking VFs [%08x,...,%08x] - %08x\n",
i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
memset(&mb_params, 0, sizeof(mb_params));
mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
memcpy(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
mb_params.p_data_src = &union_data;
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc) {
DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
return -EBUSY;
}
/* Clear the ACK bits */
for (i = 0; i < (VF_MAX_STATIC / 32); i++)
qed_wr(p_hwfn, p_ptt,
func_addr +
offsetof(struct public_func, drv_ack_vf_disabled) +
i * sizeof(u32), 0);
return rc;
}
static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
@ -751,6 +822,9 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
case MFW_DRV_MSG_LINK_CHANGE:
qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
break;
case MFW_DRV_MSG_VF_DISABLED:
qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
break;
case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
break;
@ -787,26 +861,42 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
return rc;
}
int qed_mcp_get_mfw_ver(struct qed_dev *cdev,
u32 *p_mfw_ver)
int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *p_mfw_ver, u32 *p_running_bundle_id)
{
struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
struct qed_ptt *p_ptt;
u32 global_offsize;
p_ptt = qed_ptt_acquire(p_hwfn);
if (!p_ptt)
return -EBUSY;
if (IS_VF(p_hwfn->cdev)) {
if (p_hwfn->vf_iov_info) {
struct pfvf_acquire_resp_tlv *p_resp;
p_resp = &p_hwfn->vf_iov_info->acquire_resp;
*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
return 0;
} else {
DP_VERBOSE(p_hwfn,
QED_MSG_IOV,
"VF requested MFW version prior to ACQUIRE\n");
return -EINVAL;
}
}
global_offsize = qed_rd(p_hwfn, p_ptt,
SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
public_base,
SECTION_OFFSIZE_ADDR(p_hwfn->
mcp_info->public_base,
PUBLIC_GLOBAL));
*p_mfw_ver = qed_rd(p_hwfn, p_ptt,
SECTION_ADDR(global_offsize, 0) +
offsetof(struct public_global, mfw_ver));
*p_mfw_ver =
qed_rd(p_hwfn, p_ptt,
SECTION_ADDR(global_offsize,
0) + offsetof(struct public_global, mfw_ver));
qed_ptt_release(p_hwfn, p_ptt);
if (p_running_bundle_id != NULL) {
*p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
SECTION_ADDR(global_offsize, 0) +
offsetof(struct public_global,
running_bundle_id));
}
return 0;
}
@ -817,6 +907,9 @@ int qed_mcp_get_media_type(struct qed_dev *cdev,
struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
struct qed_ptt *p_ptt;
if (IS_VF(cdev))
return -EINVAL;
if (!qed_mcp_is_init(p_hwfn)) {
DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
return -EBUSY;
@ -951,6 +1044,9 @@ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
{
u32 flash_size;
if (IS_VF(p_hwfn->cdev))
return -EINVAL;
flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
@ -961,6 +1057,37 @@ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
return 0;
}
int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 vf_id, u8 num)
{
u32 resp = 0, param = 0, rc_param = 0;
int rc;
	/* Only the leader hwfn can configure MSI-X; CMT needs to be taken into account */
if (!IS_LEAD_HWFN(p_hwfn))
return 0;
num *= p_hwfn->cdev->num_hwfns;
param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
&resp, &rc_param);
if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
rc = -EINVAL;
} else {
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
num, vf_id);
}
return rc;
}
int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,


@ -149,13 +149,16 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
/**
* @brief Get the management firmware version value
*
* @param cdev - qed dev pointer
* @param mfw_ver - mfw version value
* @param p_hwfn
* @param p_ptt
* @param p_mfw_ver - mfw version value
* @param p_running_bundle_id - image id in nvram; Optional.
*
* @return int - 0 - operation was successul.
* @return int - 0 - operation was successful.
*/
int qed_mcp_get_mfw_ver(struct qed_dev *cdev,
u32 *mfw_ver);
int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *p_mfw_ver, u32 *p_running_bundle_id);
/**
* @brief Get media type value of the port.
@ -389,6 +392,18 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
* @brief Ack to mfw that driver finished FLR process for VFs
*
* @param p_hwfn
* @param p_ptt
* @param vfs_to_ack - bit mask of all engine VFs for which the PF acks.
*
 * @return int - 0 upon success.
*/
int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *vfs_to_ack);
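/* A minimal sketch, assuming the PF has finished its cleanup for a single
 * FLR-ed VF whose absolute id is known: build the per-engine bitmap and
 * ack it towards the MFW. The wrapper name and locals are illustrative.
 */
static int example_ack_single_vf_flr(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt, u8 abs_vf_id)
{
	u32 ack_vfs[VF_MAX_STATIC / 32] = { 0 };

	ack_vfs[abs_vf_id / 32] |= BIT(abs_vf_id % 32);

	return qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
}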
/**
* @brief - calls during init to read shmem of all function-related info.
*
@ -418,6 +433,20 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn,
* @return true iff MFW is running and mcp_info is initialized
*/
bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);
/**
* @brief request MFW to configure MSI-X for a VF
*
* @param p_hwfn
* @param p_ptt
* @param vf_id - absolute inside engine
* @param num_sbs - number of entries to request
*
* @return int
*/
int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 vf_id, u8 num);
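/* A minimal sketch, assuming a PF that is enabling IOV: acquire a PTT, ask
 * the MFW for num_sbs MSI-X vectors on behalf of a VF and release the PTT
 * again. The wrapper name is illustrative.
 */
static int example_request_vf_msix(struct qed_hwfn *p_hwfn,
				   u8 abs_vf_id, u8 num_sbs)
{
	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
	int rc;

	if (!p_ptt)
		return -EBUSY;

	rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, abs_vf_id, num_sbs);
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}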
int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw);
int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw);
int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,


@ -39,6 +39,10 @@
0x2aae04UL
#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER \
0x2aa16cUL
#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR \
0x2aa118UL
#define PSWHST_REG_ZONE_PERMISSION_TABLE \
0x2a0800UL
#define BAR0_MAP_REG_MSDM_RAM \
0x1d00000UL
#define BAR0_MAP_REG_USDM_RAM \
@ -77,6 +81,8 @@
0x2f2eb0UL
#define DORQ_REG_PF_DB_ENABLE \
0x100508UL
#define DORQ_REG_VF_USAGE_CNT \
0x1009c4UL
#define QM_REG_PF_EN \
0x2f2ea4UL
#define TCFC_REG_STRONG_ENABLE_PF \
@ -111,6 +117,8 @@
0x009778UL
#define MISCS_REG_CHIP_METAL \
0x009774UL
#define MISCS_REG_FUNCTION_HIDE \
0x0096f0UL
#define BRB_REG_HEADER_SIZE \
0x340804UL
#define BTB_REG_HEADER_SIZE \
@ -119,6 +127,8 @@
0x1c0708UL
#define CCFC_REG_ACTIVITY_COUNTER \
0x2e8800UL
#define CCFC_REG_STRONG_ENABLE_VF \
0x2e070cUL
#define CDU_REG_CID_ADDR_PARAMS \
0x580900UL
#define DBG_REG_CLIENT_ENABLE \
@ -161,6 +171,10 @@
0x040200UL
#define PBF_REG_INIT \
0xd80000UL
#define PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 \
0xd806c8UL
#define PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 \
0xd806ccUL
#define PTU_REG_ATC_INIT_ARRAY \
0x560000UL
#define PCM_REG_INIT \
@ -385,6 +399,8 @@
0x1d0000UL
#define IGU_REG_PF_CONFIGURATION \
0x180800UL
#define IGU_REG_VF_CONFIGURATION \
0x180804UL
#define MISC_REG_AEU_ENABLE1_IGU_OUT_0 \
0x00849cUL
#define MISC_REG_AEU_AFTER_INVERT_1_IGU \
@ -411,6 +427,8 @@
0x1 << 0)
#define IGU_REG_MAPPING_MEMORY \
0x184000UL
#define IGU_REG_STATISTIC_NUM_VF_MSG_SENT \
0x180408UL
#define MISCS_REG_GENERIC_POR_0 \
0x0096d4UL
#define MCP_REG_NVM_CFG4 \


@ -62,6 +62,9 @@ union ramrod_data {
struct vport_stop_ramrod_data vport_stop;
struct vport_update_ramrod_data vport_update;
struct vport_filter_update_ramrod_data vport_filter_update;
struct vf_start_ramrod_data vf_start;
struct vf_stop_ramrod_data vf_stop;
};
#define EQ_MAX_CREDIT 0xffffffff
@ -341,13 +344,14 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
* @param p_hwfn
* @param p_tunn
* @param mode
* @param allow_npar_tx_switch
*
* @return int
*/
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
struct qed_tunn_start_params *p_tunn,
enum qed_mf_mode mode);
enum qed_mf_mode mode, bool allow_npar_tx_switch);
/**
* @brief qed_sp_pf_stop - PF Function Stop Ramrod


@ -20,6 +20,7 @@
#include "qed_int.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent,
@ -298,7 +299,7 @@ qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
struct qed_tunn_start_params *p_tunn,
enum qed_mf_mode mode)
enum qed_mf_mode mode, bool allow_npar_tx_switch)
{
struct pf_start_ramrod_data *p_ramrod = NULL;
u16 sb = qed_int_get_sp_sb_id(p_hwfn);
@ -357,6 +358,16 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
&p_ramrod->tunnel_config);
p_hwfn->hw_info.personality = PERSONALITY_ETH;
if (IS_MF_SI(p_hwfn))
p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
if (p_hwfn->cdev->p_iov_info) {
struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf;
p_ramrod->num_vfs = (u8) p_iov->total_vfs;
}
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
"Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
sb, sb_index,


@ -27,6 +27,7 @@
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
/***************************************************************************
* Structures & Definitions
@ -242,10 +243,17 @@ static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
{
DP_NOTICE(p_hwfn,
"Unknown Async completion for protocol: %d\n",
p_eqe->protocol_id);
return -EINVAL;
switch (p_eqe->protocol_id) {
case PROTOCOLID_COMMON:
return qed_sriov_eqe_event(p_hwfn,
p_eqe->opcode,
p_eqe->echo, &p_eqe->data);
default:
DP_NOTICE(p_hwfn,
"Unknown Async completion for protocol: %d\n",
p_eqe->protocol_id);
return -EINVAL;
}
}
/***************************************************************************
@ -379,6 +387,9 @@ static int qed_cqe_completion(
struct eth_slow_path_rx_cqe *cqe,
enum protocol_type protocol)
{
if (IS_VF(p_hwfn->cdev))
return 0;
/* @@@tmp - it's possible we'll eventually want to handle some
* actual commands that can arrive here, but for now this is only
* used to complete the ramrod using the echo value on the cqe

File diff suppressed because it is too large


@ -0,0 +1,386 @@
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef _QED_SRIOV_H
#define _QED_SRIOV_H
#include <linux/types.h>
#include "qed_vf.h"
#define QED_VF_ARRAY_LENGTH (3)
#define IS_VF(cdev) ((cdev)->b_is_vf)
#define IS_PF(cdev) (!((cdev)->b_is_vf))
#ifdef CONFIG_QED_SRIOV
#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info))
#else
#define IS_PF_SRIOV(p_hwfn) (0)
#endif
#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info))
#define QED_MAX_VF_CHAINS_PER_PF 16
#define QED_ETH_VF_NUM_VLAN_FILTERS 2
#define QED_ETH_MAX_VF_NUM_VLAN_FILTERS \
(MAX_NUM_VFS * QED_ETH_VF_NUM_VLAN_FILTERS)
enum qed_iov_vport_update_flag {
QED_IOV_VP_UPDATE_ACTIVATE,
QED_IOV_VP_UPDATE_VLAN_STRIP,
QED_IOV_VP_UPDATE_TX_SWITCH,
QED_IOV_VP_UPDATE_MCAST,
QED_IOV_VP_UPDATE_ACCEPT_PARAM,
QED_IOV_VP_UPDATE_RSS,
QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN,
QED_IOV_VP_UPDATE_SGE_TPA,
QED_IOV_VP_UPDATE_MAX,
};
struct qed_public_vf_info {
/* These copies will later be reflected in the bulletin board,
* but this copy should be newer.
*/
u8 forced_mac[ETH_ALEN];
u16 forced_vlan;
u8 mac[ETH_ALEN];
/* IFLA_VF_LINK_STATE_<X> */
int link_state;
/* Currently configured Tx rate in MB/sec. 0 if unconfigured */
int tx_rate;
};
/* This struct is part of qed_dev and contains data relevant to all hwfns;
 * Initialized only if SR-IOV capability is exposed in PCIe config space.
*/
struct qed_hw_sriov_info {
int pos; /* capability position */
int nres; /* number of resources */
u32 cap; /* SR-IOV Capabilities */
u16 ctrl; /* SR-IOV Control */
u16 total_vfs; /* total VFs associated with the PF */
u16 num_vfs; /* number of vfs that have been started */
u16 initial_vfs; /* initial VFs associated with the PF */
u16 nr_virtfn; /* number of VFs available */
u16 offset; /* first VF Routing ID offset */
u16 stride; /* following VF stride */
u16 vf_device_id; /* VF device id */
u32 pgsz; /* page size for BAR alignment */
u8 link; /* Function Dependency Link */
u32 first_vf_in_pf;
};
/* This mailbox is maintained per VF in its PF and contains all information
* required for sending / receiving a message.
*/
struct qed_iov_vf_mbx {
union vfpf_tlvs *req_virt;
dma_addr_t req_phys;
union pfvf_tlvs *reply_virt;
dma_addr_t reply_phys;
/* Address in VF where a pending message is located */
dma_addr_t pending_req;
u8 *offset;
/* saved VF request header */
struct vfpf_first_tlv first_tlv;
};
struct qed_vf_q_info {
u16 fw_rx_qid;
u16 fw_tx_qid;
u8 fw_cid;
u8 rxq_active;
u8 txq_active;
};
enum vf_state {
	VF_FREE = 0,		/* VF ready to be acquired - holds no resc */
	VF_ACQUIRED,		/* VF, acquired, but not initialized */
VF_ENABLED, /* VF, Enabled */
VF_RESET, /* VF, FLR'd, pending cleanup */
VF_STOPPED /* VF, Stopped */
};
struct qed_vf_vlan_shadow {
bool used;
u16 vid;
};
struct qed_vf_shadow_config {
/* Shadow copy of all guest vlans */
struct qed_vf_vlan_shadow vlans[QED_ETH_VF_NUM_VLAN_FILTERS + 1];
u8 inner_vlan_removal;
};
/* PFs maintain an array of this structure, per VF */
struct qed_vf_info {
struct qed_iov_vf_mbx vf_mbx;
enum vf_state state;
bool b_init;
u8 to_disable;
struct qed_bulletin bulletin;
dma_addr_t vf_bulletin;
u32 concrete_fid;
u16 opaque_fid;
u16 mtu;
u8 vport_id;
u8 relative_vf_id;
u8 abs_vf_id;
#define QED_VF_ABS_ID(p_hwfn, p_vf) (QED_PATH_ID(p_hwfn) ? \
(p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
(p_vf)->abs_vf_id)
u8 vport_instance;
u8 num_rxqs;
u8 num_txqs;
u8 num_sbs;
u8 num_mac_filters;
u8 num_vlan_filters;
struct qed_vf_q_info vf_queues[QED_MAX_VF_CHAINS_PER_PF];
u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF];
u8 num_active_rxqs;
struct qed_public_vf_info p_vf_info;
bool spoof_chk;
bool req_spoofchk_val;
/* Stores the configuration requested by VF */
struct qed_vf_shadow_config shadow_config;
/* A bitfield using bulletin's valid-map bits, used to indicate
* which of the bulletin board features have been configured.
*/
u64 configured_features;
#define QED_IOV_CONFIGURED_FEATURES_MASK ((1 << MAC_ADDR_FORCED) | \
(1 << VLAN_ADDR_FORCED))
};
/* This structure is part of qed_hwfn and is used only for PFs that have sriov
* capability enabled.
*/
struct qed_pf_iov {
struct qed_vf_info vfs_array[MAX_NUM_VFS];
u64 pending_events[QED_VF_ARRAY_LENGTH];
u64 pending_flr[QED_VF_ARRAY_LENGTH];
	/* Allocate message address continuously and split it among the VFs */
void *mbx_msg_virt_addr;
dma_addr_t mbx_msg_phys_addr;
u32 mbx_msg_size;
void *mbx_reply_virt_addr;
dma_addr_t mbx_reply_phys_addr;
u32 mbx_reply_size;
void *p_bulletins;
dma_addr_t bulletins_phys;
u32 bulletins_size;
};
enum qed_iov_wq_flag {
QED_IOV_WQ_MSG_FLAG,
QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
QED_IOV_WQ_STOP_WQ_FLAG,
QED_IOV_WQ_FLR_FLAG,
};
#ifdef CONFIG_QED_SRIOV
/**
* @brief - Given a VF index, return index of next [including that] active VF.
*
* @param p_hwfn
* @param rel_vf_id
*
* @return MAX_NUM_VFS in case no further active VFs, otherwise index.
*/
u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);
/**
* @brief Read sriov related information and allocated resources
 * reads from configuration space, shmem, etc.
*
* @param p_hwfn
*
* @return int
*/
int qed_iov_hw_info(struct qed_hwfn *p_hwfn);
/**
* @brief qed_add_tlv - place a given tlv on the tlv buffer at next offset
*
* @param p_hwfn
* @param p_iov
* @param type
* @param length
*
* @return pointer to the newly placed tlv
*/
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length);
/**
* @brief list the types and lengths of the tlvs on the buffer
*
* @param p_hwfn
* @param tlvs_list
*/
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list);
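/* A minimal sketch of how a TLV chain is assembled with qed_add_tlv(),
 * assuming the CHANNEL_TLV_* type constants that accompany the vf<->pf
 * channel structures; filling the request body and actually sending it
 * are omitted here, and the wrapper name is illustrative.
 */
static void example_build_tlv_chain(struct qed_hwfn *p_hwfn, void *req_buf)
{
	struct vfpf_vport_update_tlv *req;
	u8 *offset = (u8 *)req_buf;

	/* First TLV of the request carries the vport-update header */
	req = qed_add_tlv(p_hwfn, &offset, CHANNEL_TLV_VPORT_UPDATE,
			  sizeof(*req));

	/* Terminate the chain so the receiver knows where the list ends */
	qed_add_tlv(p_hwfn, &offset, CHANNEL_TLV_LIST_END,
		    sizeof(struct channel_list_end_tlv));

	/* Dump the assembled list for debugging */
	qed_dp_tlv_list(p_hwfn, req_buf);
}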
/**
* @brief qed_iov_alloc - allocate sriov related resources
*
* @param p_hwfn
*
* @return int
*/
int qed_iov_alloc(struct qed_hwfn *p_hwfn);
/**
* @brief qed_iov_setup - setup sriov related resources
*
* @param p_hwfn
* @param p_ptt
*/
void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
* @brief qed_iov_free - free sriov related resources
*
* @param p_hwfn
*/
void qed_iov_free(struct qed_hwfn *p_hwfn);
/**
* @brief free sriov related memory that was allocated during hw_prepare
*
* @param cdev
*/
void qed_iov_free_hw_info(struct qed_dev *cdev);
/**
* @brief qed_sriov_eqe_event - handle async sriov event arrived on eqe.
*
* @param p_hwfn
* @param opcode
* @param echo
* @param data
*/
int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
u8 opcode, __le16 echo, union event_ring_data *data);
/**
* @brief Mark structs of vfs that have been FLR-ed.
*
* @param p_hwfn
* @param disabled_vfs - bitmask of all VFs on path that were FLRed
*
* @return 1 iff one of the PF's vfs got FLRed. 0 otherwise.
*/
int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);
/**
* @brief Search extended TLVs in request/reply buffer.
*
* @param p_hwfn
* @param p_tlvs_list - Pointer to tlvs list
* @param req_type - Type of TLV
*
* @return pointer to tlv type if found, otherwise returns NULL.
*/
void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
void *p_tlvs_list, u16 req_type);
void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first);
int qed_iov_wq_start(struct qed_dev *cdev);
void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag);
void qed_vf_start_iov_wq(struct qed_dev *cdev);
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled);
void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
#else
static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
u16 rel_vf_id)
{
return MAX_NUM_VFS;
}
static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
{
return 0;
}
static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn)
{
return 0;
}
static inline void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
}
static inline void qed_iov_free(struct qed_hwfn *p_hwfn)
{
}
static inline void qed_iov_free_hw_info(struct qed_dev *cdev)
{
}
static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
u8 opcode,
__le16 echo, union event_ring_data *data)
{
return -EINVAL;
}
static inline int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
u32 *disabled_vfs)
{
return 0;
}
static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
{
}
static inline int qed_iov_wq_start(struct qed_dev *cdev)
{
return 0;
}
static inline void qed_schedule_iov(struct qed_hwfn *hwfn,
enum qed_iov_wq_flag flag)
{
}
static inline void qed_vf_start_iov_wq(struct qed_dev *cdev)
{
}
static inline int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
{
return 0;
}
static inline void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
}
#endif
#define qed_for_each_vf(_p_hwfn, _i) \
for (_i = qed_iov_get_next_active_vf(_p_hwfn, 0); \
_i < MAX_NUM_VFS; \
_i = qed_iov_get_next_active_vf(_p_hwfn, _i + 1))
#endif
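/* A minimal sketch, assuming an initialized PF: walk all currently-active
 * VFs of a hwfn with qed_for_each_vf(). The loop body is illustrative.
 */
static void example_walk_active_vfs(struct qed_hwfn *p_hwfn)
{
	int i;

	qed_for_each_vf(p_hwfn, i) {
		/* 'i' is a relative VF index valid on this hwfn */
		DP_VERBOSE(p_hwfn, QED_MSG_IOV, "VF[%d] is active\n", i);
	}
}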

File diff suppressed because it is too large


@ -0,0 +1,990 @@
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef _QED_VF_H
#define _QED_VF_H
#include "qed_l2.h"
#include "qed_mcp.h"
#define T_ETH_INDIRECTION_TABLE_SIZE 128
#define T_ETH_RSS_KEY_SIZE 10
struct vf_pf_resc_request {
u8 num_rxqs;
u8 num_txqs;
u8 num_sbs;
u8 num_mac_filters;
u8 num_vlan_filters;
u8 num_mc_filters;
u16 padding;
};
struct hw_sb_info {
u16 hw_sb_id;
u8 sb_qid;
u8 padding[5];
};
#define TLV_BUFFER_SIZE 1024
enum {
PFVF_STATUS_WAITING,
PFVF_STATUS_SUCCESS,
PFVF_STATUS_FAILURE,
PFVF_STATUS_NOT_SUPPORTED,
PFVF_STATUS_NO_RESOURCE,
PFVF_STATUS_FORCED,
};
/* vf pf channel tlvs */
/* general tlv header (used for both vf->pf request and pf->vf response) */
struct channel_tlv {
u16 type;
u16 length;
};
/* header of first vf->pf tlv carries the offset used to calculate response
* buffer address
*/
struct vfpf_first_tlv {
struct channel_tlv tl;
u32 padding;
u64 reply_address;
};
/* header of pf->vf tlvs, carries the status of handling the request */
struct pfvf_tlv {
struct channel_tlv tl;
u8 status;
u8 padding[3];
};
/* response tlv used for most tlvs */
struct pfvf_def_resp_tlv {
struct pfvf_tlv hdr;
};
/* used to terminate and pad a tlv list */
struct channel_list_end_tlv {
struct channel_tlv tl;
u8 padding[4];
};
#define VFPF_ACQUIRE_OS_LINUX (0)
#define VFPF_ACQUIRE_OS_WINDOWS (1)
#define VFPF_ACQUIRE_OS_ESX (2)
#define VFPF_ACQUIRE_OS_SOLARIS (3)
#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4)
struct vfpf_acquire_tlv {
struct vfpf_first_tlv first_tlv;
struct vf_pf_vfdev_info {
#define VFPF_ACQUIRE_CAP_OBSOLETE (1 << 0)
#define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */
u64 capabilities;
u8 fw_major;
u8 fw_minor;
u8 fw_revision;
u8 fw_engineering;
u32 driver_version;
u16 opaque_fid; /* ME register value */
u8 os_type; /* VFPF_ACQUIRE_OS_* value */
u8 padding[5];
} vfdev_info;
struct vf_pf_resc_request resc_request;
u64 bulletin_addr;
u32 bulletin_size;
u32 padding;
};
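/* A minimal sketch of how a VF might fill the acquire request, assuming
 * 'req' points at the request area of the vf<->pf channel; the resource
 * counts are illustrative, and driver_version/capabilities are omitted.
 */
static void example_fill_acquire(struct vfpf_acquire_tlv *req, u16 opaque_fid)
{
	req->vfdev_info.opaque_fid = opaque_fid;
	req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
	req->vfdev_info.fw_major = FW_MAJOR_VERSION;
	req->vfdev_info.fw_minor = FW_MINOR_VERSION;
	req->vfdev_info.fw_revision = FW_REVISION_VERSION;
	req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;

	req->resc_request.num_rxqs = 4;		/* illustrative counts */
	req->resc_request.num_txqs = 4;
	req->resc_request.num_sbs = 4;
	req->resc_request.num_mac_filters = 1;
	req->resc_request.num_vlan_filters = 2;
}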
/* receive side scaling tlv */
struct vfpf_vport_update_rss_tlv {
struct channel_tlv tl;
u8 update_rss_flags;
#define VFPF_UPDATE_RSS_CONFIG_FLAG BIT(0)
#define VFPF_UPDATE_RSS_CAPS_FLAG BIT(1)
#define VFPF_UPDATE_RSS_IND_TABLE_FLAG BIT(2)
#define VFPF_UPDATE_RSS_KEY_FLAG BIT(3)
u8 rss_enable;
u8 rss_caps;
u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */
u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
u32 rss_key[T_ETH_RSS_KEY_SIZE];
};
struct pfvf_storm_stats {
u32 address;
u32 len;
};
struct pfvf_stats_info {
struct pfvf_storm_stats mstats;
struct pfvf_storm_stats pstats;
struct pfvf_storm_stats tstats;
struct pfvf_storm_stats ustats;
};
struct pfvf_acquire_resp_tlv {
struct pfvf_tlv hdr;
struct pf_vf_pfdev_info {
u32 chip_num;
u32 mfw_ver;
u16 fw_major;
u16 fw_minor;
u16 fw_rev;
u16 fw_eng;
u64 capabilities;
#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED BIT(0)
#define PFVF_ACQUIRE_CAP_100G BIT(1) /* If set, 100g PF */
/* There are old PF versions where the PF might mistakenly override the sanity
* mechanism [version-based] and allow a VF that can't be supported to pass
* the acquisition phase.
* To overcome this, PFs now indicate that they're past that point and the new
* VFs would fail probe on the older PFs that fail to do so.
*/
#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE BIT(2)
u16 db_size;
u8 indices_per_sb;
u8 os_type;
/* These should match the PF's qed_dev values */
u16 chip_rev;
u8 dev_type;
u8 padding;
struct pfvf_stats_info stats_info;
u8 port_mac[ETH_ALEN];
u8 padding2[2];
} pfdev_info;
struct pf_vf_resc {
#define PFVF_MAX_QUEUES_PER_VF 16
#define PFVF_MAX_SBS_PER_VF 16
struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
u8 cid[PFVF_MAX_QUEUES_PER_VF];
u8 num_rxqs;
u8 num_txqs;
u8 num_sbs;
u8 num_mac_filters;
u8 num_vlan_filters;
u8 num_mc_filters;
u8 padding[2];
} resc;
u32 bulletin_size;
u32 padding;
};
struct pfvf_start_queue_resp_tlv {
struct pfvf_tlv hdr;
u32 offset; /* offset to consumer/producer of queue */
u8 padding[4];
};
/* Setup Queue */
struct vfpf_start_rxq_tlv {
struct vfpf_first_tlv first_tlv;
/* physical addresses */
u64 rxq_addr;
u64 deprecated_sge_addr;
u64 cqe_pbl_addr;
u16 cqe_pbl_size;
u16 hw_sb;
u16 rx_qid;
u16 hc_rate; /* desired interrupts per sec. */
u16 bd_max_bytes;
u16 stat_id;
u8 sb_index;
u8 padding[3];
};
struct vfpf_start_txq_tlv {
struct vfpf_first_tlv first_tlv;
/* physical addresses */
u64 pbl_addr;
u16 pbl_size;
u16 stat_id;
u16 tx_qid;
u16 hw_sb;
u32 flags; /* VFPF_QUEUE_FLG_X flags */
u16 hc_rate; /* desired interrupts per sec. */
u8 sb_index;
u8 padding[3];
};
/* Stop RX Queue */
struct vfpf_stop_rxqs_tlv {
struct vfpf_first_tlv first_tlv;
u16 rx_qid;
u8 num_rxqs;
u8 cqe_completion;
u8 padding[4];
};
/* Stop TX Queues */
struct vfpf_stop_txqs_tlv {
struct vfpf_first_tlv first_tlv;
u16 tx_qid;
u8 num_txqs;
u8 padding[5];
};
struct vfpf_update_rxq_tlv {
struct vfpf_first_tlv first_tlv;
u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF];
u16 rx_qid;
u8 num_rxqs;
u8 flags;
#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG BIT(0)
#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG BIT(1)
#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG BIT(2)
u8 padding[4];
};
/* Set Queue Filters */
struct vfpf_q_mac_vlan_filter {
u32 flags;
#define VFPF_Q_FILTER_DEST_MAC_VALID 0x01
#define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02
#define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */
u8 mac[ETH_ALEN];
u16 vlan_tag;
u8 padding[4];
};
/* Start a vport */
struct vfpf_vport_start_tlv {
struct vfpf_first_tlv first_tlv;
u64 sb_addr[PFVF_MAX_SBS_PER_VF];
u32 tpa_mode;
u16 dep1;
u16 mtu;
u8 vport_id;
u8 inner_vlan_removal;
u8 only_untagged;
u8 max_buffers_per_cqe;
u8 padding[4];
};
/* Extended tlvs - need to add rss, mcast, accept mode tlvs */
struct vfpf_vport_update_activate_tlv {
struct channel_tlv tl;
u8 update_rx;
u8 update_tx;
u8 active_rx;
u8 active_tx;
};
struct vfpf_vport_update_tx_switch_tlv {
struct channel_tlv tl;
u8 tx_switching;
u8 padding[3];
};
struct vfpf_vport_update_vlan_strip_tlv {
struct channel_tlv tl;
u8 remove_vlan;
u8 padding[3];
};
struct vfpf_vport_update_mcast_bin_tlv {
struct channel_tlv tl;
u8 padding[4];
u64 bins[8];
};
struct vfpf_vport_update_accept_param_tlv {
struct channel_tlv tl;
u8 update_rx_mode;
u8 update_tx_mode;
u8 rx_accept_filter;
u8 tx_accept_filter;
};
struct vfpf_vport_update_accept_any_vlan_tlv {
struct channel_tlv tl;
u8 update_accept_any_vlan_flg;
u8 accept_any_vlan;
u8 padding[2];
};
struct vfpf_vport_update_sge_tpa_tlv {
struct channel_tlv tl;
u16 sge_tpa_flags;
#define VFPF_TPA_IPV4_EN_FLAG BIT(0)
#define VFPF_TPA_IPV6_EN_FLAG BIT(1)
#define VFPF_TPA_PKT_SPLIT_FLAG BIT(2)
#define VFPF_TPA_HDR_DATA_SPLIT_FLAG BIT(3)
#define VFPF_TPA_GRO_CONSIST_FLAG BIT(4)
u8 update_sge_tpa_flags;
#define VFPF_UPDATE_SGE_DEPRECATED_FLAG BIT(0)
#define VFPF_UPDATE_TPA_EN_FLAG BIT(1)
#define VFPF_UPDATE_TPA_PARAM_FLAG BIT(2)
u8 max_buffers_per_cqe;
u16 deprecated_sge_buff_size;
u16 tpa_max_size;
u16 tpa_min_size_to_start;
u16 tpa_min_size_to_cont;
u8 tpa_max_aggs_num;
u8 padding[7];
};
/* Primary tlv, serving as a header for the various extended tlvs used by
* the vport-update ramrod.
*/
struct vfpf_vport_update_tlv {
struct vfpf_first_tlv first_tlv;
};
struct vfpf_ucast_filter_tlv {
struct vfpf_first_tlv first_tlv;
u8 opcode;
u8 type;
u8 mac[ETH_ALEN];
u16 vlan;
u16 padding[3];
};
struct tlv_buffer_size {
u8 tlv_buffer[TLV_BUFFER_SIZE];
};
union vfpf_tlvs {
struct vfpf_first_tlv first_tlv;
struct vfpf_acquire_tlv acquire;
struct vfpf_start_rxq_tlv start_rxq;
struct vfpf_start_txq_tlv start_txq;
struct vfpf_stop_rxqs_tlv stop_rxqs;
struct vfpf_stop_txqs_tlv stop_txqs;
struct vfpf_update_rxq_tlv update_rxq;
struct vfpf_vport_start_tlv start_vport;
struct vfpf_vport_update_tlv vport_update;
struct vfpf_ucast_filter_tlv ucast_filter;
struct channel_list_end_tlv list_end;
struct tlv_buffer_size tlv_buf_size;
};
union pfvf_tlvs {
struct pfvf_def_resp_tlv default_resp;
struct pfvf_acquire_resp_tlv acquire_resp;
struct tlv_buffer_size tlv_buf_size;
struct pfvf_start_queue_resp_tlv queue_start;
};
enum qed_bulletin_bit {
/* Alert the VF that a forced MAC was set by the PF */
MAC_ADDR_FORCED = 0,
/* Alert the VF that a forced VLAN was set by the PF */
VLAN_ADDR_FORCED = 2,
/* Indicate that `default_only_untagged' contains actual data */
VFPF_BULLETIN_UNTAGGED_DEFAULT = 3,
VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4,
/* Alert the VF that a suggested MAC was sent by the PF.
* MAC_ADDR will be disabled if MAC_ADDR_FORCED is set.
*/
VFPF_BULLETIN_MAC_ADDR = 5
};
struct qed_bulletin_content {
/* crc of structure to ensure it is not read mid-update */
u32 crc;
u32 version;
/* bitmap indicating which fields hold valid values */
u64 valid_bitmap;
/* used for MAC_ADDR or MAC_ADDR_FORCED */
u8 mac[ETH_ALEN];
/* If valid, 1 => only untagged Rx if no vlan is configured */
u8 default_only_untagged;
u8 padding;
/* The following is a 'copy' of qed_mcp_link_state,
* qed_mcp_link_params and qed_mcp_link_capabilities. Since it's
* possible these structs will grow in the future, we can't embed
* them here; instead we need to carry all of their fields.
*/
u8 req_autoneg;
u8 req_autoneg_pause;
u8 req_forced_rx;
u8 req_forced_tx;
u8 padding2[4];
u32 req_adv_speed;
u32 req_forced_speed;
u32 req_loopback;
u32 padding3;
u8 link_up;
u8 full_duplex;
u8 autoneg;
u8 autoneg_complete;
u8 parallel_detection;
u8 pfc_enabled;
u8 partner_tx_flow_ctrl_en;
u8 partner_rx_flow_ctrl_en;
u8 partner_adv_pause;
u8 sfp_tx_fault;
u8 padding4[6];
u32 speed;
u32 partner_adv_speed;
u32 capability_speed;
/* Forced vlan */
u16 pvid;
u16 padding5;
};
struct qed_bulletin {
dma_addr_t phys;
struct qed_bulletin_content *p_virt;
u32 size;
};
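/* Illustrative sketch only (not part of this patch): before consuming the
 * bulletin, a VF would copy it and verify the crc over everything following
 * the crc field, since the PF may be mid-update. crc32() here merely stands
 * in for whatever checksum the PF-side code actually computes.
 */
static inline bool qed_bulletin_crc_ok(struct qed_bulletin_content *p_bulletin)
{
	u32 crc = crc32(0, (u8 *)p_bulletin + sizeof(p_bulletin->crc),
			sizeof(*p_bulletin) - sizeof(p_bulletin->crc));

	return crc == p_bulletin->crc;
}
/* A caller could then test bits such as BIT(MAC_ADDR_FORCED) in valid_bitmap
 * before copying p_bulletin->mac into the net-device.
 */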
enum {
CHANNEL_TLV_NONE, /* ends tlv sequence */
CHANNEL_TLV_ACQUIRE,
CHANNEL_TLV_VPORT_START,
CHANNEL_TLV_VPORT_UPDATE,
CHANNEL_TLV_VPORT_TEARDOWN,
CHANNEL_TLV_START_RXQ,
CHANNEL_TLV_START_TXQ,
CHANNEL_TLV_STOP_RXQS,
CHANNEL_TLV_STOP_TXQS,
CHANNEL_TLV_UPDATE_RXQ,
CHANNEL_TLV_INT_CLEANUP,
CHANNEL_TLV_CLOSE,
CHANNEL_TLV_RELEASE,
CHANNEL_TLV_LIST_END,
CHANNEL_TLV_UCAST_FILTER,
CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,
CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
CHANNEL_TLV_VPORT_UPDATE_MCAST,
CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,
CHANNEL_TLV_VPORT_UPDATE_RSS,
CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
CHANNEL_TLV_MAX,
/* Required for iterating over vport-update tlvs.
* Will break if the vport-update tlvs are not sequential.
*/
CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1,
};
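/* Illustrative sketch only (not part of this patch): as noted above, the
 * vport-update TLV values are expected to be sequential, so an aggregated
 * vport-update request can be walked with a simple loop. The lookup helper
 * named in the comment is hypothetical.
 */
static inline void qed_walk_vport_update_tlvs(void *aggregated_req)
{
	int tlv;

	for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	     tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++) {
		/* e.g. find_extended_tlv(aggregated_req, tlv) and, if that
		 * TLV is present, apply it to the vport configuration.
		 */
	}
}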
/* This data is held in the qed_hwfn structure for VFs only. */
struct qed_vf_iov {
union vfpf_tlvs *vf2pf_request;
dma_addr_t vf2pf_request_phys;
union pfvf_tlvs *pf2vf_reply;
dma_addr_t pf2vf_reply_phys;
/* Should be taken whenever the mailbox buffers are accessed */
struct mutex mutex;
u8 *offset;
/* Bulletin Board */
struct qed_bulletin bulletin;
struct qed_bulletin_content bulletin_shadow;
/* we set aside a copy of the acquire response */
struct pfvf_acquire_resp_tlv acquire_resp;
};
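/* Illustrative sketch only (not part of this patch): the request/reply
 * buffers above are DMA-mapped once and reused for every message, which is
 * why 'mutex' must be held around each exchange. A typical flow, using
 * hypothetical helpers, would be:
 *
 *	mutex_lock(&p_iov->mutex);
 *	req = prep_request_tlv(p_iov->vf2pf_request, CHANNEL_TLV_ACQUIRE);
 *	... fill request, append CHANNEL_TLV_LIST_END ...
 *	rc = send_and_wait_for_reply(p_hwfn, p_iov->pf2vf_reply);
 *	... consume reply ...
 *	mutex_unlock(&p_iov->mutex);
 */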
#ifdef CONFIG_QED_SRIOV
/**
* @brief Read the VF bulletin and act on it if needed
*
* @param p_hwfn
* @param p_change - qed fills 1 iff bulletin board has changed, 0 otherwise.
*
* @return enum _qed_status
*/
int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change);
/**
* @brief Get link parameters for VF from qed
*
* @param p_hwfn
* @param params - the link params structure to be filled for the VF
*/
void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_params *params);
/**
* @brief Get link state for VF from qed
*
* @param p_hwfn
* @param link - the link state structure to be filled for the VF
*/
void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_state *link);
/**
* @brief Get link capabilities for VF from qed
*
* @param p_hwfn
* @param p_link_caps - the link capabilities structure to be filled for the VF
*/
void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_capabilities *p_link_caps);
/**
* @brief Get number of Rx queues allocated for VF by qed
*
* @param p_hwfn
* @param num_rxqs - allocated RX queues
*/
void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs);
/**
* @brief Get port mac address for VF
*
* @param p_hwfn
* @param port_mac - destination location for port mac
*/
void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac);
/**
* @brief Get number of VLAN filters allocated for VF by qed
*
* @param p_hwfn
* @param num_vlan_filters - allocated VLAN filters
*/
void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
u8 *num_vlan_filters);
/**
* @brief Check if VF can set a MAC address
*
* @param p_hwfn
* @param mac
*
* @return bool
*/
bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac);
/**
* @brief Set firmware version information in dev_info from the VF's acquire response tlv
*
* @param p_hwfn
* @param fw_major
* @param fw_minor
* @param fw_rev
* @param fw_eng
*/
void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
u16 *fw_major, u16 *fw_minor,
u16 *fw_rev, u16 *fw_eng);
/**
* @brief hw preparation for the VF; sends the ACQUIRE message
*
* @param p_hwfn
*
* @return int
*/
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);
/**
* @brief VF - start the RX Queue by sending a message to the PF
* @param p_hwfn
* @param rx_queue_id - zero based within the VF
* @param sb - VF status block for this queue
* @param sb_index - Index within the status block
* @param bd_max_bytes - maximum number of bytes per bd
* @param bd_chain_phys_addr - physical address of bd chain
* @param cqe_pbl_addr - physical address of pbl
* @param cqe_pbl_size - pbl size
* @param pp_prod - pointer to the producer to be
* used in fastpath
*
* @return int
*/
int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
u8 rx_queue_id,
u16 sb,
u8 sb_index,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
u16 cqe_pbl_size, void __iomem **pp_prod);
/**
* @brief VF - start the TX queue by sending a message to the
* PF.
*
* @param p_hwfn
* @param tx_queue_id - zero based within the VF
* @param sb - status block for this queue
* @param sb_index - index within the status block
* @param pbl_addr - physical address of the tx chain pbl
* @param pbl_size - pbl size
* @param pp_doorbell - pointer to the address to which the
* doorbell should be written
*
* @return int
*/
int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
u16 tx_queue_id,
u16 sb,
u8 sb_index,
dma_addr_t pbl_addr,
u16 pbl_size, void __iomem **pp_doorbell);
/**
* @brief VF - stop the RX queue by sending a message to the PF
*
* @param p_hwfn
* @param rx_qid
* @param cqe_completion
*
* @return int
*/
int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
u16 rx_qid, bool cqe_completion);
/**
* @brief VF - stop the TX queue by sending a message to the PF
*
* @param p_hwfn
* @param tx_qid
*
* @return int
*/
int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid);
/**
* @brief VF - send a vport update command
*
* @param p_hwfn
* @param params
*
* @return int
*/
int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_update_params *p_params);
/**
*
* @brief VF - send a close message to PF
*
* @param p_hwfn
*
* @return enum _qed_status
*/
int qed_vf_pf_reset(struct qed_hwfn *p_hwfn);
/**
* @brief VF - free the VF's memories
*
* @param p_hwfn
*
* @return enum _qed_status
*/
int qed_vf_pf_release(struct qed_hwfn *p_hwfn);
/**
* @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given
* sb_id. For VFs, IGU SBs don't have to be contiguous.
*
* @param p_hwfn
* @param sb_id
*
* @return u16
*/
u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
/**
* @brief qed_vf_pf_vport_start - perform vport start for VF.
*
* @param p_hwfn
* @param vport_id
* @param mtu
* @param inner_vlan_removal
* @param tpa_mode
* @param max_buffers_per_cqe
* @param only_untagged - default behavior regarding vlan acceptance
*
* @return enum _qed_status
*/
int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
u8 vport_id,
u16 mtu,
u8 inner_vlan_removal,
enum qed_tpa_mode tpa_mode,
u8 max_buffers_per_cqe, u8 only_untagged);
/**
* @brief qed_vf_pf_vport_stop - stop the VF's vport
*
* @param p_hwfn
*
* @return enum _qed_status
*/
int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn);
int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
struct qed_filter_ucast *p_param);
void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
struct qed_filter_mcast *p_filter_cmd);
/**
* @brief qed_vf_pf_int_cleanup - clean the SB of the VF
*
* @param p_hwfn
*
* @return enum _qed_status
*/
int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn);
/**
* @brief - return the link params in a given bulletin board
*
* @param p_hwfn
* @param p_params - pointer to a struct to fill with link params
* @param p_bulletin
*/
void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_params *p_params,
struct qed_bulletin_content *p_bulletin);
/**
* @brief - return the link state in a given bulletin board
*
* @param p_hwfn
* @param p_link - pointer to a struct to fill with link state
* @param p_bulletin
*/
void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_state *p_link,
struct qed_bulletin_content *p_bulletin);
/**
* @brief - return the link capabilities in a given bulletin board
*
* @param p_hwfn
* @param p_link - pointer to a struct to fill with link capabilities
* @param p_bulletin
*/
void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_capabilities *p_link_caps,
struct qed_bulletin_content *p_bulletin);
void qed_iov_vf_task(struct work_struct *work);
#else
static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_params *params)
{
}
static inline void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_state *link)
{
}
static inline void
qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_capabilities *p_link_caps)
{
}
static inline void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
{
}
static inline void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
{
}
static inline void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
u8 *num_vlan_filters)
{
}
static inline bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
{
return false;
}
static inline void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
u16 *fw_major, u16 *fw_minor,
u16 *fw_rev, u16 *fw_eng)
{
}
static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
{
return -EINVAL;
}
static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
u8 rx_queue_id,
u16 sb,
u8 sb_index,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
u16 cqe_pbl_size, void __iomem **pp_prod)
{
return -EINVAL;
}
static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
u16 tx_queue_id,
u16 sb,
u8 sb_index,
dma_addr_t pbl_addr,
u16 pbl_size, void __iomem **pp_doorbell)
{
return -EINVAL;
}
static inline int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
u16 rx_qid, bool cqe_completion)
{
return -EINVAL;
}
static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
{
return -EINVAL;
}
static inline int
qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_update_params *p_params)
{
return -EINVAL;
}
static inline int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
{
return -EINVAL;
}
static inline int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
{
return -EINVAL;
}
static inline u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
{
return 0;
}
static inline int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
u8 vport_id,
u16 mtu,
u8 inner_vlan_removal,
enum qed_tpa_mode tpa_mode,
u8 max_buffers_per_cqe,
u8 only_untagged)
{
return -EINVAL;
}
static inline int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
{
return -EINVAL;
}
static inline int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
struct qed_filter_ucast *p_param)
{
return -EINVAL;
}
static inline void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
struct qed_filter_mcast *p_filter_cmd)
{
}
static inline int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
{
return -EINVAL;
}
static inline void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_params
*p_params,
struct qed_bulletin_content
*p_bulletin)
{
}
static inline void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_state *p_link,
struct qed_bulletin_content
*p_bulletin)
{
}
static inline void
__qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_capabilities *p_link_caps,
struct qed_bulletin_content *p_bulletin)
{
}
static inline void qed_iov_vf_task(struct work_struct *work)
{
}
#endif
#endif

View File

@ -112,6 +112,10 @@ struct qede_dev {
u32 dp_module;
u8 dp_level;
u32 flags;
#define QEDE_FLAG_IS_VF BIT(0)
#define IS_VF(edev) (!!((edev)->flags & QEDE_FLAG_IS_VF))
const struct qed_eth_ops *ops;
struct qed_dev_eth_info dev_info;

View File

@ -151,6 +151,8 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
int i, j, k;
for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) {
if (IS_VF(edev) && qede_stats_arr[i].pf_only)
continue;
strcpy(buf + j * ETH_GSTRING_LEN,
qede_stats_arr[i].string);
j++;
@ -194,8 +196,11 @@ static void qede_get_ethtool_stats(struct net_device *dev,
mutex_lock(&edev->qede_lock);
for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++)
for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++) {
if (IS_VF(edev) && qede_stats_arr[sidx].pf_only)
continue;
buf[cnt++] = QEDE_STATS_DATA(edev, sidx);
}
for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++) {
buf[cnt] = 0;
@ -214,6 +219,13 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
switch (stringset) {
case ETH_SS_STATS:
if (IS_VF(edev)) {
int i;
for (i = 0; i < QEDE_NUM_STATS; i++)
if (qede_stats_arr[i].pf_only)
num_stats--;
}
return num_stats + QEDE_NUM_RQSTATS;
case ETH_SS_PRIV_FLAGS:
return QEDE_PRI_FLAG_LEN;
@ -1142,7 +1154,34 @@ static const struct ethtool_ops qede_ethtool_ops = {
.self_test = qede_self_test,
};
static const struct ethtool_ops qede_vf_ethtool_ops = {
.get_settings = qede_get_settings,
.get_drvinfo = qede_get_drvinfo,
.get_msglevel = qede_get_msglevel,
.set_msglevel = qede_set_msglevel,
.get_link = qede_get_link,
.get_ringparam = qede_get_ringparam,
.set_ringparam = qede_set_ringparam,
.get_strings = qede_get_strings,
.get_ethtool_stats = qede_get_ethtool_stats,
.get_priv_flags = qede_get_priv_flags,
.get_sset_count = qede_get_sset_count,
.get_rxnfc = qede_get_rxnfc,
.set_rxnfc = qede_set_rxnfc,
.get_rxfh_indir_size = qede_get_rxfh_indir_size,
.get_rxfh_key_size = qede_get_rxfh_key_size,
.get_rxfh = qede_get_rxfh,
.set_rxfh = qede_set_rxfh,
.get_channels = qede_get_channels,
.set_channels = qede_set_channels,
};
void qede_set_ethtool_ops(struct net_device *dev)
{
dev->ethtool_ops = &qede_ethtool_ops;
struct qede_dev *edev = netdev_priv(dev);
if (IS_VF(edev))
dev->ethtool_ops = &qede_vf_ethtool_ops;
else
dev->ethtool_ops = &qede_ethtool_ops;
}

View File

@ -63,6 +63,7 @@ static const struct qed_eth_ops *qed_ops;
#define CHIP_NUM_57980S_100 0x1644
#define CHIP_NUM_57980S_50 0x1654
#define CHIP_NUM_57980S_25 0x1656
#define CHIP_NUM_57980S_IOV 0x1664
#ifndef PCI_DEVICE_ID_NX2_57980E
#define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40
@ -71,15 +72,22 @@ static const struct qed_eth_ops *qed_ops;
#define PCI_DEVICE_ID_57980S_100 CHIP_NUM_57980S_100
#define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50
#define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25
#define PCI_DEVICE_ID_57980S_IOV CHIP_NUM_57980S_IOV
#endif
enum qede_pci_private {
QEDE_PRIVATE_PF,
QEDE_PRIVATE_VF
};
static const struct pci_device_id qede_pci_tbl[] = {
{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), 0 },
{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), 0 },
{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), 0 },
{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), 0 },
{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), 0 },
{ PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), 0 },
{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
{PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
{ 0 }
};
@ -94,17 +102,87 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev,
struct qede_rx_queue *rxq);
static void qede_link_update(void *dev, struct qed_link_output *link);
#ifdef CONFIG_QED_SRIOV
static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos)
{
struct qede_dev *edev = netdev_priv(ndev);
if (vlan > 4095) {
DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
return -EINVAL;
}
DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
vlan, vf);
return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
}
static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
{
struct qede_dev *edev = netdev_priv(ndev);
DP_VERBOSE(edev, QED_MSG_IOV,
"Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n",
mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx);
if (!is_valid_ether_addr(mac)) {
DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
return -EINVAL;
}
return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
}
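/* Invoked by the PCI core when the number of VFs is changed through the
 * device's sriov_numvfs sysfs attribute, e.g.:
 *	echo 4 > /sys/bus/pci/devices/<BDF>/sriov_numvfs
 */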
static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
{
struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
struct qed_dev_info *qed_info = &edev->dev_info.common;
int rc;
DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);
rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);
/* Enable/Disable Tx switching for PF */
if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) {
struct qed_update_vport_params params;
memset(&params, 0, sizeof(params));
params.vport_id = 0;
params.update_tx_switching_flg = 1;
params.tx_switching_flg = num_vfs_param ? 1 : 0;
edev->ops->vport_update(edev->cdev, &params);
}
return rc;
}
#endif
static struct pci_driver qede_pci_driver = {
.name = "qede",
.id_table = qede_pci_tbl,
.probe = qede_probe,
.remove = qede_remove,
#ifdef CONFIG_QED_SRIOV
.sriov_configure = qede_sriov_configure,
#endif
};
static void qede_force_mac(void *dev, u8 *mac)
{
struct qede_dev *edev = dev;
ether_addr_copy(edev->ndev->dev_addr, mac);
ether_addr_copy(edev->primary_mac, mac);
}
static struct qed_eth_cb_ops qede_ll_ops = {
{
.link_update = qede_link_update,
},
.force_mac = qede_force_mac,
};
static int qede_netdev_event(struct notifier_block *this, unsigned long event,
@ -1730,6 +1808,49 @@ static struct rtnl_link_stats64 *qede_get_stats64(
return stats;
}
#ifdef CONFIG_QED_SRIOV
static int qede_get_vf_config(struct net_device *dev, int vfidx,
struct ifla_vf_info *ivi)
{
struct qede_dev *edev = netdev_priv(dev);
if (!edev->ops)
return -EINVAL;
return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
}
static int qede_set_vf_rate(struct net_device *dev, int vfidx,
int min_tx_rate, int max_tx_rate)
{
struct qede_dev *edev = netdev_priv(dev);
return edev->ops->iov->set_rate(edev->cdev, vfidx, max_tx_rate,
max_tx_rate);
}
static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
{
struct qede_dev *edev = netdev_priv(dev);
if (!edev->ops)
return -EINVAL;
return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
}
static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
int link_state)
{
struct qede_dev *edev = netdev_priv(dev);
if (!edev->ops)
return -EINVAL;
return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
}
#endif
static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
{
struct qed_update_vport_params params;
@ -2049,9 +2170,19 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_set_mac_address = qede_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = qede_change_mtu,
#ifdef CONFIG_QED_SRIOV
.ndo_set_vf_mac = qede_set_vf_mac,
.ndo_set_vf_vlan = qede_set_vf_vlan,
#endif
.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
.ndo_get_stats64 = qede_get_stats64,
#ifdef CONFIG_QED_SRIOV
.ndo_set_vf_link_state = qede_set_vf_link_state,
.ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
.ndo_get_vf_config = qede_get_vf_config,
.ndo_set_vf_rate = qede_set_vf_rate,
#endif
#ifdef CONFIG_QEDE_VXLAN
.ndo_add_vxlan_port = qede_add_vxlan_port,
.ndo_del_vxlan_port = qede_del_vxlan_port,
@ -2283,8 +2414,9 @@ enum qede_probe_mode {
};
static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
enum qede_probe_mode mode)
bool is_vf, enum qede_probe_mode mode)
{
struct qed_probe_params probe_params;
struct qed_slowpath_params params;
struct qed_dev_eth_info dev_info;
struct qede_dev *edev;
@ -2294,8 +2426,12 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
if (unlikely(dp_level & QED_LEVEL_INFO))
pr_notice("Starting qede probe\n");
cdev = qed_ops->common->probe(pdev, QED_PROTOCOL_ETH,
dp_module, dp_level);
memset(&probe_params, 0, sizeof(probe_params));
probe_params.protocol = QED_PROTOCOL_ETH;
probe_params.dp_module = dp_module;
probe_params.dp_level = dp_level;
probe_params.is_vf = is_vf;
cdev = qed_ops->common->probe(pdev, &probe_params);
if (!cdev) {
rc = -ENODEV;
goto err0;
@ -2329,6 +2465,9 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
goto err2;
}
if (is_vf)
edev->flags |= QEDE_FLAG_IS_VF;
qede_init_ndev(edev);
rc = register_netdev(edev->ndev);
@ -2360,12 +2499,24 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
bool is_vf = false;
u32 dp_module = 0;
u8 dp_level = 0;
switch ((enum qede_pci_private)id->driver_data) {
case QEDE_PRIVATE_VF:
if (debug & QED_LOG_VERBOSE_MASK)
dev_err(&pdev->dev, "Probing a VF\n");
is_vf = true;
break;
default:
if (debug & QED_LOG_VERBOSE_MASK)
dev_err(&pdev->dev, "Probing a PF\n");
}
qede_config_debug(debug, &dp_module, &dp_level);
return __qede_probe(pdev, dp_module, dp_level,
return __qede_probe(pdev, dp_module, dp_level, is_vf,
QEDE_PROBE_NORMAL);
}
@ -3062,6 +3213,7 @@ static int qede_start_queues(struct qede_dev *edev)
struct qed_dev *cdev = edev->cdev;
struct qed_update_vport_params vport_update_params;
struct qed_queue_start_common_params q_params;
struct qed_dev_info *qed_info = &edev->dev_info.common;
struct qed_start_vport_params start = {0};
bool reset_rss_indir = false;
@ -3155,6 +3307,12 @@ static int qede_start_queues(struct qede_dev *edev)
vport_update_params.update_vport_active_flg = 1;
vport_update_params.vport_active_flg = 1;
if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
qed_info->tx_switching) {
vport_update_params.update_tx_switching_flg = 1;
vport_update_params.tx_switching_flg = 1;
}
/* Fill struct with RSS params */
if (QEDE_RSS_CNT(edev) > 1) {
vport_update_params.update_rss_flg = 1;
@ -3451,6 +3609,11 @@ static int qede_set_mac_addr(struct net_device *ndev, void *p)
return -EFAULT;
}
if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
DP_NOTICE(edev, "qed prevents setting MAC\n");
return -EINVAL;
}
ether_addr_copy(ndev->dev_addr, addr->sa_data);
if (!netif_running(ndev)) {

View File

@ -285,6 +285,63 @@
#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12
#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024
#define PXP_VF_BAR0_START_IGU 0
#define PXP_VF_BAR0_IGU_LENGTH 0x3000
#define PXP_VF_BAR0_END_IGU (PXP_VF_BAR0_START_IGU + \
PXP_VF_BAR0_IGU_LENGTH - 1)
#define PXP_VF_BAR0_START_DQ 0x3000
#define PXP_VF_BAR0_DQ_LENGTH 0x200
#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0
#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS (PXP_VF_BAR0_START_DQ + \
PXP_VF_BAR0_DQ_OPAQUE_OFFSET)
#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \
+ 4)
#define PXP_VF_BAR0_END_DQ (PXP_VF_BAR0_START_DQ + \
PXP_VF_BAR0_DQ_LENGTH - 1)
#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200
#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200
#define PXP_VF_BAR0_END_TSDM_ZONE_B (PXP_VF_BAR0_START_TSDM_ZONE_B \
+ \
PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- 1)
#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400
#define PXP_VF_BAR0_END_MSDM_ZONE_B (PXP_VF_BAR0_START_MSDM_ZONE_B \
+ \
PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- 1)
#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600
#define PXP_VF_BAR0_END_USDM_ZONE_B (PXP_VF_BAR0_START_USDM_ZONE_B \
+ \
PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- 1)
#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800
#define PXP_VF_BAR0_END_XSDM_ZONE_B (PXP_VF_BAR0_START_XSDM_ZONE_B \
+ \
PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- 1)
#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00
#define PXP_VF_BAR0_END_YSDM_ZONE_B (PXP_VF_BAR0_START_YSDM_ZONE_B \
+ \
PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- 1)
#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00
#define PXP_VF_BAR0_END_PSDM_ZONE_B (PXP_VF_BAR0_START_PSDM_ZONE_B \
+ \
PXP_VF_BAR0_SDM_LENGTH_ZONE_B \
- 1)
#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000
#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000
#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32
/* ILT Records */
#define PXP_NUM_ILT_RECORDS_BB 7600
#define PXP_NUM_ILT_RECORDS_K2 11000
@ -327,9 +384,14 @@ struct regpair {
__le32 hi;
};
struct vf_pf_channel_eqe_data {
struct regpair msg_addr;
};
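/* Presumably, msg_addr carries the physical address of the VF's request
 * buffer; the PF reads it when servicing a VF<->PF channel event-queue
 * element (see 'vf_pf_channel' in union event_ring_data below).
 */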
/* Event Data Union */
union event_ring_data {
u8 bytes[8];
struct vf_pf_channel_eqe_data vf_pf_channel;
struct async_data async_info;
};

View File

@ -13,6 +13,7 @@
#include <linux/if_link.h>
#include <linux/qed/eth_common.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_iov_if.h>
struct qed_dev_eth_info {
struct qed_dev_info common;
@ -34,6 +35,8 @@ struct qed_update_vport_params {
u8 vport_id;
u8 update_vport_active_flg;
u8 vport_active_flg;
u8 update_tx_switching_flg;
u8 tx_switching_flg;
u8 update_accept_any_vlan_flg;
u8 accept_any_vlan;
u8 update_rss_flg;
@ -121,10 +124,14 @@ struct qed_tunn_params {
struct qed_eth_cb_ops {
struct qed_common_cb_ops common;
void (*force_mac) (void *dev, u8 *mac);
};
struct qed_eth_ops {
const struct qed_common_ops *common;
#ifdef CONFIG_QED_SRIOV
const struct qed_iov_hv_ops *iov;
#endif
int (*fill_dev_info)(struct qed_dev *cdev,
struct qed_dev_eth_info *info);
@ -133,6 +140,8 @@ struct qed_eth_ops {
struct qed_eth_cb_ops *ops,
void *cookie);
bool(*check_mac) (struct qed_dev *cdev, u8 *mac);
int (*vport_start)(struct qed_dev *cdev,
struct qed_start_vport_params *params);

View File

@ -93,6 +93,7 @@ struct qed_dev_info {
u32 flash_size;
u8 mf_mode;
bool tx_switching;
};
enum qed_sb_type {
@ -140,6 +141,13 @@ struct qed_link_output {
u32 pause_config;
};
struct qed_probe_params {
enum qed_protocol protocol;
u32 dp_module;
u8 dp_level;
bool is_vf;
};
#define QED_DRV_VER_STR_SIZE 12
struct qed_slowpath_params {
u32 int_mode;
@ -207,8 +215,7 @@ struct qed_common_ops {
struct qed_selftest_ops *selftest;
struct qed_dev* (*probe)(struct pci_dev *dev,
enum qed_protocol protocol,
u32 dp_module, u8 dp_level);
struct qed_probe_params *params);
void (*remove)(struct qed_dev *cdev);

View File

@ -0,0 +1,34 @@
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
*
* This software is available under the terms of the GNU General Public License
* (GPL) Version 2, available from the file COPYING in the main directory of
* this source tree.
*/
#ifndef _QED_IOV_IF_H
#define _QED_IOV_IF_H
#include <linux/qed/qed_if.h>
/* Structs used by PF to control and manipulate child VFs */
struct qed_iov_hv_ops {
int (*configure)(struct qed_dev *cdev, int num_vfs_param);
int (*set_mac) (struct qed_dev *cdev, u8 *mac, int vfid);
int (*set_vlan) (struct qed_dev *cdev, u16 vid, int vfid);
int (*get_config) (struct qed_dev *cdev, int vf_id,
struct ifla_vf_info *ivi);
int (*set_link_state) (struct qed_dev *cdev, int vf_id,
int link_state);
int (*set_spoof) (struct qed_dev *cdev, int vfid, bool val);
int (*set_rate) (struct qed_dev *cdev, int vfid,
u32 min_rate, u32 max_rate);
};
#endif