qed: Pass vf_params when creating a queue-cid

We're going to need additional information for queue-cids
that a PF creates for its VFs, so start by refactoring the existing
initialization logic to receive a structure encapsulating the
VF-specific information that needs to be provided.

This also introduces QED_QUEUE_CID_SELF - each queue-cid now holds
an indication of whether it belongs to the hw-function holding it
[whether that's a PF or a VF], or else the id of the VF it belongs
to.

Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Mintz, Yuval, 2017-06-04 13:31:02 +03:00; committed by David S. Miller
commit 3946497aff (parent f604b17d7f)
3 files changed, 95 insertions(+), 45 deletions(-)
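
For orientation, here is a minimal, self-contained sketch of the scheme this
patch introduces; it uses a reduced userspace stand-in for struct qed_queue_cid
and a hypothetical helper (cid_is_for_vf), not actual driver code. The idea is
that a queue-cid records the relative id of the VF it serves, and the
QED_QUEUE_CID_SELF sentinel marks queue-cids that belong to the hw-function
itself, replacing the old is_vf boolean.

/* Minimal stand-in illustration (not driver code) of the new scheme:
 * the queue-cid records which VF owns it, or QED_QUEUE_CID_SELF when
 * it belongs to the hw-function itself.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define QED_QUEUE_CID_SELF 0xff

struct queue_cid {              /* reduced stand-in for struct qed_queue_cid */
        uint8_t vfid;           /* owning VF, or QED_QUEUE_CID_SELF */
        uint8_t vf_qid;         /* queue index within the VF's own range */
        bool vf_legacy;         /* legacy VFs keep the Rx producer elsewhere */
};

/* Hypothetical helper: the old boolean `is_vf' test becomes a
 * comparison against the sentinel value.
 */
static bool cid_is_for_vf(const struct queue_cid *cid)
{
        return cid->vfid != QED_QUEUE_CID_SELF;
}

int main(void)
{
        struct queue_cid own = { .vfid = QED_QUEUE_CID_SELF };
        struct queue_cid vf3 = { .vfid = 3, .vf_qid = 0, .vf_legacy = false };

        printf("own queue serves a VF: %d\n", cid_is_for_vf(&own));    /* 0 */
        printf("vf3 queue serves a VF: %d\n", cid_is_for_vf(&vf3));    /* 1 */
        return 0;
}

Callers configuring the PF's own queues simply pass a NULL
qed_queue_cid_vf_params, while the PF's SR-IOV mailbox handlers fill the
structure before calling qed_eth_queue_to_cid(), as the diff below shows.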

drivers/net/ethernet/qlogic/qed/qed_l2.c

@ -155,7 +155,8 @@ void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
struct qed_queue_cid *p_cid)
{
/* VFs' CIDs are 0-based in PF-view, and uninitialized on VF */
if (!p_cid->is_vf && IS_PF(p_hwfn->cdev))
if ((p_cid->vfid == QED_QUEUE_CID_SELF) &&
IS_PF(p_hwfn->cdev))
qed_cxt_release_cid(p_hwfn, p_cid->cid);
vfree(p_cid);
}
@ -163,14 +164,13 @@ void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
/* This internal function is only meant to be called directly by PFs
 * initializing CIDs for their VFs.
 */
struct qed_queue_cid *
static struct qed_queue_cid *
_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
u32 cid,
u8 vf_qid,
struct qed_queue_start_common_params *p_params)
struct qed_queue_start_common_params *p_params,
struct qed_queue_cid_vf_params *p_vf_params)
{
bool b_is_same = (p_hwfn->hw_info.opaque_fid == opaque_fid);
struct qed_queue_cid *p_cid;
int rc;
@ -181,7 +181,6 @@ _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
p_cid->opaque_fid = opaque_fid;
p_cid->cid = cid;
p_cid->vf_qid = vf_qid;
p_cid->p_owner = p_hwfn;
/* Fill in parameters */
@ -191,6 +190,15 @@ _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
p_cid->sb_idx = p_params->sb_idx;
/* Fill-in bits related to VFs' queues if information was provided */
if (p_vf_params) {
p_cid->vfid = p_vf_params->vfid;
p_cid->vf_qid = p_vf_params->vf_qid;
p_cid->b_legacy_vf = p_vf_params->vf_legacy;
} else {
p_cid->vfid = QED_QUEUE_CID_SELF;
}
/* Don't try calculating the absolute indices for VFs */
if (IS_VF(p_hwfn->cdev)) {
p_cid->abs = p_cid->rel;
@ -212,7 +220,7 @@ _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
/* In case of a PF configuring its VF's queues, the stats-id is already
* absolute [since there's a single index that's suitable per-VF].
*/
if (b_is_same) {
if (p_cid->vfid == QED_QUEUE_CID_SELF) {
rc = qed_fw_vport(p_hwfn, p_cid->rel.stats_id,
&p_cid->abs.stats_id);
if (rc)
@ -221,11 +229,6 @@ _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
p_cid->abs.stats_id = p_cid->rel.stats_id;
}
/* This is tricky - we're actually interested in whether this is a PF
 * entry meant for the VF.
 */
if (!b_is_same)
p_cid->is_vf = true;
out:
DP_VERBOSE(p_hwfn,
QED_MSG_SP,
@ -246,32 +249,47 @@ _qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
return NULL;
}
static struct qed_queue_cid *qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
u16 opaque_fid, struct
qed_queue_start_common_params
*p_params)
struct qed_queue_cid *
qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
struct qed_queue_start_common_params *p_params,
struct qed_queue_cid_vf_params *p_vf_params)
{
struct qed_queue_cid *p_cid;
bool b_legacy_vf = false;
u32 cid = 0;
/* Currently, PF doesn't need to allocate CIDs for any VF */
if (p_vf_params)
b_legacy_vf = true;
/* Get a unique firmware CID for this queue, in case it's a PF.
 * VFs don't need a CID as the queue configuration will be done
 * by the PF.
 */
if (IS_PF(p_hwfn->cdev)) {
if (IS_PF(p_hwfn->cdev) && !b_legacy_vf) {
if (qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid)) {
DP_NOTICE(p_hwfn, "Failed to acquire cid\n");
return NULL;
}
}
p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid, 0, p_params);
if (!p_cid && IS_PF(p_hwfn->cdev))
p_cid = _qed_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
p_params, p_vf_params);
if (!p_cid && IS_PF(p_hwfn->cdev) && !b_legacy_vf)
qed_cxt_release_cid(p_hwfn, cid);
return p_cid;
}
static struct qed_queue_cid *
qed_eth_queue_to_cid_pf(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
struct qed_queue_start_common_params *p_params)
{
return qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params,
NULL);
}
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_start_params *p_params)
{
@ -799,7 +817,7 @@ int qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
if (p_cid->is_vf) {
if (p_cid->vfid != QED_QUEUE_CID_SELF) {
p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
DP_VERBOSE(p_hwfn, QED_MSG_SP,
"Queue%s is meant for VF rxq[%02x]\n",
@ -849,7 +867,7 @@ qed_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
int rc;
/* Allocate a CID for the queue */
p_cid = qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params);
p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params);
if (!p_cid)
return -ENOMEM;
@ -951,10 +969,11 @@ qed_eth_pf_rx_queue_stop(struct qed_hwfn *p_hwfn,
/* Cleaning the queue requires the completion to arrive there.
* In addition, VFs require the answer to come as eqe to PF.
*/
p_ramrod->complete_cqe_flg = (!p_cid->is_vf &&
p_ramrod->complete_cqe_flg = ((p_cid->vfid == QED_QUEUE_CID_SELF) &&
!b_eq_completion_only) ||
b_cqe_completion;
p_ramrod->complete_event_flg = p_cid->is_vf || b_eq_completion_only;
p_ramrod->complete_event_flg = (p_cid->vfid != QED_QUEUE_CID_SELF) ||
b_eq_completion_only;
return qed_spq_post(p_hwfn, p_ent, NULL);
}
@ -1053,7 +1072,7 @@ qed_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
struct qed_queue_cid *p_cid;
int rc;
p_cid = qed_eth_queue_to_cid(p_hwfn, opaque_fid, p_params);
p_cid = qed_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params);
if (!p_cid)
return -EINVAL;

drivers/net/ethernet/qlogic/qed/qed_l2.h

@ -278,6 +278,7 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
void qed_reset_vport_stats(struct qed_dev *cdev);
#define MAX_QUEUES_PER_QZONE (sizeof(unsigned long) * 8)
#define QED_QUEUE_CID_SELF (0xff)
/* Almost identical to the qed_queue_start_common_params,
* but here we maintain the SB index in IGU CAM.
@ -288,6 +289,25 @@ struct qed_queue_cid_params {
u8 stats_id;
};
/* Additional parameters required for the initialization of the queue_cid,
 * relevant only for a PF initializing one for its VFs.
 */
struct qed_queue_cid_vf_params {
/* Should match the VF's relative index */
u8 vfid;
/* 0-based queue index. Should reflect the relative qzone the
* VF thinks is associated with it [in its range].
*/
u8 vf_qid;
/* Indicates the VF is legacy, which differs in:
 * - Producers are placed in a different location.
 */
bool vf_legacy;
};
struct qed_queue_cid {
/* For stats-id, the `rel' is actually absolute as well */
struct qed_queue_cid_params rel;
@ -305,7 +325,7 @@ struct qed_queue_cid {
* Notice this is relevant on the *PF* queue-cid of its VF's queues,
* and not on the VF itself.
*/
bool is_vf;
u8 vfid;
u8 vf_qid;
/* Legacy VFs might have Rx producer located elsewhere */
@ -321,12 +341,11 @@ void qed_l2_free(struct qed_hwfn *p_hwfn);
void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
struct qed_queue_cid *p_cid);
struct qed_queue_cid *_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
u32 cid,
u8 vf_qid,
struct qed_queue_start_common_params
*p_params);
struct qed_queue_cid *
qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
struct qed_queue_start_common_params *p_params,
struct qed_queue_cid_vf_params *p_vf_params);
int
qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,

drivers/net/ethernet/qlogic/qed/qed_sriov.c

@ -1947,6 +1947,7 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
struct qed_vf_info *vf)
{
struct qed_queue_start_common_params params;
struct qed_queue_cid_vf_params vf_params;
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_NO_RESOURCE;
struct qed_vf_q_info *p_queue;
@ -1965,6 +1966,10 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
/* Acquire a new queue-cid */
p_queue = &vf->vf_queues[req->rx_qid];
if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
ETH_HSI_VER_NO_PKT_LEN_TUNN)
b_legacy_vf = true;
memset(&params, 0, sizeof(params));
params.queue_id = p_queue->fw_rx_qid;
params.vport_id = vf->vport_id;
@ -1975,26 +1980,23 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
params.p_sb = &sb_dummy;
params.sb_idx = req->sb_index;
p_queue->p_rx_cid = _qed_eth_queue_to_cid(p_hwfn,
vf->opaque_fid,
p_queue->fw_cid,
req->rx_qid, &params);
memset(&vf_params, 0, sizeof(vf_params));
vf_params.vfid = vf->relative_vf_id;
vf_params.vf_qid = (u8)req->rx_qid;
vf_params.vf_legacy = b_legacy_vf;
p_queue->p_rx_cid = qed_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
&params, &vf_params);
if (!p_queue->p_rx_cid)
goto out;
/* Legacy VFs have their Producers in a different location, which they
* calculate on their own and clean the producer prior to this.
*/
if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
ETH_HSI_VER_NO_PKT_LEN_TUNN) {
b_legacy_vf = true;
} else {
if (!b_legacy_vf)
REG_WR(p_hwfn,
GTT_BAR0_MAP_REG_MSDM_RAM +
MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
0);
}
p_queue->p_rx_cid->b_legacy_vf = b_legacy_vf;
rc = qed_eth_rxq_start_ramrod(p_hwfn,
p_queue->p_rx_cid,
@ -2273,11 +2275,13 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
struct qed_vf_info *vf)
{
struct qed_queue_start_common_params params;
struct qed_queue_cid_vf_params vf_params;
struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
u8 status = PFVF_STATUS_NO_RESOURCE;
struct vfpf_start_txq_tlv *req;
struct qed_vf_q_info *p_queue;
struct qed_sb_info sb_dummy;
bool b_vf_legacy = false;
int rc;
u16 pq;
@ -2292,6 +2296,10 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
/* Acquire a new queue-cid */
p_queue = &vf->vf_queues[req->tx_qid];
if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
ETH_HSI_VER_NO_PKT_LEN_TUNN)
b_vf_legacy = true;
params.queue_id = p_queue->fw_tx_qid;
params.vport_id = vf->vport_id;
params.stats_id = vf->abs_vf_id + 0x10;
@ -2302,10 +2310,14 @@ static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
params.p_sb = &sb_dummy;
params.sb_idx = req->sb_index;
p_queue->p_tx_cid = _qed_eth_queue_to_cid(p_hwfn,
vf->opaque_fid,
p_queue->fw_cid,
req->tx_qid, &params);
memset(&vf_params, 0, sizeof(vf_params));
vf_params.vfid = vf->relative_vf_id;
vf_params.vf_qid = (u8)req->tx_qid;
vf_params.vf_legacy = b_vf_legacy;
p_queue->p_tx_cid = qed_eth_queue_to_cid(p_hwfn,
vf->opaque_fid,
&params, &vf_params);
if (!p_queue->p_tx_cid)
goto out;