scsi: lpfc: Convert ring number to hardware queue for nvme wqe posting.

SLI4 nvme functions are passing the SLI3 ring number when posting a wqe to
hardware. The argument should indicate the hardware queue to use, not the
ring number.

Replace the ring number with the hardware queue that should be used; a short
sketch of the new calling convention follows the commit metadata below.

Note: SCSI avoided this issue as it utilized an older lpfc_issue_iocb
routine that properly adapts.

Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Authored by James Smart on 2019-01-28 11:14:26 -08:00; committed by Martin K. Petersen
parent 4c47efc140
commit 1fbf974250
9 changed files with 60 additions and 34 deletions
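
Before the per-file hunks, here is a minimal sketch of the interface change and the new caller pattern, pieced together from the hunks that follow; the wrapper function below is invented purely for illustration and is not part of the patch.

/* Old prototype: callers passed an SLI3-style ring number (e.g. LPFC_FCP_RING)
 * and the routine resolved the work queue via pwqe->hba_wqidx:
 *
 *   int lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t rnum,
 *                           struct lpfc_iocbq *iocbq);
 *
 * New prototype: callers hand in the hardware queue the I/O was assigned to,
 * so the routine can use qp->nvme_wq and qp->nvme_cq_map directly:
 *
 *   int lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
 *                           struct lpfc_iocbq *pwqe);
 */

/* Hypothetical caller (name invented for this sketch): the NVMe buffer now
 * carries a pointer to its hardware queue, so no ring number is needed. */
static int example_post_nvme_wqe(struct lpfc_hba *phba,
				 struct lpfc_nvme_buf *lpfc_ncmd)
{
	return lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq,
				   &lpfc_ncmd->cur_iocbq);
}

The NVMET paths resolve the queue from the receive context instead (ctxp->hdwq), falling back to &phba->sli4_hba.hdwq[rsp->hwqid] or hdwq[0] when it has not been set yet, as the lpfc_nvmet.c hunks show.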

drivers/scsi/lpfc/lpfc_crtn.h

@@ -315,8 +315,8 @@ void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
struct lpfc_iocbq *, uint32_t);
int lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t rnum,
struct lpfc_iocbq *iocbq);
int lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
struct lpfc_iocbq *pwqe);
struct lpfc_sglq *__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xri);
struct lpfc_sglq *__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba,
struct lpfc_iocbq *piocbq);

drivers/scsi/lpfc/lpfc_init.c

@@ -3734,7 +3734,8 @@ lpfc_io_buf_replenish(struct lpfc_hba *phba, struct list_head *cbuf)
return cnt;
cnt++;
qp = &phba->sli4_hba.hdwq[idx];
lpfc_cmd->hdwq = idx;
lpfc_cmd->hdwq_no = idx;
lpfc_cmd->hdwq = qp;
lpfc_cmd->cur_iocbq.wqe_cmpl = NULL;
lpfc_cmd->cur_iocbq.iocb_cmpl = NULL;
spin_lock(&qp->io_buf_list_put_lock);

drivers/scsi/lpfc/lpfc_nvme.c

@@ -528,7 +528,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
lpfc_nvmeio_data(phba, "NVME LS XMIT: xri x%x iotag x%x to x%06x\n",
genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);
rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, genwqe);
rc = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], genwqe);
if (rc) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
"6045 Issue GEN REQ WQE to NPORT x%x "
@@ -1605,7 +1605,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
lpfc_ncmd->cur_iocbq.sli4_xritag,
lpfc_queue_info->index, ndlp->nlp_DID);
ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq);
ret = lpfc_sli4_issue_wqe(phba, lpfc_ncmd->hdwq, &lpfc_ncmd->cur_iocbq);
if (ret) {
atomic_inc(&lport->xmt_fcp_wqerr);
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
@@ -1867,7 +1867,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
abts_buf->hba_wqidx = nvmereq_wqe->hba_wqidx;
abts_buf->vport = vport;
abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
ret_val = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_buf);
ret_val = lpfc_sli4_issue_wqe(phba, lpfc_nbuf->hdwq, abts_buf);
spin_unlock_irqrestore(&phba->hbalock, flags);
if (ret_val) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
@@ -1978,7 +1978,8 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
lpfc_ncmd->start_time = jiffies;
lpfc_ncmd->flags = 0;
lpfc_ncmd->hdwq = idx;
lpfc_ncmd->hdwq = qp;
lpfc_ncmd->hdwq_no = idx;
/* Rsp SGE will be filled in when we rcv an IO
* from the NVME Layer to be sent.
@@ -2026,7 +2027,7 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
lpfc_ncmd->ndlp = NULL;
lpfc_ncmd->flags &= ~LPFC_BUMP_QDEPTH;
qp = &phba->sli4_hba.hdwq[lpfc_ncmd->hdwq];
qp = lpfc_ncmd->hdwq;
if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6310 XB release deferred for "

drivers/scsi/lpfc/lpfc_nvme.h

@@ -79,7 +79,8 @@ struct lpfc_nvme_buf {
dma_addr_t dma_phys_sgl;
struct sli4_sge *dma_sgl;
struct lpfc_iocbq cur_iocbq;
uint16_t hdwq;
struct lpfc_sli4_hdw_queue *hdwq;
uint16_t hdwq_no;
uint16_t cpu;
/* NVME specific fields */

drivers/scsi/lpfc/lpfc_nvmet.c

@@ -845,7 +845,7 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
lpfc_nvmeio_data(phba, "NVMET LS RESP: xri x%x wqidx x%x len x%x\n",
ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);
rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq);
rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
if (rc == WQE_SUCCESS) {
/*
* Okay to repost buffer here, but wait till cmpl
@@ -901,6 +901,11 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
else
ctxp->ts_nvme_data = ktime_get_ns();
}
/* Setup the hdw queue if not already set */
if (!ctxp->hdwq)
ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];
if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
int id = smp_processor_id();
if (id < LPFC_CHECK_CPU_CNT) {
@@ -946,7 +951,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
ctxp->oxid, rsp->op, rsp->rsplen);
ctxp->flag |= LPFC_NVMET_IO_INP;
rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (!ctxp->ts_cmd_nvme)
@@ -965,7 +970,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
* WQE release CQE
*/
ctxp->flag |= LPFC_NVMET_DEFER_WQFULL;
wq = phba->sli4_hba.hdwq[rsp->hwqid].nvme_wq;
wq = ctxp->hdwq->nvme_wq;
pring = wq->pring;
spin_lock_irqsave(&pring->ring_lock, iflags);
list_add_tail(&nvmewqeq->list, &wq->wqfull_list);
@@ -1015,6 +1020,9 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
if (phba->pport->load_flag & FC_UNLOADING)
return;
if (!ctxp->hdwq)
ctxp->hdwq = &phba->sli4_hba.hdwq[0];
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
ctxp->oxid, ctxp->flag, ctxp->state);
@@ -1039,7 +1047,7 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
if (ctxp->flag & LPFC_NVMET_DEFER_WQFULL) {
lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
ctxp->oxid);
wq = phba->sli4_hba.hdwq[ctxp->wqeq->hba_wqidx].nvme_wq;
wq = ctxp->hdwq->nvme_wq;
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
lpfc_nvmet_wqfull_flush(phba, wq, ctxp);
return;
@@ -1649,6 +1657,7 @@ lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
struct lpfc_sli_ring *pring;
struct lpfc_iocbq *nvmewqeq;
struct lpfc_nvmet_rcv_ctx *ctxp;
unsigned long iflags;
int rc;
@@ -1662,7 +1671,8 @@ lpfc_nvmet_wqfull_process(struct lpfc_hba *phba,
list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq,
list);
spin_unlock_irqrestore(&pring->ring_lock, iflags);
rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmewqeq->context2;
rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, nvmewqeq);
spin_lock_irqsave(&pring->ring_lock, iflags);
if (rc == -EBUSY) {
/* WQ was full again, so put it back on the list */
@@ -1765,6 +1775,7 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
ctxp->state = LPFC_NVMET_STE_LS_RCV;
ctxp->entry_cnt = 1;
ctxp->rqb_buffer = (void *)nvmebuf;
ctxp->hdwq = &phba->sli4_hba.hdwq[0];
lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n",
oxid, size, sid);
@@ -1987,6 +1998,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
ctxp->flag = 0;
ctxp->ctxbuf = ctx_buf;
ctxp->rqb_buffer = (void *)nvmebuf;
ctxp->hdwq = NULL;
spin_lock_init(&ctxp->ctxlock);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
@@ -3044,7 +3056,10 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
abts_wqeq->iocb_flag |= LPFC_IO_NVME;
abts_wqeq->context2 = ctxp;
abts_wqeq->vport = phba->pport;
rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
if (!ctxp->hdwq)
ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
spin_unlock_irqrestore(&phba->hbalock, flags);
if (rc == WQE_SUCCESS) {
atomic_inc(&tgtp->xmt_abort_sol);
@@ -3096,7 +3111,10 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
abts_wqeq->iocb_cmpl = NULL;
abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
if (!ctxp->hdwq)
ctxp->hdwq = &phba->sli4_hba.hdwq[abts_wqeq->hba_wqidx];
rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
spin_unlock_irqrestore(&phba->hbalock, flags);
if (rc == WQE_SUCCESS) {
return 0;
@@ -3165,7 +3183,7 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
abts_wqeq->iocb_cmpl = 0;
abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq);
rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq);
spin_unlock_irqrestore(&phba->hbalock, flags);
if (rc == WQE_SUCCESS) {
atomic_inc(&tgtp->xmt_abort_unsol);

drivers/scsi/lpfc/lpfc_nvmet.h

@@ -140,6 +140,7 @@ struct lpfc_nvmet_rcv_ctx {
#define LPFC_NVMET_DEFER_WQFULL 0x40 /* Waiting on a free WQE */
struct rqb_dmabuf *rqb_buffer;
struct lpfc_nvmet_ctxbuf *ctxbuf;
struct lpfc_sli4_hdw_queue *hdwq;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint64_t ts_isr_cmd;

drivers/scsi/lpfc/lpfc_scsi.c

@@ -748,7 +748,8 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
lpfc_cmd->prot_data_type = 0;
#endif
lpfc_cmd->hdwq = idx;
lpfc_cmd->hdwq = qp;
lpfc_cmd->hdwq_no = idx;
lpfc_cmd->fcp_cmnd = (lpfc_cmd->data + sgl_size);
lpfc_cmd->fcp_rsp = (struct fcp_rsp *)((uint8_t *)lpfc_cmd->fcp_cmnd +
@@ -861,7 +862,7 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
psb->seg_cnt = 0;
psb->prot_seg_cnt = 0;
qp = &phba->sli4_hba.hdwq[psb->hdwq];
qp = psb->hdwq;
if (psb->exch_busy) {
spin_lock_irqsave(&qp->abts_scsi_buf_list_lock, iflag);
psb->pCmd = NULL;
@@ -4018,7 +4019,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
sli4 = (phba->sli_rev == LPFC_SLI_REV4);
piocbq->iocb.un.fcpi.fcpi_XRdy = 0;
idx = lpfc_cmd->hdwq;
idx = lpfc_cmd->hdwq_no;
if (phba->sli4_hba.hdwq)
hdwq = &phba->sli4_hba.hdwq[idx];
@@ -4557,7 +4558,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
return 0;
out_host_busy_free_buf:
idx = lpfc_cmd->hdwq;
idx = lpfc_cmd->hdwq_no;
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
if (phba->sli4_hba.hdwq) {
switch (lpfc_cmd->fcp_cmnd->fcpCntl3) {

drivers/scsi/lpfc/lpfc_scsi.h

@@ -138,7 +138,8 @@ struct lpfc_scsi_buf {
dma_addr_t dma_phys_sgl;
struct ulp_bde64 *dma_sgl;
struct lpfc_iocbq cur_iocbq;
uint16_t hdwq;
struct lpfc_sli4_hdw_queue *hdwq;
uint16_t hdwq_no;
uint16_t cpu;
/* SCSI specific fields */

drivers/scsi/lpfc/lpfc_sli.c

@@ -10005,7 +10005,7 @@ lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
*/
if (!(piocb->iocb_flag & LPFC_USE_FCPWQIDX)) {
lpfc_cmd = (struct lpfc_scsi_buf *)piocb->context1;
piocb->hba_wqidx = lpfc_cmd->hdwq;
piocb->hba_wqidx = lpfc_cmd->hdwq_no;
}
return phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq->pring;
} else {
@@ -11301,6 +11301,7 @@ lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_iocbq *abtsiocbp;
union lpfc_wqe128 *abts_wqe;
int retval;
int idx = cmdiocb->hba_wqidx;
/*
* There are certain command types we don't want to abort. And we
@@ -11356,7 +11357,8 @@ lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
abtsiocbp->iocb_flag |= LPFC_IO_NVME;
abtsiocbp->vport = vport;
abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
retval = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abtsiocbp);
retval = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[idx],
abtsiocbp);
if (retval) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
"6147 Failed abts issue_wqe with status x%x "
@@ -19617,7 +19619,7 @@ lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
* @pwqe: Pointer to command WQE.
**/
int
lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
struct lpfc_iocbq *pwqe)
{
union lpfc_wqe128 *wqe = &pwqe->wqe;
@@ -19659,12 +19661,12 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
/* NVME_FCREQ and NVME_ABTS requests */
if (pwqe->iocb_flag & LPFC_IO_NVME) {
/* Get the IO distribution (hba_wqidx) for WQ assignment. */
pring = phba->sli4_hba.hdwq[pwqe->hba_wqidx].nvme_wq->pring;
wq = qp->nvme_wq;
pring = wq->pring;
bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map);
spin_lock_irqsave(&pring->ring_lock, iflags);
wq = phba->sli4_hba.hdwq[pwqe->hba_wqidx].nvme_wq;
bf_set(wqe_cqid, &wqe->generic.wqe_com,
phba->sli4_hba.hdwq[pwqe->hba_wqidx].nvme_cq->queue_id);
ret = lpfc_sli4_wq_put(wq, wqe);
if (ret) {
spin_unlock_irqrestore(&pring->ring_lock, iflags);
@@ -19678,9 +19680,9 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
/* NVMET requests */
if (pwqe->iocb_flag & LPFC_IO_NVMET) {
/* Get the IO distribution (hba_wqidx) for WQ assignment. */
pring = phba->sli4_hba.hdwq[pwqe->hba_wqidx].nvme_wq->pring;
wq = qp->nvme_wq;
pring = wq->pring;
spin_lock_irqsave(&pring->ring_lock, iflags);
ctxp = pwqe->context2;
sglq = ctxp->ctxbuf->sglq;
if (pwqe->sli4_xritag == NO_XRI) {
@@ -19689,9 +19691,9 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, uint32_t ring_number,
}
bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
pwqe->sli4_xritag);
wq = phba->sli4_hba.hdwq[pwqe->hba_wqidx].nvme_wq;
bf_set(wqe_cqid, &wqe->generic.wqe_com,
phba->sli4_hba.hdwq[pwqe->hba_wqidx].nvme_cq->queue_id);
bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map);
spin_lock_irqsave(&pring->ring_lock, iflags);
ret = lpfc_sli4_wq_put(wq, wqe);
if (ret) {
spin_unlock_irqrestore(&pring->ring_lock, iflags);