scsi: lpfc: NVME Initiator: bind to nvme_fc api

NVME Initiator: Tie in to NVME Fabrics nvme_fc LLDD initiator api

Adds the routines to:
- register and deregister the FC port as an nvme-fc initiator localport
- register and deregister remote FC ports as nvme-fc remoteports
- bind nvme queues to adapter WQs
- send/perform NVME LS requests
- send/perform NVME FCP initiator I/O operations
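
For orientation, the sketch below shows the general shape of that binding against the nvme_fc LLDD initiator API (include/linux/nvme-fc-driver.h): the driver fills a struct nvme_fc_port_template with its queue, LS, FCP and abort entry points plus resource limits, and later registers ports against it. The demo_* handler names, stub bodies and limits are illustrative placeholders only; lpfc's real handlers live in the new lpfc_nvme.c, whose diff is suppressed further down as too large.

/* Sketch only: the shape of an nvme_fc initiator binding. Handler bodies
 * are stubs; a real LLDD maps them onto its hardware work queues.
 */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/nvme-fc-driver.h>

static void demo_localport_delete(struct nvme_fc_local_port *lport) { }
static void demo_remoteport_delete(struct nvme_fc_remote_port *rport) { }

static int demo_create_queue(struct nvme_fc_local_port *lport,
			     unsigned int qidx, u16 qsize, void **handle)
{
	*handle = NULL;		/* real driver: bind qidx to an adapter WQ */
	return 0;
}

static void demo_delete_queue(struct nvme_fc_local_port *lport,
			      unsigned int qidx, void *handle) { }

static int demo_ls_req(struct nvme_fc_local_port *lport,
		       struct nvme_fc_remote_port *rport,
		       struct nvmefc_ls_req *lsreq)
{
	return -ENODEV;		/* real driver: issue the LS as a WQE */
}

static int demo_fcp_io(struct nvme_fc_local_port *lport,
		       struct nvme_fc_remote_port *rport,
		       void *hw_queue_handle, struct nvmefc_fcp_req *freq)
{
	return -ENODEV;		/* real driver: post the FCP command WQE */
}

static void demo_ls_abort(struct nvme_fc_local_port *lport,
			  struct nvme_fc_remote_port *rport,
			  struct nvmefc_ls_req *lsreq) { }

static void demo_fcp_abort(struct nvme_fc_local_port *lport,
			   struct nvme_fc_remote_port *rport,
			   void *hw_queue_handle, struct nvmefc_fcp_req *freq) { }

static struct nvme_fc_port_template demo_nvme_template = {
	.localport_delete	= demo_localport_delete,
	.remoteport_delete	= demo_remoteport_delete,
	.create_queue		= demo_create_queue,
	.delete_queue		= demo_delete_queue,
	.ls_req			= demo_ls_req,
	.fcp_io			= demo_fcp_io,
	.ls_abort		= demo_ls_abort,
	.fcp_abort		= demo_fcp_abort,
	.max_hw_queues		= 1,		/* placeholder limits */
	.max_sgl_segments	= 64,
	.max_dif_sgl_segments	= 64,
	.dma_boundary		= 0xFFFFFFFF,
	.local_priv_sz		= 0,		/* room for per-port driver data */
	.remote_priv_sz		= 0,
	.lsrqst_priv_sz		= 0,
	.fcprqst_priv_sz	= 0,
};

Registration of a localport against such a template is sketched again below, after the lpfc_init.c probe hunk where lpfc_nvme_create_localport() is called.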

Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <james.smart@broadcom.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
commit 01649561a8 (parent a0f2d3ef37)
Author: James Smart <james.smart@broadcom.com>
Date: 2017-02-12 13:52:32 -08:00
Committed by: Martin K. Petersen
13 changed files with 2411 additions and 38 deletions


@@ -28,6 +28,7 @@ endif
obj-$(CONFIG_SCSI_LPFC) := lpfc.o
lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o \
lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o \
lpfc_vport.o lpfc_debugfs.o lpfc_bsg.o
lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o \
lpfc_hbadisc.o lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o \
lpfc_scsi.o lpfc_attr.o lpfc_vport.o lpfc_debugfs.o lpfc_bsg.o \
lpfc_nvme.o


@@ -123,6 +123,11 @@ struct perf_prof {
uint16_t wqidx[40];
};
/*
* Provide for FC4 TYPE x28 - NVME. The
* bit mask for FCP and NVME is 0x8 identically
* because they are 32 bit positions distance.
*/
#define LPFC_FC4_TYPE_BITMASK 0x00000100
/* Provide DMA memory definitions the driver uses per port instance. */
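
A quick aside on the comment above, assuming the standard FC-GS FC-4 TYPEs bitmap (256 bits carried as 32-bit words, bit index equal to the TYPE value): FCP is TYPE 0x08 and NVME is TYPE 0x28, exactly 32 bit positions apart, so both land on bit 8 of their respective words and share the per-word mask 0x00000100 that LPFC_FC4_TYPE_BITMASK encodes. A tiny illustration:

/* Illustrative only: word/bit placement of an FC-4 TYPE value in a
 * 256-bit FC-4 TYPEs bitmap built from 32-bit words.
 */
#include <stdint.h>
#include <stdio.h>

static void fc4_type_pos(uint8_t type)
{
	unsigned int word = type >> 5;		/* which 32-bit word   */
	uint32_t mask = 1u << (type & 31);	/* bit within the word */

	printf("TYPE 0x%02x -> word %u, mask 0x%08x\n", type, word, mask);
}

int main(void)
{
	fc4_type_pos(0x08);	/* FCP:  word 0, mask 0x00000100 */
	fc4_type_pos(0x28);	/* NVME: word 1, mask 0x00000100 */
	return 0;
}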


@@ -518,4 +518,14 @@ int lpfc_sli4_dump_page_a0(struct lpfc_hba *phba, struct lpfcMboxq *mbox);
void lpfc_mbx_cmpl_rdp_page_a0(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb);
/* NVME interfaces. */
void lpfc_nvme_unregister_port(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp);
int lpfc_nvme_register_port(struct lpfc_vport *vport,
struct lpfc_nodelist *ndlp);
int lpfc_nvme_create_localport(struct lpfc_vport *vport);
void lpfc_nvme_destroy_localport(struct lpfc_vport *vport);
void lpfc_nvme_update_localport(struct lpfc_vport *vport);
void lpfc_nvme_mod_param_dep(struct lpfc_hba *phba);
void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba,
struct lpfc_iocbq *cmdiocb,
struct lpfc_wcqe_complete *abts_cmpl);


@@ -1412,7 +1412,7 @@ lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
if (((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
(phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) &&
(context == FC_TYPE_NVME)) {
/* todo: init: revise localport nvme attributes */
lpfc_nvme_update_localport(vport);
CtReq->un.rff.type_code = context;
} else if (((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||


@@ -2618,7 +2618,9 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
!(vport->fc_flag & FC_PT2PT_PLOGI)) {
phba->pport->fc_myDID = 0;
/* todo: init: revise localport nvme attributes */
if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
(phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
lpfc_nvme_update_localport(phba->pport);
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (mbox) {


@@ -909,7 +909,9 @@ lpfc_linkdown(struct lpfc_hba *phba)
vports[i]->fc_myDID = 0;
/* todo: init: revise localport nvme attributes */
if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
(phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
lpfc_nvme_update_localport(vports[i]);
}
}
lpfc_destroy_vport_work_array(phba, vports);
@@ -3580,7 +3582,9 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
spin_unlock_irq(shost->host_lock);
vport->fc_myDID = 0;
/* todo: init: revise localport nvme attributes */
if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
(phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
lpfc_nvme_update_localport(vport);
goto out;
}
@@ -3950,7 +3954,8 @@ lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
(phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME))
lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0, FC_TYPE_NVME);
lpfc_ns_cmd(vport, SLI_CTNS_RFF_ID, 0,
FC_TYPE_NVME);
/* Issue SCR just before NameServer GID_FT Query */
lpfc_issue_els_scr(vport, SCR_DID, 0);
@@ -4144,7 +4149,7 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
((ndlp->nlp_fc4_type & NLP_FC4_NVME) ||
(ndlp->nlp_DID == Fabric_DID))) {
vport->phba->nport_event_cnt++;
/* todo: init: unregister rport from nvme */
lpfc_nvme_unregister_port(vport, ndlp);
}
}
@@ -4169,7 +4174,7 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* the register.
*/
vport->phba->nport_event_cnt++;
/* todo: init: register rport with nvme */
lpfc_nvme_register_port(vport, ndlp);
}
}
}
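
lpfc_nvme_register_port() and lpfc_nvme_unregister_port() (called above) are the driver's wrappers around the nvme_fc remoteport calls; their bodies are in the suppressed lpfc_nvme.c diff. A minimal sketch of that half of the binding, assuming the nvme_fc_register_remoteport()/nvme_fc_unregister_remoteport() API, with the discovered node's WWNN/WWPN/D_ID passed in as plain parameters instead of lpfc's ndlp fields:

#include <linux/nvme-fc-driver.h>

/* Sketch: expose one discovered FC node as an nvme-fc remoteport on an
 * already-registered localport.
 */
static int demo_register_remoteport(struct nvme_fc_local_port *localport,
				    u64 wwnn, u64 wwpn, u32 d_id,
				    struct nvme_fc_remote_port **rport)
{
	struct nvme_fc_port_info rpinfo = {
		.node_name = wwnn,
		.port_name = wwpn,
		.port_id   = d_id,
		/* role as discovered: target and/or discovery controller */
		.port_role = FC_PORT_ROLE_NVME_TARGET,
	};

	return nvme_fc_register_remoteport(localport, &rpinfo, rport);
}

/* Teardown: the transport quiesces I/O on the association and later calls
 * the template's remoteport_delete handler.
 */
static void demo_unregister_remoteport(struct nvme_fc_remote_port *rport)
{
	nvme_fc_unregister_remoteport(rport);
}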


@@ -3128,7 +3128,6 @@ static void
lpfc_scsi_free(struct lpfc_hba *phba)
{
struct lpfc_scsi_buf *sb, *sb_next;
struct lpfc_iocbq *io, *io_next;
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
return;
@@ -3158,14 +3157,6 @@ lpfc_scsi_free(struct lpfc_hba *phba)
phba->total_scsi_bufs--;
}
spin_unlock(&phba->scsi_buf_list_get_lock);
/* Release all the lpfc_iocbq entries maintained by this host. */
list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
list_del(&io->list);
kfree(io);
phba->total_iocbq_bufs--;
}
spin_unlock_irq(&phba->hbalock);
}
/**
@@ -3180,7 +3171,6 @@ static void
lpfc_nvme_free(struct lpfc_hba *phba)
{
struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
struct lpfc_iocbq *io, *io_next;
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
return;
@@ -3209,14 +3199,6 @@ lpfc_nvme_free(struct lpfc_hba *phba)
phba->total_nvme_bufs--;
}
spin_unlock(&phba->nvme_buf_list_get_lock);
/* Release all the lpfc_iocbq entries maintained by this host. */
list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
list_del(&io->list);
kfree(io);
phba->total_iocbq_bufs--;
}
spin_unlock_irq(&phba->hbalock);
}
/**
@@ -10685,7 +10667,23 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Perform post initialization setup */
lpfc_post_init_setup(phba);
/* todo: init: register port with nvme */
/* NVME support in FW earlier in the driver load corrects the
* FC4 type making a check for nvme_support unnecessary.
*/
if ((phba->nvmet_support == 0) &&
(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
/* Create NVME binding with nvme_fc_transport. This
* ensures the vport is initialized.
*/
error = lpfc_nvme_create_localport(vport);
if (error) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"6004 NVME registration failed, "
"error x%x\n",
error);
goto out_disable_intr;
}
}
/* check for firmware upgrade or downgrade */
if (phba->cfg_request_firmware_upgrade)
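
The probe path above only reports failure; the actual localport creation is in lpfc_nvme_create_localport(), part of the suppressed lpfc_nvme.c diff. A plausible minimal shape of that routine, assuming the lpfc private headers, the scsi_transport_fc wwn_to_u64() helper, and the placeholder template sketched under the commit message; field names such as vport->localport are used here for illustration:

#include <scsi/scsi_transport_fc.h>
#include <linux/nvme-fc-driver.h>
/* Also assumes the lpfc private headers for struct lpfc_vport/lpfc_hba. */

extern struct nvme_fc_port_template demo_nvme_template;	/* sketched earlier */

/* Sketch: describe the physical port by WWNN/WWPN/DID and register it
 * with the nvme_fc transport as an initiator localport.
 */
static int demo_create_localport(struct lpfc_vport *vport)
{
	struct nvme_fc_local_port *localport;
	struct nvme_fc_port_info nfcp_info = {
		.node_name = wwn_to_u64(vport->fc_nodename.u.wwn),
		.port_name = wwn_to_u64(vport->fc_portname.u.wwn),
		.port_role = FC_PORT_ROLE_NVME_INITIATOR,
		.port_id   = vport->fc_myDID,
	};
	int ret;

	ret = nvme_fc_register_localport(&nfcp_info, &demo_nvme_template,
					 &vport->phba->pcidev->dev,
					 &localport);
	if (!ret)
		vport->localport = localport;	/* kept for later teardown */
	return ret;
}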
@@ -10761,8 +10759,8 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
/* Perform ndlp cleanup on the physical port. The nvme localport
* is destroyed after to ensure all rports are io-disabled.
*/
lpfc_nvme_destroy_localport(vport);
lpfc_cleanup(vport);
/* todo: init: unregister port with nvme */
/*
* Bring down the SLI Layer. This step disables all interrupts,
@@ -10781,6 +10779,7 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
*/
lpfc_scsi_free(phba);
lpfc_nvme_free(phba);
lpfc_free_iocb_list(phba);
lpfc_sli4_driver_resource_unset(phba);
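
lpfc_nvme_destroy_localport() (above) has to run after lpfc_cleanup() so every remoteport is already io-disabled, and unregistering a localport completes asynchronously: the transport calls the template's localport_delete handler once the last reference drops. A common pattern, sketched here with placeholder names, is to park on a completion until that callback fires:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/nvme-fc-driver.h>

static DECLARE_COMPLETION(demo_lport_unreg_done);

/* Wired into the port template as .localport_delete. */
static void demo_localport_delete_cb(struct nvme_fc_local_port *localport)
{
	complete(&demo_lport_unreg_done);
}

/* Sketch: unregister the localport and wait (bounded) for the transport
 * to finish tearing it down.
 */
static void demo_destroy_localport(struct nvme_fc_local_port *localport)
{
	reinit_completion(&demo_lport_unreg_done);
	if (!nvme_fc_unregister_localport(localport))
		wait_for_completion_timeout(&demo_lport_unreg_done,
					    msecs_to_jiffies(5000));
}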


@@ -1655,9 +1655,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
(phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
/* We need to update the localport also */
/* todo: init: revise localport nvme
* attributes
*/
lpfc_nvme_update_localport(vport);
}
} else if (ndlp->nlp_fc4_type == 0) {

File diff suppressed because it is too large.


@@ -29,6 +29,12 @@
#define LPFC_NVME_ERSP_LEN 0x20
struct lpfc_nvme_qhandle {
uint32_t index; /* WQ index to use */
uint32_t qidx; /* queue index passed to create */
uint32_t cpu_id; /* current cpu id at time of create */
};
/* Declare nvme-based local and remote port definitions. */
struct lpfc_nvme_lport {
struct lpfc_vport *vport;
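
The qhandle above is what ties an NVME queue, as created by the nvme_fc transport, back to one of the adapter's WQs. A sketch of a create_queue handler filling one in, assuming the nvme_fc create_queue()/delete_queue() signatures; the round-robin spread of I/O queues across a fixed number of channels is an illustrative policy, not necessarily lpfc's exact mapping:

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/nvme-fc-driver.h>

#define DEMO_NVME_IO_CHANNELS	4	/* placeholder WQ/channel count */

struct demo_nvme_qhandle {
	uint32_t index;		/* WQ index to use */
	uint32_t qidx;		/* queue index passed to create */
	uint32_t cpu_id;	/* current cpu id at time of create */
};

static int demo_nvme_create_queue(struct nvme_fc_local_port *lport,
				  unsigned int qidx, u16 qsize, void **handle)
{
	struct demo_nvme_qhandle *qh;

	qh = kzalloc(sizeof(*qh), GFP_KERNEL);
	if (!qh)
		return -ENOMEM;

	qh->qidx = qidx;
	qh->cpu_id = raw_smp_processor_id();
	/* qidx 0 is the admin queue; I/O queues 1..N are spread
	 * round-robin across the adapter WQs.
	 */
	qh->index = qidx ? (qidx - 1) % DEMO_NVME_IO_CHANNELS : 0;

	*handle = qh;
	return 0;
}

static void demo_nvme_delete_queue(struct nvme_fc_local_port *lport,
				   unsigned int qidx, void *handle)
{
	kfree(handle);
}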


@@ -6795,6 +6795,22 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
}
}
if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
(phba->nvmet_support == 0)) {
/* register the allocated nvme sgl pool to the port */
rc = lpfc_repost_nvme_sgl_list(phba);
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"6116 Error %d during nvme sgl post "
"operation\n", rc);
/* Some NVME buffers were moved to abort nvme list */
/* A pci function reset will repost them */
rc = -ENODEV;
goto out_destroy_queue;
}
}
/* Post the rpi header region to the device. */
rc = lpfc_sli4_post_all_rpi_hdrs(phba);
if (unlikely(rc)) {
@@ -10492,10 +10508,7 @@ lpfc_sli4_abort_nvme_io(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* ABTS WQE must go to the same WQ as the WQE to be aborted */
abtsiocbp->iocb_flag |= LPFC_IO_NVME;
abtsiocbp->vport = vport;
/* todo: assign wqe_cmpl to lpfc_nvme_abort_fcreq_cmpl
* subsequent patch will add routine. For now, just skip assignment
* as won't ever be called.
*/
abtsiocbp->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
retval = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abtsiocbp);
if (retval == IOCB_ERROR) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
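
The abort path above hands the ABTS WQE a completion handler whose prototype was added to lpfc_crtn.h earlier in this patch; the body is in the suppressed lpfc_nvme.c diff. A minimal sketch of what such a handler might do, assuming the lpfc private headers and helpers (lpfc_printf_log(), lpfc_sli_release_iocbq(), and the lpfc_wcqe_c_status field of the completion WCQE):

/* Sketch only: runs when the ABTS WQE completes; log the outcome and
 * return the abort iocbq to the driver's pool.
 */
static void
demo_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			   struct lpfc_wcqe_complete *abts_cmpl)
{
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"abort iotag x%x completed, status x%x\n",
			cmdiocb->iotag,
			bf_get(lpfc_wcqe_c_status, abts_cmpl));
	lpfc_sli_release_iocbq(phba, cmdiocb);
}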


@@ -753,6 +753,7 @@ int lpfc_sli4_queue_setup(struct lpfc_hba *);
void lpfc_sli4_queue_unset(struct lpfc_hba *);
int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
int lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba);
uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
void lpfc_sli4_free_xri(struct lpfc_hba *, int);
int lpfc_sli4_post_async_mbox(struct lpfc_hba *);


@@ -403,7 +403,21 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
vport->fdmi_port_mask = phba->pport->fdmi_port_mask;
}
/* todo: init: register port with nvme */
if ((phba->nvmet_support == 0) &&
((phba->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
(phba->cfg_enable_fc4_type == LPFC_ENABLE_NVME))) {
/* Create NVME binding with nvme_fc_transport. This
* ensures the vport is initialized.
*/
rc = lpfc_nvme_create_localport(vport);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"6003 %s status x%x\n",
"NVME registration failed, ",
rc);
goto error_out;
}
}
/*
* In SLI4, the vpi must be activated before it can be used