scsi: lpfc: Implement common IO buffers between NVME and SCSI
Currently, both NVME and SCSI get their IO buffers from separate pools. XRIs are associated 1:1 with IO buffers, so XRIs are also split between protocols.

Eliminate the independent pools and use a single pool. Each buffer structure now has a common section and a protocol section. Per-protocol routines for SGL initialization are removed and replaced by common routines. Initialization of the buffers is done only on the common area. All other fields, which are protocol specific, are initialized when the buffer is allocated for use in the per-protocol allocation routine.

In the past, the SCSI side allocated IO buffers as part of slave_alloc calls until the maximum XRIs for SCSI was reached. As all XRIs are now common and may be used for either protocol, allocation for everything is done as part of adapter initialization and the SCSI side takes no action in slave_alloc.

As XRIs are no longer split, the lpfc_xri_split module parameter is removed.

Adapters based on SLI3 will continue to use the older scsi_buf_list_get/put routines. All SLI4 adapters utilize the new IO buffer scheme.

Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent: e960f5ab40
commit: 0794d601d1
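The core idea of the patch can be seen in a small sketch. The common fields (list linkage, DMA buffer, SGL pointers, and the XRI/iotag carried in `cur_iocbq`) are set once when the buffer is created; the protocol fields are rewritten each time a buffer is handed out, since the previous user may have been the other protocol. The layout below is a minimal illustration reduced from the real driver structures in the diff, not the full definitions; `LPFC_COMMON_IO_BUF_SZ` is the fixed allocation size the patch checks both protocol views against:

```c
/* Illustrative sketch only -- field set reduced from the real lpfc code.
 * struct lpfc_scsi_buf and struct lpfc_nvme_buf both begin with the same
 * common section and both must fit in LPFC_COMMON_IO_BUF_SZ, so one
 * allocation can serve either protocol.
 */
struct lpfc_nvme_buf {
	/* Common fields: initialized once in lpfc_new_common_buf() */
	struct list_head list;		/* linkage on the common get/put lists */
	void *data;			/* DMA-able cmd/rsp/SGL buffer */
	dma_addr_t dma_handle;
	dma_addr_t dma_phys_sgl;
	struct sli4_sge *dma_sgl;
	struct lpfc_iocbq cur_iocbq;	/* XRI and iotag live here */

	/* NVME specific fields: reinitialized on every allocation in
	 * lpfc_get_nvme_buf(), because the prior user may have been SCSI.
	 */
	struct nvmefc_fcp_req *nvmeCmd;
	/* ... */
};

/* Allocation is protocol-agnostic; both views share one object:
 *	lpfc_ncmd = kzalloc(LPFC_COMMON_IO_BUF_SZ, GFP_KERNEL);
 */
```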
@@ -617,8 +617,6 @@ struct lpfc_ras_fwlog {
 struct lpfc_hba {
 	/* SCSI interface function jump table entries */
-	int (*lpfc_new_scsi_buf)
-		(struct lpfc_vport *, int);
 	struct lpfc_scsi_buf * (*lpfc_get_scsi_buf)
 		(struct lpfc_hba *, struct lpfc_nodelist *);
 	int (*lpfc_scsi_prep_dma_buf)
@@ -875,7 +873,6 @@ struct lpfc_hba {
 	uint32_t cfg_enable_fc4_type;
 	uint32_t cfg_enable_bbcr;	/* Enable BB Credit Recovery */
 	uint32_t cfg_enable_dpp;	/* Enable Direct Packet Push */
-	uint32_t cfg_xri_split;
 #define LPFC_ENABLE_FCP  1
 #define LPFC_ENABLE_NVME 2
 #define LPFC_ENABLE_BOTH 3
@@ -970,13 +967,13 @@ struct lpfc_hba {
 	struct list_head lpfc_scsi_buf_list_get;
 	struct list_head lpfc_scsi_buf_list_put;
 	uint32_t total_scsi_bufs;
-	spinlock_t nvme_buf_list_get_lock;  /* NVME buf alloc list lock */
-	spinlock_t nvme_buf_list_put_lock;  /* NVME buf free list lock */
-	struct list_head lpfc_nvme_buf_list_get;
-	struct list_head lpfc_nvme_buf_list_put;
-	uint32_t total_nvme_bufs;
-	uint32_t get_nvme_bufs;
-	uint32_t put_nvme_bufs;
+	spinlock_t common_buf_list_get_lock;  /* Common buf alloc list lock */
+	spinlock_t common_buf_list_put_lock;  /* Common buf free list lock */
+	struct list_head lpfc_common_buf_list_get;
+	struct list_head lpfc_common_buf_list_put;
+	uint32_t total_common_bufs;
+	uint32_t get_common_bufs;
+	uint32_t put_common_bufs;
 	struct list_head lpfc_iocb_list;
 	uint32_t total_iocbq_bufs;
 	struct list_head active_rrq_list;
@@ -334,11 +334,10 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 
 		rcu_read_lock();
 		scnprintf(tmp, sizeof(tmp),
-			  "XRI Dist lpfc%d Total %d NVME %d SCSI %d ELS %d\n",
+			  "XRI Dist lpfc%d Total %d IO %d ELS %d\n",
 			  phba->brd_no,
 			  phba->sli4_hba.max_cfg_param.max_xri,
-			  phba->sli4_hba.nvme_xri_max,
-			  phba->sli4_hba.scsi_xri_max,
+			  phba->sli4_hba.common_xri_max,
 			  lpfc_sli4_get_els_iocb_cnt(phba));
 		if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
 			goto buffer_done;
@@ -3730,22 +3729,6 @@ LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_FCP,
 	    LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
 	    "Enable FC4 Protocol support - FCP / NVME");
 
-/*
- * lpfc_xri_split: Defines the division of XRI resources between SCSI and NVME
- * This parameter is only used if:
- *	lpfc_enable_fc4_type is 3 - register both FCP and NVME and
- *	port is not configured for NVMET.
- *
- * ELS/CT always get 10% of XRIs, up to a maximum of 250
- * The remaining XRIs get split up based on lpfc_xri_split per port:
- *
- * Supported Values are in percentages
- * the xri_split value is the percentage the SCSI port will get. The remaining
- * percentage will go to NVME.
- */
-LPFC_ATTR_R(xri_split, 50, 10, 90,
-	    "Percentage of FCP XRI resources versus NVME");
-
 /*
 # lpfc_log_verbose: Only turn this flag on if you are willing to risk being
 # deluged with LOTS of information.
@@ -5704,7 +5687,6 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_nodev_tmo,
 	&dev_attr_lpfc_devloss_tmo,
 	&dev_attr_lpfc_enable_fc4_type,
-	&dev_attr_lpfc_xri_split,
 	&dev_attr_lpfc_fcp_class,
 	&dev_attr_lpfc_use_adisc,
 	&dev_attr_lpfc_first_burst_size,
@@ -6865,7 +6847,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 
 	phba->cfg_soft_wwnn = 0L;
 	phba->cfg_soft_wwpn = 0L;
-	lpfc_xri_split_init(phba, lpfc_xri_split);
 	lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
 	lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
 	lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
@@ -520,8 +520,10 @@ int lpfc_sli4_read_config(struct lpfc_hba *);
 void lpfc_sli4_node_prep(struct lpfc_hba *);
 int lpfc_sli4_els_sgl_update(struct lpfc_hba *phba);
 int lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba);
-int lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba);
-int lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba);
+int lpfc_sli4_common_sgl_update(struct lpfc_hba *phba);
+int lpfc_sli4_post_common_sgl_list(struct lpfc_hba *phba,
+	struct list_head *blist, int xricnt);
+int lpfc_new_common_buf(struct lpfc_hba *phba, int num_to_alloc);
 void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
 uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *);
 int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t);
@@ -1092,13 +1092,15 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
 
 	spin_unlock_irq(&phba->hbalock);
 
-	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
-		psb->pCmd = NULL;
-		psb->status = IOSTAT_SUCCESS;
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
+		list_for_each_entry_safe(psb, psb_next, &aborts, list) {
+			psb->pCmd = NULL;
+			psb->status = IOSTAT_SUCCESS;
+		}
+		spin_lock_irqsave(&phba->common_buf_list_put_lock, iflag);
+		list_splice(&aborts, &phba->lpfc_common_buf_list_put);
+		spin_unlock_irqrestore(&phba->common_buf_list_put_lock, iflag);
 	}
-	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
-	list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
-	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
 
 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
 		cnt = 0;
@@ -1107,10 +1109,10 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
 			psb->status = IOSTAT_SUCCESS;
 			cnt++;
 		}
-		spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
-		phba->put_nvme_bufs += cnt;
-		list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put);
-		spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
+		spin_lock_irqsave(&phba->common_buf_list_put_lock, iflag);
+		phba->put_common_bufs += cnt;
+		list_splice(&nvme_aborts, &phba->lpfc_common_buf_list_put);
+		spin_unlock_irqrestore(&phba->common_buf_list_put_lock, iflag);
 
 		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
 			ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
@@ -3123,6 +3125,18 @@ lpfc_online(struct lpfc_hba *phba)
 					"6132 NVME restore reg failed "
 					"on nvmei error x%x\n", error);
 		}
+		/* Don't post more new bufs if repost already recovered
+		 * the nvme sgls.
+		 */
+		if (phba->sli4_hba.common_xri_cnt == 0) {
+			i = lpfc_new_common_buf(phba,
+						phba->sli4_hba.common_xri_max);
+			if (i == 0) {
+				lpfc_unblock_mgmt_io(phba);
+				return 1;
+			}
+			phba->total_common_bufs += i;
+		}
 	} else {
 		lpfc_sli_queue_init(phba);
 		if (lpfc_sli_hba_setup(phba)) {	/* Initialize SLI2/SLI3 HBA */
@@ -3355,50 +3369,49 @@ lpfc_scsi_free(struct lpfc_hba *phba)
 	spin_unlock(&phba->scsi_buf_list_get_lock);
 	spin_unlock_irq(&phba->hbalock);
 }
 
 /**
- * lpfc_nvme_free - Free all the NVME buffers and IOCBs from driver lists
+ * lpfc_common_free - Free all the IO buffers and IOCBs from driver lists
  * @phba: pointer to lpfc hba data structure.
  *
- * This routine is to free all the NVME buffers and IOCBs from the driver
+ * This routine is to free all the IO buffers and IOCBs from the driver
  * list back to kernel. It is called from lpfc_pci_remove_one to free
  * the internal resources before the device is removed from the system.
  **/
 static void
-lpfc_nvme_free(struct lpfc_hba *phba)
+lpfc_common_free(struct lpfc_hba *phba)
 {
 	struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
 
 	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
 		return;
 
 	spin_lock_irq(&phba->hbalock);
 
 	/* Release all the lpfc_nvme_bufs maintained by this host. */
-	spin_lock(&phba->nvme_buf_list_put_lock);
+	spin_lock(&phba->common_buf_list_put_lock);
 	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
-				 &phba->lpfc_nvme_buf_list_put, list) {
+				 &phba->lpfc_common_buf_list_put, list) {
 		list_del(&lpfc_ncmd->list);
-		phba->put_nvme_bufs--;
+		phba->put_common_bufs--;
 		dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
 			      lpfc_ncmd->dma_handle);
 		kfree(lpfc_ncmd);
-		phba->total_nvme_bufs--;
+		phba->total_common_bufs--;
 	}
-	spin_unlock(&phba->nvme_buf_list_put_lock);
+	spin_unlock(&phba->common_buf_list_put_lock);
 
-	spin_lock(&phba->nvme_buf_list_get_lock);
+	spin_lock(&phba->common_buf_list_get_lock);
 	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
-				 &phba->lpfc_nvme_buf_list_get, list) {
+				 &phba->lpfc_common_buf_list_get, list) {
 		list_del(&lpfc_ncmd->list);
-		phba->get_nvme_bufs--;
+		phba->get_common_bufs--;
 		dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
 			      lpfc_ncmd->dma_handle);
 		kfree(lpfc_ncmd);
-		phba->total_nvme_bufs--;
+		phba->total_common_bufs--;
 	}
-	spin_unlock(&phba->nvme_buf_list_get_lock);
+	spin_unlock(&phba->common_buf_list_get_lock);
 	spin_unlock_irq(&phba->hbalock);
 }
 
 /**
  * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
  * @phba: pointer to lpfc hba data structure.
@@ -3641,7 +3654,7 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
 }
 
 /**
- * lpfc_sli4_scsi_sgl_update - update xri-sgl sizing and mapping
+ * lpfc_sli4_common_sgl_update - update xri-sgl sizing and mapping
  * @phba: pointer to lpfc hba data structure.
  *
  * This routine first calculates the sizes of the current els and allocated
@@ -3653,94 +3666,214 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
  *   0 - successful (for now, it always returns 0)
  **/
 int
-lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba)
+lpfc_sli4_common_sgl_update(struct lpfc_hba *phba)
 {
-	struct lpfc_scsi_buf *psb, *psb_next;
-	uint16_t i, lxri, els_xri_cnt, scsi_xri_cnt;
-	LIST_HEAD(scsi_sgl_list);
-	int rc;
+	struct lpfc_nvme_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
+	uint16_t i, lxri, els_xri_cnt;
+	uint16_t common_xri_cnt, common_xri_max;
+	LIST_HEAD(common_sgl_list);
+	int rc, cnt;
 
+	phba->total_common_bufs = 0;
+	phba->get_common_bufs = 0;
+	phba->put_common_bufs = 0;
+
 	/*
-	 * update on pci function's els xri-sgl list
+	 * update on pci function's allocated nvme xri-sgl list
 	 */
+
+	/* maximum number of xris available for nvme buffers */
 	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
-	phba->total_scsi_bufs = 0;
-
-	/*
-	 * update on pci function's allocated scsi xri-sgl list
-	 */
-	/* maximum number of xris available for scsi buffers */
-	phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri -
-				      els_xri_cnt;
-
-	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
-		return 0;
-
-	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
-		phba->sli4_hba.scsi_xri_max =  /* Split them up */
-			(phba->sli4_hba.scsi_xri_max *
-			 phba->cfg_xri_split) / 100;
-
-	spin_lock_irq(&phba->scsi_buf_list_get_lock);
-	spin_lock(&phba->scsi_buf_list_put_lock);
-	list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
-	list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
-	spin_unlock(&phba->scsi_buf_list_put_lock);
-	spin_unlock_irq(&phba->scsi_buf_list_get_lock);
+	common_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
+	phba->sli4_hba.common_xri_max = common_xri_max;
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-			"6060 Current allocated SCSI xri-sgl count:%d, "
-			"maximum SCSI xri count:%d (split:%d)\n",
-			phba->sli4_hba.scsi_xri_cnt,
-			phba->sli4_hba.scsi_xri_max, phba->cfg_xri_split);
+			"6074 Current allocated XRI sgl count:%d, "
+			"maximum XRI count:%d\n",
+			phba->sli4_hba.common_xri_cnt,
+			phba->sli4_hba.common_xri_max);
 
-	if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
-		/* max scsi xri shrinked below the allocated scsi buffers */
-		scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
-					phba->sli4_hba.scsi_xri_max;
-		/* release the extra allocated scsi buffers */
-		for (i = 0; i < scsi_xri_cnt; i++) {
-			list_remove_head(&scsi_sgl_list, psb,
-					 struct lpfc_scsi_buf, list);
-			if (psb) {
+	spin_lock_irq(&phba->common_buf_list_get_lock);
+	spin_lock(&phba->common_buf_list_put_lock);
+	list_splice_init(&phba->lpfc_common_buf_list_get, &common_sgl_list);
+	list_splice(&phba->lpfc_common_buf_list_put, &common_sgl_list);
+	cnt = phba->get_common_bufs + phba->put_common_bufs;
+	phba->get_common_bufs = 0;
+	phba->put_common_bufs = 0;
+	spin_unlock(&phba->common_buf_list_put_lock);
+	spin_unlock_irq(&phba->common_buf_list_get_lock);
+
+	if (phba->sli4_hba.common_xri_cnt > phba->sli4_hba.common_xri_max) {
+		/* max nvme xri shrunk below the allocated nvme buffers */
+		spin_lock_irq(&phba->common_buf_list_get_lock);
+		common_xri_cnt = phba->sli4_hba.common_xri_cnt -
+					phba->sli4_hba.common_xri_max;
+		spin_unlock_irq(&phba->common_buf_list_get_lock);
+		/* release the extra allocated nvme buffers */
+		for (i = 0; i < common_xri_cnt; i++) {
+			list_remove_head(&common_sgl_list, lpfc_ncmd,
+					 struct lpfc_nvme_buf, list);
+			if (lpfc_ncmd) {
 				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
-					      psb->data, psb->dma_handle);
-				kfree(psb);
+					      lpfc_ncmd->data,
+					      lpfc_ncmd->dma_handle);
+				kfree(lpfc_ncmd);
 			}
 		}
-		spin_lock_irq(&phba->scsi_buf_list_get_lock);
-		phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
-		spin_unlock_irq(&phba->scsi_buf_list_get_lock);
+		spin_lock_irq(&phba->common_buf_list_get_lock);
+		phba->sli4_hba.common_xri_cnt -= common_xri_cnt;
+		spin_unlock_irq(&phba->common_buf_list_get_lock);
 	}
 
-	/* update xris associated to remaining allocated scsi buffers */
-	psb = NULL;
-	psb_next = NULL;
-	list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) {
+	/* update xris associated to remaining allocated nvme buffers */
+	lpfc_ncmd = NULL;
+	lpfc_ncmd_next = NULL;
+	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+				 &common_sgl_list, list) {
 		lxri = lpfc_sli4_next_xritag(phba);
 		if (lxri == NO_XRI) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-					"2560 Failed to allocate xri for "
-					"scsi buffer\n");
+					"6075 Failed to allocate xri for "
+					"nvme buffer\n");
 			rc = -ENOMEM;
 			goto out_free_mem;
 		}
-		psb->cur_iocbq.sli4_lxritag = lxri;
-		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+		lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
+		lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
 	}
-	spin_lock_irq(&phba->scsi_buf_list_get_lock);
-	spin_lock(&phba->scsi_buf_list_put_lock);
-	list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
-	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
-	spin_unlock(&phba->scsi_buf_list_put_lock);
-	spin_unlock_irq(&phba->scsi_buf_list_get_lock);
+	spin_lock_irq(&phba->common_buf_list_get_lock);
+	spin_lock(&phba->common_buf_list_put_lock);
+	list_splice_init(&common_sgl_list, &phba->lpfc_common_buf_list_get);
+	phba->get_common_bufs = cnt;
+	INIT_LIST_HEAD(&phba->lpfc_common_buf_list_put);
+	spin_unlock(&phba->common_buf_list_put_lock);
+	spin_unlock_irq(&phba->common_buf_list_get_lock);
 	return 0;
 
 out_free_mem:
-	lpfc_scsi_free(phba);
+	lpfc_common_free(phba);
 	return rc;
 }
 
+/**
+ * lpfc_new_common_buf - IO buffer allocator for HBA with SLI4 IF spec
+ * @vport: The virtual port for which this call being executed.
+ * @num_to_allocate: The requested number of buffers to allocate.
+ *
+ * This routine allocates nvme buffers for device with SLI-4 interface spec,
+ * the nvme buffer contains all the necessary information needed to initiate
+ * an I/O. After allocating up to @num_to_allocate IO buffers and put
+ * them on a list, it post them to the port by using SGL block post.
+ *
+ * Return codes:
+ *   int - number of nvme buffers that were allocated and posted.
+ *   0 = failure, less than num_to_alloc is a partial failure.
+ **/
+int
+lpfc_new_common_buf(struct lpfc_hba *phba, int num_to_alloc)
+{
+	struct lpfc_nvme_buf *lpfc_ncmd;
+	struct lpfc_iocbq *pwqeq;
+	uint16_t iotag, lxri = 0;
+	int bcnt, num_posted;
+	LIST_HEAD(prep_nblist);
+	LIST_HEAD(post_nblist);
+	LIST_HEAD(nvme_nblist);
+
+	/* Sanity check to ensure our sizing is right for both SCSI and NVME */
+	if ((sizeof(struct lpfc_scsi_buf) > LPFC_COMMON_IO_BUF_SZ) ||
+	    (sizeof(struct lpfc_nvme_buf) > LPFC_COMMON_IO_BUF_SZ)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+				"6426 Common buffer size mismatch: %ld %ld\n",
+				sizeof(struct lpfc_scsi_buf),
+				sizeof(struct lpfc_nvme_buf));
+		return 0;
+	}
+
+	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
+		lpfc_ncmd = kzalloc(LPFC_COMMON_IO_BUF_SZ, GFP_KERNEL);
+		if (!lpfc_ncmd)
+			break;
+		/*
+		 * Get memory from the pci pool to map the virt space to
+		 * pci bus space for an I/O. The DMA buffer includes the
+		 * number of SGE's necessary to support the sg_tablesize.
+		 */
+		lpfc_ncmd->data = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
+						 GFP_KERNEL,
+						 &lpfc_ncmd->dma_handle);
+		if (!lpfc_ncmd->data) {
+			kfree(lpfc_ncmd);
+			break;
+		}
+		memset(lpfc_ncmd->data, 0, phba->cfg_sg_dma_buf_size);
+
+		/*
+		 * 4K Page alignment is CRITICAL to BlockGuard, double check
+		 * to be sure.
+		 */
+		if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
+		    (((unsigned long)(lpfc_ncmd->data) &
+		    (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
+					"3369 Memory alignment err: addr=%lx\n",
+					(unsigned long)lpfc_ncmd->data);
+			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
+			kfree(lpfc_ncmd);
+			break;
+		}
+
+		lxri = lpfc_sli4_next_xritag(phba);
+		if (lxri == NO_XRI) {
+			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
+			kfree(lpfc_ncmd);
+			break;
+		}
+		pwqeq = &lpfc_ncmd->cur_iocbq;
+
+		/* Allocate iotag for lpfc_ncmd->cur_iocbq. */
+		iotag = lpfc_sli_next_iotag(phba, pwqeq);
+		if (iotag == 0) {
+			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
+				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
+			kfree(lpfc_ncmd);
+			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
+					"6121 Failed to allocate IOTAG for"
+					" XRI:0x%x\n", lxri);
+			lpfc_sli4_free_xri(phba, lxri);
+			break;
+		}
+		pwqeq->sli4_lxritag = lxri;
+		pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+		pwqeq->context1 = lpfc_ncmd;
+
+		/* Initialize local short-hand pointers. */
+		lpfc_ncmd->dma_sgl = lpfc_ncmd->data;
+		lpfc_ncmd->dma_phys_sgl = lpfc_ncmd->dma_handle;
+		lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
+
+		/* add the nvme buffer to a post list */
+		list_add_tail(&lpfc_ncmd->list, &post_nblist);
+		spin_lock_irq(&phba->common_buf_list_get_lock);
+		phba->sli4_hba.common_xri_cnt++;
+		spin_unlock_irq(&phba->common_buf_list_get_lock);
+	}
+	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
+			"6114 Allocate %d out of %d requested new NVME "
+			"buffers\n", bcnt, num_to_alloc);
+
+	/* post the list of nvme buffer sgls to port if available */
+	if (!list_empty(&post_nblist))
+		num_posted = lpfc_sli4_post_common_sgl_list(
+				phba, &post_nblist, bcnt);
+	else
+		num_posted = 0;
+
+	return num_posted;
+}
+
 static uint64_t
 lpfc_get_wwpn(struct lpfc_hba *phba)
 {
@@ -3776,111 +3909,6 @@ lpfc_get_wwpn(struct lpfc_hba *phba)
 	return rol64(wwn, 32);
 }
 
-/**
- * lpfc_sli4_nvme_sgl_update - update xri-sgl sizing and mapping
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine first calculates the sizes of the current els and allocated
- * scsi sgl lists, and then goes through all sgls to updates the physical
- * XRIs assigned due to port function reset. During port initialization, the
- * current els and allocated scsi sgl lists are 0s.
- *
- * Return codes
- *   0 - successful (for now, it always returns 0)
- **/
-int
-lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
-{
-	struct lpfc_nvme_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
-	uint16_t i, lxri, els_xri_cnt;
-	uint16_t nvme_xri_cnt, nvme_xri_max;
-	LIST_HEAD(nvme_sgl_list);
-	int rc, cnt;
-
-	phba->total_nvme_bufs = 0;
-	phba->get_nvme_bufs = 0;
-	phba->put_nvme_bufs = 0;
-
-	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
-		return 0;
-	/*
-	 * update on pci function's allocated nvme xri-sgl list
-	 */
-
-	/* maximum number of xris available for nvme buffers */
-	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
-	nvme_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
-	phba->sli4_hba.nvme_xri_max = nvme_xri_max;
-	phba->sli4_hba.nvme_xri_max -= phba->sli4_hba.scsi_xri_max;
-
-	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
-			"6074 Current allocated NVME xri-sgl count:%d, "
-			"maximum NVME xri count:%d\n",
-			phba->sli4_hba.nvme_xri_cnt,
-			phba->sli4_hba.nvme_xri_max);
-
-	spin_lock_irq(&phba->nvme_buf_list_get_lock);
-	spin_lock(&phba->nvme_buf_list_put_lock);
-	list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list);
-	list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list);
-	cnt = phba->get_nvme_bufs + phba->put_nvme_bufs;
-	phba->get_nvme_bufs = 0;
-	phba->put_nvme_bufs = 0;
-	spin_unlock(&phba->nvme_buf_list_put_lock);
-	spin_unlock_irq(&phba->nvme_buf_list_get_lock);
-
-	if (phba->sli4_hba.nvme_xri_cnt > phba->sli4_hba.nvme_xri_max) {
-		/* max nvme xri shrunk below the allocated nvme buffers */
-		spin_lock_irq(&phba->nvme_buf_list_get_lock);
-		nvme_xri_cnt = phba->sli4_hba.nvme_xri_cnt -
-					phba->sli4_hba.nvme_xri_max;
-		spin_unlock_irq(&phba->nvme_buf_list_get_lock);
-		/* release the extra allocated nvme buffers */
-		for (i = 0; i < nvme_xri_cnt; i++) {
-			list_remove_head(&nvme_sgl_list, lpfc_ncmd,
-					 struct lpfc_nvme_buf, list);
-			if (lpfc_ncmd) {
-				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
-					      lpfc_ncmd->data,
-					      lpfc_ncmd->dma_handle);
-				kfree(lpfc_ncmd);
-			}
-		}
-		spin_lock_irq(&phba->nvme_buf_list_get_lock);
-		phba->sli4_hba.nvme_xri_cnt -= nvme_xri_cnt;
-		spin_unlock_irq(&phba->nvme_buf_list_get_lock);
-	}
-
-	/* update xris associated to remaining allocated nvme buffers */
-	lpfc_ncmd = NULL;
-	lpfc_ncmd_next = NULL;
-	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
-				 &nvme_sgl_list, list) {
-		lxri = lpfc_sli4_next_xritag(phba);
-		if (lxri == NO_XRI) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-					"6075 Failed to allocate xri for "
-					"nvme buffer\n");
-			rc = -ENOMEM;
-			goto out_free_mem;
-		}
-		lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
-		lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
-	}
-	spin_lock_irq(&phba->nvme_buf_list_get_lock);
-	spin_lock(&phba->nvme_buf_list_put_lock);
-	list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get);
-	phba->get_nvme_bufs = cnt;
-	INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
-	spin_unlock(&phba->nvme_buf_list_put_lock);
-	spin_unlock_irq(&phba->nvme_buf_list_get_lock);
-	return 0;
-
-out_free_mem:
-	lpfc_nvme_free(phba);
-	return rc;
-}
-
 /**
  * lpfc_create_port - Create an FC port
  * @phba: pointer to lpfc hba data structure.
@@ -5819,24 +5847,19 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
 			"NVME" : " "),
 			(phba->nvmet_support ? "NVMET" : " "));
 
-	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
-		/* Initialize the scsi buffer list used by driver for scsi IO */
-		spin_lock_init(&phba->scsi_buf_list_get_lock);
-		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
-		spin_lock_init(&phba->scsi_buf_list_put_lock);
-		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
-	}
+	/* Initialize the IO buffer list used by driver for SLI3 SCSI */
+	spin_lock_init(&phba->scsi_buf_list_get_lock);
+	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
+	spin_lock_init(&phba->scsi_buf_list_put_lock);
+	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
 
-	if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
-		(phba->nvmet_support == 0)) {
-		/* Initialize the NVME buffer list used by driver for NVME IO */
-		spin_lock_init(&phba->nvme_buf_list_get_lock);
-		INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get);
-		phba->get_nvme_bufs = 0;
-		spin_lock_init(&phba->nvme_buf_list_put_lock);
-		INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
-		phba->put_nvme_bufs = 0;
-	}
+	/* Initialize the IO buffer list used by driver for SLI4 SCSI/NVME */
+	spin_lock_init(&phba->common_buf_list_get_lock);
+	INIT_LIST_HEAD(&phba->lpfc_common_buf_list_get);
+	phba->get_common_bufs = 0;
+	spin_lock_init(&phba->common_buf_list_put_lock);
+	INIT_LIST_HEAD(&phba->lpfc_common_buf_list_put);
+	phba->put_common_bufs = 0;
 
 	/* Initialize the fabric iocb list */
 	INIT_LIST_HEAD(&phba->fabric_iocb_list);
@@ -5877,7 +5900,7 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
 static int
 lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
 {
-	int rc;
+	int rc, entry_sz;
 
 	/*
 	 * Initialize timers used by driver
@@ -5922,6 +5945,11 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
 	lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
 	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
 
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		entry_sz = sizeof(struct sli4_sge);
+	else
+		entry_sz = sizeof(struct ulp_bde64);
+
 	/* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
 	if (phba->cfg_enable_bg) {
 		/*
@@ -5935,7 +5963,7 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
 		 */
 		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
 			sizeof(struct fcp_rsp) +
-			(LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));
+			(LPFC_MAX_SG_SEG_CNT * entry_sz);
 
 		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
 			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;
@@ -5950,7 +5978,7 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
 		 */
 		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
 			sizeof(struct fcp_rsp) +
-			((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
+			((phba->cfg_sg_seg_cnt + 2) * entry_sz);
 
 		/* Total BDEs in BPL for scsi_sg_list */
 		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
@@ -6875,11 +6903,8 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
 	/* els xri-sgl book keeping */
 	phba->sli4_hba.els_xri_cnt = 0;
 
-	/* scsi xri-buffer book keeping */
-	phba->sli4_hba.scsi_xri_cnt = 0;
-
 	/* nvme xri-buffer book keeping */
-	phba->sli4_hba.nvme_xri_cnt = 0;
+	phba->sli4_hba.common_xri_cnt = 0;
 }
 
 /**
@@ -10556,7 +10581,7 @@ static void
 lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
 {
 	int wait_time = 0;
-	int nvme_xri_cmpl = 1;
+	int common_xri_cmpl = 1;
 	int nvmet_xri_cmpl = 1;
 	int fcp_xri_cmpl = 1;
 	int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
@@ -10575,13 +10600,13 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
 		fcp_xri_cmpl =
 			list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-		nvme_xri_cmpl =
+		common_xri_cmpl =
 			list_empty(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
 		nvmet_xri_cmpl =
 			list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
 	}
 
-	while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl ||
+	while (!fcp_xri_cmpl || !els_xri_cmpl || !common_xri_cmpl ||
 	       !nvmet_xri_cmpl) {
 		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
 			if (!nvmet_xri_cmpl)
@@ -10589,7 +10614,7 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
 					"6424 NVMET XRI exchange busy "
 					"wait time: %d seconds.\n",
 					wait_time/1000);
-			if (!nvme_xri_cmpl)
+			if (!common_xri_cmpl)
 				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 					"6100 NVME XRI exchange busy "
 					"wait time: %d seconds.\n",
@@ -10611,7 +10636,7 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
 			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
 		}
 		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-			nvme_xri_cmpl = list_empty(
+			common_xri_cmpl = list_empty(
 				&phba->sli4_hba.lpfc_abts_nvme_buf_list);
 			nvmet_xri_cmpl = list_empty(
 				&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
@@ -11190,6 +11215,8 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
 	 * corresponding pools here.
 	 */
 	lpfc_scsi_free(phba);
+	lpfc_free_iocb_list(phba);
+
 	lpfc_mem_free_all(phba);
 
 	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
@@ -11767,7 +11794,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 	struct lpfc_hba   *phba;
 	struct lpfc_vport *vport = NULL;
 	struct Scsi_Host  *shost = NULL;
-	int error;
+	int error, len;
 	uint32_t cfg_mode, intr_mode;
 
 	/* Allocate memory for HBA structure */
@@ -11877,19 +11904,32 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
 	/* NVME support in FW earlier in the driver load corrects the
 	 * FC4 type making a check for nvme_support unnecessary.
 	 */
-	if ((phba->nvmet_support == 0) &&
-	    (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
-		/* Create NVME binding with nvme_fc_transport. This
-		 * ensures the vport is initialized. If the localport
-		 * create fails, it should not unload the driver to
-		 * support field issues.
+	if (phba->nvmet_support == 0) {
+		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+			/* Create NVME binding with nvme_fc_transport. This
+			 * ensures the vport is initialized. If the localport
+			 * create fails, it should not unload the driver to
+			 * support field issues.
+			 */
+			error = lpfc_nvme_create_localport(vport);
+			if (error) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"6004 NVME registration "
+						"failed, error x%x\n",
+						error);
+			}
+		}
+		/* Don't post more new bufs if repost already recovered
+		 * the nvme sgls.
 		 */
-		error = lpfc_nvme_create_localport(vport);
-		if (error) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"6004 NVME registration failed, "
-					"error x%x\n",
-					error);
+		if (phba->sli4_hba.common_xri_cnt == 0) {
+			len = lpfc_new_common_buf(
+				phba, phba->sli4_hba.common_xri_max);
+			if (len == 0) {
+				error = -ENOMEM;
+				goto out_disable_intr;
+			}
+			phba->total_common_bufs += len;
 		}
 	}
 
@@ -11989,8 +12029,7 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
 	/* Perform scsi free before driver resource_unset since scsi
 	 * buffers are released to their corresponding pools here.
 	 */
-	lpfc_scsi_free(phba);
-	lpfc_nvme_free(phba);
+	lpfc_common_free(phba);
 	lpfc_free_iocb_list(phba);
 
 	lpfc_unset_driver_resource_phase2(phba);
@@ -783,7 +783,7 @@ lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
 	 * rather than the virtual memory to ease the restore
 	 * operation.
 	 */
-	sgl = lpfc_ncmd->nvme_sgl;
+	sgl = lpfc_ncmd->dma_sgl;
 	sgl->sge_len = cpu_to_le32(nCmd->cmdlen);
 	if (phba->cfg_nvme_embed_cmd) {
 		sgl->addr_hi = 0;
@@ -1291,7 +1291,7 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
 	struct lpfc_hba *phba = vport->phba;
 	struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
 	union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe;
-	struct sli4_sge *sgl = lpfc_ncmd->nvme_sgl;
+	struct sli4_sge *sgl = lpfc_ncmd->dma_sgl;
 	struct scatterlist *data_sg;
 	struct sli4_sge *first_data_sgl;
 	struct ulp_bde64 *bde;
@@ -1380,6 +1380,8 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
 		}
 
 	} else {
+		lpfc_ncmd->seg_cnt = 0;
+
 		/* For this clause to be valid, the payload_length
 		 * and sg_cnt must zero.
 		 */
@@ -1571,7 +1573,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 	freqpriv->nvme_buf = lpfc_ncmd;
 	lpfc_ncmd->nvmeCmd = pnvme_fcreq;
 	lpfc_ncmd->ndlp = ndlp;
-	lpfc_ncmd->start_time = jiffies;
+	lpfc_ncmd->qidx = lpfc_queue_info->qidx;
 
 	/*
 	 * Issue the IO on the WQ indicated by index in the hw_queue_handle.
@@ -1910,422 +1912,25 @@ static struct nvme_fc_port_template lpfc_nvme_template = {
 	.fcprqst_priv_sz = sizeof(struct lpfc_nvme_fcpreq_priv),
 };
 
-/**
- * lpfc_sli4_post_nvme_sgl_block - post a block of nvme sgl list to firmware
- * @phba: pointer to lpfc hba data structure.
- * @nblist: pointer to nvme buffer list.
- * @count: number of scsi buffers on the list.
- *
- * This routine is invoked to post a block of @count scsi sgl pages from a
- * SCSI buffer list @nblist to the HBA using non-embedded mailbox command.
- * No Lock is held.
- *
- **/
-static int
-lpfc_sli4_post_nvme_sgl_block(struct lpfc_hba *phba,
-			      struct list_head *nblist,
-			      int count)
-{
-	struct lpfc_nvme_buf *lpfc_ncmd;
-	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
-	struct sgl_page_pairs *sgl_pg_pairs;
-	void *viraddr;
-	LPFC_MBOXQ_t *mbox;
-	uint32_t reqlen, alloclen, pg_pairs;
-	uint32_t mbox_tmo;
-	uint16_t xritag_start = 0;
-	int rc = 0;
-	uint32_t shdr_status, shdr_add_status;
-	dma_addr_t pdma_phys_bpl1;
-	union lpfc_sli4_cfg_shdr *shdr;
-
-	/* Calculate the requested length of the dma memory */
-	reqlen = count * sizeof(struct sgl_page_pairs) +
-		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
-	if (reqlen > SLI4_PAGE_SIZE) {
-		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-				"6118 Block sgl registration required DMA "
-				"size (%d) great than a page\n", reqlen);
-		return -ENOMEM;
-	}
-	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-	if (!mbox) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"6119 Failed to allocate mbox cmd memory\n");
-		return -ENOMEM;
-	}
-
-	/* Allocate DMA memory and set up the non-embedded mailbox command */
-	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
-				LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
-				LPFC_SLI4_MBX_NEMBED);
-
-	if (alloclen < reqlen) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"6120 Allocated DMA memory size (%d) is "
-				"less than the requested DMA memory "
-				"size (%d)\n", alloclen, reqlen);
-		lpfc_sli4_mbox_cmd_free(phba, mbox);
-		return -ENOMEM;
-	}
-
-	/* Get the first SGE entry from the non-embedded DMA memory */
-	viraddr = mbox->sge_array->addr[0];
-
-	/* Set up the SGL pages in the non-embedded DMA pages */
-	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
-	sgl_pg_pairs = &sgl->sgl_pg_pairs;
-
-	pg_pairs = 0;
-	list_for_each_entry(lpfc_ncmd, nblist, list) {
-		/* Set up the sge entry */
-		sgl_pg_pairs->sgl_pg0_addr_lo =
-			cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
-		sgl_pg_pairs->sgl_pg0_addr_hi =
-			cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
-		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
-			pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
-						SGL_PAGE_SIZE;
-		else
-			pdma_phys_bpl1 = 0;
-		sgl_pg_pairs->sgl_pg1_addr_lo =
-			cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
-		sgl_pg_pairs->sgl_pg1_addr_hi =
-			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
-		/* Keep the first xritag on the list */
-		if (pg_pairs == 0)
-			xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
-		sgl_pg_pairs++;
-		pg_pairs++;
-	}
-	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
-	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
-	/* Perform endian conversion if necessary */
-	sgl->word0 = cpu_to_le32(sgl->word0);
-
-	if (!phba->sli4_hba.intr_enable)
-		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
-	else {
-		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
-		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
-	}
-	shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
-	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
-	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
-	if (rc != MBX_TIMEOUT)
-		lpfc_sli4_mbox_cmd_free(phba, mbox);
-	if (shdr_status || shdr_add_status || rc) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"6125 POST_SGL_BLOCK mailbox command failed "
-				"status x%x add_status x%x mbx status x%x\n",
-				shdr_status, shdr_add_status, rc);
-		rc = -ENXIO;
-	}
-	return rc;
-}
-
-/**
- * lpfc_post_nvme_sgl_list - Post blocks of nvme buffer sgls from a list
- * @phba: pointer to lpfc hba data structure.
- * @post_nblist: pointer to the nvme buffer list.
- *
- * This routine walks a list of nvme buffers that was passed in. It attempts
- * to construct blocks of nvme buffer sgls which contains contiguous xris and
- * uses the non-embedded SGL block post mailbox commands to post to the port.
- * For single NVME buffer sgl with non-contiguous xri, if any, it shall use
- * embedded SGL post mailbox command for posting. The @post_nblist passed in
- * must be local list, thus no lock is needed when manipulate the list.
- *
- * Returns: 0 = failure, non-zero number of successfully posted buffers.
- **/
-static int
-lpfc_post_nvme_sgl_list(struct lpfc_hba *phba,
-			struct list_head *post_nblist, int sb_count)
-{
-	struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
-	int status, sgl_size;
-	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
-	dma_addr_t pdma_phys_sgl1;
-	int last_xritag = NO_XRI;
-	int cur_xritag;
-	LIST_HEAD(prep_nblist);
-	LIST_HEAD(blck_nblist);
-	LIST_HEAD(nvme_nblist);
-
-	/* sanity check */
-	if (sb_count <= 0)
-		return -EINVAL;
-
-	sgl_size = phba->cfg_sg_dma_buf_size;
-
-	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
-		list_del_init(&lpfc_ncmd->list);
-		block_cnt++;
-		if ((last_xritag != NO_XRI) &&
-		    (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
-			/* a hole in xri block, form a sgl posting block */
-			list_splice_init(&prep_nblist, &blck_nblist);
-			post_cnt = block_cnt - 1;
-			/* prepare list for next posting block */
-			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
-			block_cnt = 1;
-		} else {
-			/* prepare list for next posting block */
-			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
-			/* enough sgls for non-embed sgl mbox command */
-			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
-				list_splice_init(&prep_nblist, &blck_nblist);
-				post_cnt = block_cnt;
-				block_cnt = 0;
-			}
-		}
-		num_posting++;
-		last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
-
-		/* end of repost sgl list condition for NVME buffers */
-		if (num_posting == sb_count) {
-			if (post_cnt == 0) {
-				/* last sgl posting block */
-				list_splice_init(&prep_nblist, &blck_nblist);
-				post_cnt = block_cnt;
-			} else if (block_cnt == 1) {
-				/* last single sgl with non-contiguous xri */
-				if (sgl_size > SGL_PAGE_SIZE)
-					pdma_phys_sgl1 =
-						lpfc_ncmd->dma_phys_sgl +
-						SGL_PAGE_SIZE;
-				else
-					pdma_phys_sgl1 = 0;
-				cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
-				status = lpfc_sli4_post_sgl(phba,
-						lpfc_ncmd->dma_phys_sgl,
-						pdma_phys_sgl1, cur_xritag);
-				if (status) {
-					/* failure, put on abort nvme list */
-					lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
-				} else {
-					/* success, put on NVME buffer list */
-					lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
-					lpfc_ncmd->status = IOSTAT_SUCCESS;
-					num_posted++;
-				}
-				/* success, put on NVME buffer sgl list */
-				list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
-			}
-		}
-
-		/* continue until a nembed page worth of sgls */
-		if (post_cnt == 0)
-			continue;
-
-		/* post block of NVME buffer list sgls */
-		status = lpfc_sli4_post_nvme_sgl_block(phba, &blck_nblist,
-						       post_cnt);
-
-		/* don't reset xirtag due to hole in xri block */
-		if (block_cnt == 0)
-			last_xritag = NO_XRI;
-
-		/* reset NVME buffer post count for next round of posting */
-		post_cnt = 0;
-
-		/* put posted NVME buffer-sgl posted on NVME buffer sgl list */
-		while (!list_empty(&blck_nblist)) {
-			list_remove_head(&blck_nblist, lpfc_ncmd,
-					 struct lpfc_nvme_buf, list);
-			if (status) {
-				/* failure, put on abort nvme list */
-				lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
-			} else {
-				/* success, put on NVME buffer list */
-				lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
-				lpfc_ncmd->status = IOSTAT_SUCCESS;
-				num_posted++;
-			}
-			list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
-		}
-	}
-	/* Push NVME buffers with sgl posted to the available list */
-	while (!list_empty(&nvme_nblist)) {
-		list_remove_head(&nvme_nblist, lpfc_ncmd,
-				 struct lpfc_nvme_buf, list);
-		lpfc_release_nvme_buf(phba, lpfc_ncmd);
-	}
-	return num_posted;
-}
-
-/**
- * lpfc_repost_nvme_sgl_list - Repost all the allocated nvme buffer sgls
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine walks the list of nvme buffers that have been allocated and
- * repost them to the port by using SGL block post. This is needed after a
- * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
- * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
- * to the lpfc_nvme_buf_list. If the repost fails, reject all nvme buffers.
- *
- * Returns: 0 = success, non-zero failure.
- **/
-int
-lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba)
-{
-	LIST_HEAD(post_nblist);
-	int num_posted, rc = 0;
-
-	/* get all NVME buffers need to repost to a local list */
-	spin_lock_irq(&phba->nvme_buf_list_get_lock);
-	spin_lock(&phba->nvme_buf_list_put_lock);
-	list_splice_init(&phba->lpfc_nvme_buf_list_get, &post_nblist);
-	list_splice(&phba->lpfc_nvme_buf_list_put, &post_nblist);
-	phba->get_nvme_bufs = 0;
-	phba->put_nvme_bufs = 0;
-	spin_unlock(&phba->nvme_buf_list_put_lock);
-	spin_unlock_irq(&phba->nvme_buf_list_get_lock);
-
-	/* post the list of nvme buffer sgls to port if available */
-	if (!list_empty(&post_nblist)) {
-		num_posted = lpfc_post_nvme_sgl_list(phba, &post_nblist,
-						     phba->sli4_hba.nvme_xri_cnt);
-		/* failed to post any nvme buffer, return error */
-		if (num_posted == 0)
-			rc = -EIO;
-	}
-	return rc;
-}
-
-/**
- * lpfc_new_nvme_buf - Scsi buffer allocator for HBA with SLI4 IF spec
- * @vport: The virtual port for which this call being executed.
- * @num_to_allocate: The requested number of buffers to allocate.
- *
- * This routine allocates nvme buffers for device with SLI-4 interface spec,
- * the nvme buffer contains all the necessary information needed to initiate
- * a NVME I/O. After allocating up to @num_to_allocate NVME buffers and put
- * them on a list, it post them to the port by using SGL block post.
- *
- * Return codes:
- *   int - number of nvme buffers that were allocated and posted.
- *   0 = failure, less than num_to_alloc is a partial failure.
- **/
-static int
-lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
-{
-	struct lpfc_hba *phba = vport->phba;
-	struct lpfc_nvme_buf *lpfc_ncmd;
-	struct lpfc_iocbq *pwqeq;
-	union lpfc_wqe128 *wqe;
-	struct sli4_sge *sgl;
-	dma_addr_t pdma_phys_sgl;
-	uint16_t iotag, lxri = 0;
-	int bcnt, num_posted;
-	LIST_HEAD(prep_nblist);
-	LIST_HEAD(post_nblist);
-	LIST_HEAD(nvme_nblist);
-
-	for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
-		lpfc_ncmd = kzalloc(sizeof(struct lpfc_nvme_buf), GFP_KERNEL);
-		if (!lpfc_ncmd)
-			break;
-		/*
-		 * Get memory from the pci pool to map the virt space to
-		 * pci bus space for an I/O. The DMA buffer includes the
-		 * number of SGE's necessary to support the sg_tablesize.
-		 */
-		lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
-						  GFP_KERNEL,
-						  &lpfc_ncmd->dma_handle);
-		if (!lpfc_ncmd->data) {
-			kfree(lpfc_ncmd);
-			break;
-		}
-
-		lxri = lpfc_sli4_next_xritag(phba);
-		if (lxri == NO_XRI) {
-			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
-				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
-			kfree(lpfc_ncmd);
-			break;
-		}
-		pwqeq = &(lpfc_ncmd->cur_iocbq);
-		wqe = &pwqeq->wqe;
-
-		/* Allocate iotag for lpfc_ncmd->cur_iocbq. */
-		iotag = lpfc_sli_next_iotag(phba, pwqeq);
-		if (iotag == 0) {
-			dma_pool_free(phba->lpfc_sg_dma_buf_pool,
-				      lpfc_ncmd->data, lpfc_ncmd->dma_handle);
-			kfree(lpfc_ncmd);
-			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-					"6121 Failed to allocated IOTAG for"
-					" XRI:0x%x\n", lxri);
-			lpfc_sli4_free_xri(phba, lxri);
-			break;
-		}
-		pwqeq->sli4_lxritag = lxri;
-		pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
-		pwqeq->iocb_flag |= LPFC_IO_NVME;
-		pwqeq->context1 = lpfc_ncmd;
-		pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
-
-		/* Initialize local short-hand pointers. */
-		lpfc_ncmd->nvme_sgl = lpfc_ncmd->data;
-		sgl = lpfc_ncmd->nvme_sgl;
-		pdma_phys_sgl = lpfc_ncmd->dma_handle;
-		lpfc_ncmd->dma_phys_sgl = pdma_phys_sgl;
-
-		/* Rsp SGE will be filled in when we rcv an IO
-		 * from the NVME Layer to be sent.
-		 * The cmd is going to be embedded so we need a SKIP SGE.
-		 */
-		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
-		bf_set(lpfc_sli4_sge_last, sgl, 0);
-		sgl->word2 = cpu_to_le32(sgl->word2);
-		/* Fill in word 3 / sgl_len during cmd submission */
-
-		lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
-
-		/* Initialize WQE */
-		memset(wqe, 0, sizeof(union lpfc_wqe));
-
-		/* add the nvme buffer to a post list */
-		list_add_tail(&lpfc_ncmd->list, &post_nblist);
-		spin_lock_irq(&phba->nvme_buf_list_get_lock);
-		phba->sli4_hba.nvme_xri_cnt++;
-		spin_unlock_irq(&phba->nvme_buf_list_get_lock);
-	}
-	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
-			"6114 Allocate %d out of %d requested new NVME "
-			"buffers\n", bcnt, num_to_alloc);
-
-	/* post the list of nvme buffer sgls to port if available */
-	if (!list_empty(&post_nblist))
-		num_posted = lpfc_post_nvme_sgl_list(phba,
-						     &post_nblist, bcnt);
-	else
-		num_posted = 0;
-
-	return num_posted;
-}
-
 static inline struct lpfc_nvme_buf *
 lpfc_nvme_buf(struct lpfc_hba *phba)
 {
 	struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
 
 	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
-				 &phba->lpfc_nvme_buf_list_get, list) {
+				 &phba->lpfc_common_buf_list_get, list) {
 		list_del_init(&lpfc_ncmd->list);
-		phba->get_nvme_bufs--;
+		phba->get_common_bufs--;
 		return lpfc_ncmd;
 	}
 	return NULL;
 }
 
 /**
- * lpfc_get_nvme_buf - Get a nvme buffer from lpfc_nvme_buf_list of the HBA
+ * lpfc_get_nvme_buf - Get a nvme buffer from lpfc_common_buf_list of the HBA
  * @phba: The HBA for which this call is being executed.
 *
- * This routine removes a nvme buffer from head of @phba lpfc_nvme_buf_list list
+ * This routine removes a nvme buffer from head of @phba lpfc_common_buf_list
  * and returns to caller.
 *
 * Return codes:
@ -2337,27 +1942,57 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
|
||||
int expedite)
|
||||
{
|
||||
struct lpfc_nvme_buf *lpfc_ncmd = NULL;
|
||||
struct sli4_sge *sgl;
|
||||
struct lpfc_iocbq *pwqeq;
|
||||
union lpfc_wqe128 *wqe;
|
||||
unsigned long iflag = 0;
|
||||
|
||||
spin_lock_irqsave(&phba->nvme_buf_list_get_lock, iflag);
|
||||
if (phba->get_nvme_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
|
||||
spin_lock_irqsave(&phba->common_buf_list_get_lock, iflag);
|
||||
if (phba->get_common_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
|
||||
lpfc_ncmd = lpfc_nvme_buf(phba);
|
||||
if (!lpfc_ncmd) {
|
||||
spin_lock(&phba->nvme_buf_list_put_lock);
|
||||
list_splice(&phba->lpfc_nvme_buf_list_put,
|
||||
&phba->lpfc_nvme_buf_list_get);
|
||||
phba->get_nvme_bufs += phba->put_nvme_bufs;
|
||||
INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
|
||||
phba->put_nvme_bufs = 0;
|
||||
spin_unlock(&phba->nvme_buf_list_put_lock);
|
||||
if (phba->get_nvme_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
|
||||
spin_lock(&phba->common_buf_list_put_lock);
|
||||
list_splice(&phba->lpfc_common_buf_list_put,
|
||||
&phba->lpfc_common_buf_list_get);
|
||||
phba->get_common_bufs += phba->put_common_bufs;
|
||||
INIT_LIST_HEAD(&phba->lpfc_common_buf_list_put);
|
||||
phba->put_common_bufs = 0;
|
||||
spin_unlock(&phba->common_buf_list_put_lock);
|
||||
if (phba->get_common_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
|
||||
expedite)
|
||||
lpfc_ncmd = lpfc_nvme_buf(phba);
|
||||
}
|
||||
spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag);
|
||||
spin_unlock_irqrestore(&phba->common_buf_list_get_lock, iflag);
|
||||
|
||||
if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_ncmd) {
|
||||
atomic_inc(&ndlp->cmd_pending);
|
||||
lpfc_ncmd->flags |= LPFC_BUMP_QDEPTH;
|
||||
if (lpfc_ncmd) {
|
||||
pwqeq = &(lpfc_ncmd->cur_iocbq);
|
||||
wqe = &pwqeq->wqe;
|
||||
|
||||
/* Setup key fields in buffer that may have been changed
|
||||
* if other protocols used this buffer.
|
||||
*/
|
||||
pwqeq->iocb_flag = LPFC_IO_NVME;
|
||||
pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
|
||||
lpfc_ncmd->start_time = jiffies;
|
||||
lpfc_ncmd->flags = 0;
|
||||
|
||||
/* Rsp SGE will be filled in when we rcv an IO
|
||||
* from the NVME Layer to be sent.
|
||||
* The cmd is going to be embedded so we need a SKIP SGE.
|
||||
*/
|
||||
sgl = lpfc_ncmd->dma_sgl;
|
||||
bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
|
||||
bf_set(lpfc_sli4_sge_last, sgl, 0);
|
||||
sgl->word2 = cpu_to_le32(sgl->word2);
|
||||
/* Fill in word 3 / sgl_len during cmd submission */
|
||||
|
||||
/* Initialize WQE */
|
||||
memset(wqe, 0, sizeof(union lpfc_wqe));
|
||||
|
||||
if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
|
||||
atomic_inc(&ndlp->cmd_pending);
|
||||
lpfc_ncmd->flags |= LPFC_BUMP_QDEPTH;
|
||||
}
|
||||
}
|
||||
return lpfc_ncmd;
|
||||
}
@ -2368,7 +2003,7 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
* @lpfc_ncmd: The nvme buffer which is being released.
*
* This routine releases @lpfc_ncmd nvme buffer by adding it to tail of @phba
* lpfc_nvme_buf_list list. For SLI4 XRI's are tied to the nvme buffer
* lpfc_common_buf_list list. For SLI4 XRI's are tied to the nvme buffer
* and cannot be reused for at least RA_TOV amount of time if it was
* aborted.
**/
@ -2380,7 +2015,6 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
if ((lpfc_ncmd->flags & LPFC_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);

lpfc_ncmd->nonsg_phys = 0;
lpfc_ncmd->ndlp = NULL;
lpfc_ncmd->flags &= ~LPFC_BUMP_QDEPTH;

@ -2398,12 +2032,14 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
iflag);
} else {
/* MUST zero fields if buffer is reused by another protocol */
lpfc_ncmd->nvmeCmd = NULL;
lpfc_ncmd->cur_iocbq.iocb_flag = LPFC_IO_NVME;
spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
list_add_tail(&lpfc_ncmd->list, &phba->lpfc_nvme_buf_list_put);
phba->put_nvme_bufs++;
spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
spin_lock_irqsave(&phba->common_buf_list_put_lock, iflag);
list_add_tail(&lpfc_ncmd->list,
&phba->lpfc_common_buf_list_put);
phba->put_common_bufs++;
spin_unlock_irqrestore(&phba->common_buf_list_put_lock, iflag);
}
}
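The get/put pair of lists used by this allocate/release cycle (and again in lpfc_get_scsi_buf_s4 further down) is a lock-contention split: completions append to the put list under its own lock, and allocators drain the get list, splicing put into get only when get runs dry. A minimal self-contained sketch of the pattern, with hypothetical names rather than the driver's actual code (lists and locks are assumed initialized with INIT_LIST_HEAD()/spin_lock_init() at setup):

#include <linux/list.h>
#include <linux/spinlock.h>

struct io_buf {
	struct list_head list;
};

struct buf_pool {
	spinlock_t get_lock;		/* protects get_list */
	spinlock_t put_lock;		/* protects put_list */
	struct list_head get_list;	/* drained by allocators */
	struct list_head put_list;	/* filled by completions */
};

static struct io_buf *pool_get(struct buf_pool *p)
{
	struct io_buf *buf = NULL;
	unsigned long iflag;

	spin_lock_irqsave(&p->get_lock, iflag);
	if (list_empty(&p->get_list)) {
		/* refill: steal everything the put side accumulated */
		spin_lock(&p->put_lock);
		list_splice_init(&p->put_list, &p->get_list);
		spin_unlock(&p->put_lock);
	}
	if (!list_empty(&p->get_list)) {
		buf = list_first_entry(&p->get_list, struct io_buf, list);
		list_del_init(&buf->list);
	}
	spin_unlock_irqrestore(&p->get_lock, iflag);
	return buf;
}

static void pool_put(struct buf_pool *p, struct io_buf *buf)
{
	unsigned long iflag;

	spin_lock_irqsave(&p->put_lock, iflag);
	list_add_tail(&buf->list, &p->put_list);
	spin_unlock_irqrestore(&p->put_lock, iflag);
}

Most get/put traffic thus touches only one of the two locks; the nested acquisition happens only on refill.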

@ -2432,7 +2068,7 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
struct nvme_fc_local_port *localport;
struct lpfc_nvme_lport *lport;
struct lpfc_nvme_ctrl_stat *cstat;
int len, i;
int i;

/* Initialize this localport instance. The vport wwn usage ensures
* that NPIV is accounted for.
@ -2501,18 +2137,8 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
atomic_set(&cstat->fc4NvmeControlRequests, 0);
atomic_set(&cstat->fc4NvmeIoCmpls, 0);
}

/* Don't post more new bufs if repost already recovered
* the nvme sgls.
*/
if (phba->sli4_hba.nvme_xri_cnt == 0) {
len = lpfc_new_nvme_buf(vport,
phba->sli4_hba.nvme_xri_max);
vport->phba->total_nvme_bufs += len;
}
} else {
} else
kfree(cstat);
}

return ret;
}
@ -77,7 +77,15 @@ struct lpfc_nvme_rport {
};

struct lpfc_nvme_buf {
/* Common fields */
struct list_head list;
void *data;
dma_addr_t dma_handle;
dma_addr_t dma_phys_sgl;
struct sli4_sge *dma_sgl;
struct lpfc_iocbq cur_iocbq;

/* NVME specific fields */
struct nvmefc_fcp_req *nvmeCmd;
struct lpfc_nodelist *ndlp;

@ -87,36 +95,19 @@ struct lpfc_nvme_buf {
#define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */
#define LPFC_BUMP_QDEPTH 0x2 /* bumped queue depth counter */
uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */
uint16_t status; /* From IOCB Word 7- ulpStatus */
uint16_t cpu;
uint16_t qidx;
uint16_t sqid;
uint16_t status; /* From IOCB Word 7- ulpStatus */
uint32_t result; /* From IOCB Word 4. */

uint32_t seg_cnt; /* Number of scatter-gather segments returned by
* dma_map_sg. The driver needs this for calls
* to dma_unmap_sg.
*/
dma_addr_t nonsg_phys; /* Non scatter-gather physical address. */

/*
* data and dma_handle are the kernel virtual and bus address of the
* dma-able buffer containing the fcp_cmd, fcp_rsp and a scatter
* gather bde list that supports the sg_tablesize value.
*/
void *data;
dma_addr_t dma_handle;

struct sli4_sge *nvme_sgl;
dma_addr_t dma_phys_sgl;

/* cur_iocbq has phys of the dma-able buffer.
* Iotag is in here
*/
struct lpfc_iocbq cur_iocbq;

wait_queue_head_t *waitq;
unsigned long start_time;

uint16_t qidx;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
uint64_t ts_cmd_start;
uint64_t ts_last_cmd;
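The reordering above is the heart of the commit: the leading members of lpfc_nvme_buf now match lpfc_scsi_buf field-for-field, so list handling, SGL posting and DMA bookkeeping can operate on either protocol's buffer through the common section, and only the protocol section is re-initialized when a buffer is handed out. The patch keeps two struct definitions whose leading members agree; a simplified sketch of the equivalent embedded form (hypothetical types, not the driver's actual layout):

/* Common section: identical layout at the top of both buffer types,
 * so pool and SGL-post code can treat them interchangeably.
 */
struct io_buf_common {
	struct list_head list;
	void		*data;		/* kernel virtual address */
	dma_addr_t	dma_handle;	/* bus address of the buffer */
	dma_addr_t	dma_phys_sgl;	/* bus address of the SGL */
	void		*dma_sgl;	/* SGL virtual address */
};

struct nvme_io_buf {			/* NVME view of the buffer */
	struct io_buf_common common;	/* must stay first */
	struct nvmefc_fcp_req *nvme_cmd;
};

struct scsi_io_buf {			/* SCSI view of the buffer */
	struct io_buf_common common;	/* must stay first */
	struct scsi_cmnd *scsi_cmd;
};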
@ -182,7 +182,7 @@ static void
lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba,
struct lpfc_scsi_buf *lpfc_cmd)
{
struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
if (sgl) {
sgl += 1;
sgl->word2 = le32_to_cpu(sgl->word2);
@ -394,7 +394,7 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
IOCB_t *iocb;
dma_addr_t pdma_phys_fcp_cmd;
dma_addr_t pdma_phys_fcp_rsp;
dma_addr_t pdma_phys_bpl;
dma_addr_t pdma_phys_sgl;
uint16_t iotag;
int bcnt, bpl_size;

@ -438,14 +438,14 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)

psb->fcp_cmnd = psb->data;
psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
psb->dma_sgl = psb->data + sizeof(struct fcp_cmnd) +
sizeof(struct fcp_rsp);

/* Initialize local short-hand pointers. */
bpl = psb->fcp_bpl;
bpl = psb->dma_sgl;
pdma_phys_fcp_cmd = psb->dma_handle;
pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
pdma_phys_sgl = psb->dma_handle + sizeof(struct fcp_cmnd) +
sizeof(struct fcp_rsp);

/*
@ -496,9 +496,9 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
iocb->un.fcpi64.bdl.bdeSize =
(2 * sizeof(struct ulp_bde64));
iocb->un.fcpi64.bdl.addrLow =
putPaddrLow(pdma_phys_bpl);
putPaddrLow(pdma_phys_sgl);
iocb->un.fcpi64.bdl.addrHigh =
putPaddrHigh(pdma_phys_bpl);
putPaddrHigh(pdma_phys_sgl);
iocb->ulpBdeCount = 1;
iocb->ulpLe = 1;
}
@ -613,359 +613,6 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
spin_unlock_irqrestore(&phba->hbalock, iflag);
}

/**
* lpfc_sli4_post_scsi_sgl_list - Post blocks of scsi buffer sgls from a list
* @phba: pointer to lpfc hba data structure.
* @post_sblist: pointer to the scsi buffer list.
*
* This routine walks a list of scsi buffers that was passed in. It attempts
* to construct blocks of scsi buffer sgls which contain contiguous xris and
* uses the non-embedded SGL block post mailbox commands to post to the port.
* For single SCSI buffer sgl with non-contiguous xri, if any, it shall use
* embedded SGL post mailbox command for posting. The @post_sblist passed in
* must be a local list, thus no lock is needed when manipulating the list.
*
* Returns: 0 = failure, non-zero number of successfully posted buffers.
**/
static int
lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
struct list_head *post_sblist, int sb_count)
{
struct lpfc_scsi_buf *psb, *psb_next;
int status, sgl_size;
int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
dma_addr_t pdma_phys_bpl1;
int last_xritag = NO_XRI;
LIST_HEAD(prep_sblist);
LIST_HEAD(blck_sblist);
LIST_HEAD(scsi_sblist);

/* sanity check */
if (sb_count <= 0)
return -EINVAL;

sgl_size = phba->cfg_sg_dma_buf_size -
(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

list_for_each_entry_safe(psb, psb_next, post_sblist, list) {
list_del_init(&psb->list);
block_cnt++;
if ((last_xritag != NO_XRI) &&
(psb->cur_iocbq.sli4_xritag != last_xritag + 1)) {
/* a hole in xri block, form a sgl posting block */
list_splice_init(&prep_sblist, &blck_sblist);
post_cnt = block_cnt - 1;
/* prepare list for next posting block */
list_add_tail(&psb->list, &prep_sblist);
block_cnt = 1;
} else {
/* prepare list for next posting block */
list_add_tail(&psb->list, &prep_sblist);
/* enough sgls for non-embed sgl mbox command */
if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
list_splice_init(&prep_sblist, &blck_sblist);
post_cnt = block_cnt;
block_cnt = 0;
}
}
num_posting++;
last_xritag = psb->cur_iocbq.sli4_xritag;

/* end of repost sgl list condition for SCSI buffers */
if (num_posting == sb_count) {
if (post_cnt == 0) {
/* last sgl posting block */
list_splice_init(&prep_sblist, &blck_sblist);
post_cnt = block_cnt;
} else if (block_cnt == 1) {
/* last single sgl with non-contiguous xri */
if (sgl_size > SGL_PAGE_SIZE)
pdma_phys_bpl1 = psb->dma_phys_bpl +
SGL_PAGE_SIZE;
else
pdma_phys_bpl1 = 0;
status = lpfc_sli4_post_sgl(phba,
psb->dma_phys_bpl,
pdma_phys_bpl1,
psb->cur_iocbq.sli4_xritag);
if (status) {
/* failure, put on abort scsi list */
psb->exch_busy = 1;
} else {
/* success, put on SCSI buffer list */
psb->exch_busy = 0;
psb->status = IOSTAT_SUCCESS;
num_posted++;
}
/* success, put on SCSI buffer sgl list */
list_add_tail(&psb->list, &scsi_sblist);
}
}

/* continue until a nembed page worth of sgls */
if (post_cnt == 0)
continue;

/* post block of SCSI buffer list sgls */
status = lpfc_sli4_post_scsi_sgl_block(phba, &blck_sblist,
post_cnt);

/* don't reset xritag due to hole in xri block */
if (block_cnt == 0)
last_xritag = NO_XRI;

/* reset SCSI buffer post count for next round of posting */
post_cnt = 0;

/* put SCSI buffers with posted sgls on the SCSI buffer sgl list */
while (!list_empty(&blck_sblist)) {
list_remove_head(&blck_sblist, psb,
struct lpfc_scsi_buf, list);
if (status) {
/* failure, put on abort scsi list */
psb->exch_busy = 1;
} else {
/* success, put on SCSI buffer list */
psb->exch_busy = 0;
psb->status = IOSTAT_SUCCESS;
num_posted++;
}
list_add_tail(&psb->list, &scsi_sblist);
}
}
/* Push SCSI buffers with sgl posted to the available list */
while (!list_empty(&scsi_sblist)) {
list_remove_head(&scsi_sblist, psb,
struct lpfc_scsi_buf, list);
lpfc_release_scsi_buf_s4(phba, psb);
}
return num_posted;
}

/**
* lpfc_sli4_repost_scsi_sgl_list - Repost all the allocated scsi buffer sgls
* @phba: pointer to lpfc hba data structure.
*
* This routine walks the list of scsi buffers that have been allocated and
* reposts them to the port by using SGL block post. This is needed after a
* pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
* is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
* to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
*
* Returns: 0 = success, non-zero failure.
**/
int
lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
{
LIST_HEAD(post_sblist);
int num_posted, rc = 0;

/* get all SCSI buffers that need reposting onto a local list */
spin_lock_irq(&phba->scsi_buf_list_get_lock);
spin_lock(&phba->scsi_buf_list_put_lock);
list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist);
list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist);
spin_unlock(&phba->scsi_buf_list_put_lock);
spin_unlock_irq(&phba->scsi_buf_list_get_lock);

/* post the list of scsi buffer sgls to port if available */
if (!list_empty(&post_sblist)) {
num_posted = lpfc_sli4_post_scsi_sgl_list(phba, &post_sblist,
phba->sli4_hba.scsi_xri_cnt);
/* failed to post any scsi buffer, return error */
if (num_posted == 0)
rc = -EIO;
}
return rc;
}

/**
* lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
* @vport: The virtual port for which this call is being executed.
* @num_to_allocate: The requested number of buffers to allocate.
*
* This routine allocates scsi buffers for device with SLI-4 interface spec,
* the scsi buffer contains all the necessary information needed to initiate
* a SCSI I/O. After allocating up to @num_to_allocate SCSI buffers and putting
* them on a list, it posts them to the port by using SGL block post.
*
* Return codes:
* int - number of scsi buffers that were allocated and posted.
* 0 = failure, less than num_to_alloc is a partial failure.
**/
static int
lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_scsi_buf *psb;
struct sli4_sge *sgl;
IOCB_t *iocb;
dma_addr_t pdma_phys_fcp_cmd;
dma_addr_t pdma_phys_fcp_rsp;
dma_addr_t pdma_phys_bpl;
uint16_t iotag, lxri = 0;
int bcnt, num_posted, sgl_size;
LIST_HEAD(prep_sblist);
LIST_HEAD(post_sblist);
LIST_HEAD(scsi_sblist);

sgl_size = phba->cfg_sg_dma_buf_size -
(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n",
num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size,
(int)sizeof(struct fcp_cmnd),
(int)sizeof(struct fcp_rsp));

for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
if (!psb)
break;
/*
* Get memory from the pci pool to map the virt space to
* pci bus space for an I/O. The DMA buffer includes space
* for the struct fcp_cmnd, struct fcp_rsp and the number
* of bde's necessary to support the sg_tablesize.
*/
psb->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
GFP_KERNEL, &psb->dma_handle);
if (!psb->data) {
kfree(psb);
break;
}

/*
* 4K Page alignment is CRITICAL to BlockGuard, double check
* to be sure.
*/
if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
(((unsigned long)(psb->data) &
(unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) {
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"3369 Memory alignment error "
"addr=%lx\n",
(unsigned long)psb->data);
dma_pool_free(phba->lpfc_sg_dma_buf_pool,
psb->data, psb->dma_handle);
kfree(psb);
break;
}

lxri = lpfc_sli4_next_xritag(phba);
if (lxri == NO_XRI) {
dma_pool_free(phba->lpfc_sg_dma_buf_pool,
psb->data, psb->dma_handle);
kfree(psb);
break;
}

/* Allocate iotag for psb->cur_iocbq. */
iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
if (iotag == 0) {
dma_pool_free(phba->lpfc_sg_dma_buf_pool,
psb->data, psb->dma_handle);
kfree(psb);
lpfc_printf_log(phba, KERN_ERR, LOG_FCP,
"3368 Failed to allocate IOTAG for"
" XRI:0x%x\n", lxri);
lpfc_sli4_free_xri(phba, lxri);
break;
}
psb->cur_iocbq.sli4_lxritag = lxri;
psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
psb->fcp_bpl = psb->data;
psb->fcp_cmnd = (psb->data + sgl_size);
psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
sizeof(struct fcp_cmnd));

/* Initialize local short-hand pointers. */
sgl = (struct sli4_sge *)psb->fcp_bpl;
pdma_phys_bpl = psb->dma_handle;
pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size);
pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);

/*
* The first two bdes are the FCP_CMD and FCP_RSP.
* The balance are sg list bdes. Initialize the
* first two and leave the rest for queuecommand.
*/
sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
sgl->word2 = le32_to_cpu(sgl->word2);
bf_set(lpfc_sli4_sge_last, sgl, 0);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
sgl++;

/* Setup the physical region for the FCP RSP */
sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
sgl->word2 = le32_to_cpu(sgl->word2);
bf_set(lpfc_sli4_sge_last, sgl, 1);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

/*
* Since the IOCB for the FCP I/O is built into this
* lpfc_scsi_buf, initialize it with all known data now.
*/
iocb = &psb->cur_iocbq.iocb;
iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
/* setting the BLP size to 2 * sizeof BDE may not be correct.
* We are setting the bpl to point to our sgl. An sgl's
* entries are 16 bytes, a bpl's entries are 12 bytes.
*/
iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
iocb->ulpBdeCount = 1;
iocb->ulpLe = 1;
iocb->ulpClass = CLASS3;
psb->cur_iocbq.context1 = psb;
psb->dma_phys_bpl = pdma_phys_bpl;

/* add the scsi buffer to a post list */
list_add_tail(&psb->list, &post_sblist);
spin_lock_irq(&phba->scsi_buf_list_get_lock);
phba->sli4_hba.scsi_xri_cnt++;
spin_unlock_irq(&phba->scsi_buf_list_get_lock);
}
lpfc_printf_log(phba, KERN_INFO, LOG_BG | LOG_FCP,
"3021 Allocate %d out of %d requested new SCSI "
"buffers\n", bcnt, num_to_alloc);

/* post the list of scsi buffer sgls to port if available */
if (!list_empty(&post_sblist))
num_posted = lpfc_sli4_post_scsi_sgl_list(phba,
&post_sblist, bcnt);
else
num_posted = 0;

return num_posted;
}
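Both the removed allocators and the surviving common path carve one dma_pool allocation into three regions; only the order differs between SLI3 (fcp_cmnd, fcp_rsp, then the BPL) and SLI4 (the SGL first, then fcp_cmnd and fcp_rsp). A fragment-style sketch of the SLI4 carve-up, using the names from the patch:

/* One DMA region per IO buffer (SLI4 layout used above):
 *
 *   psb->data / psb->dma_handle
 *   +------------------+-----------------+----------------+
 *   | SGEs (sgl_size)  | struct fcp_cmnd | struct fcp_rsp |
 *   +------------------+-----------------+----------------+
 */
sgl_size = phba->cfg_sg_dma_buf_size -
	   (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

psb->dma_sgl  = psb->data;			/* SGEs at offset 0 */
psb->fcp_cmnd = psb->data + sgl_size;		/* FCP_CMND next    */
psb->fcp_rsp  = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
		sizeof(struct fcp_cmnd));	/* FCP_RSP last     */

pdma_phys_fcp_cmd = psb->dma_handle + sgl_size;	/* same offsets     */
pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);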

/**
* lpfc_new_scsi_buf - Wrapper function for scsi buffer allocator
* @vport: The virtual port for which this call is being executed.
* @num_to_allocate: The requested number of buffers to allocate.
*
* This routine wraps the actual SCSI buffer allocator function pointer from
* the lpfc_hba struct.
*
* Return codes:
* int - number of scsi buffers that were allocated.
* 0 = failure, less than num_to_alloc is a partial failure.
**/
static inline int
lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
{
return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
}

/**
* lpfc_get_scsi_buf_s3 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
* @phba: The HBA for which this call is being executed.
@ -1005,10 +652,10 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
return lpfc_cmd;
}
/**
* lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
* lpfc_get_scsi_buf_s4 - Get a scsi buffer from lpfc_common_buf_list of the HBA
* @phba: The HBA for which this call is being executed.
*
* This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
* This routine removes a scsi buffer from head of @phba lpfc_common_buf_list
* and returns to caller.
*
* Return codes:
@ -1020,38 +667,113 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next;
unsigned long iflag = 0;
struct sli4_sge *sgl;
IOCB_t *iocb;
dma_addr_t pdma_phys_fcp_rsp;
dma_addr_t pdma_phys_fcp_cmd;
uint32_t sgl_size;
int found = 0;

spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag);
spin_lock_irqsave(&phba->common_buf_list_get_lock, iflag);
list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
&phba->lpfc_scsi_buf_list_get, list) {
&phba->lpfc_common_buf_list_get, list) {
if (lpfc_test_rrq_active(phba, ndlp,
lpfc_cmd->cur_iocbq.sli4_lxritag))
continue;
list_del_init(&lpfc_cmd->list);
phba->get_common_bufs--;
found = 1;
break;
}
if (!found) {
spin_lock(&phba->scsi_buf_list_put_lock);
list_splice(&phba->lpfc_scsi_buf_list_put,
&phba->lpfc_scsi_buf_list_get);
INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
spin_unlock(&phba->scsi_buf_list_put_lock);
spin_lock(&phba->common_buf_list_put_lock);
list_splice(&phba->lpfc_common_buf_list_put,
&phba->lpfc_common_buf_list_get);
phba->get_common_bufs += phba->put_common_bufs;
INIT_LIST_HEAD(&phba->lpfc_common_buf_list_put);
phba->put_common_bufs = 0;
spin_unlock(&phba->common_buf_list_put_lock);
list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
&phba->lpfc_scsi_buf_list_get, list) {
&phba->lpfc_common_buf_list_get,
list) {
if (lpfc_test_rrq_active(
phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag))
continue;
list_del_init(&lpfc_cmd->list);
phba->get_common_bufs--;
found = 1;
break;
}
}
spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
spin_unlock_irqrestore(&phba->common_buf_list_get_lock, iflag);
if (!found)
return NULL;

sgl_size = phba->cfg_sg_dma_buf_size -
(sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));

/* Setup key fields in buffer that may have been changed
* if other protocols used this buffer.
*/
lpfc_cmd->cur_iocbq.iocb_flag = LPFC_IO_FCP;
lpfc_cmd->prot_seg_cnt = 0;
lpfc_cmd->seg_cnt = 0;
lpfc_cmd->waitq = NULL;
lpfc_cmd->timeout = 0;
lpfc_cmd->flags = 0;
lpfc_cmd->start_time = jiffies;
lpfc_cmd->waitq = NULL;
lpfc_cmd->cpu = smp_processor_id();
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
lpfc_cmd->prot_data_type = 0;
#endif

lpfc_cmd->fcp_cmnd = (lpfc_cmd->data + sgl_size);
lpfc_cmd->fcp_rsp = (struct fcp_rsp *)((uint8_t *)lpfc_cmd->fcp_cmnd +
sizeof(struct fcp_cmnd));

/*
* The first two SGEs are the FCP_CMD and FCP_RSP.
* The balance are sg list bdes. Initialize the
* first two and leave the rest for queuecommand.
*/
sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
pdma_phys_fcp_cmd = (lpfc_cmd->dma_handle + sgl_size);
sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
sgl->word2 = le32_to_cpu(sgl->word2);
bf_set(lpfc_sli4_sge_last, sgl, 0);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len = cpu_to_le32(sizeof(struct fcp_cmnd));
sgl++;

/* Setup the physical region for the FCP RSP */
pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
sgl->word2 = le32_to_cpu(sgl->word2);
bf_set(lpfc_sli4_sge_last, sgl, 1);
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len = cpu_to_le32(sizeof(struct fcp_rsp));

/*
* Since the IOCB for the FCP I/O is built into this
* lpfc_scsi_buf, initialize it with all known data now.
*/
iocb = &lpfc_cmd->cur_iocbq.iocb;
iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
/* setting the BLP size to 2 * sizeof BDE may not be correct.
* We are setting the bpl to point to our sgl. An sgl's
* entries are 16 bytes, a bpl's entries are 12 bytes.
*/
iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
iocb->ulpBdeCount = 1;
iocb->ulpLe = 1;
iocb->ulpClass = CLASS3;

if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
atomic_inc(&ndlp->cmd_pending);
lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
@ -1089,7 +811,6 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
unsigned long iflag = 0;

psb->seg_cnt = 0;
psb->nonsg_phys = 0;
psb->prot_seg_cnt = 0;

spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
@ -1105,7 +826,7 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
* @psb: The scsi buffer which is being released.
*
* This routine releases @psb scsi buffer by adding it to tail of @phba
* lpfc_scsi_buf_list list. For SLI4 XRI's are tied to the scsi buffer
* lpfc_common_buf_list list. For SLI4 XRI's are tied to the scsi buffer
* and cannot be reused for at least RA_TOV amount of time if it was
* aborted.
**/
@ -1115,7 +836,6 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
unsigned long iflag = 0;

psb->seg_cnt = 0;
psb->nonsg_phys = 0;
psb->prot_seg_cnt = 0;

if (psb->exch_busy) {
@ -1127,11 +847,13 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
iflag);
} else {
/* MUST zero fields if buffer is reused by another protocol */
psb->pCmd = NULL;
psb->cur_iocbq.iocb_flag = LPFC_IO_FCP;
spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put);
spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
psb->cur_iocbq.iocb_cmpl = NULL;
spin_lock_irqsave(&phba->common_buf_list_put_lock, iflag);
list_add_tail(&psb->list, &phba->lpfc_common_buf_list_put);
phba->put_common_bufs++;
spin_unlock_irqrestore(&phba->common_buf_list_put_lock, iflag);
}
}

@ -1173,7 +895,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
struct scatterlist *sgel = NULL;
struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
struct ulp_bde64 *bpl = lpfc_cmd->dma_sgl;
struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
@ -2728,7 +2450,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba,
{
struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
struct ulp_bde64 *bpl = lpfc_cmd->dma_sgl;
IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
uint32_t num_bde = 0;
int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
@ -3261,7 +2983,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
struct scatterlist *sgel = NULL;
struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
struct sli4_sge *first_data_sgl;
IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
dma_addr_t physaddr;
@ -3406,7 +3128,7 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
{
struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl);
struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->dma_sgl);
IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
uint32_t num_sge = 0;
int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction;
@ -3941,7 +3663,7 @@ int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba,

if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU
&& phba->cfg_fcp_io_channel > 1) {
cpu = smp_processor_id();
cpu = lpfc_cmd->cpu;
if (cpu < phba->sli4_hba.num_present_cpu) {
cpup = phba->sli4_hba.cpu_map;
cpup += cpu;
@ -4413,14 +4135,12 @@ lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)

switch (dev_grp) {
case LPFC_PCI_DEV_LP:
phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s3;
phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf_s3;
break;
case LPFC_PCI_DEV_OC:
phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
phba->lpfc_bg_scsi_prep_dma_buf = lpfc_bg_scsi_prep_dma_buf_s4;
phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
@ -4735,8 +4455,6 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
lpfc_cmd->pCmd = cmnd;
lpfc_cmd->rdata = rdata;
lpfc_cmd->ndlp = ndlp;
lpfc_cmd->timeout = 0;
lpfc_cmd->start_time = jiffies;
cmnd->host_scribble = (unsigned char *)lpfc_cmd;

if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) {
@ -5671,6 +5389,12 @@ lpfc_slave_alloc(struct scsi_device *sdev)
}
sdev_cnt = atomic_inc_return(&phba->sdev_cnt);

/* For SLI4, all IO buffers are pre-allocated */
if (phba->sli_rev == LPFC_SLI_REV4)
return 0;

/* This code path is now ONLY for SLI3 adapters */

/*
* Populate the cmds_per_lun count scsi_bufs into this host's globally
* available list of scsi buffers. Don't allocate more than the
@ -5702,7 +5426,7 @@ lpfc_slave_alloc(struct scsi_device *sdev)
(phba->cfg_hba_queue_depth - total));
num_to_alloc = phba->cfg_hba_queue_depth - total;
}
num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
num_allocated = lpfc_new_scsi_buf_s3(vport, num_to_alloc);
if (num_to_alloc != num_allocated) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0708 Allocation request of %d "
@ -131,7 +131,15 @@ struct lpfc_scsicmd_bkt {
};

struct lpfc_scsi_buf {
/* Common fields */
struct list_head list;
void *data;
dma_addr_t dma_handle;
dma_addr_t dma_phys_sgl;
struct ulp_bde64 *dma_sgl;
struct lpfc_iocbq cur_iocbq;

/* SCSI specific fields */
struct scsi_cmnd *pCmd;
struct lpfc_rport_data *rdata;
struct lpfc_nodelist *ndlp;
@ -139,9 +147,10 @@ struct lpfc_scsi_buf {
uint32_t timeout;

uint16_t flags; /* TBD convert exch_busy to flags */
#define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */
#define LPFC_SBUF_BUMP_QDEPTH 0x8 /* bumped queue depth counter */
#define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */
#define LPFC_SBUF_BUMP_QDEPTH 0x2 /* bumped queue depth counter */
uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */
uint16_t cpu;
uint16_t status; /* From IOCB Word 7- ulpStatus */
uint32_t result; /* From IOCB Word 4. */

@ -150,27 +159,13 @@ struct lpfc_scsi_buf {
* to dma_unmap_sg. */
uint32_t prot_seg_cnt; /* seg_cnt's counterpart for protection data */

dma_addr_t nonsg_phys; /* Non scatter-gather physical address. */

/*
* data and dma_handle are the kernel virtual and bus address of the
* dma-able buffer containing the fcp_cmd, fcp_rsp and a scatter
* gather bde list that supports the sg_tablesize value.
*/
void *data;
dma_addr_t dma_handle;

struct fcp_cmnd *fcp_cmnd;
struct fcp_rsp *fcp_rsp;
struct ulp_bde64 *fcp_bpl;

dma_addr_t dma_phys_bpl;

/* cur_iocbq has phys of the dma-able buffer.
* Iotag is in here
*/
struct lpfc_iocbq cur_iocbq;
uint16_t cpu;

wait_queue_head_t *waitq;
unsigned long start_time;
@ -6027,11 +6027,8 @@ lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
list_add_tail(&rsrc_blks->list, ext_blk_list);
rsrc_start = rsrc_id;
if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
phba->sli4_hba.scsi_xri_start = rsrc_start +
phba->sli4_hba.common_xri_start = rsrc_start +
lpfc_sli4_get_iocb_cnt(phba);
phba->sli4_hba.nvme_xri_start =
phba->sli4_hba.scsi_xri_start +
phba->sli4_hba.scsi_xri_max;
}

while (rsrc_id < (rsrc_start + rsrc_size)) {
@ -7057,6 +7054,45 @@ lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
return total_cnt;
}

/**
* lpfc_sli4_repost_common_sgl_list - Repost all the allocated nvme buffer sgls
* @phba: pointer to lpfc hba data structure.
*
* This routine walks the list of nvme buffers that have been allocated and
* reposts them to the port by using SGL block post. This is needed after a
* pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
* is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
* to the lpfc_common_buf_list. If the repost fails, reject all nvme buffers.
*
* Returns: 0 = success, non-zero failure.
**/
int
lpfc_sli4_repost_common_sgl_list(struct lpfc_hba *phba)
{
LIST_HEAD(post_nblist);
int num_posted, rc = 0;

/* get all NVME buffers that need reposting onto a local list */
spin_lock_irq(&phba->common_buf_list_get_lock);
spin_lock(&phba->common_buf_list_put_lock);
list_splice_init(&phba->lpfc_common_buf_list_get, &post_nblist);
list_splice(&phba->lpfc_common_buf_list_put, &post_nblist);
phba->get_common_bufs = 0;
phba->put_common_bufs = 0;
spin_unlock(&phba->common_buf_list_put_lock);
spin_unlock_irq(&phba->common_buf_list_get_lock);

/* post the list of nvme buffer sgls to port if available */
if (!list_empty(&post_nblist)) {
num_posted = lpfc_sli4_post_common_sgl_list(
phba, &post_nblist, phba->sli4_hba.common_xri_cnt);
/* failed to post any nvme buffer, return error */
if (num_posted == 0)
rc = -EIO;
}
return rc;
}

void
lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
@ -7518,17 +7554,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* We need 1 iocbq for every SGL, for IO processing */
cnt += phba->sli4_hba.nvmet_xri_cnt;
} else {
/* update host scsi xri-sgl sizes and mappings */
rc = lpfc_sli4_scsi_sgl_update(phba);
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"6309 Failed to update scsi-sgl size "
"and mapping: %d\n", rc);
goto out_destroy_queue;
}

/* update host nvme xri-sgl sizes and mappings */
rc = lpfc_sli4_nvme_sgl_update(phba);
/* update host common xri-sgl sizes and mappings */
rc = lpfc_sli4_common_sgl_update(phba);
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"6082 Failed to update nvme-sgl size "
@ -7536,6 +7563,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
goto out_destroy_queue;
}

/* register the allocated common sgl pool to the port */
rc = lpfc_sli4_repost_common_sgl_list(phba);
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"6116 Error %d during nvme sgl post "
"operation\n", rc);
/* Some NVME buffers were moved to abort nvme list */
/* A pci function reset will repost them */
rc = -ENODEV;
goto out_destroy_queue;
}
cnt = phba->cfg_iocb_cnt * 1024;
}

@ -7572,36 +7610,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
}
}

if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
/* register the allocated scsi sgl pool to the port */
rc = lpfc_sli4_repost_scsi_sgl_list(phba);
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"0383 Error %d during scsi sgl post "
"operation\n", rc);
/* Some Scsi buffers were moved to abort scsi list */
/* A pci function reset will repost them */
rc = -ENODEV;
goto out_destroy_queue;
}
}

if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
(phba->nvmet_support == 0)) {

/* register the allocated nvme sgl pool to the port */
rc = lpfc_repost_nvme_sgl_list(phba);
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
"6116 Error %d during nvme sgl post "
"operation\n", rc);
/* Some NVME buffers were moved to abort nvme list */
/* A pci function reset will repost them */
rc = -ENODEV;
goto out_destroy_queue;
}
}

/* Post the rpi header region to the device. */
rc = lpfc_sli4_post_all_rpi_hdrs(phba);
if (unlikely(rc)) {
@ -9484,7 +9492,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
/* 128 byte wqe support here */

lpfc_cmd = iocbq->context1;
sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
fcp_cmnd = lpfc_cmd->fcp_cmnd;

/* Word 0-2 - FCP_CMND */
@ -9548,7 +9556,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
/* 128 byte wqe support here */

lpfc_cmd = iocbq->context1;
sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
fcp_cmnd = lpfc_cmd->fcp_cmnd;

/* Word 0-2 - FCP_CMND */
@ -9605,7 +9613,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
/* 128 byte wqe support here */

lpfc_cmd = iocbq->context1;
sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
fcp_cmnd = lpfc_cmd->fcp_cmnd;

/* Word 0-2 - FCP_CMND */
@ -16827,22 +16835,22 @@ lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
}

/**
* lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
* lpfc_sli4_post_common_sgl_block - post a block of nvme sgl list to firmware
* @phba: pointer to lpfc hba data structure.
* @sblist: pointer to scsi buffer list.
* @nblist: pointer to nvme buffer list.
* @count: number of scsi buffers on the list.
*
* This routine is invoked to post a block of @count scsi sgl pages from a
* SCSI buffer list @sblist to the HBA using non-embedded mailbox command.
* SCSI buffer list @nblist to the HBA using non-embedded mailbox command.
* No Lock is held.
*
**/
int
lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
struct list_head *sblist,
int count)
static int
lpfc_sli4_post_common_sgl_block(struct lpfc_hba *phba,
struct list_head *nblist,
int count)
{
struct lpfc_scsi_buf *psb;
struct lpfc_nvme_buf *lpfc_ncmd;
struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
struct sgl_page_pairs *sgl_pg_pairs;
void *viraddr;
@ -16860,25 +16868,25 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
if (reqlen > SLI4_PAGE_SIZE) {
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
"0217 Block sgl registration required DMA "
"6118 Block sgl registration required DMA "
"size (%d) great than a page\n", reqlen);
return -ENOMEM;
}
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mbox) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0283 Failed to allocate mbox cmd memory\n");
"6119 Failed to allocate mbox cmd memory\n");
return -ENOMEM;
}

/* Allocate DMA memory and set up the non-embedded mailbox command */
alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
LPFC_SLI4_MBX_NEMBED);
LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
reqlen, LPFC_SLI4_MBX_NEMBED);

if (alloclen < reqlen) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2561 Allocated DMA memory size (%d) is "
"6120 Allocated DMA memory size (%d) is "
"less than the requested DMA memory "
"size (%d)\n", alloclen, reqlen);
lpfc_sli4_mbox_cmd_free(phba, mbox);
@ -16893,14 +16901,15 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
sgl_pg_pairs = &sgl->sgl_pg_pairs;

pg_pairs = 0;
list_for_each_entry(psb, sblist, list) {
list_for_each_entry(lpfc_ncmd, nblist, list) {
/* Set up the sge entry */
sgl_pg_pairs->sgl_pg0_addr_lo =
cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
sgl_pg_pairs->sgl_pg0_addr_hi =
cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
SGL_PAGE_SIZE;
else
pdma_phys_bpl1 = 0;
sgl_pg_pairs->sgl_pg1_addr_lo =
@ -16909,7 +16918,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
/* Keep the first xritag on the list */
if (pg_pairs == 0)
xritag_start = psb->cur_iocbq.sli4_xritag;
xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
sgl_pg_pairs++;
pg_pairs++;
}
@ -16918,20 +16927,20 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
/* Perform endian conversion if necessary */
sgl->word0 = cpu_to_le32(sgl->word0);

if (!phba->sli4_hba.intr_enable)
if (!phba->sli4_hba.intr_enable) {
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
else {
} else {
mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
}
shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
if (rc != MBX_TIMEOUT)
lpfc_sli4_mbox_cmd_free(phba, mbox);
if (shdr_status || shdr_add_status || rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2564 POST_SGL_BLOCK mailbox command failed "
"6125 POST_SGL_BLOCK mailbox command failed "
"status x%x add_status x%x mbx status x%x\n",
shdr_status, shdr_add_status, rc);
rc = -ENXIO;
@ -16939,6 +16948,142 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
return rc;
}

/**
* lpfc_sli4_post_common_sgl_list - Post blocks of nvme buffer sgls from a list
* @phba: pointer to lpfc hba data structure.
* @post_nblist: pointer to the nvme buffer list.
*
* This routine walks a list of nvme buffers that was passed in. It attempts
* to construct blocks of nvme buffer sgls which contain contiguous xris and
* uses the non-embedded SGL block post mailbox commands to post to the port.
* For single NVME buffer sgl with non-contiguous xri, if any, it shall use
* embedded SGL post mailbox command for posting. The @post_nblist passed in
* must be a local list, thus no lock is needed when manipulating the list.
*
* Returns: 0 = failure, non-zero number of successfully posted buffers.
**/
int
lpfc_sli4_post_common_sgl_list(struct lpfc_hba *phba,
struct list_head *post_nblist, int sb_count)
{
struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
int status, sgl_size;
int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
dma_addr_t pdma_phys_sgl1;
int last_xritag = NO_XRI;
int cur_xritag;
unsigned long iflag;
LIST_HEAD(prep_nblist);
LIST_HEAD(blck_nblist);
LIST_HEAD(nvme_nblist);

/* sanity check */
if (sb_count <= 0)
return -EINVAL;

sgl_size = phba->cfg_sg_dma_buf_size;
list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
list_del_init(&lpfc_ncmd->list);
block_cnt++;
if ((last_xritag != NO_XRI) &&
(lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
/* a hole in xri block, form a sgl posting block */
list_splice_init(&prep_nblist, &blck_nblist);
post_cnt = block_cnt - 1;
/* prepare list for next posting block */
list_add_tail(&lpfc_ncmd->list, &prep_nblist);
block_cnt = 1;
} else {
/* prepare list for next posting block */
list_add_tail(&lpfc_ncmd->list, &prep_nblist);
/* enough sgls for non-embed sgl mbox command */
if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
list_splice_init(&prep_nblist, &blck_nblist);
post_cnt = block_cnt;
block_cnt = 0;
}
}
num_posting++;
last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;

/* end of repost sgl list condition for NVME buffers */
if (num_posting == sb_count) {
if (post_cnt == 0) {
/* last sgl posting block */
list_splice_init(&prep_nblist, &blck_nblist);
post_cnt = block_cnt;
} else if (block_cnt == 1) {
/* last single sgl with non-contiguous xri */
if (sgl_size > SGL_PAGE_SIZE)
pdma_phys_sgl1 =
lpfc_ncmd->dma_phys_sgl +
SGL_PAGE_SIZE;
else
pdma_phys_sgl1 = 0;
cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
status = lpfc_sli4_post_sgl(
phba, lpfc_ncmd->dma_phys_sgl,
pdma_phys_sgl1, cur_xritag);
if (status) {
/* failure, put on abort nvme list */
lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
} else {
/* success, put on NVME buffer list */
lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
lpfc_ncmd->status = IOSTAT_SUCCESS;
num_posted++;
}
/* success, put on NVME buffer sgl list */
list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
}
}

/* continue until a nembed page worth of sgls */
if (post_cnt == 0)
continue;

/* post block of NVME buffer list sgls */
status = lpfc_sli4_post_common_sgl_block(phba, &blck_nblist,
post_cnt);

/* don't reset xritag due to hole in xri block */
if (block_cnt == 0)
last_xritag = NO_XRI;

/* reset NVME buffer post count for next round of posting */
post_cnt = 0;

/* put NVME buffers with posted sgls on the NVME buffer sgl list */
while (!list_empty(&blck_nblist)) {
list_remove_head(&blck_nblist, lpfc_ncmd,
struct lpfc_nvme_buf, list);
if (status) {
/* failure, put on abort nvme list */
lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
} else {
/* success, put on NVME buffer list */
lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
lpfc_ncmd->status = IOSTAT_SUCCESS;
num_posted++;
}
list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
}
}
/* Push NVME buffers with sgl posted to the available list */
while (!list_empty(&nvme_nblist)) {
list_remove_head(&nvme_nblist, lpfc_ncmd,
struct lpfc_nvme_buf, list);
lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL;
lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL;
spin_lock_irqsave(&phba->common_buf_list_put_lock, iflag);
list_add_tail(&lpfc_ncmd->list,
&phba->lpfc_common_buf_list_put);
phba->put_common_bufs++;
spin_unlock_irqrestore(&phba->common_buf_list_put_lock, iflag);
}
return num_posted;
}
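The batching rule above is easy to lose in the list bookkeeping: buffers accumulate while their XRIs stay consecutive, and either a hole in the sequence or a full non-embedded mailbox page flushes the accumulated block. A stripped-down sketch of just that decision, as a hypothetical helper over plain integers rather than buffer lists:

/* Sketch: batch consecutive XRIs, flushing on a hole or a full block.
 * flush(start, n) stands in for the non-embedded SGL block post.
 */
static void post_in_blocks(const u16 *xri, int count, int max_block,
			   void (*flush)(u16 start, int n))
{
	int i, n = 0;
	u16 start = 0;

	for (i = 0; i < count; i++) {
		if (n && xri[i] != xri[i - 1] + 1) {
			flush(start, n);	/* hole: flush what we have */
			n = 0;
		}
		if (n == 0)
			start = xri[i];
		if (++n == max_block) {		/* one mailbox page worth */
			flush(start, n);
			n = 0;
		}
	}
	if (n)
		flush(start, n);		/* trailing partial block */
}

In the driver, a trailing block of one buffer with a non-contiguous XRI additionally falls back to the embedded lpfc_sli4_post_sgl() instead of a block post.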

/**
* lpfc_fc_frame_check - Check that this frame is a valid frame to handle
* @phba: pointer to lpfc_hba struct that the frame was received on
@ -41,6 +41,9 @@
#define LPFC_FCP_IO_CHAN_DEF 4
#define LPFC_NVME_IO_CHAN_DEF 0

/* Common buffer size to accommodate SCSI and NVME IO buffers */
#define LPFC_COMMON_IO_BUF_SZ 768

/* Number of channels used for Flash Optimized Fabric (FOF) operations */

#define LPFC_FOF_IO_CHAN_NUM 1
@ -663,12 +666,9 @@ struct lpfc_sli4_hba {
uint16_t rpi_hdrs_in_use; /* must post rpi hdrs if set. */
uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
uint16_t next_rpi;
uint16_t nvme_xri_max;
uint16_t nvme_xri_cnt;
uint16_t nvme_xri_start;
uint16_t scsi_xri_max;
uint16_t scsi_xri_cnt;
uint16_t scsi_xri_start;
uint16_t common_xri_max;
uint16_t common_xri_cnt;
uint16_t common_xri_start;
uint16_t els_xri_cnt;
uint16_t nvmet_xri_cnt;
uint16_t nvmet_io_wait_cnt;
@ -843,12 +843,10 @@ int lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *,
int lpfc_sli4_queue_setup(struct lpfc_hba *);
void lpfc_sli4_queue_unset(struct lpfc_hba *);
int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
int lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba);
int lpfc_repost_common_sgl_list(struct lpfc_hba *phba);
uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
void lpfc_sli4_free_xri(struct lpfc_hba *, int);
int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);