Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-11-24 02:40:52 +07:00)
SCSI fixes on 20200926
Merge tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI fixes from James Bottomley:
 "Three fixes: one in drivers (lpfc) and two for zoned block devices.
  The latter also impinges on the block layer but only to introduce a
  new block API for setting the zone model rather than fiddling with
  the queue directly in the zoned block driver"

Signed-off-by: James E.J. Bottomley <jejb@linux.ibm.com>

* tag 'scsi-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi:
  scsi: sd: sd_zbc: Fix ZBC disk initialization
  scsi: sd: sd_zbc: Fix handling of host-aware ZBC disks
  scsi: lpfc: Fix initial FLOGI failure due to BBSCN not supported
commit a1bffa4874
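For orientation, the block-layer change below replaces open-coded writes to q->limits.zoned in zoned block drivers with a single helper. A minimal sketch of how a driver's revalidation path might call it; the wrapper function is hypothetical, and only blk_queue_set_zoned() and the BLK_ZONED_* constants come from this series:

#include <linux/blkdev.h>

/* Hypothetical driver helper, shown only to illustrate the new API. */
static void example_update_zone_model(struct gendisk *disk, bool host_managed)
{
	/*
	 * Before this series a driver would poke the queue directly, e.g.
	 * disk->queue->limits.zoned = BLK_ZONED_HA. With the helper, the
	 * block layer picks the effective model: host-aware falls back to
	 * BLK_ZONED_NONE when the disk has partitions or when
	 * CONFIG_BLK_DEV_ZONED is not enabled.
	 */
	blk_queue_set_zoned(disk, host_managed ? BLK_ZONED_HM : BLK_ZONED_HA);
}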
block/blk-settings.c
@@ -801,6 +801,52 @@ bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
}
EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);

/**
 * blk_queue_set_zoned - configure a disk queue zoned model.
 * @disk: the gendisk of the queue to configure
 * @model: the zoned model to set
 *
 * Set the zoned model of the request queue of @disk according to @model.
 * When @model is BLK_ZONED_HM (host managed), this should be called only
 * if zoned block device support is enabled (CONFIG_BLK_DEV_ZONED option).
 * If @model specifies BLK_ZONED_HA (host aware), the effective model used
 * depends on CONFIG_BLK_DEV_ZONED settings and on the existence of partitions
 * on the disk.
 */
void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
{
	switch (model) {
	case BLK_ZONED_HM:
		/*
		 * Host managed devices are supported only if
		 * CONFIG_BLK_DEV_ZONED is enabled.
		 */
		WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));
		break;
	case BLK_ZONED_HA:
		/*
		 * Host aware devices can be treated either as regular block
		 * devices (similar to drive managed devices) or as zoned block
		 * devices to take advantage of the zone command set, similarly
		 * to host managed devices. We try the latter if there are no
		 * partitions and zoned block device support is enabled, else
		 * we do nothing special as far as the block layer is concerned.
		 */
		if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) ||
		    disk_has_partitions(disk))
			model = BLK_ZONED_NONE;
		break;
	case BLK_ZONED_NONE:
	default:
		if (WARN_ON_ONCE(model != BLK_ZONED_NONE))
			model = BLK_ZONED_NONE;
		break;
	}

	disk->queue->limits.zoned = model;
}
EXPORT_SYMBOL_GPL(blk_queue_set_zoned);

static int __init blk_settings_init(void)
{
	blk_max_low_pfn = max_low_pfn - 1;
drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -71,6 +71,7 @@ static void lpfc_disc_timeout_handler(struct lpfc_vport *);
static void lpfc_disc_flush_list(struct lpfc_vport *vport);
static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
static int lpfc_fcf_inuse(struct lpfc_hba *);
static void lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);

void
lpfc_terminate_rport_io(struct fc_rport *rport)
@@ -1138,11 +1139,13 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
	return;
}

void
lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_vport *vport = pmb->vport;
	LPFC_MBOXQ_t *sparam_mb;
	struct lpfc_dmabuf *sparam_mp;
	int rc;

	if (pmb->u.mb.mbxStatus)
		goto out;
@@ -1167,12 +1170,42 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
	}

	/* Start discovery by sending a FLOGI. port_state is identically
	 * LPFC_FLOGI while waiting for FLOGI cmpl. Check if sending
	 * the FLOGI is being deferred till after MBX_READ_SPARAM completes.
	 * LPFC_FLOGI while waiting for FLOGI cmpl.
	 */
	if (vport->port_state != LPFC_FLOGI) {
		if (!(phba->hba_flag & HBA_DEFER_FLOGI))
		/* Issue MBX_READ_SPARAM to update CSPs before FLOGI if
		 * bb-credit recovery is in place.
		 */
		if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
		    !(phba->link_flag & LS_LOOPBACK_MODE)) {
			sparam_mb = mempool_alloc(phba->mbox_mem_pool,
						  GFP_KERNEL);
			if (!sparam_mb)
				goto sparam_out;

			rc = lpfc_read_sparam(phba, sparam_mb, 0);
			if (rc) {
				mempool_free(sparam_mb, phba->mbox_mem_pool);
				goto sparam_out;
			}
			sparam_mb->vport = vport;
			sparam_mb->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
			rc = lpfc_sli_issue_mbox(phba, sparam_mb, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED) {
				sparam_mp = (struct lpfc_dmabuf *)
						sparam_mb->ctx_buf;
				lpfc_mbuf_free(phba, sparam_mp->virt,
					       sparam_mp->phys);
				kfree(sparam_mp);
				sparam_mb->ctx_buf = NULL;
				mempool_free(sparam_mb, phba->mbox_mem_pool);
				goto sparam_out;
			}

			phba->hba_flag |= HBA_DEFER_FLOGI;
		} else {
			lpfc_initial_flogi(vport);
		}
	} else {
		if (vport->fc_flag & FC_PT2PT)
			lpfc_disc_start(vport);
@@ -1184,6 +1217,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
			 "0306 CONFIG_LINK mbxStatus error x%x "
			 "HBA state x%x\n",
			 pmb->u.mb.mbxStatus, vport->port_state);
sparam_out:
	mempool_free(pmb, phba->mbox_mem_pool);

	lpfc_linkdown(phba);
@@ -3239,21 +3273,6 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
	lpfc_linkup(phba);
	sparam_mbox = NULL;

	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!cfglink_mbox)
			goto out;
		vport->port_state = LPFC_LOCAL_CFG_LINK;
		lpfc_config_link(phba, cfglink_mbox);
		cfglink_mbox->vport = vport;
		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(cfglink_mbox, phba->mbox_mem_pool);
			goto out;
		}
	}

	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!sparam_mbox)
		goto out;
@@ -3274,7 +3293,20 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
		goto out;
	}

	if (phba->hba_flag & HBA_FCOE_MODE) {
	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
		if (!cfglink_mbox)
			goto out;
		vport->port_state = LPFC_LOCAL_CFG_LINK;
		lpfc_config_link(phba, cfglink_mbox);
		cfglink_mbox->vport = vport;
		cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
		rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED) {
			mempool_free(cfglink_mbox, phba->mbox_mem_pool);
			goto out;
		}
	} else {
		vport->port_state = LPFC_VPORT_UNKNOWN;
		/*
		 * Add the driver's default FCF record at FCF index 0 now. This
@@ -3331,10 +3363,6 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
		}
		/* Reset FCF roundrobin bmask for new discovery */
		lpfc_sli4_clear_fcf_rr_bmask(phba);
	} else {
		if (phba->bbcredit_support && phba->cfg_enable_bbcr &&
		    !(phba->link_flag & LS_LOOPBACK_MODE))
			phba->hba_flag |= HBA_DEFER_FLOGI;
	}

	/* Prepare for LINK up registrations */
drivers/scsi/sd.c
@@ -2964,26 +2964,32 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)

	if (sdkp->device->type == TYPE_ZBC) {
		/* Host-managed */
		q->limits.zoned = BLK_ZONED_HM;
		blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HM);
	} else {
		sdkp->zoned = (buffer[8] >> 4) & 3;
		if (sdkp->zoned == 1 && !disk_has_partitions(sdkp->disk)) {
		if (sdkp->zoned == 1) {
			/* Host-aware */
			q->limits.zoned = BLK_ZONED_HA;
			blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HA);
		} else {
			/*
			 * Treat drive-managed devices and host-aware devices
			 * with partitions as regular block devices.
			 */
			q->limits.zoned = BLK_ZONED_NONE;
			if (sdkp->zoned == 2 && sdkp->first_scan)
				sd_printk(KERN_NOTICE, sdkp,
					  "Drive-managed SMR disk\n");
			/* Regular disk or drive managed disk */
			blk_queue_set_zoned(sdkp->disk, BLK_ZONED_NONE);
		}
	}
	if (blk_queue_is_zoned(q) && sdkp->first_scan)

	if (!sdkp->first_scan)
		goto out;

	if (blk_queue_is_zoned(q)) {
		sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
			  q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware");
	} else {
		if (sdkp->zoned == 1)
			sd_printk(KERN_NOTICE, sdkp,
				  "Host-aware SMR disk used as regular disk\n");
		else if (sdkp->zoned == 2)
			sd_printk(KERN_NOTICE, sdkp,
				  "Drive-managed SMR disk\n");
	}

 out:
	kfree(buffer);
@@ -3404,10 +3410,6 @@ static int sd_probe(struct device *dev)
	sdkp->first_scan = 1;
	sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS;

	error = sd_zbc_init_disk(sdkp);
	if (error)
		goto out_free_index;

	sd_revalidate_disk(gd);

	gd->flags = GENHD_FL_EXT_DEVT;
drivers/scsi/sd.h
@@ -215,7 +215,6 @@ static inline int sd_is_zoned(struct scsi_disk *sdkp)

#ifdef CONFIG_BLK_DEV_ZONED

int sd_zbc_init_disk(struct scsi_disk *sdkp);
void sd_zbc_release_disk(struct scsi_disk *sdkp);
int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer);
int sd_zbc_revalidate_zones(struct scsi_disk *sdkp);
@@ -231,11 +230,6 @@ blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,

#else /* CONFIG_BLK_DEV_ZONED */

static inline int sd_zbc_init_disk(struct scsi_disk *sdkp)
{
	return 0;
}

static inline void sd_zbc_release_disk(struct scsi_disk *sdkp) {}

static inline int sd_zbc_read_zones(struct scsi_disk *sdkp,
@@ -259,7 +253,7 @@ static inline blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
static inline unsigned int sd_zbc_complete(struct scsi_cmnd *cmd,
			unsigned int good_bytes, struct scsi_sense_hdr *sshdr)
{
	return 0;
	return good_bytes;
}

static inline blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd,
drivers/scsi/sd_zbc.c
@@ -651,6 +651,28 @@ static void sd_zbc_print_zones(struct scsi_disk *sdkp)
			  sdkp->zone_blocks);
}

static int sd_zbc_init_disk(struct scsi_disk *sdkp)
{
	sdkp->zones_wp_offset = NULL;
	spin_lock_init(&sdkp->zones_wp_offset_lock);
	sdkp->rev_wp_offset = NULL;
	mutex_init(&sdkp->rev_mutex);
	INIT_WORK(&sdkp->zone_wp_offset_work, sd_zbc_update_wp_offset_workfn);
	sdkp->zone_wp_update_buf = kzalloc(SD_BUF_SIZE, GFP_KERNEL);
	if (!sdkp->zone_wp_update_buf)
		return -ENOMEM;

	return 0;
}

void sd_zbc_release_disk(struct scsi_disk *sdkp)
{
	kvfree(sdkp->zones_wp_offset);
	sdkp->zones_wp_offset = NULL;
	kfree(sdkp->zone_wp_update_buf);
	sdkp->zone_wp_update_buf = NULL;
}

static void sd_zbc_revalidate_zones_cb(struct gendisk *disk)
{
	struct scsi_disk *sdkp = scsi_disk(disk);
@@ -667,7 +689,24 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
	u32 max_append;
	int ret = 0;

	if (!sd_is_zoned(sdkp))
	/*
	 * For all zoned disks, initialize zone append emulation data if not
	 * already done. This is necessary also for host-aware disks used as
	 * regular disks due to the presence of partitions as these partitions
	 * may be deleted and the disk zoned model changed back from
	 * BLK_ZONED_NONE to BLK_ZONED_HA.
	 */
	if (sd_is_zoned(sdkp) && !sdkp->zone_wp_update_buf) {
		ret = sd_zbc_init_disk(sdkp);
		if (ret)
			return ret;
	}

	/*
	 * There is nothing to do for regular disks, including host-aware disks
	 * that have partitions.
	 */
	if (!blk_queue_is_zoned(q))
		return 0;

	/*
@@ -764,28 +803,3 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)

	return ret;
}

int sd_zbc_init_disk(struct scsi_disk *sdkp)
{
	if (!sd_is_zoned(sdkp))
		return 0;

	sdkp->zones_wp_offset = NULL;
	spin_lock_init(&sdkp->zones_wp_offset_lock);
	sdkp->rev_wp_offset = NULL;
	mutex_init(&sdkp->rev_mutex);
	INIT_WORK(&sdkp->zone_wp_offset_work, sd_zbc_update_wp_offset_workfn);
	sdkp->zone_wp_update_buf = kzalloc(SD_BUF_SIZE, GFP_KERNEL);
	if (!sdkp->zone_wp_update_buf)
		return -ENOMEM;

	return 0;
}

void sd_zbc_release_disk(struct scsi_disk *sdkp)
{
	kvfree(sdkp->zones_wp_offset);
	sdkp->zones_wp_offset = NULL;
	kfree(sdkp->zone_wp_update_buf);
	sdkp->zone_wp_update_buf = NULL;
}
include/linux/blkdev.h
@@ -352,6 +352,8 @@ struct queue_limits {
typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
			       void *data);

void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model);

#ifdef CONFIG_BLK_DEV_ZONED

#define BLK_ALL_ZONES  ((unsigned int)-1)
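After these changes, the effective zoned model that the block layer settled on can be observed from userspace through the queue/zoned sysfs attribute, which reports "none", "host-aware" or "host-managed". A small userspace sketch; the device name sda is only a placeholder:

#include <stdio.h>

/* Read and print the zoned model reported for a (placeholder) disk. */
int main(void)
{
	char model[32] = "";
	FILE *f = fopen("/sys/block/sda/queue/zoned", "r");

	if (!f) {
		perror("fopen /sys/block/sda/queue/zoned");
		return 1;
	}
	if (fscanf(f, "%31s", model) == 1)
		printf("zoned model: %s\n", model);
	fclose(f);
	return 0;
}

On a host-aware SMR disk that carries partitions, this should now print "none", matching the fallback implemented in blk_queue_set_zoned().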