SCSI misc on 20180815

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
  "This is mostly updates to the usual drivers: mpt3sas, lpfc, qla2xxx,
   hisi_sas, smartpqi, megaraid_sas, arcmsr. In addition, with the
   continuing absence of Nic we have target updates for tcmu and target
   core (all with reviews and acks).

   The biggest observable change is going to be that we're (again) trying
   to switch to multiqueue as the default (a user can still override the
   setting on the kernel command line). Other major core stuff is the
   removal of the remaining Microchannel drivers, an update of the
   internal timers and some reworks of completion and result handling."

Signed-off-by: James E.J. Bottomley <jejb@linux.vnet.ibm.com>

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (203 commits)
  scsi: core: use blk_mq_run_hw_queues in scsi_kick_queue
  scsi: ufs: remove unnecessary query(DM) UPIU trace
  scsi: qla2xxx: Fix issue reported by static checker for qla2x00_els_dcmd2_sp_done()
  scsi: aacraid: Spelling fix in comment
  scsi: mpt3sas: Fix calltrace observed while running IO & reset
  scsi: aic94xx: fix an error code in aic94xx_init()
  scsi: st: remove redundant pointer STbuffer
  scsi: qla2xxx: Update driver version to 10.00.00.08-k
  scsi: qla2xxx: Migrate NVME N2N handling into state machine
  scsi: qla2xxx: Save frame payload size from ICB
  scsi: qla2xxx: Fix stalled relogin
  scsi: qla2xxx: Fix race between switch cmd completion and timeout
  scsi: qla2xxx: Fix Management Server NPort handle reservation logic
  scsi: qla2xxx: Flush mailbox commands on chip reset
  scsi: qla2xxx: Fix unintended Logout
  scsi: qla2xxx: Fix session state stuck in Get Port DB
  scsi: qla2xxx: Fix redundant fc_rport registration
  scsi: qla2xxx: Silent erroneous message
  scsi: qla2xxx: Prevent sysfs access when chip is down
  scsi: qla2xxx: Add longer window for chip reset
  ...
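Editor's note: a minimal sketch of the override mentioned above, assuming the scsi_mod.use_blk_mq module parameter that CONFIG_SCSI_MQ_DEFAULT keys off in this era of the SCSI core (the exact knob is stated here as an assumption, not taken from this merge):

    # keep the legacy (non-blk-mq) I/O path even with CONFIG_SCSI_MQ_DEFAULT=y,
    # by adding this to the kernel command line:
    scsi_mod.use_blk_mq=0

    # or, if scsi_mod is built as a module:
    modprobe scsi_mod use_blk_mq=0

    # check which path is currently active:
    cat /sys/module/scsi_mod/parameters/use_blk_mq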
This commit is contained in: commit 72f02ba66b

Documentation/devicetree/bindings/ufs/ufs-hisi.txt (new file, 41 lines)
@@ -0,0 +1,41 @@
* Hisilicon Universal Flash Storage (UFS) Host Controller

UFS nodes are defined to describe on-chip UFS hardware macro.
Each UFS Host Controller should have its own node.

Required properties:
- compatible        : compatible list, contains one of the following -
                      "hisilicon,hi3660-ufs", "jedec,ufs-1.1" for hisi ufs
                      host controller present on Hi36xx chipset.
- reg               : should contain UFS register address space & UFS SYS CTRL register address,
- interrupt-parent  : interrupt device
- interrupts        : interrupt number
- clocks            : List of phandle and clock specifier pairs
- clock-names       : List of clock input name strings sorted in the same
                      order as the clocks property. "ref_clk", "phy_clk" is optional
- freq-table-hz     : Array of <min max> operating frequencies stored in the same
                      order as the clocks property. If this property is not
                      defined or a value in the array is "0" then it is assumed
                      that the frequency is set by the parent clock or a
                      fixed rate clock source.
- resets            : describe reset node register
- reset-names       : reset node register, the "rst" corresponds to reset the whole UFS IP.

Example:

	ufs: ufs@ff3b0000 {
		compatible = "hisilicon,hi3660-ufs", "jedec,ufs-1.1";
		/* 0: HCI standard */
		/* 1: UFS SYS CTRL */
		reg = <0x0 0xff3b0000 0x0 0x1000>,
			<0x0 0xff3b1000 0x0 0x1000>;
		interrupt-parent = <&gic>;
		interrupts = <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>;
		clocks = <&crg_ctrl HI3660_CLK_GATE_UFSIO_REF>,
			<&crg_ctrl HI3660_CLK_GATE_UFSPHY_CFG>;
		clock-names = "ref_clk", "phy_clk";
		freq-table-hz = <0 0>, <0 0>;
		/* offset: 0x84; bit: 12 */
		resets = <&crg_rst 0x84 12>;
		reset-names = "rst";
	};
@@ -41,6 +41,8 @@ Optional properties:
-lanes-per-direction : number of lanes available per direction - either 1 or 2.
                       Note that it is assume same number of lanes is used both
                       directions at once. If not specified, default is 2 lanes per direction.
- resets            : reset node register
- reset-names       : describe reset node register, the "rst" corresponds to reset the whole UFS IP.

Note: If above properties are not defined it can be assumed that the supply
regulators or clocks are always on.
@@ -61,9 +63,11 @@ Example:
	vccq-max-microamp = 200000;
	vccq2-max-microamp = 200000;

	clocks = <&core 0>, <&ref 0>, <&iface 0>;
	clock-names = "core_clk", "ref_clk", "iface_clk";
	freq-table-hz = <100000000 200000000>, <0 0>, <0 0>;
	clocks = <&core 0>, <&ref 0>, <&phy 0>, <&iface 0>;
	clock-names = "core_clk", "ref_clk", "phy_clk", "iface_clk";
	freq-table-hz = <100000000 200000000>, <0 0>, <0 0>, <0 0>;
	resets = <&reset 0 1>;
	reset-names = "rst";
	phys = <&ufsphy1>;
	phy-names = "ufsphy";
};
@@ -9847,12 +9847,6 @@ F:	drivers/scsi/mac_scsi.*
F:	drivers/scsi/sun3_scsi.*
F:	drivers/scsi/sun3_scsi_vme.c

NCR DUAL 700 SCSI DRIVER (MICROCHANNEL)
M:	"James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
L:	linux-scsi@vger.kernel.org
S:	Maintained
F:	drivers/scsi/NCR_D700.*

NCSI LIBRARY:
M:	Samuel Mendoza-Jonas <sam@mendozajonas.com>
S:	Maintained
@@ -1000,6 +1000,24 @@ &gic GIC_SPI 284 IRQ_TYPE_LEVEL_HIGH>,
			reset-gpios = <&gpio11 1 0 >;
		};

		/* UFS */
		ufs: ufs@ff3b0000 {
			compatible = "hisilicon,hi3660-ufs", "jedec,ufs-1.1";
			/* 0: HCI standard */
			/* 1: UFS SYS CTRL */
			reg = <0x0 0xff3b0000 0x0 0x1000>,
				<0x0 0xff3b1000 0x0 0x1000>;
			interrupt-parent = <&gic>;
			interrupts = <GIC_SPI 278 IRQ_TYPE_LEVEL_HIGH>;
			clocks = <&crg_ctrl HI3660_CLK_GATE_UFSIO_REF>,
				<&crg_ctrl HI3660_CLK_GATE_UFSPHY_CFG>;
			clock-names = "ref_clk", "phy_clk";
			freq-table-hz = <0 0>, <0 0>;
			/* offset: 0x84; bit: 12 */
			resets = <&crg_rst 0x84 12>;
			reset-names = "rst";
		};

		/* SD */
		dwmmc1: dwmmc1@ff37f000 {
			#address-cells = <1>;
@@ -193,6 +193,7 @@ CONFIG_SCSI_HISI_SAS=y
CONFIG_SCSI_HISI_SAS_PCI=y
CONFIG_SCSI_UFSHCD=m
CONFIG_SCSI_UFSHCD_PLATFORM=m
CONFIG_SCSI_UFS_HISI=y
CONFIG_SCSI_UFS_QCOM=m
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
@@ -6424,6 +6424,7 @@ void ata_host_init(struct ata_host *host, struct device *dev,
	host->n_tags = ATA_MAX_QUEUE;
	host->dev = dev;
	host->ops = ops;
	kref_init(&host->kref);
}

void __ata_port_probe(struct ata_port *ap)
@@ -7391,3 +7392,5 @@ EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_ignore);
EXPORT_SYMBOL_GPL(ata_cable_sata);
EXPORT_SYMBOL_GPL(ata_host_get);
EXPORT_SYMBOL_GPL(ata_host_put);
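The kref_init() added in ata_host_init() above pairs with the newly exported ata_host_get()/ata_host_put(). A minimal sketch of the reference-counting pattern this enables follows; the release-callback name and free path are illustrative assumptions, not copied from the commit:

	#include <linux/kref.h>
	#include <linux/slab.h>
	#include <linux/libata.h>

	/* Sketch: take an extra reference on an ata_host. */
	void ata_host_get(struct ata_host *host)
	{
		kref_get(&host->kref);
	}

	/* Assumed release callback: runs when the last reference is dropped. */
	static void ata_host_release_cb(struct kref *kref)
	{
		struct ata_host *host = container_of(kref, struct ata_host, kref);

		kfree(host);
	}

	/* Sketch: drop a reference; frees the host once the count hits zero. */
	void ata_host_put(struct ata_host *host)
	{
		kref_put(&host->kref, ata_host_release_cb);
	}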
@@ -100,8 +100,6 @@ extern int ata_port_probe(struct ata_port *ap);
extern void __ata_port_probe(struct ata_port *ap);
extern unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
				      u8 page, void *buf, unsigned int sectors);
extern void ata_host_get(struct ata_host *host);
extern void ata_host_put(struct ata_host *host);

#define to_ata_port(d) container_of(d, struct ata_port, tdev)
@@ -2029,8 +2029,7 @@ static void srpt_release_channel_work(struct work_struct *w)
	target_sess_cmd_list_set_waiting(se_sess);
	target_wait_for_sess_cmds(se_sess);

	transport_deregister_session_configfs(se_sess);
	transport_deregister_session(se_sess);
	target_remove_session(se_sess);
	ch->sess = NULL;

	if (ch->using_rdma_cm)
@@ -2221,16 +2220,16 @@ static int srpt_cm_req_recv(struct srpt_device *const sdev,
	pr_debug("registering session %s\n", ch->sess_name);

	if (sport->port_guid_tpg.se_tpg_wwn)
		ch->sess = target_alloc_session(&sport->port_guid_tpg, 0, 0,
		ch->sess = target_setup_session(&sport->port_guid_tpg, 0, 0,
						TARGET_PROT_NORMAL,
						ch->sess_name, ch, NULL);
	if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
		ch->sess = target_alloc_session(&sport->port_gid_tpg, 0, 0,
		ch->sess = target_setup_session(&sport->port_gid_tpg, 0, 0,
					TARGET_PROT_NORMAL, i_port_id, ch,
					NULL);
	/* Retry without leading "0x" */
	if (sport->port_gid_tpg.se_tpg_wwn && IS_ERR_OR_NULL(ch->sess))
		ch->sess = target_alloc_session(&sport->port_gid_tpg, 0, 0,
		ch->sess = target_setup_session(&sport->port_gid_tpg, 0, 0,
						TARGET_PROT_NORMAL,
						i_port_id + 2, ch, NULL);
	if (IS_ERR_OR_NULL(ch->sess)) {
@@ -3597,11 +3596,9 @@ static struct configfs_attribute *srpt_tpg_attrs[] = {
/**
 * srpt_make_tpg - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port/$tpg
 * @wwn: Corresponds to $driver/$port.
 * @group: Not used.
 * @name: $tpg.
 */
static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
					     struct config_group *group,
					     const char *name)
{
	struct srpt_port *sport = wwn->priv;
@@ -642,6 +642,7 @@ mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
		freereq = 0;
		if (event != MPI_EVENT_EVENT_CHANGE)
			break;
		/* else: fall through */
	case MPI_FUNCTION_CONFIG:
	case MPI_FUNCTION_SAS_IO_UNIT_CONTROL:
		ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
@@ -1779,7 +1780,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
	struct proc_dir_entry *dent;
#endif

	ioc = kzalloc(sizeof(MPT_ADAPTER), GFP_ATOMIC);
	ioc = kzalloc(sizeof(MPT_ADAPTER), GFP_KERNEL);
	if (ioc == NULL) {
		printk(KERN_ERR MYNAM ": ERROR - Insufficient memory to add adapter!\n");
		return -ENOMEM;
@@ -1886,6 +1887,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
	case MPI_MANUFACTPAGE_DEVICEID_FC939X:
	case MPI_MANUFACTPAGE_DEVICEID_FC949X:
		ioc->errata_flag_1064 = 1;
		/* fall through */
	case MPI_MANUFACTPAGE_DEVICEID_FC909:
	case MPI_MANUFACTPAGE_DEVICEID_FC929:
	case MPI_MANUFACTPAGE_DEVICEID_FC919:
@@ -1930,6 +1932,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
			pcixcmd &= 0x8F;
			pci_write_config_byte(pdev, 0x6a, pcixcmd);
		}
		/* fall through */

	case MPI_MANUFACTPAGE_DEVID_1030_53C1035:
		ioc->bus_type = SPI;
@@ -2514,8 +2514,8 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
	if (mpt_config(ioc, &cfg) == 0) {
		ManufacturingPage0_t *pdata = (ManufacturingPage0_t *) pbuf;
		if (strlen(pdata->BoardTracerNumber) > 1) {
			strncpy(karg.serial_number, pdata->BoardTracerNumber, 24);
			karg.serial_number[24-1]='\0';
			strlcpy(karg.serial_number,
				pdata->BoardTracerNumber, 24);
		}
	}
	pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma);
@@ -1292,7 +1292,7 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	/* SCSI needs scsi_cmnd lookup table!
	 * (with size equal to req_depth*PtrSz!)
	 */
	ioc->ScsiLookup = kcalloc(ioc->req_depth, sizeof(void *), GFP_ATOMIC);
	ioc->ScsiLookup = kcalloc(ioc->req_depth, sizeof(void *), GFP_KERNEL);
	if (!ioc->ScsiLookup) {
		error = -ENOMEM;
		goto out_mptfc_probe;
@@ -4327,6 +4327,7 @@ mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
			}
		}
		mpt_findImVolumes(ioc);
		/* fall through */

	case MPTSAS_ADD_DEVICE:
		memset(&sas_device, 0, sizeof(struct mptsas_devinfo));
@@ -2038,6 +2038,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)

	if (twa_initialize_device_extension(tw_dev)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
		retval = -ENOMEM;
		goto out_free_device_extension;
	}

@@ -2060,6 +2061,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
	tw_dev->base_addr = ioremap(mem_addr, mem_len);
	if (!tw_dev->base_addr) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
		retval = -ENOMEM;
		goto out_release_mem_region;
	}

@@ -2067,8 +2069,10 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
	TW_DISABLE_INTERRUPTS(tw_dev);

	/* Initialize the card */
	if (twa_reset_sequence(tw_dev, 0))
	if (twa_reset_sequence(tw_dev, 0)) {
		retval = -ENOMEM;
		goto out_iounmap;
	}

	/* Set host specific parameters */
	if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
@@ -1594,6 +1594,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)

	if (twl_initialize_device_extension(tw_dev)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Failed to initialize device extension");
		retval = -ENOMEM;
		goto out_free_device_extension;
	}

@@ -1608,6 +1609,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
	tw_dev->base_addr = pci_iomap(pdev, 1, 0);
	if (!tw_dev->base_addr) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to ioremap");
		retval = -ENOMEM;
		goto out_release_mem_region;
	}

@@ -1617,6 +1619,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
	/* Initialize the card */
	if (twl_reset_sequence(tw_dev, 0)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Controller reset failed during probe");
		retval = -ENOMEM;
		goto out_iounmap;
	}

@@ -1925,7 +1925,7 @@ static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_c
	if (test_bit(TW_IN_RESET, &tw_dev->flags))
		return SCSI_MLQUEUE_HOST_BUSY;

	/* Save done function into Scsi_Cmnd struct */
	/* Save done function into struct scsi_cmnd */
	SCpnt->scsi_done = done;

	/* Queue the command and get a request id */
@@ -2280,6 +2280,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)

	if (tw_initialize_device_extension(tw_dev)) {
		printk(KERN_WARNING "3w-xxxx: Failed to initialize device extension.");
		retval = -ENOMEM;
		goto out_free_device_extension;
	}

@@ -2294,6 +2295,7 @@ static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
	tw_dev->base_addr = pci_resource_start(pdev, 0);
	if (!tw_dev->base_addr) {
		printk(KERN_WARNING "3w-xxxx: Failed to get io address.");
		retval = -ENOMEM;
		goto out_release_mem_region;
	}

@@ -49,6 +49,7 @@ config SCSI_NETLINK

config SCSI_MQ_DEFAULT
	bool "SCSI: use blk-mq I/O path by default"
	default y
	depends on SCSI
	---help---
	  This option enables the new blk-mq based I/O path for SCSI
@@ -841,18 +842,6 @@ config SCSI_IZIP_SLOW_CTR

	  Generally, saying N is fine.

config SCSI_NCR_D700
	tristate "NCR Dual 700 MCA SCSI support"
	depends on MCA && SCSI
	select SCSI_SPI_ATTRS
	help
	  This is a driver for the MicroChannel Dual 700 card produced by
	  NCR and commonly used in 345x/35xx/4100 class machines.  It always
	  tries to negotiate sync and uses tag command queueing.

	  Unless you have an NCR manufactured machine, the chances are that
	  you do not have this SCSI card, so say N.

config SCSI_LASI700
	tristate "HP Lasi SCSI support for 53c700/710"
	depends on GSC && SCSI
@@ -1000,21 +989,9 @@ config SCSI_ZALON
	  used on the add-in Bluefish, Barracuda & Shrike SCSI cards.
	  Say Y here if you have one of these machines or cards.

config SCSI_NCR_Q720
	tristate "NCR Quad 720 MCA SCSI support"
	depends on MCA && SCSI
	select SCSI_SPI_ATTRS
	help
	  This is a driver for the MicroChannel Quad 720 card produced by
	  NCR and commonly used in 345x/35xx/4100 class machines.  It always
	  tries to negotiate sync and uses tag command queueing.

	  Unless you have an NCR manufactured machine, the chances are that
	  you do not have this SCSI card, so say N.

config SCSI_NCR53C8XX_DEFAULT_TAGS
	int "default tagged command queue depth"
	depends on SCSI_ZALON || SCSI_NCR_Q720
	depends on SCSI_ZALON
	default "8"
	---help---
	  "Tagged command queuing" is a feature of SCSI-2 which improves
@@ -1040,7 +1017,7 @@ config SCSI_NCR53C8XX_DEFAULT_TAGS

config SCSI_NCR53C8XX_MAX_TAGS
	int "maximum number of queued commands"
	depends on SCSI_ZALON || SCSI_NCR_Q720
	depends on SCSI_ZALON
	default "32"
	---help---
	  This option allows you to specify the maximum number of commands
@@ -1057,7 +1034,7 @@ config SCSI_NCR53C8XX_MAX_TAGS

config SCSI_NCR53C8XX_SYNC
	int "synchronous transfers frequency in MHz"
	depends on SCSI_ZALON || SCSI_NCR_Q720
	depends on SCSI_ZALON
	default "20"
	---help---
	  The SCSI Parallel Interface-2 Standard defines 5 classes of transfer
@@ -1091,7 +1068,7 @@ config SCSI_NCR53C8XX_SYNC

config SCSI_NCR53C8XX_NO_DISCONNECT
	bool "not allow targets to disconnect"
	depends on (SCSI_ZALON || SCSI_NCR_Q720) && SCSI_NCR53C8XX_DEFAULT_TAGS=0
	depends on SCSI_ZALON && SCSI_NCR53C8XX_DEFAULT_TAGS=0
	help
	  This option is only provided for safety if you suspect some SCSI
	  device of yours to not support properly the target-disconnect
@@ -77,8 +77,6 @@ obj-$(CONFIG_SCSI_PM8001)	+= pm8001/
obj-$(CONFIG_SCSI_ISCI)		+= isci/
obj-$(CONFIG_SCSI_IPS)		+= ips.o
obj-$(CONFIG_SCSI_GENERIC_NCR5380) += g_NCR5380.o
obj-$(CONFIG_SCSI_NCR_D700)	+= 53c700.o NCR_D700.o
obj-$(CONFIG_SCSI_NCR_Q720)	+= NCR_Q720_mod.o
obj-$(CONFIG_SCSI_QLOGIC_FAS)	+= qlogicfas408.o qlogicfas.o
obj-$(CONFIG_PCMCIA_QLOGIC)	+= qlogicfas408.o
obj-$(CONFIG_SCSI_QLOGIC_1280)	+= qla1280.o
@@ -180,7 +178,6 @@ ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
		-DCONFIG_SCSI_NCR53C8XX_NO_WORD_TRANSFERS
CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
zalon7xx-objs	:= zalon.o ncr53c8xx.o
NCR_Q720_mod-objs	:= NCR_Q720.o ncr53c8xx.o

# Files generated that shall be removed upon make clean
clean-files := 53c700_d.h 53c700_u.h scsi_devinfo_tbl.c
@ -1,405 +0,0 @@
|
||||
/* -*- mode: c; c-basic-offset: 8 -*- */
|
||||
|
||||
/* NCR Dual 700 MCA SCSI Driver
|
||||
*
|
||||
* Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
|
||||
**-----------------------------------------------------------------------------
|
||||
**
|
||||
** This program is free software; you can redistribute it and/or modify
|
||||
** it under the terms of the GNU General Public License as published by
|
||||
** the Free Software Foundation; either version 2 of the License, or
|
||||
** (at your option) any later version.
|
||||
**
|
||||
** This program is distributed in the hope that it will be useful,
|
||||
** but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
** GNU General Public License for more details.
|
||||
**
|
||||
** You should have received a copy of the GNU General Public License
|
||||
** along with this program; if not, write to the Free Software
|
||||
** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
|
||||
**
|
||||
**-----------------------------------------------------------------------------
|
||||
*/
|
||||
|
||||
/* Notes:
|
||||
*
|
||||
* Most of the work is done in the chip specific module, 53c700.o
|
||||
*
|
||||
* TODO List:
|
||||
*
|
||||
* 1. Extract the SCSI ID from the voyager CMOS table (necessary to
|
||||
* support multi-host environments.
|
||||
*
|
||||
* */
|
||||
|
||||
|
||||
/* CHANGELOG
|
||||
*
|
||||
* Version 2.2
|
||||
*
|
||||
* Added mca_set_adapter_name().
|
||||
*
|
||||
* Version 2.1
|
||||
*
|
||||
* Modularise the driver into a Board piece (this file) and a chip
|
||||
* piece 53c700.[ch] and 53c700.scr, added module options. You can
|
||||
* now specify the scsi id by the parameters
|
||||
*
|
||||
* NCR_D700=slot:<n> [siop:<n>] id:<n> ....
|
||||
*
|
||||
* They need to be comma separated if compiled into the kernel
|
||||
*
|
||||
* Version 2.0
|
||||
*
|
||||
* Initial implementation of TCQ (Tag Command Queueing). TCQ is full
|
||||
* featured and uses the clock algorithm to keep track of outstanding
|
||||
* tags and guard against individual tag starvation. Also fixed a bug
|
||||
* in all of the 1.x versions where the D700_data_residue() function
|
||||
* was returning results off by 32 bytes (and thus causing the same 32
|
||||
* bytes to be written twice corrupting the data block). It turns out
|
||||
* the 53c700 only has a 6 bit DBC and DFIFO registers not 7 bit ones
|
||||
* like the 53c710 (The 710 is the only data manual still available,
|
||||
* which I'd been using to program the 700).
|
||||
*
|
||||
* Version 1.2
|
||||
*
|
||||
* Much improved message handling engine
|
||||
*
|
||||
* Version 1.1
|
||||
*
|
||||
* Add code to handle selection reasonably correctly. By the time we
|
||||
* get the selection interrupt, we've already responded, but drop off the
|
||||
* bus and hope the selector will go away.
|
||||
*
|
||||
* Version 1.0:
|
||||
*
|
||||
* Initial release. Fully functional except for procfs and tag
|
||||
* command queueing. Has only been tested on cards with 53c700-66
|
||||
* chips and only single ended. Features are
|
||||
*
|
||||
* 1. Synchronous data transfers to offset 8 (limit of 700-66) and
|
||||
* 100ns (10MHz) limit of SCSI-2
|
||||
*
|
||||
* 2. Disconnection and reselection
|
||||
*
|
||||
* Testing:
|
||||
*
|
||||
* I've only really tested this with the 700-66 chip, but have done
|
||||
* soak tests in multi-device environments to verify that
|
||||
* disconnections and reselections are being processed correctly.
|
||||
* */
|
||||
|
||||
#define NCR_D700_VERSION "2.2"
|
||||
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mca.h>
|
||||
#include <linux/slab.h>
|
||||
#include <asm/io.h>
|
||||
#include <scsi/scsi_host.h>
|
||||
#include <scsi/scsi_device.h>
|
||||
#include <scsi/scsi_transport.h>
|
||||
#include <scsi/scsi_transport_spi.h>
|
||||
|
||||
#include "53c700.h"
|
||||
#include "NCR_D700.h"
|
||||
|
||||
static char *NCR_D700; /* command line from insmod */
|
||||
|
||||
MODULE_AUTHOR("James Bottomley");
|
||||
MODULE_DESCRIPTION("NCR Dual700 SCSI Driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
module_param(NCR_D700, charp, 0);
|
||||
|
||||
static __u8 id_array[2*(MCA_MAX_SLOT_NR + 1)] =
|
||||
{ [0 ... 2*(MCA_MAX_SLOT_NR + 1)-1] = 7 };
|
||||
|
||||
#ifdef MODULE
|
||||
#define ARG_SEP ' '
|
||||
#else
|
||||
#define ARG_SEP ','
|
||||
#endif
|
||||
|
||||
static int __init
|
||||
param_setup(char *string)
|
||||
{
|
||||
char *pos = string, *next;
|
||||
int slot = -1, siop = -1;
|
||||
|
||||
while(pos != NULL && (next = strchr(pos, ':')) != NULL) {
|
||||
int val = (int)simple_strtoul(++next, NULL, 0);
|
||||
|
||||
if(!strncmp(pos, "slot:", 5))
|
||||
slot = val;
|
||||
else if(!strncmp(pos, "siop:", 5))
|
||||
siop = val;
|
||||
else if(!strncmp(pos, "id:", 3)) {
|
||||
if(slot == -1) {
|
||||
printk(KERN_WARNING "NCR D700: Must specify slot for id parameter\n");
|
||||
} else if(slot > MCA_MAX_SLOT_NR) {
|
||||
printk(KERN_WARNING "NCR D700: Illegal slot %d for id %d\n", slot, val);
|
||||
} else {
|
||||
if(siop != 0 && siop != 1) {
|
||||
id_array[slot*2] = val;
|
||||
id_array[slot*2 + 1] =val;
|
||||
} else {
|
||||
id_array[slot*2 + siop] = val;
|
||||
}
|
||||
}
|
||||
}
|
||||
if((pos = strchr(pos, ARG_SEP)) != NULL)
|
||||
pos++;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Host template. The 53c700 routine NCR_700_detect will
|
||||
* fill in all of the missing routines */
|
||||
static struct scsi_host_template NCR_D700_driver_template = {
|
||||
.module = THIS_MODULE,
|
||||
.name = "NCR Dual 700 MCA",
|
||||
.proc_name = "NCR_D700",
|
||||
.this_id = 7,
|
||||
};
|
||||
|
||||
/* We needs this helper because we have two hosts per struct device */
|
||||
struct NCR_D700_private {
|
||||
struct device *dev;
|
||||
struct Scsi_Host *hosts[2];
|
||||
char name[30];
|
||||
char pad;
|
||||
};
|
||||
|
||||
static int
|
||||
NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq,
|
||||
int slot, u32 region, int differential)
|
||||
{
|
||||
struct NCR_700_Host_Parameters *hostdata;
|
||||
struct Scsi_Host *host;
|
||||
int ret;
|
||||
|
||||
hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL);
|
||||
if (!hostdata) {
|
||||
printk(KERN_ERR "NCR D700: SIOP%d: Failed to allocate host"
|
||||
"data, detatching\n", siop);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
if (!request_region(region, 64, "NCR_D700")) {
|
||||
printk(KERN_ERR "NCR D700: Failed to reserve IO region 0x%x\n",
|
||||
region);
|
||||
ret = -ENODEV;
|
||||
goto region_failed;
|
||||
}
|
||||
|
||||
/* Fill in the three required pieces of hostdata */
|
||||
hostdata->base = ioport_map(region, 64);
|
||||
hostdata->differential = (((1<<siop) & differential) != 0);
|
||||
hostdata->clock = NCR_D700_CLOCK_MHZ;
|
||||
hostdata->burst_length = 8;
|
||||
|
||||
/* and register the siop */
|
||||
host = NCR_700_detect(&NCR_D700_driver_template, hostdata, p->dev);
|
||||
if (!host) {
|
||||
ret = -ENOMEM;
|
||||
goto detect_failed;
|
||||
}
|
||||
|
||||
p->hosts[siop] = host;
|
||||
/* FIXME: read this from SUS */
|
||||
host->this_id = id_array[slot * 2 + siop];
|
||||
host->irq = irq;
|
||||
host->base = region;
|
||||
scsi_scan_host(host);
|
||||
|
||||
return 0;
|
||||
|
||||
detect_failed:
|
||||
release_region(region, 64);
|
||||
region_failed:
|
||||
kfree(hostdata);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static irqreturn_t
|
||||
NCR_D700_intr(int irq, void *data)
|
||||
{
|
||||
struct NCR_D700_private *p = (struct NCR_D700_private *)data;
|
||||
int i, found = 0;
|
||||
|
||||
for (i = 0; i < 2; i++)
|
||||
if (p->hosts[i] &&
|
||||
NCR_700_intr(irq, p->hosts[i]) == IRQ_HANDLED)
|
||||
found++;
|
||||
|
||||
return found ? IRQ_HANDLED : IRQ_NONE;
|
||||
}
|
||||
|
||||
/* Detect a D700 card. Note, because of the setup --- the chips are
|
||||
* essentially connectecd to the MCA bus independently, it is easier
|
||||
* to set them up as two separate host adapters, rather than one
|
||||
* adapter with two channels */
|
||||
static int
|
||||
NCR_D700_probe(struct device *dev)
|
||||
{
|
||||
struct NCR_D700_private *p;
|
||||
int differential;
|
||||
static int banner = 1;
|
||||
struct mca_device *mca_dev = to_mca_device(dev);
|
||||
int slot = mca_dev->slot;
|
||||
int found = 0;
|
||||
int irq, i;
|
||||
int pos3j, pos3k, pos3a, pos3b, pos4;
|
||||
__u32 base_addr, offset_addr;
|
||||
|
||||
/* enable board interrupt */
|
||||
pos4 = mca_device_read_pos(mca_dev, 4);
|
||||
pos4 |= 0x4;
|
||||
mca_device_write_pos(mca_dev, 4, pos4);
|
||||
|
||||
mca_device_write_pos(mca_dev, 6, 9);
|
||||
pos3j = mca_device_read_pos(mca_dev, 3);
|
||||
mca_device_write_pos(mca_dev, 6, 10);
|
||||
pos3k = mca_device_read_pos(mca_dev, 3);
|
||||
mca_device_write_pos(mca_dev, 6, 0);
|
||||
pos3a = mca_device_read_pos(mca_dev, 3);
|
||||
mca_device_write_pos(mca_dev, 6, 1);
|
||||
pos3b = mca_device_read_pos(mca_dev, 3);
|
||||
|
||||
base_addr = ((pos3j << 8) | pos3k) & 0xfffffff0;
|
||||
offset_addr = ((pos3a << 8) | pos3b) & 0xffffff70;
|
||||
|
||||
irq = (pos4 & 0x3) + 11;
|
||||
if(irq >= 13)
|
||||
irq++;
|
||||
if(banner) {
|
||||
printk(KERN_NOTICE "NCR D700: Driver Version " NCR_D700_VERSION "\n"
|
||||
"NCR D700: Copyright (c) 2001 by James.Bottomley@HansenPartnership.com\n"
|
||||
"NCR D700:\n");
|
||||
banner = 0;
|
||||
}
|
||||
/* now do the bus related transforms */
|
||||
irq = mca_device_transform_irq(mca_dev, irq);
|
||||
base_addr = mca_device_transform_ioport(mca_dev, base_addr);
|
||||
offset_addr = mca_device_transform_ioport(mca_dev, offset_addr);
|
||||
|
||||
printk(KERN_NOTICE "NCR D700: found in slot %d irq = %d I/O base = 0x%x\n", slot, irq, offset_addr);
|
||||
|
||||
/*outb(BOARD_RESET, base_addr);*/
|
||||
|
||||
/* clear any pending interrupts */
|
||||
(void)inb(base_addr + 0x08);
|
||||
/* get modctl, used later for setting diff bits */
|
||||
switch(differential = (inb(base_addr + 0x08) >> 6)) {
|
||||
case 0x00:
|
||||
/* only SIOP1 differential */
|
||||
differential = 0x02;
|
||||
break;
|
||||
case 0x01:
|
||||
/* Both SIOPs differential */
|
||||
differential = 0x03;
|
||||
break;
|
||||
case 0x03:
|
||||
/* No SIOPs differential */
|
||||
differential = 0x00;
|
||||
break;
|
||||
default:
|
||||
printk(KERN_ERR "D700: UNEXPECTED DIFFERENTIAL RESULT 0x%02x\n",
|
||||
differential);
|
||||
differential = 0x00;
|
||||
break;
|
||||
}
|
||||
|
||||
p = kzalloc(sizeof(*p), GFP_KERNEL);
|
||||
if (!p)
|
||||
return -ENOMEM;
|
||||
|
||||
p->dev = dev;
|
||||
snprintf(p->name, sizeof(p->name), "D700(%s)", dev_name(dev));
|
||||
if (request_irq(irq, NCR_D700_intr, IRQF_SHARED, p->name, p)) {
|
||||
printk(KERN_ERR "D700: request_irq failed\n");
|
||||
kfree(p);
|
||||
return -EBUSY;
|
||||
}
|
||||
/* plumb in both 700 chips */
|
||||
for (i = 0; i < 2; i++) {
|
||||
int err;
|
||||
|
||||
if ((err = NCR_D700_probe_one(p, i, irq, slot,
|
||||
offset_addr + (0x80 * i),
|
||||
differential)) != 0)
|
||||
printk("D700: SIOP%d: probe failed, error = %d\n",
|
||||
i, err);
|
||||
else
|
||||
found++;
|
||||
}
|
||||
|
||||
if (!found) {
|
||||
kfree(p);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
mca_device_set_claim(mca_dev, 1);
|
||||
mca_device_set_name(mca_dev, "NCR_D700");
|
||||
dev_set_drvdata(dev, p);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
NCR_D700_remove_one(struct Scsi_Host *host)
|
||||
{
|
||||
scsi_remove_host(host);
|
||||
NCR_700_release(host);
|
||||
kfree((struct NCR_700_Host_Parameters *)host->hostdata[0]);
|
||||
free_irq(host->irq, host);
|
||||
release_region(host->base, 64);
|
||||
}
|
||||
|
||||
static int
|
||||
NCR_D700_remove(struct device *dev)
|
||||
{
|
||||
struct NCR_D700_private *p = dev_get_drvdata(dev);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < 2; i++)
|
||||
NCR_D700_remove_one(p->hosts[i]);
|
||||
|
||||
kfree(p);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static short NCR_D700_id_table[] = { NCR_D700_MCA_ID, 0 };
|
||||
|
||||
static struct mca_driver NCR_D700_driver = {
|
||||
.id_table = NCR_D700_id_table,
|
||||
.driver = {
|
||||
.name = "NCR_D700",
|
||||
.bus = &mca_bus_type,
|
||||
.probe = NCR_D700_probe,
|
||||
.remove = NCR_D700_remove,
|
||||
},
|
||||
};
|
||||
|
||||
static int __init NCR_D700_init(void)
|
||||
{
|
||||
#ifdef MODULE
|
||||
if (NCR_D700)
|
||||
param_setup(NCR_D700);
|
||||
#endif
|
||||
|
||||
return mca_register_driver(&NCR_D700_driver);
|
||||
}
|
||||
|
||||
static void __exit NCR_D700_exit(void)
|
||||
{
|
||||
mca_unregister_driver(&NCR_D700_driver);
|
||||
}
|
||||
|
||||
module_init(NCR_D700_init);
|
||||
module_exit(NCR_D700_exit);
|
||||
|
||||
__setup("NCR_D700=", param_setup);
|
@@ -1,30 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* -*- mode: c; c-basic-offset: 8 -*- */

/* NCR Dual 700 MCA SCSI Driver
 *
 * Copyright (C) 2001 by James.Bottomley@HansenPartnership.com
 */

#ifndef _NCR_D700_H
#define _NCR_D700_H

/* Don't turn on debugging messages */
#undef NCR_D700_DEBUG

/* The MCA identifier */
#define NCR_D700_MCA_ID		0x0092

/* Defines for the Board registers */
#define	BOARD_RESET	0x80	/* board level reset */
#define	ADD_PARENB	0x04	/* Address Parity Enabled */
#define	DAT_PARENB	0x01	/* Data Parity Enabled */
#define	SFBK_ENB	0x10	/* SFDBK Interrupt Enabled */
#define	LED0GREEN	0x20	/* Led 0 (red 0; green 1) */
#define	LED1GREEN	0x40	/* Led 1 (red 0; green 1) */
#define	LED0RED		0xDF	/* Led 0 (red 0; green 1) */
#define	LED1RED		0xBF	/* Led 1 (red 0; green 1) */

#define NCR_D700_CLOCK_MHZ	50

#endif
@ -1,376 +0,0 @@
|
||||
/* -*- mode: c; c-basic-offset: 8 -*- */
|
||||
|
||||
/* NCR Quad 720 MCA SCSI Driver
|
||||
*
|
||||
* Copyright (C) 2003 by James.Bottomley@HansenPartnership.com
|
||||
*/
|
||||
|
||||
#include <linux/blkdev.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mca.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/delay.h>
|
||||
#include <asm/io.h>
|
||||
|
||||
#include "scsi.h"
|
||||
#include <scsi/scsi_host.h>
|
||||
|
||||
#include "ncr53c8xx.h"
|
||||
|
||||
#include "NCR_Q720.h"
|
||||
|
||||
static struct ncr_chip q720_chip __initdata = {
|
||||
.revision_id = 0x0f,
|
||||
.burst_max = 3,
|
||||
.offset_max = 8,
|
||||
.nr_divisor = 4,
|
||||
.features = FE_WIDE | FE_DIFF | FE_VARCLK,
|
||||
};
|
||||
|
||||
MODULE_AUTHOR("James Bottomley");
|
||||
MODULE_DESCRIPTION("NCR Quad 720 SCSI Driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
#define NCR_Q720_VERSION "0.9"
|
||||
|
||||
/* We needs this helper because we have up to four hosts per struct device */
|
||||
struct NCR_Q720_private {
|
||||
struct device *dev;
|
||||
void __iomem * mem_base;
|
||||
__u32 phys_mem_base;
|
||||
__u32 mem_size;
|
||||
__u8 irq;
|
||||
__u8 siops;
|
||||
__u8 irq_enable;
|
||||
struct Scsi_Host *hosts[4];
|
||||
};
|
||||
|
||||
static struct scsi_host_template NCR_Q720_tpnt = {
|
||||
.module = THIS_MODULE,
|
||||
.proc_name = "NCR_Q720",
|
||||
};
|
||||
|
||||
static irqreturn_t
|
||||
NCR_Q720_intr(int irq, void *data)
|
||||
{
|
||||
struct NCR_Q720_private *p = (struct NCR_Q720_private *)data;
|
||||
__u8 sir = (readb(p->mem_base + 0x0d) & 0xf0) >> 4;
|
||||
__u8 siop;
|
||||
|
||||
sir |= ~p->irq_enable;
|
||||
|
||||
if(sir == 0xff)
|
||||
return IRQ_NONE;
|
||||
|
||||
|
||||
while((siop = ffz(sir)) < p->siops) {
|
||||
sir |= 1<<siop;
|
||||
ncr53c8xx_intr(irq, p->hosts[siop]);
|
||||
}
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static int __init
|
||||
NCR_Q720_probe_one(struct NCR_Q720_private *p, int siop,
|
||||
int irq, int slot, __u32 paddr, void __iomem *vaddr)
|
||||
{
|
||||
struct ncr_device device;
|
||||
__u8 scsi_id;
|
||||
static int unit = 0;
|
||||
__u8 scsr1 = readb(vaddr + NCR_Q720_SCSR_OFFSET + 1);
|
||||
__u8 differential = readb(vaddr + NCR_Q720_SCSR_OFFSET) & 0x20;
|
||||
__u8 version;
|
||||
int error;
|
||||
|
||||
scsi_id = scsr1 >> 4;
|
||||
/* enable burst length 16 (FIXME: should allow this) */
|
||||
scsr1 |= 0x02;
|
||||
/* force a siop reset */
|
||||
scsr1 |= 0x04;
|
||||
writeb(scsr1, vaddr + NCR_Q720_SCSR_OFFSET + 1);
|
||||
udelay(10);
|
||||
version = readb(vaddr + 0x18) >> 4;
|
||||
|
||||
memset(&device, 0, sizeof(struct ncr_device));
|
||||
/* Initialise ncr_device structure with items required by ncr_attach. */
|
||||
device.chip = q720_chip;
|
||||
device.chip.revision_id = version;
|
||||
device.host_id = scsi_id;
|
||||
device.dev = p->dev;
|
||||
device.slot.base = paddr;
|
||||
device.slot.base_c = paddr;
|
||||
device.slot.base_v = vaddr;
|
||||
device.slot.irq = irq;
|
||||
device.differential = differential ? 2 : 0;
|
||||
printk("Q720 probe unit %d (siop%d) at 0x%lx, diff = %d, vers = %d\n", unit, siop,
|
||||
(unsigned long)paddr, differential, version);
|
||||
|
||||
p->hosts[siop] = ncr_attach(&NCR_Q720_tpnt, unit++, &device);
|
||||
|
||||
if (!p->hosts[siop])
|
||||
goto fail;
|
||||
|
||||
p->irq_enable |= (1<<siop);
|
||||
scsr1 = readb(vaddr + NCR_Q720_SCSR_OFFSET + 1);
|
||||
/* clear the disable interrupt bit */
|
||||
scsr1 &= ~0x01;
|
||||
writeb(scsr1, vaddr + NCR_Q720_SCSR_OFFSET + 1);
|
||||
|
||||
error = scsi_add_host(p->hosts[siop], p->dev);
|
||||
if (error)
|
||||
ncr53c8xx_release(p->hosts[siop]);
|
||||
else
|
||||
scsi_scan_host(p->hosts[siop]);
|
||||
return error;
|
||||
|
||||
fail:
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/* Detect a Q720 card. Note, because of the setup --- the chips are
|
||||
* essentially connectecd to the MCA bus independently, it is easier
|
||||
* to set them up as two separate host adapters, rather than one
|
||||
* adapter with two channels */
|
||||
static int __init
|
||||
NCR_Q720_probe(struct device *dev)
|
||||
{
|
||||
struct NCR_Q720_private *p;
|
||||
static int banner = 1;
|
||||
struct mca_device *mca_dev = to_mca_device(dev);
|
||||
int slot = mca_dev->slot;
|
||||
int found = 0;
|
||||
int irq, i, siops;
|
||||
__u8 pos2, pos4, asr2, asr9, asr10;
|
||||
__u16 io_base;
|
||||
__u32 base_addr, mem_size;
|
||||
void __iomem *mem_base;
|
||||
|
||||
p = kzalloc(sizeof(*p), GFP_KERNEL);
|
||||
if (!p)
|
||||
return -ENOMEM;
|
||||
|
||||
pos2 = mca_device_read_pos(mca_dev, 2);
|
||||
/* enable device */
|
||||
pos2 |= NCR_Q720_POS2_BOARD_ENABLE | NCR_Q720_POS2_INTERRUPT_ENABLE;
|
||||
mca_device_write_pos(mca_dev, 2, pos2);
|
||||
|
||||
io_base = (pos2 & NCR_Q720_POS2_IO_MASK) << NCR_Q720_POS2_IO_SHIFT;
|
||||
|
||||
|
||||
if(banner) {
|
||||
printk(KERN_NOTICE "NCR Q720: Driver Version " NCR_Q720_VERSION "\n"
|
||||
"NCR Q720: Copyright (c) 2003 by James.Bottomley@HansenPartnership.com\n"
|
||||
"NCR Q720:\n");
|
||||
banner = 0;
|
||||
}
|
||||
io_base = mca_device_transform_ioport(mca_dev, io_base);
|
||||
|
||||
/* OK, this is phase one of the bootstrap, we now know the
|
||||
* I/O space base address. All the configuration registers
|
||||
* are mapped here (including pos) */
|
||||
|
||||
/* sanity check I/O mapping */
|
||||
i = inb(io_base) | (inb(io_base+1)<<8);
|
||||
if(i != NCR_Q720_MCA_ID) {
|
||||
printk(KERN_ERR "NCR_Q720, adapter failed to I/O map registers correctly at 0x%x(0x%x)\n", io_base, i);
|
||||
kfree(p);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/* Phase II, find the ram base and memory map the board register */
|
||||
pos4 = inb(io_base + 4);
|
||||
/* enable streaming data */
|
||||
pos4 |= 0x01;
|
||||
outb(pos4, io_base + 4);
|
||||
base_addr = (pos4 & 0x7e) << 20;
|
||||
base_addr += (pos4 & 0x80) << 23;
|
||||
asr10 = inb(io_base + 0x12);
|
||||
base_addr += (asr10 & 0x80) << 24;
|
||||
base_addr += (asr10 & 0x70) << 23;
|
||||
|
||||
/* OK, got the base addr, now we need to find the ram size,
|
||||
* enable and map it */
|
||||
asr9 = inb(io_base + 0x11);
|
||||
i = (asr9 & 0xc0) >> 6;
|
||||
if(i == 0)
|
||||
mem_size = 1024;
|
||||
else
|
||||
mem_size = 1 << (19 + i);
|
||||
|
||||
/* enable the sram mapping */
|
||||
asr9 |= 0x20;
|
||||
|
||||
/* disable the rom mapping */
|
||||
asr9 &= ~0x10;
|
||||
|
||||
outb(asr9, io_base + 0x11);
|
||||
|
||||
if(!request_mem_region(base_addr, mem_size, "NCR_Q720")) {
|
||||
printk(KERN_ERR "NCR_Q720: Failed to claim memory region 0x%lx\n-0x%lx",
|
||||
(unsigned long)base_addr,
|
||||
(unsigned long)(base_addr + mem_size));
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
if (dma_declare_coherent_memory(dev, base_addr, base_addr,
|
||||
mem_size, 0)) {
|
||||
printk(KERN_ERR "NCR_Q720: DMA declare memory failed\n");
|
||||
goto out_release_region;
|
||||
}
|
||||
|
||||
/* The first 1k of the memory buffer is a memory map of the registers
|
||||
*/
|
||||
mem_base = dma_mark_declared_memory_occupied(dev, base_addr,
|
||||
1024);
|
||||
if (IS_ERR(mem_base)) {
|
||||
printk("NCR_Q720 failed to reserve memory mapped region\n");
|
||||
goto out_release;
|
||||
}
|
||||
|
||||
/* now also enable accesses in asr 2 */
|
||||
asr2 = inb(io_base + 0x0a);
|
||||
|
||||
asr2 |= 0x01;
|
||||
|
||||
outb(asr2, io_base + 0x0a);
|
||||
|
||||
/* get the number of SIOPs (this should be 2 or 4) */
|
||||
siops = ((asr2 & 0xe0) >> 5) + 1;
|
||||
|
||||
/* sanity check mapping (again) */
|
||||
i = readw(mem_base);
|
||||
if(i != NCR_Q720_MCA_ID) {
|
||||
printk(KERN_ERR "NCR_Q720, adapter failed to memory map registers correctly at 0x%lx(0x%x)\n", (unsigned long)base_addr, i);
|
||||
goto out_release;
|
||||
}
|
||||
|
||||
irq = readb(mem_base + 5) & 0x0f;
|
||||
|
||||
|
||||
/* now do the bus related transforms */
|
||||
irq = mca_device_transform_irq(mca_dev, irq);
|
||||
|
||||
printk(KERN_NOTICE "NCR Q720: found in slot %d irq = %d mem base = 0x%lx siops = %d\n", slot, irq, (unsigned long)base_addr, siops);
|
||||
printk(KERN_NOTICE "NCR Q720: On board ram %dk\n", mem_size/1024);
|
||||
|
||||
p->dev = dev;
|
||||
p->mem_base = mem_base;
|
||||
p->phys_mem_base = base_addr;
|
||||
p->mem_size = mem_size;
|
||||
p->irq = irq;
|
||||
p->siops = siops;
|
||||
|
||||
if (request_irq(irq, NCR_Q720_intr, IRQF_SHARED, "NCR_Q720", p)) {
|
||||
printk(KERN_ERR "NCR_Q720: request irq %d failed\n", irq);
|
||||
goto out_release;
|
||||
}
|
||||
/* disable all the siop interrupts */
|
||||
for(i = 0; i < siops; i++) {
|
||||
void __iomem *reg_scsr1 = mem_base + NCR_Q720_CHIP_REGISTER_OFFSET
|
||||
+ i*NCR_Q720_SIOP_SHIFT + NCR_Q720_SCSR_OFFSET + 1;
|
||||
__u8 scsr1 = readb(reg_scsr1);
|
||||
scsr1 |= 0x01;
|
||||
writeb(scsr1, reg_scsr1);
|
||||
}
|
||||
|
||||
/* plumb in all 720 chips */
|
||||
for (i = 0; i < siops; i++) {
|
||||
void __iomem *siop_v_base = mem_base + NCR_Q720_CHIP_REGISTER_OFFSET
|
||||
+ i*NCR_Q720_SIOP_SHIFT;
|
||||
__u32 siop_p_base = base_addr + NCR_Q720_CHIP_REGISTER_OFFSET
|
||||
+ i*NCR_Q720_SIOP_SHIFT;
|
||||
__u16 port = io_base + NCR_Q720_CHIP_REGISTER_OFFSET
|
||||
+ i*NCR_Q720_SIOP_SHIFT;
|
||||
int err;
|
||||
|
||||
outb(0xff, port + 0x40);
|
||||
outb(0x07, port + 0x41);
|
||||
if ((err = NCR_Q720_probe_one(p, i, irq, slot,
|
||||
siop_p_base, siop_v_base)) != 0)
|
||||
printk("Q720: SIOP%d: probe failed, error = %d\n",
|
||||
i, err);
|
||||
else
|
||||
found++;
|
||||
}
|
||||
|
||||
if (!found) {
|
||||
kfree(p);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
mca_device_set_claim(mca_dev, 1);
|
||||
mca_device_set_name(mca_dev, "NCR_Q720");
|
||||
dev_set_drvdata(dev, p);
|
||||
|
||||
return 0;
|
||||
|
||||
out_release:
|
||||
dma_release_declared_memory(dev);
|
||||
out_release_region:
|
||||
release_mem_region(base_addr, mem_size);
|
||||
out_free:
|
||||
kfree(p);
|
||||
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
static void __exit
|
||||
NCR_Q720_remove_one(struct Scsi_Host *host)
|
||||
{
|
||||
scsi_remove_host(host);
|
||||
ncr53c8xx_release(host);
|
||||
}
|
||||
|
||||
static int __exit
|
||||
NCR_Q720_remove(struct device *dev)
|
||||
{
|
||||
struct NCR_Q720_private *p = dev_get_drvdata(dev);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < p->siops; i++)
|
||||
if(p->hosts[i])
|
||||
NCR_Q720_remove_one(p->hosts[i]);
|
||||
|
||||
dma_release_declared_memory(dev);
|
||||
release_mem_region(p->phys_mem_base, p->mem_size);
|
||||
free_irq(p->irq, p);
|
||||
kfree(p);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static short NCR_Q720_id_table[] = { NCR_Q720_MCA_ID, 0 };
|
||||
|
||||
static struct mca_driver NCR_Q720_driver = {
|
||||
.id_table = NCR_Q720_id_table,
|
||||
.driver = {
|
||||
.name = "NCR_Q720",
|
||||
.bus = &mca_bus_type,
|
||||
.probe = NCR_Q720_probe,
|
||||
.remove = NCR_Q720_remove,
|
||||
},
|
||||
};
|
||||
|
||||
static int __init
|
||||
NCR_Q720_init(void)
|
||||
{
|
||||
int ret = ncr53c8xx_init();
|
||||
if (!ret)
|
||||
ret = mca_register_driver(&NCR_Q720_driver);
|
||||
if (ret)
|
||||
ncr53c8xx_exit();
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __exit
|
||||
NCR_Q720_exit(void)
|
||||
{
|
||||
mca_unregister_driver(&NCR_Q720_driver);
|
||||
ncr53c8xx_exit();
|
||||
}
|
||||
|
||||
module_init(NCR_Q720_init);
|
||||
module_exit(NCR_Q720_exit);
|
@@ -1,29 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* -*- mode: c; c-basic-offset: 8 -*- */

/* NCR Quad 720 MCA SCSI Driver
 *
 * Copyright (C) 2003 by James.Bottomley@HansenPartnership.com
 */

#ifndef _NCR_Q720_H
#define _NCR_Q720_H

/* The MCA identifier */
#define NCR_Q720_MCA_ID		0x0720

#define NCR_Q720_CLOCK_MHZ	30

#define NCR_Q720_POS2_BOARD_ENABLE	0x01
#define NCR_Q720_POS2_INTERRUPT_ENABLE	0x02
#define NCR_Q720_POS2_PARITY_DISABLE	0x04
#define NCR_Q720_POS2_IO_MASK		0xf8
#define NCR_Q720_POS2_IO_SHIFT		8

#define NCR_Q720_CHIP_REGISTER_OFFSET	0x200
#define NCR_Q720_SCSR_OFFSET		0x070
#define NCR_Q720_SIOP_SHIFT		0x080

#endif
@@ -143,7 +143,7 @@ static u8 wait_chip_ready(struct orc_host * host)
	for (i = 0; i < 10; i++) {	/* Wait 1 second for report timeout */
		if (inb(host->base + ORC_HCTRL) & HOSTSTOP)	/* Wait HOSTSTOP set */
			return 1;
		mdelay(100);
		msleep(100);
	}
	return 0;
}
@@ -155,7 +155,7 @@ static u8 wait_firmware_ready(struct orc_host * host)
	for (i = 0; i < 10; i++) {	/* Wait 1 second for report timeout */
		if (inb(host->base + ORC_HSTUS) & RREADY)	/* Wait READY set */
			return 1;
		mdelay(100);	/* wait 100ms before try again */
		msleep(100);	/* wait 100ms before try again */
	}
	return 0;
}
@ -115,8 +115,6 @@
|
||||
#define ASENCODE_LUN_FAILED_SELF_CONFIG 0x00
|
||||
#define ASENCODE_OVERLAPPED_COMMAND 0x00
|
||||
|
||||
#define AAC_STAT_GOOD (DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD)
|
||||
|
||||
#define BYTE0(x) (unsigned char)(x)
|
||||
#define BYTE1(x) (unsigned char)((x) >> 8)
|
||||
#define BYTE2(x) (unsigned char)((x) >> 16)
|
||||
@ -2961,7 +2959,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
|
||||
|
||||
case SYNCHRONIZE_CACHE:
|
||||
if (((aac_cache & 6) == 6) && dev->cache_protected) {
|
||||
scsicmd->result = AAC_STAT_GOOD;
|
||||
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
|
||||
SAM_STAT_GOOD;
|
||||
break;
|
||||
}
|
||||
/* Issue FIB to tell Firmware to flush it's cache */
|
||||
@ -2989,7 +2988,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
|
||||
arr[1] = scsicmd->cmnd[2];
|
||||
scsi_sg_copy_from_buffer(scsicmd, &inq_data,
|
||||
sizeof(inq_data));
|
||||
scsicmd->result = AAC_STAT_GOOD;
|
||||
scsicmd->result = DID_OK << 16 |
|
||||
COMMAND_COMPLETE << 8 |
|
||||
SAM_STAT_GOOD;
|
||||
} else if (scsicmd->cmnd[2] == 0x80) {
|
||||
/* unit serial number page */
|
||||
arr[3] = setinqserial(dev, &arr[4],
|
||||
@ -3000,7 +3001,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
|
||||
if (aac_wwn != 2)
|
||||
return aac_get_container_serial(
|
||||
scsicmd);
|
||||
scsicmd->result = AAC_STAT_GOOD;
|
||||
scsicmd->result = DID_OK << 16 |
|
||||
COMMAND_COMPLETE << 8 |
|
||||
SAM_STAT_GOOD;
|
||||
} else if (scsicmd->cmnd[2] == 0x83) {
|
||||
/* vpd page 0x83 - Device Identification Page */
|
||||
char *sno = (char *)&inq_data;
|
||||
@ -3009,7 +3012,9 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
|
||||
if (aac_wwn != 2)
|
||||
return aac_get_container_serial(
|
||||
scsicmd);
|
||||
scsicmd->result = AAC_STAT_GOOD;
|
||||
scsicmd->result = DID_OK << 16 |
|
||||
COMMAND_COMPLETE << 8 |
|
||||
SAM_STAT_GOOD;
|
||||
} else {
|
||||
/* vpd page not implemented */
|
||||
scsicmd->result = DID_OK << 16 |
|
||||
@ -3040,7 +3045,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
|
||||
inq_data.inqd_pdt = INQD_PDT_PROC; /* Processor device */
|
||||
scsi_sg_copy_from_buffer(scsicmd, &inq_data,
|
||||
sizeof(inq_data));
|
||||
scsicmd->result = AAC_STAT_GOOD;
|
||||
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
|
||||
SAM_STAT_GOOD;
|
||||
break;
|
||||
}
|
||||
if (dev->in_reset)
|
||||
@ -3089,7 +3095,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
|
||||
/* Do not cache partition table for arrays */
|
||||
scsicmd->device->removable = 1;
|
||||
|
||||
scsicmd->result = AAC_STAT_GOOD;
|
||||
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
|
||||
SAM_STAT_GOOD;
|
||||
break;
|
||||
}
|
||||
|
||||
@ -3115,7 +3122,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
|
||||
scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp));
|
||||
/* Do not cache partition table for arrays */
|
||||
scsicmd->device->removable = 1;
|
||||
scsicmd->result = AAC_STAT_GOOD;
|
||||
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
|
||||
SAM_STAT_GOOD;
|
||||
break;
|
||||
}
|
||||
|
||||
@ -3194,7 +3202,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
|
||||
scsi_sg_copy_from_buffer(scsicmd,
|
||||
(char *)&mpd,
|
||||
mode_buf_length);
|
||||
scsicmd->result = AAC_STAT_GOOD;
|
||||
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
|
||||
SAM_STAT_GOOD;
|
||||
break;
|
||||
}
|
||||
case MODE_SENSE_10:
|
||||
@ -3271,7 +3280,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
|
||||
(char *)&mpd10,
|
||||
mode_buf_length);
|
||||
|
||||
scsicmd->result = AAC_STAT_GOOD;
|
||||
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
|
||||
SAM_STAT_GOOD;
|
||||
break;
|
||||
}
|
||||
case REQUEST_SENSE:
|
||||
@ -3280,7 +3290,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
|
||||
sizeof(struct sense_data));
|
||||
memset(&dev->fsa_dev[cid].sense_data, 0,
|
||||
sizeof(struct sense_data));
|
||||
scsicmd->result = AAC_STAT_GOOD;
|
||||
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
|
||||
SAM_STAT_GOOD;
|
||||
break;
|
||||
|
||||
case ALLOW_MEDIUM_REMOVAL:
|
||||
@ -3290,7 +3301,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
|
||||
else
|
||||
fsa_dev_ptr[cid].locked = 0;
|
||||
|
||||
scsicmd->result = AAC_STAT_GOOD;
|
||||
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
|
||||
SAM_STAT_GOOD;
|
||||
break;
|
||||
/*
|
||||
* These commands are all No-Ops
|
||||
@ -3314,7 +3326,8 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
|
||||
case REZERO_UNIT:
|
||||
case REASSIGN_BLOCKS:
|
||||
case SEEK_10:
|
||||
scsicmd->result = AAC_STAT_GOOD;
|
||||
scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
|
||||
SAM_STAT_GOOD;
|
||||
break;
|
||||
|
||||
case START_STOP:
|
||||
|
@@ -514,7 +514,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
	 *	The only invalid cases are if the caller requests to wait and
	 *	does not request a response and if the caller does not want a
	 *	response and the Fib is not allocated from pool. If a response
	 *	is not requesed the Fib will just be deallocaed by the DPC
	 *	is not requested the Fib will just be deallocaed by the DPC
	 *	routine when the response comes back from the adapter. No
	 *	further processing will be done besides deleting the Fib. We
	 *	will have a debug mode where the adapter can notify the host
@@ -65,7 +65,7 @@ unsigned int aac_response_normal(struct aac_queue * q)
	/*
	 *	Keep pulling response QEs off the response queue and waking
	 *	up the waiters until there are no more QEs. We then return
	 *	back to the system. If no response was requesed we just
	 *	back to the system. If no response was requested we just
	 *	deallocate the Fib here and continue.
	 */
	while(aac_consumer_get(dev, q, &entry))
@@ -319,7 +319,7 @@ static void aac_rx_start_adapter(struct aac_dev *dev)
	union aac_init *init;

	init = dev->init;
	init->r7.host_elapsed_seconds = cpu_to_le32(get_seconds());
	init->r7.host_elapsed_seconds = cpu_to_le32(ktime_get_real_seconds());
	// We can only use a 32 bit address here
	rx_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS, (u32)(ulong)dev->init_pa,
		0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
@@ -251,7 +251,7 @@ static void aac_sa_start_adapter(struct aac_dev *dev)
	 * Fill in the remaining pieces of the init.
	 */
	init = dev->init;
	init->r7.host_elapsed_seconds = cpu_to_le32(get_seconds());
	init->r7.host_elapsed_seconds = cpu_to_le32(ktime_get_real_seconds());
	/* We can only use a 32 bit address here */
	sa_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
			(u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0,
@@ -409,7 +409,8 @@ static void aac_src_start_adapter(struct aac_dev *dev)

	init = dev->init;
	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
		init->r8.host_elapsed_seconds = cpu_to_le32(get_seconds());
		init->r8.host_elapsed_seconds =
			cpu_to_le32(ktime_get_real_seconds());
		src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
			lower_32_bits(dev->init_pa),
			upper_32_bits(dev->init_pa),
@@ -417,7 +418,8 @@ static void aac_src_start_adapter(struct aac_dev *dev)
			(AAC_MAX_HRRQ - 1) * sizeof(struct _rrq),
			0, 0, 0, NULL, NULL, NULL, NULL, NULL);
	} else {
		init->r7.host_elapsed_seconds = cpu_to_le32(get_seconds());
		init->r7.host_elapsed_seconds =
			cpu_to_le32(ktime_get_real_seconds());
		// We can only use a 32 bit address here
		src_sync_cmd(dev, INIT_STRUCT_BASE_ADDRESS,
			(u32)(ulong)dev->init_pa, 0, 0, 0, 0, 0,
@@ -2416,8 +2416,8 @@ static void asc_prt_scsi_host(struct Scsi_Host *s)
	struct asc_board *boardp = shost_priv(s);

	printk("Scsi_Host at addr 0x%p, device %s\n", s, dev_name(boardp->dev));
	printk(" host_busy %u, host_no %d,\n",
	       atomic_read(&s->host_busy), s->host_no);
	printk(" host_busy %d, host_no %d,\n",
	       scsi_host_busy(s), s->host_no);

	printk(" base 0x%lx, io_port 0x%lx, irq %d,\n",
	       (ulong)s->base, (ulong)s->io_port, boardp->irq);
@@ -3182,8 +3182,8 @@ static void asc_prt_driver_conf(struct seq_file *m, struct Scsi_Host *shost)
		   shost->host_no);

	seq_printf(m,
		   " host_busy %u, max_id %u, max_lun %llu, max_channel %u\n",
		   atomic_read(&shost->host_busy), shost->max_id,
		   " host_busy %d, max_id %u, max_lun %llu, max_channel %u\n",
		   scsi_host_busy(shost), shost->max_id,
		   shost->max_lun, shost->max_channel);

	seq_printf(m,
@@ -8466,7 +8466,7 @@ static int AdvExeScsiQueue(ADV_DVC_VAR *asc_dvc, adv_req_t *reqp)
}

/*
 * Execute a single 'Scsi_Cmnd'.
 * Execute a single 'struct scsi_cmnd'.
 */
static int asc_execute_scsi_cmnd(struct scsi_cmnd *scp)
{
@ -422,16 +422,16 @@ enum aha152x_state {
|
||||
*
|
||||
*/
|
||||
struct aha152x_hostdata {
|
||||
Scsi_Cmnd *issue_SC;
|
||||
struct scsi_cmnd *issue_SC;
|
||||
/* pending commands to issue */
|
||||
|
||||
Scsi_Cmnd *current_SC;
|
||||
struct scsi_cmnd *current_SC;
|
||||
/* current command on the bus */
|
||||
|
||||
Scsi_Cmnd *disconnected_SC;
|
||||
struct scsi_cmnd *disconnected_SC;
|
||||
/* commands that disconnected */
|
||||
|
||||
Scsi_Cmnd *done_SC;
|
||||
struct scsi_cmnd *done_SC;
|
||||
/* command that was completed */
|
||||
|
||||
spinlock_t lock;
|
||||
@ -510,7 +510,7 @@ struct aha152x_hostdata {
|
||||
*
|
||||
*/
|
||||
struct aha152x_scdata {
|
||||
Scsi_Cmnd *next; /* next sc in queue */
|
||||
struct scsi_cmnd *next; /* next sc in queue */
|
||||
struct completion *done;/* semaphore to block on */
|
||||
struct scsi_eh_save ses;
|
||||
};
|
||||
@ -633,7 +633,7 @@ static void aha152x_error(struct Scsi_Host *shpnt, char *msg);
|
||||
static void done(struct Scsi_Host *shpnt, int error);
|
||||
|
||||
/* diagnostics */
|
||||
static void show_command(Scsi_Cmnd * ptr);
|
||||
static void show_command(struct scsi_cmnd * ptr);
|
||||
static void show_queues(struct Scsi_Host *shpnt);
|
||||
static void disp_enintr(struct Scsi_Host *shpnt);
|
||||
|
||||
@ -642,9 +642,9 @@ static void disp_enintr(struct Scsi_Host *shpnt);
|
||||
* queue services:
|
||||
*
|
||||
*/
|
||||
static inline void append_SC(Scsi_Cmnd **SC, Scsi_Cmnd *new_SC)
|
||||
static inline void append_SC(struct scsi_cmnd **SC, struct scsi_cmnd *new_SC)
|
||||
{
|
||||
Scsi_Cmnd *end;
|
||||
struct scsi_cmnd *end;
|
||||
|
||||
SCNEXT(new_SC) = NULL;
|
||||
if (!*SC)
|
||||
@ -656,9 +656,9 @@ static inline void append_SC(Scsi_Cmnd **SC, Scsi_Cmnd *new_SC)
|
||||
}
|
||||
}
|
||||
|
||||
static inline Scsi_Cmnd *remove_first_SC(Scsi_Cmnd ** SC)
|
||||
static inline struct scsi_cmnd *remove_first_SC(struct scsi_cmnd ** SC)
|
||||
{
|
||||
Scsi_Cmnd *ptr;
|
||||
struct scsi_cmnd *ptr;
|
||||
|
||||
ptr = *SC;
|
||||
if (ptr) {
|
||||
@ -668,9 +668,10 @@ static inline Scsi_Cmnd *remove_first_SC(Scsi_Cmnd ** SC)
|
||||
return ptr;
|
||||
}
|
||||
|
||||
static inline Scsi_Cmnd *remove_lun_SC(Scsi_Cmnd ** SC, int target, int lun)
|
||||
static inline struct scsi_cmnd *remove_lun_SC(struct scsi_cmnd ** SC,
|
||||
int target, int lun)
|
||||
{
|
||||
Scsi_Cmnd *ptr, *prev;
|
||||
struct scsi_cmnd *ptr, *prev;
|
||||
|
||||
for (ptr = *SC, prev = NULL;
|
||||
ptr && ((ptr->device->id != target) || (ptr->device->lun != lun));
|
||||
@ -689,9 +690,10 @@ static inline Scsi_Cmnd *remove_lun_SC(Scsi_Cmnd ** SC, int target, int lun)
|
||||
return ptr;
|
||||
}
|
||||
|
||||
static inline Scsi_Cmnd *remove_SC(Scsi_Cmnd **SC, Scsi_Cmnd *SCp)
|
||||
static inline struct scsi_cmnd *remove_SC(struct scsi_cmnd **SC,
|
||||
struct scsi_cmnd *SCp)
|
||||
{
|
||||
Scsi_Cmnd *ptr, *prev;
|
||||
struct scsi_cmnd *ptr, *prev;
|
||||
|
||||
for (ptr = *SC, prev = NULL;
|
||||
ptr && SCp!=ptr;
|
||||
@ -912,8 +914,9 @@ static int setup_expected_interrupts(struct Scsi_Host *shpnt)
|
||||
/*
|
||||
* Queue a command and setup interrupts for a free bus.
|
||||
*/
|
||||
static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete,
|
||||
int phase, void (*done)(Scsi_Cmnd *))
|
||||
static int aha152x_internal_queue(struct scsi_cmnd *SCpnt,
|
||||
struct completion *complete,
|
||||
int phase, void (*done)(struct scsi_cmnd *))
|
||||
{
|
||||
struct Scsi_Host *shpnt = SCpnt->device->host;
|
||||
unsigned long flags;
|
||||
@ -987,7 +990,8 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete,
|
||||
* queue a command
|
||||
*
|
||||
*/
|
||||
static int aha152x_queue_lck(Scsi_Cmnd *SCpnt, void (*done)(Scsi_Cmnd *))
|
||||
static int aha152x_queue_lck(struct scsi_cmnd *SCpnt,
|
||||
void (*done)(struct scsi_cmnd *))
|
||||
{
|
||||
return aha152x_internal_queue(SCpnt, NULL, 0, done);
|
||||
}
|
||||
@ -998,7 +1002,7 @@ static DEF_SCSI_QCMD(aha152x_queue)
|
||||
/*
|
||||
*
|
||||
*/
|
||||
static void reset_done(Scsi_Cmnd *SCpnt)
|
||||
static void reset_done(struct scsi_cmnd *SCpnt)
|
||||
{
|
||||
if(SCSEM(SCpnt)) {
|
||||
complete(SCSEM(SCpnt));
|
||||
@ -1011,10 +1015,10 @@ static void reset_done(Scsi_Cmnd *SCpnt)
|
||||
* Abort a command
|
||||
*
|
||||
*/
|
||||
static int aha152x_abort(Scsi_Cmnd *SCpnt)
|
||||
static int aha152x_abort(struct scsi_cmnd *SCpnt)
|
||||
{
|
||||
struct Scsi_Host *shpnt = SCpnt->device->host;
|
||||
Scsi_Cmnd *ptr;
|
||||
struct scsi_cmnd *ptr;
|
||||
unsigned long flags;
|
||||
|
||||
DO_LOCK(flags);
|
||||
@ -1052,7 +1056,7 @@ static int aha152x_abort(Scsi_Cmnd *SCpnt)
|
||||
* Reset a device
|
||||
*
|
||||
*/
|
||||
static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
|
||||
static int aha152x_device_reset(struct scsi_cmnd * SCpnt)
|
||||
{
|
||||
struct Scsi_Host *shpnt = SCpnt->device->host;
|
||||
DECLARE_COMPLETION(done);
|
||||
@ -1110,13 +1114,14 @@ static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void free_hard_reset_SCs(struct Scsi_Host *shpnt, Scsi_Cmnd **SCs)
|
||||
static void free_hard_reset_SCs(struct Scsi_Host *shpnt,
|
||||
struct scsi_cmnd **SCs)
|
||||
{
|
||||
Scsi_Cmnd *ptr;
|
||||
struct scsi_cmnd *ptr;
|
||||
|
||||
ptr=*SCs;
|
||||
while(ptr) {
|
||||
Scsi_Cmnd *next;
|
||||
struct scsi_cmnd *next;
|
||||
|
||||
if(SCDATA(ptr)) {
|
||||
next = SCNEXT(ptr);
|
||||
@ -1171,7 +1176,7 @@ static int aha152x_bus_reset_host(struct Scsi_Host *shpnt)
|
||||
* Reset the bus
|
||||
*
|
||||
*/
|
||||
static int aha152x_bus_reset(Scsi_Cmnd *SCpnt)
|
||||
static int aha152x_bus_reset(struct scsi_cmnd *SCpnt)
|
||||
{
|
||||
return aha152x_bus_reset_host(SCpnt->device->host);
|
||||
}
|
||||
@ -1436,7 +1441,7 @@ static void busfree_run(struct Scsi_Host *shpnt)
|
||||
|
||||
if(!(DONE_SC->SCp.phase & not_issued)) {
|
||||
struct aha152x_scdata *sc;
|
||||
Scsi_Cmnd *ptr = DONE_SC;
|
||||
struct scsi_cmnd *ptr = DONE_SC;
|
||||
DONE_SC=NULL;
|
||||
|
||||
sc = SCDATA(ptr);
|
||||
@ -1451,7 +1456,7 @@ static void busfree_run(struct Scsi_Host *shpnt)
|
||||
}
|
||||
|
||||
if(DONE_SC && DONE_SC->scsi_done) {
|
||||
Scsi_Cmnd *ptr = DONE_SC;
|
||||
struct scsi_cmnd *ptr = DONE_SC;
|
||||
DONE_SC=NULL;
|
||||
|
||||
/* turn led off, when no commands are in the driver */
|
||||
@ -2247,13 +2252,13 @@ static void parerr_run(struct Scsi_Host *shpnt)
|
||||
*/
|
||||
static void rsti_run(struct Scsi_Host *shpnt)
|
||||
{
|
||||
Scsi_Cmnd *ptr;
|
||||
struct scsi_cmnd *ptr;
|
||||
|
||||
shost_printk(KERN_NOTICE, shpnt, "scsi reset in\n");
|
||||
|
||||
ptr=DISCONNECTED_SC;
|
||||
while(ptr) {
|
||||
Scsi_Cmnd *next = SCNEXT(ptr);
|
||||
struct scsi_cmnd *next = SCNEXT(ptr);
|
||||
|
||||
if (!ptr->device->soft_reset) {
|
||||
remove_SC(&DISCONNECTED_SC, ptr);
|
||||
@ -2438,7 +2443,7 @@ static void disp_enintr(struct Scsi_Host *shpnt)
|
||||
/*
|
||||
* Show the command data of a command
|
||||
*/
|
||||
static void show_command(Scsi_Cmnd *ptr)
|
||||
static void show_command(struct scsi_cmnd *ptr)
|
||||
{
|
||||
scsi_print_command(ptr);
|
||||
scmd_printk(KERN_DEBUG, ptr,
|
||||
@ -2462,7 +2467,7 @@ static void show_command(Scsi_Cmnd *ptr)
|
||||
*/
|
||||
static void show_queues(struct Scsi_Host *shpnt)
|
||||
{
|
||||
Scsi_Cmnd *ptr;
|
||||
struct scsi_cmnd *ptr;
|
||||
unsigned long flags;
|
||||
|
||||
DO_LOCK(flags);
|
||||
@ -2484,7 +2489,7 @@ static void show_queues(struct Scsi_Host *shpnt)
|
||||
disp_enintr(shpnt);
|
||||
}
|
||||
|
||||
static void get_command(struct seq_file *m, Scsi_Cmnd * ptr)
|
||||
static void get_command(struct seq_file *m, struct scsi_cmnd * ptr)
|
||||
{
|
||||
int i;
|
||||
|
||||
@ -2813,7 +2818,7 @@ static int aha152x_set_info(struct Scsi_Host *shpnt, char *buffer, int length)
|
||||
static int aha152x_show_info(struct seq_file *m, struct Scsi_Host *shpnt)
|
||||
{
|
||||
int i;
|
||||
Scsi_Cmnd *ptr;
|
||||
struct scsi_cmnd *ptr;
|
||||
unsigned long flags;
|
||||
|
||||
seq_puts(m, AHA152X_REVID "\n");
|
||||
|
@ -207,11 +207,11 @@ static int aha1740_test_port(unsigned int base)
|
||||
static irqreturn_t aha1740_intr_handle(int irq, void *dev_id)
|
||||
{
|
||||
struct Scsi_Host *host = (struct Scsi_Host *) dev_id;
|
||||
void (*my_done)(Scsi_Cmnd *);
|
||||
void (*my_done)(struct scsi_cmnd *);
|
||||
int errstatus, adapstat;
|
||||
int number_serviced;
|
||||
struct ecb *ecbptr;
|
||||
Scsi_Cmnd *SCtmp;
|
||||
struct scsi_cmnd *SCtmp;
|
||||
unsigned int base;
|
||||
unsigned long flags;
|
||||
int handled = 0;
|
||||
@ -311,7 +311,8 @@ static irqreturn_t aha1740_intr_handle(int irq, void *dev_id)
|
||||
return IRQ_RETVAL(handled);
|
||||
}
|
||||
|
||||
static int aha1740_queuecommand_lck(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
|
||||
static int aha1740_queuecommand_lck(struct scsi_cmnd * SCpnt,
|
||||
void (*done)(struct scsi_cmnd *))
|
||||
{
|
||||
unchar direction;
|
||||
unchar *cmd = (unchar *) SCpnt->cmnd;
|
||||
@ -520,7 +521,7 @@ static int aha1740_biosparam(struct scsi_device *sdev,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int aha1740_eh_abort_handler (Scsi_Cmnd *dummy)
|
||||
static int aha1740_eh_abort_handler (struct scsi_cmnd *dummy)
|
||||
{
|
||||
/*
|
||||
* From Alan Cox :
|
||||
|
@ -135,8 +135,8 @@ struct ecb { /* Enhanced Control Block 6.1 */
|
||||
/* Hardware defined portion ends here, rest is driver defined */
|
||||
u8 sense[MAX_SENSE]; /* Sense area */
|
||||
u8 status[MAX_STATUS]; /* Status area */
|
||||
Scsi_Cmnd *SCpnt; /* Link to the SCSI Command Block */
|
||||
void (*done) (Scsi_Cmnd *); /* Completion Function */
|
||||
struct scsi_cmnd *SCpnt; /* Link to the SCSI Command Block */
|
||||
void (*done) (struct scsi_cmnd *); /* Completion Function */
|
||||
};
|
||||
|
||||
#define AHA1740CMD_NOP 0x00 /* No OP */
|
||||
|
@@ -1030,8 +1030,10 @@ static int __init aic94xx_init(void)

 	aic94xx_transport_template =
 		sas_domain_attach_transport(&aic94xx_transport_functions);
-	if (!aic94xx_transport_template)
+	if (!aic94xx_transport_template) {
+		err = -ENOMEM;
 		goto out_destroy_caches;
+	}

 	err = pci_register_driver(&aic94xx_pci_driver);
 	if (err)
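The aic94xx fix above is the classic init-path error-code bug: on allocation failure the code jumped to the unwind label while err still held 0, so module load appeared to succeed with nothing registered. A generic sketch of the corrected shape, with hypothetical helper names:

#include <errno.h>

void *attach_transport(void);		/* hypothetical stand-ins for the real setup calls */
void destroy_caches(void);

static int example_init(void)
{
	void *tmpl;
	int err;

	tmpl = attach_transport();
	if (!tmpl) {
		err = -ENOMEM;		/* without this, the caller would see success */
		goto out_destroy_caches;
	}
	return 0;

out_destroy_caches:
	destroy_caches();
	return err;
}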
@@ -49,7 +49,7 @@ struct device_attribute;
 #define ARCMSR_MAX_OUTSTANDING_CMD 1024
 #define ARCMSR_DEFAULT_OUTSTANDING_CMD 128
 #define ARCMSR_MIN_OUTSTANDING_CMD 32
-#define ARCMSR_DRIVER_VERSION "v1.40.00.05-20180309"
+#define ARCMSR_DRIVER_VERSION "v1.40.00.09-20180709"
 #define ARCMSR_SCSI_INITIATOR_ID 255
 #define ARCMSR_MAX_XFER_SECTORS 512
 #define ARCMSR_MAX_XFER_SECTORS_B 4096
@ -1061,6 +1061,13 @@ static int arcmsr_resume(struct pci_dev *pdev)
|
||||
pci_set_master(pdev);
|
||||
if (arcmsr_request_irq(pdev, acb) == FAILED)
|
||||
goto controller_stop;
|
||||
if (acb->adapter_type == ACB_ADAPTER_TYPE_E) {
|
||||
writel(0, &acb->pmuE->host_int_status);
|
||||
writel(ARCMSR_HBEMU_DOORBELL_SYNC, &acb->pmuE->iobound_doorbell);
|
||||
acb->in_doorbell = 0;
|
||||
acb->out_doorbell = 0;
|
||||
acb->doneq_index = 0;
|
||||
}
|
||||
arcmsr_iop_init(acb);
|
||||
arcmsr_init_get_devmap_timer(acb);
|
||||
if (set_date_time)
|
||||
|
@ -1055,7 +1055,7 @@ static void tscam(struct Scsi_Host *host, bool wide_chip, u8 scam_on)
|
||||
udelay(2); /* 2 deskew delay(45ns*2=90ns) */
|
||||
val &= 0x007f; /* no bsy */
|
||||
atp_writew_io(dev, 0, 0x1c, val);
|
||||
mdelay(128);
|
||||
msleep(128);
|
||||
val &= 0x00fb; /* after 1ms no msg */
|
||||
atp_writew_io(dev, 0, 0x1c, val);
|
||||
while ((atp_readb_io(dev, 0, 0x1c) & 0x04) != 0)
|
||||
@ -1286,9 +1286,9 @@ static void atp870_init(struct Scsi_Host *shpnt)
|
||||
k = (atp_readb_base(atpdev, 0x3a) & 0xf3) | 0x10;
|
||||
atp_writeb_base(atpdev, 0x3a, k);
|
||||
atp_writeb_base(atpdev, 0x3a, k & 0xdf);
|
||||
mdelay(32);
|
||||
msleep(32);
|
||||
atp_writeb_base(atpdev, 0x3a, k);
|
||||
mdelay(32);
|
||||
msleep(32);
|
||||
atp_set_host_id(atpdev, 0, host_id);
|
||||
|
||||
tscam(shpnt, wide_chip, scam_on);
|
||||
@ -1370,9 +1370,9 @@ static void atp880_init(struct Scsi_Host *shpnt)
|
||||
k = atp_readb_base(atpdev, 0x38) & 0x80;
|
||||
atp_writeb_base(atpdev, 0x38, k);
|
||||
atp_writeb_base(atpdev, 0x3b, 0x20);
|
||||
mdelay(32);
|
||||
msleep(32);
|
||||
atp_writeb_base(atpdev, 0x3b, 0);
|
||||
mdelay(32);
|
||||
msleep(32);
|
||||
atp_readb_io(atpdev, 0, 0x1b);
|
||||
atp_readb_io(atpdev, 0, 0x17);
|
||||
|
||||
@ -1454,10 +1454,10 @@ static void atp885_init(struct Scsi_Host *shpnt)
|
||||
atp_writeb_base(atpdev, 0x28, k);
|
||||
atp_writeb_pci(atpdev, 0, 1, 0x80);
|
||||
atp_writeb_pci(atpdev, 1, 1, 0x80);
|
||||
mdelay(100);
|
||||
msleep(100);
|
||||
atp_writeb_pci(atpdev, 0, 1, 0);
|
||||
atp_writeb_pci(atpdev, 1, 1, 0);
|
||||
mdelay(1000);
|
||||
msleep(1000);
|
||||
atp_readb_io(atpdev, 0, 0x1b);
|
||||
atp_readb_io(atpdev, 0, 0x17);
|
||||
atp_readb_io(atpdev, 1, 0x1b);
|
||||
@ -1473,7 +1473,7 @@ static void atp885_init(struct Scsi_Host *shpnt)
|
||||
k = (k & 0x07) | 0x40;
|
||||
atp_set_host_id(atpdev, 1, k);
|
||||
|
||||
mdelay(600); /* this delay used to be called tscam_885() */
|
||||
msleep(600); /* this delay used to be called tscam_885() */
|
||||
dev_info(&pdev->dev, "Scanning Channel A SCSI Device ...\n");
|
||||
atp_is(atpdev, 0, true, atp_readb_io(atpdev, 0, 0x1b) >> 7);
|
||||
atp_writeb_io(atpdev, 0, 0x16, 0x80);
|
||||
|
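The atp870u hunks above all make the same substitution: long busy-wait delays become sleeps. The rule of thumb they follow, in sketch form (kernel delay API, process context assumed):

#include <linux/delay.h>

static void settle_after_reset(void)
{
	udelay(2);	/* a few microseconds: busy-waiting is acceptable */
	msleep(128);	/* tens of milliseconds or more: sleep and let the CPU do other work */
}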
@@ -1545,7 +1545,7 @@ int beiscsi_set_host_data(struct beiscsi_hba *phba)
 	snprintf((char *)ioctl->param.req.param_data,
 		 sizeof(ioctl->param.req.param_data),
 		 "Linux iSCSI v%s", BUILD_STR);
-	ioctl->param.req.param_len = ALIGN(ioctl->param.req.param_len, 4);
+	ioctl->param.req.param_len = ALIGN(ioctl->param.req.param_len + 1, 4);
 	if (ioctl->param.req.param_len > BE_CMD_MAX_DRV_VERSION)
 		ioctl->param.req.param_len = BE_CMD_MAX_DRV_VERSION;
 	ret = be_mbox_notify(ctrl);
|
||||
|
@ -1,11 +1,14 @@
|
||||
/*
|
||||
* Copyright 2017 Broadcom. All Rights Reserved.
|
||||
* The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
|
||||
* This file is part of the Emulex Linux Device Driver for Enterprise iSCSI
|
||||
* Host Bus Adapters. Refer to the README file included with this package
|
||||
* for driver version and adapter compatibility.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License version 2
|
||||
* as published by the Free Software Foundation. The full GNU General
|
||||
* Public License is included in this distribution in the file called COPYING.
|
||||
* Copyright (c) 2018 Broadcom. All Rights Reserved.
|
||||
* The term "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of version 2 of the GNU General Public License as published
|
||||
* by the Free Software Foundation.
|
||||
*
|
||||
* Contact Information:
|
||||
* linux-drivers@broadcom.com
|
||||
|
@ -1,11 +1,22 @@
|
||||
/*
|
||||
* Copyright 2017 Broadcom. All Rights Reserved.
|
||||
* The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
|
||||
* This file is part of the Emulex Linux Device Driver for Enterprise iSCSI
|
||||
* Host Bus Adapters. Refer to the README file included with this package
|
||||
* for driver version and adapter compatibility.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License version 2
|
||||
* as published by the Free Software Foundation. The full GNU General
|
||||
* Public License is included in this distribution in the file called COPYING.
|
||||
* Copyright (c) 2018 Broadcom. All Rights Reserved.
|
||||
* The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of version 2 of the GNU General Public License as published
|
||||
* by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful. ALL EXPRESS
|
||||
* OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY
|
||||
* IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
|
||||
* OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH
|
||||
* DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
|
||||
* See the GNU General Public License for more details, a copy of which
|
||||
* can be found in the file COPYING included with this package.
|
||||
*
|
||||
* Contact Information:
|
||||
* linux-drivers@broadcom.com
|
||||
|
@ -1,11 +1,22 @@
|
||||
/*
|
||||
* Copyright 2017 Broadcom. All Rights Reserved.
|
||||
* The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
|
||||
* This file is part of the Emulex Linux Device Driver for Enterprise iSCSI
|
||||
* Host Bus Adapters. Refer to the README file included with this package
|
||||
* for driver version and adapter compatibility.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License version 2
|
||||
* as published by the Free Software Foundation. The full GNU General
|
||||
* Public License is included in this distribution in the file called COPYING.
|
||||
* Copyright (c) 2018 Broadcom. All Rights Reserved.
|
||||
* The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of version 2 of the GNU General Public License as published
|
||||
* by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful. ALL EXPRESS
|
||||
* OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY
|
||||
* IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
|
||||
* OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH
|
||||
* DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
|
||||
* See the GNU General Public License for more details, a copy of which
|
||||
* can be found in the file COPYING included with this package.
|
||||
*
|
||||
* Contact Information:
|
||||
* linux-drivers@broadcom.com
|
||||
|
@ -70,21 +70,18 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
|
||||
host_status = DID_ERROR;
|
||||
}
|
||||
}
|
||||
cmnd->result = ScsiResult(host_status, scsi_status);
|
||||
cmnd->result = host_status << 16 | scsi_status;
|
||||
|
||||
break;
|
||||
|
||||
case BFI_IOIM_STS_TIMEDOUT:
|
||||
host_status = DID_TIME_OUT;
|
||||
cmnd->result = ScsiResult(host_status, 0);
|
||||
cmnd->result = DID_TIME_OUT << 16;
|
||||
break;
|
||||
case BFI_IOIM_STS_PATHTOV:
|
||||
host_status = DID_TRANSPORT_DISRUPTED;
|
||||
cmnd->result = ScsiResult(host_status, 0);
|
||||
cmnd->result = DID_TRANSPORT_DISRUPTED << 16;
|
||||
break;
|
||||
default:
|
||||
host_status = DID_ERROR;
|
||||
cmnd->result = ScsiResult(host_status, 0);
|
||||
cmnd->result = DID_ERROR << 16;
|
||||
}
|
||||
|
||||
/* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
|
||||
@ -117,7 +114,7 @@ bfa_cb_ioim_good_comp(void *drv, struct bfad_ioim_s *dio)
|
||||
struct bfad_itnim_data_s *itnim_data;
|
||||
struct bfad_itnim_s *itnim;
|
||||
|
||||
cmnd->result = ScsiResult(DID_OK, SCSI_STATUS_GOOD);
|
||||
cmnd->result = DID_OK << 16 | SCSI_STATUS_GOOD;
|
||||
|
||||
/* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
|
||||
if (cmnd->device->host != NULL)
|
||||
@ -144,7 +141,7 @@ bfa_cb_ioim_abort(void *drv, struct bfad_ioim_s *dio)
|
||||
struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
|
||||
struct bfad_s *bfad = drv;
|
||||
|
||||
cmnd->result = ScsiResult(DID_ERROR, 0);
|
||||
cmnd->result = DID_ERROR << 16;
|
||||
|
||||
/* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
|
||||
if (cmnd->device->host != NULL)
|
||||
@ -1253,14 +1250,14 @@ bfad_im_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd
|
||||
printk(KERN_WARNING
|
||||
"bfad%d, queuecommand %p %x failed, BFA stopped\n",
|
||||
bfad->inst_no, cmnd, cmnd->cmnd[0]);
|
||||
cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
|
||||
cmnd->result = DID_NO_CONNECT << 16;
|
||||
goto out_fail_cmd;
|
||||
}
|
||||
|
||||
|
||||
itnim = itnim_data->itnim;
|
||||
if (!itnim) {
|
||||
cmnd->result = ScsiResult(DID_IMM_RETRY, 0);
|
||||
cmnd->result = DID_IMM_RETRY << 16;
|
||||
goto out_fail_cmd;
|
||||
}
|
||||
|
||||
|
@@ -44,7 +44,6 @@ u32 bfad_im_supported_speeds(struct bfa_s *bfa);
 #define MAX_FCP_LUN 16384
 #define BFAD_TARGET_RESET_TMO 60
 #define BFAD_LUN_RESET_TMO 60
-#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
 #define BFA_QUEUE_FULL_RAMP_UP_TIME 120

 /*
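The bfa hunks above open-code what the removed ScsiResult() macro did: the host byte lives in bits 23:16 of cmnd->result and the SCSI status byte in bits 7:0. A standalone sketch of that encoding (DID_* values as defined in scsi.h):

#include <stdint.h>

#define DID_OK		0x00
#define DID_TIME_OUT	0x03

static inline uint32_t scsi_build_result(uint8_t host_byte, uint8_t status_byte)
{
	/* host byte in bits 23:16, SCSI status byte in bits 7:0 */
	return ((uint32_t)host_byte << 16) | status_byte;
}

/* e.g. scsi_build_result(DID_TIME_OUT, 0) == 0x00030000 */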
@ -2727,6 +2727,8 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
|
||||
BNX2X_DOORBELL_PCI_BAR);
|
||||
reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF);
|
||||
ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
|
||||
if (!ep->qp.ctx_base)
|
||||
return -ENOMEM;
|
||||
goto arm_cq;
|
||||
}
|
||||
|
||||
|
@@ -199,7 +199,7 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd, int cmd_len,
 			      buflength, &sshdr, timeout * HZ,
 			      MAX_RETRIES, NULL);

-	if (driver_byte(result) & DRIVER_SENSE) {
+	if (driver_byte(result) == DRIVER_SENSE) {
 		if (debug)
 			scsi_print_sense_hdr(ch->device, ch->name, &sshdr);
 		errno = ch_find_errno(&sshdr);
@ -761,27 +761,116 @@ csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size)
|
||||
static int
|
||||
csio_hw_get_flash_params(struct csio_hw *hw)
|
||||
{
|
||||
/* Table for non-Numonix supported flash parts. Numonix parts are left
|
||||
* to the preexisting code. All flash parts have 64KB sectors.
|
||||
*/
|
||||
static struct flash_desc {
|
||||
u32 vendor_and_model_id;
|
||||
u32 size_mb;
|
||||
} supported_flash[] = {
|
||||
{ 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
|
||||
};
|
||||
|
||||
u32 part, manufacturer;
|
||||
u32 density, size = 0;
|
||||
u32 flashid = 0;
|
||||
int ret;
|
||||
uint32_t info = 0;
|
||||
|
||||
ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID);
|
||||
if (!ret)
|
||||
ret = csio_hw_sf1_read(hw, 3, 0, 1, &info);
|
||||
ret = csio_hw_sf1_read(hw, 3, 0, 1, &flashid);
|
||||
csio_wr_reg32(hw, 0, SF_OP_A); /* unlock SF */
|
||||
if (ret != 0)
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if ((info & 0xff) != 0x20) /* not a Numonix flash */
|
||||
return -EINVAL;
|
||||
info >>= 16; /* log2 of size */
|
||||
if (info >= 0x14 && info < 0x18)
|
||||
hw->params.sf_nsec = 1 << (info - 16);
|
||||
else if (info == 0x18)
|
||||
hw->params.sf_nsec = 64;
|
||||
else
|
||||
return -EINVAL;
|
||||
hw->params.sf_size = 1 << info;
|
||||
/* Check to see if it's one of our non-standard supported Flash parts.
|
||||
*/
|
||||
for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
|
||||
if (supported_flash[part].vendor_and_model_id == flashid) {
|
||||
hw->params.sf_size = supported_flash[part].size_mb;
|
||||
hw->params.sf_nsec =
|
||||
hw->params.sf_size / SF_SEC_SIZE;
|
||||
goto found;
|
||||
}
|
||||
|
||||
/* Decode Flash part size. The code below looks repetative with
|
||||
* common encodings, but that's not guaranteed in the JEDEC
|
||||
* specification for the Read JADEC ID command. The only thing that
|
||||
* we're guaranteed by the JADEC specification is where the
|
||||
* Manufacturer ID is in the returned result. After that each
|
||||
* Manufacturer ~could~ encode things completely differently.
|
||||
* Note, all Flash parts must have 64KB sectors.
|
||||
*/
|
||||
manufacturer = flashid & 0xff;
|
||||
switch (manufacturer) {
|
||||
case 0x20: { /* Micron/Numonix */
|
||||
/* This Density -> Size decoding table is taken from Micron
|
||||
* Data Sheets.
|
||||
*/
|
||||
density = (flashid >> 16) & 0xff;
|
||||
switch (density) {
|
||||
case 0x14 ... 0x19: /* 1MB - 32MB */
|
||||
size = 1 << density;
|
||||
break;
|
||||
case 0x20: /* 64MB */
|
||||
size = 1 << 26;
|
||||
break;
|
||||
case 0x21: /* 128MB */
|
||||
size = 1 << 27;
|
||||
break;
|
||||
case 0x22: /* 256MB */
|
||||
size = 1 << 28;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
|
||||
/* This Density -> Size decoding table is taken from ISSI
|
||||
* Data Sheets.
|
||||
*/
|
||||
density = (flashid >> 16) & 0xff;
|
||||
switch (density) {
|
||||
case 0x16: /* 32 MB */
|
||||
size = 1 << 25;
|
||||
break;
|
||||
case 0x17: /* 64MB */
|
||||
size = 1 << 26;
|
||||
}
|
||||
break;
|
||||
}
|
||||
case 0xc2: /* Macronix */
|
||||
case 0xef: /* Winbond */ {
|
||||
/* This Density -> Size decoding table is taken from
|
||||
* Macronix and Winbond Data Sheets.
|
||||
*/
|
||||
density = (flashid >> 16) & 0xff;
|
||||
switch (density) {
|
||||
case 0x17: /* 8MB */
|
||||
case 0x18: /* 16MB */
|
||||
size = 1 << density;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* If we didn't recognize the FLASH part, that's no real issue: the
|
||||
* Hardware/Software contract says that Hardware will _*ALWAYS*_
|
||||
* use a FLASH part which is at least 4MB in size and has 64KB
|
||||
* sectors. The unrecognized FLASH part is likely to be much larger
|
||||
* than 4MB, but that's all we really need.
|
||||
*/
|
||||
if (size == 0) {
|
||||
csio_warn(hw, "Unknown Flash Part, ID = %#x, assuming 4MB\n",
|
||||
flashid);
|
||||
size = 1 << 22;
|
||||
}
|
||||
|
||||
/* Store decoded Flash size */
|
||||
hw->params.sf_size = size;
|
||||
hw->params.sf_nsec = size / SF_SEC_SIZE;
|
||||
|
||||
found:
|
||||
if (hw->params.sf_size < FLASH_MIN_SIZE)
|
||||
csio_warn(hw, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
|
||||
flashid, hw->params.sf_size, FLASH_MIN_SIZE);
|
||||
return 0;
|
||||
}
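The rewritten csio_hw_get_flash_params() above parses the 24 bits returned by the JEDEC Read-ID command: manufacturer in the low byte, density code in bits 23:16, where the density is usually log2 of the part size in bytes. A compressed, standalone sketch of that decode, reduced to the vendor cases shown above:

#include <stdint.h>

static uint32_t flash_size_bytes(uint32_t flashid)
{
	uint8_t manufacturer = flashid & 0xff;		/* JEDEC manufacturer ID */
	uint8_t density = (flashid >> 16) & 0xff;	/* log2 of size for these vendors */

	switch (manufacturer) {
	case 0x20:					/* Micron/Numonix */
		if (density >= 0x14 && density <= 0x19)
			return 1u << density;		/* 1MB .. 32MB */
		break;
	case 0xc2:					/* Macronix */
	case 0xef:					/* Winbond */
		if (density == 0x17 || density == 0x18)
			return 1u << density;		/* 8MB or 16MB */
		break;
	}
	return 1u << 22;				/* unknown part: assume the guaranteed 4MB minimum */
}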
|
||||
|
||||
|
@ -39,6 +39,7 @@
|
||||
#include <asm/page.h>
|
||||
#include <linux/cache.h>
|
||||
|
||||
#include "t4_values.h"
|
||||
#include "csio_hw.h"
|
||||
#include "csio_wr.h"
|
||||
#include "csio_mb.h"
|
||||
@ -1309,8 +1310,11 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
|
||||
struct csio_sge *sge = &wrm->sge;
|
||||
uint32_t clsz = L1_CACHE_BYTES;
|
||||
uint32_t s_hps = PAGE_SHIFT - 10;
|
||||
uint32_t ingpad = 0;
|
||||
uint32_t stat_len = clsz > 64 ? 128 : 64;
|
||||
u32 fl_align = clsz < 32 ? 32 : clsz;
|
||||
u32 pack_align;
|
||||
u32 ingpad, ingpack;
|
||||
int pcie_cap;
|
||||
|
||||
csio_wr_reg32(hw, HOSTPAGESIZEPF0_V(s_hps) | HOSTPAGESIZEPF1_V(s_hps) |
|
||||
HOSTPAGESIZEPF2_V(s_hps) | HOSTPAGESIZEPF3_V(s_hps) |
|
||||
@ -1318,14 +1322,82 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
|
||||
HOSTPAGESIZEPF6_V(s_hps) | HOSTPAGESIZEPF7_V(s_hps),
|
||||
SGE_HOST_PAGE_SIZE_A);
|
||||
|
||||
sge->csio_fl_align = clsz < 32 ? 32 : clsz;
|
||||
ingpad = ilog2(sge->csio_fl_align) - 5;
|
||||
/* T5 introduced the separation of the Free List Padding and
|
||||
* Packing Boundaries. Thus, we can select a smaller Padding
|
||||
* Boundary to avoid uselessly chewing up PCIe Link and Memory
|
||||
* Bandwidth, and use a Packing Boundary which is large enough
|
||||
* to avoid false sharing between CPUs, etc.
|
||||
*
|
||||
* For the PCI Link, the smaller the Padding Boundary the
|
||||
* better. For the Memory Controller, a smaller Padding
|
||||
* Boundary is better until we cross under the Memory Line
|
||||
* Size (the minimum unit of transfer to/from Memory). If we
|
||||
* have a Padding Boundary which is smaller than the Memory
|
||||
* Line Size, that'll involve a Read-Modify-Write cycle on the
|
||||
* Memory Controller which is never good.
|
||||
*/
|
||||
|
||||
/* We want the Packing Boundary to be based on the Cache Line
|
||||
* Size in order to help avoid False Sharing performance
|
||||
* issues between CPUs, etc. We also want the Packing
|
||||
* Boundary to incorporate the PCI-E Maximum Payload Size. We
|
||||
* get best performance when the Packing Boundary is a
|
||||
* multiple of the Maximum Payload Size.
|
||||
*/
|
||||
pack_align = fl_align;
|
||||
pcie_cap = pci_find_capability(hw->pdev, PCI_CAP_ID_EXP);
|
||||
if (pcie_cap) {
|
||||
u32 mps, mps_log;
|
||||
u16 devctl;
|
||||
|
||||
/* The PCIe Device Control Maximum Payload Size field
|
||||
* [bits 7:5] encodes sizes as powers of 2 starting at
|
||||
* 128 bytes.
|
||||
*/
|
||||
pci_read_config_word(hw->pdev,
|
||||
pcie_cap + PCI_EXP_DEVCTL,
|
||||
&devctl);
|
||||
mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
|
||||
mps = 1 << mps_log;
|
||||
if (mps > pack_align)
|
||||
pack_align = mps;
|
||||
}
|
||||
|
||||
/* T5/T6 have a special interpretation of the "0"
|
||||
* value for the Packing Boundary. This corresponds to 16
|
||||
* bytes instead of the expected 32 bytes.
|
||||
*/
|
||||
if (pack_align <= 16) {
|
||||
ingpack = INGPACKBOUNDARY_16B_X;
|
||||
fl_align = 16;
|
||||
} else if (pack_align == 32) {
|
||||
ingpack = INGPACKBOUNDARY_64B_X;
|
||||
fl_align = 64;
|
||||
} else {
|
||||
u32 pack_align_log = fls(pack_align) - 1;
|
||||
|
||||
ingpack = pack_align_log - INGPACKBOUNDARY_SHIFT_X;
|
||||
fl_align = pack_align;
|
||||
}
|
||||
|
||||
/* Use the smallest Ingress Padding which isn't smaller than
|
||||
* the Memory Controller Read/Write Size. We'll take that as
|
||||
* being 8 bytes since we don't know of any system with a
|
||||
* wider Memory Controller Bus Width.
|
||||
*/
|
||||
if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
|
||||
ingpad = INGPADBOUNDARY_32B_X;
|
||||
else
|
||||
ingpad = T6_INGPADBOUNDARY_8B_X;
|
||||
|
||||
csio_set_reg_field(hw, SGE_CONTROL_A,
|
||||
INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
|
||||
EGRSTATUSPAGESIZE_F,
|
||||
INGPADBOUNDARY_V(ingpad) |
|
||||
EGRSTATUSPAGESIZE_V(stat_len != 64));
|
||||
csio_set_reg_field(hw, SGE_CONTROL2_A,
|
||||
INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
|
||||
INGPACKBOUNDARY_V(ingpack));
|
||||
|
||||
/* FL BUFFER SIZE#0 is Page size i,e already aligned to cache line */
|
||||
csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0_A);
|
||||
@ -1337,14 +1409,16 @@ csio_wr_fixup_host_params(struct csio_hw *hw)
|
||||
if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS) {
|
||||
csio_wr_reg32(hw,
|
||||
(csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2_A) +
|
||||
sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
|
||||
fl_align - 1) & ~(fl_align - 1),
|
||||
SGE_FL_BUFFER_SIZE2_A);
|
||||
csio_wr_reg32(hw,
|
||||
(csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3_A) +
|
||||
sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
|
||||
fl_align - 1) & ~(fl_align - 1),
|
||||
SGE_FL_BUFFER_SIZE3_A);
|
||||
}
|
||||
|
||||
sge->csio_fl_align = fl_align;
|
||||
|
||||
csio_wr_reg32(hw, HPZ0_V(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ_A);
|
||||
|
||||
/* default value of rx_dma_offset of the NIC driver */
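The new packing-boundary logic above sizes the boundary from the PCIe Maximum Payload Size, which the Device Control register encodes in bits 7:5 as a power of two starting at 128 bytes. A worked sketch of that decode:

#include <stdint.h>

#define PCI_EXP_DEVCTL_PAYLOAD	0x00e0	/* bits 7:5 of the Device Control register */

static unsigned int pcie_max_payload_bytes(uint16_t devctl)
{
	unsigned int mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;

	return 1u << mps_log;	/* encoding 0 -> 128B, 1 -> 256B, 2 -> 512B, ... */
}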
|
||||
|
@ -1141,7 +1141,7 @@ static int afu_release(struct inode *inode, struct file *file)
|
||||
*
|
||||
* Return: 0 on success, -errno on failure
|
||||
*/
|
||||
static int ocxlflash_mmap_fault(struct vm_fault *vmf)
|
||||
static vm_fault_t ocxlflash_mmap_fault(struct vm_fault *vmf)
|
||||
{
|
||||
struct vm_area_struct *vma = vmf->vma;
|
||||
struct ocxlflash_context *ctx = vma->vm_file->private_data;
|
||||
@ -1164,8 +1164,7 @@ static int ocxlflash_mmap_fault(struct vm_fault *vmf)
|
||||
mmio_area = ctx->psn_phys;
|
||||
mmio_area += offset;
|
||||
|
||||
vm_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT);
|
||||
return VM_FAULT_NOPAGE;
|
||||
return vmf_insert_pfn(vma, vmf->address, mmio_area >> PAGE_SHIFT);
|
||||
}
|
||||
|
||||
static const struct vm_operations_struct ocxlflash_vmops = {
|
||||
|
@ -1104,7 +1104,7 @@ static struct page *get_err_page(struct cxlflash_cfg *cfg)
|
||||
*
|
||||
* Return: 0 on success, VM_FAULT_SIGBUS on failure
|
||||
*/
|
||||
static int cxlflash_mmap_fault(struct vm_fault *vmf)
|
||||
static vm_fault_t cxlflash_mmap_fault(struct vm_fault *vmf)
|
||||
{
|
||||
struct vm_area_struct *vma = vmf->vma;
|
||||
struct file *file = vma->vm_file;
|
||||
@ -1115,7 +1115,7 @@ static int cxlflash_mmap_fault(struct vm_fault *vmf)
|
||||
struct ctx_info *ctxi = NULL;
|
||||
struct page *err_page = NULL;
|
||||
enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE;
|
||||
int rc = 0;
|
||||
vm_fault_t rc = 0;
|
||||
int ctxid;
|
||||
|
||||
ctxid = cfg->ops->process_element(ctx);
|
||||
@ -1155,7 +1155,7 @@ static int cxlflash_mmap_fault(struct vm_fault *vmf)
|
||||
out:
|
||||
if (likely(ctxi))
|
||||
put_context(ctxi);
|
||||
dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
|
||||
dev_dbg(dev, "%s: returning rc=%x\n", __func__, rc);
|
||||
return rc;
|
||||
|
||||
err:
|
||||
|
@ -3473,9 +3473,8 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
|
||||
|
||||
/*if( srb->cmd->cmnd[0] == INQUIRY && */
|
||||
/* (host_byte(cmd->result) == DID_OK || status_byte(cmd->result) & CHECK_CONDITION) ) */
|
||||
if ((cmd->result == (DID_OK << 16)
|
||||
|| status_byte(cmd->result) &
|
||||
CHECK_CONDITION)) {
|
||||
if ((cmd->result == (DID_OK << 16) ||
|
||||
status_byte(cmd->result) == CHECK_CONDITION)) {
|
||||
if (!dcb->init_tcq_flag) {
|
||||
add_dev(acb, dcb, ptr);
|
||||
dcb->init_tcq_flag = 1;
|
||||
|
@ -2175,15 +2175,13 @@ static void fcoe_ctlr_disc_stop_locked(struct fc_lport *lport)
|
||||
{
|
||||
struct fc_rport_priv *rdata;
|
||||
|
||||
rcu_read_lock();
|
||||
mutex_lock(&lport->disc.disc_mutex);
|
||||
list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
|
||||
if (kref_get_unless_zero(&rdata->kref)) {
|
||||
fc_rport_logoff(rdata);
|
||||
kref_put(&rdata->kref, fc_rport_destroy);
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
mutex_lock(&lport->disc.disc_mutex);
|
||||
lport->disc.disc_callback = NULL;
|
||||
mutex_unlock(&lport->disc.disc_mutex);
|
||||
}
|
||||
@ -2712,7 +2710,7 @@ static unsigned long fcoe_ctlr_vn_age(struct fcoe_ctlr *fip)
|
||||
unsigned long deadline;
|
||||
|
||||
next_time = jiffies + msecs_to_jiffies(FIP_VN_BEACON_INT * 10);
|
||||
rcu_read_lock();
|
||||
mutex_lock(&lport->disc.disc_mutex);
|
||||
list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
|
||||
if (!kref_get_unless_zero(&rdata->kref))
|
||||
continue;
|
||||
@ -2733,7 +2731,7 @@ static unsigned long fcoe_ctlr_vn_age(struct fcoe_ctlr *fip)
|
||||
next_time = deadline;
|
||||
kref_put(&rdata->kref, fc_rport_destroy);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
mutex_unlock(&lport->disc.disc_mutex);
|
||||
return next_time;
|
||||
}
|
||||
|
||||
@ -3080,8 +3078,6 @@ static void fcoe_ctlr_vn_disc(struct fcoe_ctlr *fip)
|
||||
mutex_lock(&disc->disc_mutex);
|
||||
callback = disc->pending ? disc->disc_callback : NULL;
|
||||
disc->pending = 0;
|
||||
mutex_unlock(&disc->disc_mutex);
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(rdata, &disc->rports, peers) {
|
||||
if (!kref_get_unless_zero(&rdata->kref))
|
||||
continue;
|
||||
@ -3090,7 +3086,7 @@ static void fcoe_ctlr_vn_disc(struct fcoe_ctlr *fip)
|
||||
fc_rport_login(rdata);
|
||||
kref_put(&rdata->kref, fc_rport_destroy);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
mutex_unlock(&disc->disc_mutex);
|
||||
if (callback)
|
||||
callback(lport, DISC_EV_SUCCESS);
|
||||
}
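The fcoe_ctlr hunks above all switch the rport walk from rcu_read_lock() to holding disc.disc_mutex, while still pinning each entry with kref_get_unless_zero() before touching it. A condensed sketch of the resulting walk, using only calls that appear in the hunks themselves (not a drop-in function):

static void logoff_all_rports(struct fc_lport *lport)
{
	struct fc_rport_priv *rdata;

	mutex_lock(&lport->disc.disc_mutex);
	list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) {
		if (!kref_get_unless_zero(&rdata->kref))
			continue;	/* entry is already on its way out */
		fc_rport_logoff(rdata);
		kref_put(&rdata->kref, fc_rport_destroy);
	}
	mutex_unlock(&lport->disc.disc_mutex);
}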
|
||||
|
@ -146,14 +146,14 @@ static irqreturn_t gdth_interrupt(int irq, void *dev_id);
|
||||
static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
|
||||
int gdth_from_wait, int* pIndex);
|
||||
static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index,
|
||||
Scsi_Cmnd *scp);
|
||||
struct scsi_cmnd *scp);
|
||||
static int gdth_async_event(gdth_ha_str *ha);
|
||||
static void gdth_log_event(gdth_evt_data *dvr, char *buffer);
|
||||
|
||||
static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority);
|
||||
static void gdth_putq(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 priority);
|
||||
static void gdth_next(gdth_ha_str *ha);
|
||||
static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 b);
|
||||
static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp);
|
||||
static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b);
|
||||
static int gdth_special_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp);
|
||||
static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source,
|
||||
u16 idx, gdth_evt_data *evt);
|
||||
static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr);
|
||||
@ -161,10 +161,11 @@ static void gdth_readapp_event(gdth_ha_str *ha, u8 application,
|
||||
gdth_evt_str *estr);
|
||||
static void gdth_clear_events(void);
|
||||
|
||||
static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
|
||||
static void gdth_copy_internal_data(gdth_ha_str *ha, struct scsi_cmnd *scp,
|
||||
char *buffer, u16 count);
|
||||
static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp);
|
||||
static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u16 hdrive);
|
||||
static int gdth_internal_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp);
|
||||
static int gdth_fill_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp,
|
||||
u16 hdrive);
|
||||
|
||||
static void gdth_enable_int(gdth_ha_str *ha);
|
||||
static int gdth_test_busy(gdth_ha_str *ha);
|
||||
@ -446,7 +447,7 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd,
|
||||
int timeout, u32 *info)
|
||||
{
|
||||
gdth_ha_str *ha = shost_priv(sdev->host);
|
||||
Scsi_Cmnd *scp;
|
||||
struct scsi_cmnd *scp;
|
||||
struct gdth_cmndinfo cmndinfo;
|
||||
DECLARE_COMPLETION_ONSTACK(wait);
|
||||
int rval;
|
||||
@ -1982,11 +1983,11 @@ static int gdth_analyse_hdrive(gdth_ha_str *ha, u16 hdrive)
|
||||
|
||||
/* command queueing/sending functions */
|
||||
|
||||
static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority)
|
||||
static void gdth_putq(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 priority)
|
||||
{
|
||||
struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
|
||||
register Scsi_Cmnd *pscp;
|
||||
register Scsi_Cmnd *nscp;
|
||||
register struct scsi_cmnd *pscp;
|
||||
register struct scsi_cmnd *nscp;
|
||||
unsigned long flags;
|
||||
|
||||
TRACE(("gdth_putq() priority %d\n",priority));
|
||||
@ -2000,11 +2001,11 @@ static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority)
|
||||
scp->SCp.ptr = NULL;
|
||||
} else { /* queue not empty */
|
||||
pscp = ha->req_first;
|
||||
nscp = (Scsi_Cmnd *)pscp->SCp.ptr;
|
||||
nscp = (struct scsi_cmnd *)pscp->SCp.ptr;
|
||||
/* priority: 0-highest,..,0xff-lowest */
|
||||
while (nscp && gdth_cmnd_priv(nscp)->priority <= priority) {
|
||||
pscp = nscp;
|
||||
nscp = (Scsi_Cmnd *)pscp->SCp.ptr;
|
||||
nscp = (struct scsi_cmnd *)pscp->SCp.ptr;
|
||||
}
|
||||
pscp->SCp.ptr = (char *)scp;
|
||||
scp->SCp.ptr = (char *)nscp;
|
||||
@ -2013,7 +2014,7 @@ static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority)
|
||||
|
||||
#ifdef GDTH_STATISTICS
|
||||
flags = 0;
|
||||
for (nscp=ha->req_first; nscp; nscp=(Scsi_Cmnd*)nscp->SCp.ptr)
|
||||
for (nscp=ha->req_first; nscp; nscp=(struct scsi_cmnd*)nscp->SCp.ptr)
|
||||
++flags;
|
||||
if (max_rq < flags) {
|
||||
max_rq = flags;
|
||||
@ -2024,8 +2025,8 @@ static void gdth_putq(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 priority)
|
||||
|
||||
static void gdth_next(gdth_ha_str *ha)
|
||||
{
|
||||
register Scsi_Cmnd *pscp;
|
||||
register Scsi_Cmnd *nscp;
|
||||
register struct scsi_cmnd *pscp;
|
||||
register struct scsi_cmnd *nscp;
|
||||
u8 b, t, l, firsttime;
|
||||
u8 this_cmd, next_cmd;
|
||||
unsigned long flags = 0;
|
||||
@ -2040,10 +2041,10 @@ static void gdth_next(gdth_ha_str *ha)
|
||||
next_cmd = gdth_polling ? FALSE:TRUE;
|
||||
cmd_index = 0;
|
||||
|
||||
for (nscp = pscp = ha->req_first; nscp; nscp = (Scsi_Cmnd *)nscp->SCp.ptr) {
|
||||
for (nscp = pscp = ha->req_first; nscp; nscp = (struct scsi_cmnd *)nscp->SCp.ptr) {
|
||||
struct gdth_cmndinfo *nscp_cmndinfo = gdth_cmnd_priv(nscp);
|
||||
if (nscp != pscp && nscp != (Scsi_Cmnd *)pscp->SCp.ptr)
|
||||
pscp = (Scsi_Cmnd *)pscp->SCp.ptr;
|
||||
if (nscp != pscp && nscp != (struct scsi_cmnd *)pscp->SCp.ptr)
|
||||
pscp = (struct scsi_cmnd *)pscp->SCp.ptr;
|
||||
if (!nscp_cmndinfo->internal_command) {
|
||||
b = nscp->device->channel;
|
||||
t = nscp->device->id;
|
||||
@ -2250,7 +2251,7 @@ static void gdth_next(gdth_ha_str *ha)
|
||||
if (!this_cmd)
|
||||
break;
|
||||
if (nscp == ha->req_first)
|
||||
ha->req_first = pscp = (Scsi_Cmnd *)nscp->SCp.ptr;
|
||||
ha->req_first = pscp = (struct scsi_cmnd *)nscp->SCp.ptr;
|
||||
else
|
||||
pscp->SCp.ptr = nscp->SCp.ptr;
|
||||
if (!next_cmd)
|
||||
@ -2275,7 +2276,7 @@ static void gdth_next(gdth_ha_str *ha)
|
||||
* gdth_copy_internal_data() - copy to/from a buffer onto a scsi_cmnd's
|
||||
* buffers, kmap_atomic() as needed.
|
||||
*/
|
||||
static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
|
||||
static void gdth_copy_internal_data(gdth_ha_str *ha, struct scsi_cmnd *scp,
|
||||
char *buffer, u16 count)
|
||||
{
|
||||
u16 cpcount,i, max_sg = scsi_sg_count(scp);
|
||||
@ -2317,7 +2318,7 @@ static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
|
||||
}
|
||||
}
|
||||
|
||||
static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
|
||||
static int gdth_internal_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp)
|
||||
{
|
||||
u8 t;
|
||||
gdth_inq_data inq;
|
||||
@ -2419,7 +2420,8 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u16 hdrive)
|
||||
static int gdth_fill_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp,
|
||||
u16 hdrive)
|
||||
{
|
||||
register gdth_cmd_str *cmdp;
|
||||
struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
|
||||
@ -2594,7 +2596,7 @@ static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u16 hdrive)
|
||||
return cmd_index;
|
||||
}
|
||||
|
||||
static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 b)
|
||||
static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b)
|
||||
{
|
||||
register gdth_cmd_str *cmdp;
|
||||
u16 i;
|
||||
@ -2767,7 +2769,7 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, u8 b)
|
||||
return cmd_index;
|
||||
}
|
||||
|
||||
static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp)
|
||||
static int gdth_special_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp)
|
||||
{
|
||||
register gdth_cmd_str *cmdp;
|
||||
struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
|
||||
@ -2958,7 +2960,7 @@ static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
|
||||
gdt6m_dpram_str __iomem *dp6m_ptr = NULL;
|
||||
gdt6_dpram_str __iomem *dp6_ptr;
|
||||
gdt2_dpram_str __iomem *dp2_ptr;
|
||||
Scsi_Cmnd *scp;
|
||||
struct scsi_cmnd *scp;
|
||||
int rval, i;
|
||||
u8 IStatus;
|
||||
u16 Service;
|
||||
@ -3217,7 +3219,7 @@ static irqreturn_t gdth_interrupt(int irq, void *dev_id)
|
||||
}
|
||||
|
||||
static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index,
|
||||
Scsi_Cmnd *scp)
|
||||
struct scsi_cmnd *scp)
|
||||
{
|
||||
gdth_msg_str *msg;
|
||||
gdth_cmd_str *cmdp;
|
||||
@ -3708,7 +3710,7 @@ static u8 gdth_timer_running;
|
||||
static void gdth_timeout(struct timer_list *unused)
|
||||
{
|
||||
u32 i;
|
||||
Scsi_Cmnd *nscp;
|
||||
struct scsi_cmnd *nscp;
|
||||
gdth_ha_str *ha;
|
||||
unsigned long flags;
|
||||
|
||||
@ -3724,7 +3726,8 @@ static void gdth_timeout(struct timer_list *unused)
|
||||
if (ha->cmd_tab[i].cmnd != UNUSED_CMND)
|
||||
++act_stats;
|
||||
|
||||
for (act_rq=0,nscp=ha->req_first; nscp; nscp=(Scsi_Cmnd*)nscp->SCp.ptr)
|
||||
for (act_rq=0,
|
||||
nscp=ha->req_first; nscp; nscp=(struct scsi_cmnd*)nscp->SCp.ptr)
|
||||
++act_rq;
|
||||
|
||||
TRACE2(("gdth_to(): ints %d, ios %d, act_stats %d, act_rq %d\n",
|
||||
@ -3909,12 +3912,12 @@ static enum blk_eh_timer_return gdth_timed_out(struct scsi_cmnd *scp)
|
||||
}
|
||||
|
||||
|
||||
static int gdth_eh_bus_reset(Scsi_Cmnd *scp)
|
||||
static int gdth_eh_bus_reset(struct scsi_cmnd *scp)
|
||||
{
|
||||
gdth_ha_str *ha = shost_priv(scp->device->host);
|
||||
int i;
|
||||
unsigned long flags;
|
||||
Scsi_Cmnd *cmnd;
|
||||
struct scsi_cmnd *cmnd;
|
||||
u8 b;
|
||||
|
||||
TRACE2(("gdth_eh_bus_reset()\n"));
|
||||
@ -4465,7 +4468,7 @@ static int ioc_rescan(void __user *arg, char *cmnd)
|
||||
static int gdth_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
|
||||
{
|
||||
gdth_ha_str *ha;
|
||||
Scsi_Cmnd *scp;
|
||||
struct scsi_cmnd *scp;
|
||||
unsigned long flags;
|
||||
char cmnd[MAX_COMMAND_SIZE];
|
||||
void __user *argp = (void __user *)arg;
|
||||
|
@ -162,9 +162,9 @@
|
||||
#define BIGSECS 63 /* mapping 255*63 */
|
||||
|
||||
/* special command ptr. */
|
||||
#define UNUSED_CMND ((Scsi_Cmnd *)-1)
|
||||
#define INTERNAL_CMND ((Scsi_Cmnd *)-2)
|
||||
#define SCREEN_CMND ((Scsi_Cmnd *)-3)
|
||||
#define UNUSED_CMND ((struct scsi_cmnd *)-1)
|
||||
#define INTERNAL_CMND ((struct scsi_cmnd *)-2)
|
||||
#define SCREEN_CMND ((struct scsi_cmnd *)-3)
|
||||
#define SPECIAL_SCP(p) (p==UNUSED_CMND || p==INTERNAL_CMND || p==SCREEN_CMND)
|
||||
|
||||
/* controller services */
|
||||
@ -867,7 +867,7 @@ typedef struct {
|
||||
u16 service; /* service/firmware ver./.. */
|
||||
u32 info;
|
||||
u32 info2; /* additional info */
|
||||
Scsi_Cmnd *req_first; /* top of request queue */
|
||||
struct scsi_cmnd *req_first; /* top of request queue */
|
||||
struct {
|
||||
u8 present; /* Flag: host drive present? */
|
||||
u8 is_logdrv; /* Flag: log. drive (master)? */
|
||||
@ -896,7 +896,7 @@ typedef struct {
|
||||
u32 id_list[MAXID]; /* IDs of the phys. devices */
|
||||
} raw[MAXBUS]; /* SCSI channels */
|
||||
struct {
|
||||
Scsi_Cmnd *cmnd; /* pending request */
|
||||
struct scsi_cmnd *cmnd; /* pending request */
|
||||
u16 service; /* service */
|
||||
} cmd_tab[GDTH_MAXCMDS]; /* table of pend. requests */
|
||||
struct gdth_cmndinfo { /* per-command private info */
|
||||
|
@ -626,7 +626,7 @@ static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id)
|
||||
{
|
||||
unsigned long flags;
|
||||
int i;
|
||||
Scsi_Cmnd *scp;
|
||||
struct scsi_cmnd *scp;
|
||||
struct gdth_cmndinfo *cmndinfo;
|
||||
u8 b, t;
|
||||
|
||||
|
@ -16,6 +16,7 @@
|
||||
#include <linux/clk.h>
|
||||
#include <linux/dmapool.h>
|
||||
#include <linux/iopoll.h>
|
||||
#include <linux/lcm.h>
|
||||
#include <linux/mfd/syscon.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of_address.h>
|
||||
@ -199,17 +200,17 @@ struct hisi_sas_slot {
|
||||
int dlvry_queue_slot;
|
||||
int cmplt_queue;
|
||||
int cmplt_queue_slot;
|
||||
int idx;
|
||||
int abort;
|
||||
int ready;
|
||||
void *buf;
|
||||
dma_addr_t buf_dma;
|
||||
void *cmd_hdr;
|
||||
dma_addr_t cmd_hdr_dma;
|
||||
struct work_struct abort_slot;
|
||||
struct timer_list internal_abort_timer;
|
||||
bool is_internal;
|
||||
struct hisi_sas_tmf_task *tmf;
|
||||
/* Do not reorder/change members after here */
|
||||
void *buf;
|
||||
dma_addr_t buf_dma;
|
||||
int idx;
|
||||
};
|
||||
|
||||
struct hisi_sas_hw {
|
||||
@ -277,6 +278,7 @@ struct hisi_hba {
|
||||
|
||||
int n_phy;
|
||||
spinlock_t lock;
|
||||
struct semaphore sem;
|
||||
|
||||
struct timer_list timer;
|
||||
struct workqueue_struct *wq;
|
||||
@ -298,7 +300,6 @@ struct hisi_hba {
|
||||
|
||||
int queue_count;
|
||||
|
||||
struct dma_pool *buffer_pool;
|
||||
struct hisi_sas_device devices[HISI_SAS_MAX_DEVICES];
|
||||
struct hisi_sas_cmd_hdr *cmd_hdr[HISI_SAS_MAX_QUEUES];
|
||||
dma_addr_t cmd_hdr_dma[HISI_SAS_MAX_QUEUES];
|
||||
@ -319,6 +320,7 @@ struct hisi_hba {
|
||||
const struct hisi_sas_hw *hw; /* Low level hw interface */
|
||||
unsigned long sata_dev_bitmap[BITS_TO_LONGS(HISI_SAS_MAX_DEVICES)];
|
||||
struct work_struct rst_work;
|
||||
u32 phy_state;
|
||||
};
|
||||
|
||||
/* Generic HW DMA host memory structures */
|
||||
@ -479,4 +481,6 @@ extern bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
|
||||
enum hisi_sas_phy_event event);
|
||||
extern void hisi_sas_release_tasks(struct hisi_hba *hisi_hba);
|
||||
extern u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max);
|
||||
extern void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba);
|
||||
extern void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba);
|
||||
#endif
|
||||
|
@ -242,20 +242,16 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
|
||||
task->data_dir);
|
||||
}
|
||||
|
||||
if (slot->buf)
|
||||
dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);
|
||||
|
||||
spin_lock_irqsave(&dq->lock, flags);
|
||||
list_del_init(&slot->entry);
|
||||
spin_unlock_irqrestore(&dq->lock, flags);
|
||||
slot->buf = NULL;
|
||||
slot->task = NULL;
|
||||
slot->port = NULL;
|
||||
|
||||
memset(slot, 0, offsetof(struct hisi_sas_slot, buf));
|
||||
|
||||
spin_lock_irqsave(&hisi_hba->lock, flags);
|
||||
hisi_sas_slot_index_free(hisi_hba, slot->idx);
|
||||
spin_unlock_irqrestore(&hisi_hba->lock, flags);
|
||||
|
||||
/* slot memory is fully zeroed when it is reused */
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);
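hisi_sas_slot now groups its reusable fields in front of the "Do not reorder/change members after here" marker so that freeing a slot can wipe only that head with a single memset(), leaving the preallocated buffer fields intact. The idiom in isolation, with an illustrative structure:

#include <stddef.h>
#include <string.h>

struct slot {
	int	dlvry_queue;
	int	dlvry_queue_slot;
	int	abort;
	/* Do not reorder/change members after here */
	void	*buf;			/* preserved across reuse */
	int	idx;
};

static void recycle_slot(struct slot *s)
{
	/* zero everything up to, but not including, the preserved tail */
	memset(s, 0, offsetof(struct slot, buf));
}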
|
||||
|
||||
@ -285,40 +281,6 @@ static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
|
||||
device_id, abort_flag, tag_to_abort);
|
||||
}
|
||||
|
||||
/*
|
||||
* This function will issue an abort TMF regardless of whether the
|
||||
* task is in the sdev or not. Then it will do the task complete
|
||||
* cleanup and callbacks.
|
||||
*/
|
||||
static void hisi_sas_slot_abort(struct work_struct *work)
|
||||
{
|
||||
struct hisi_sas_slot *abort_slot =
|
||||
container_of(work, struct hisi_sas_slot, abort_slot);
|
||||
struct sas_task *task = abort_slot->task;
|
||||
struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
|
||||
struct scsi_cmnd *cmnd = task->uldd_task;
|
||||
struct hisi_sas_tmf_task tmf_task;
|
||||
struct scsi_lun lun;
|
||||
struct device *dev = hisi_hba->dev;
|
||||
int tag = abort_slot->idx;
|
||||
|
||||
if (!(task->task_proto & SAS_PROTOCOL_SSP)) {
|
||||
dev_err(dev, "cannot abort slot for non-ssp task\n");
|
||||
goto out;
|
||||
}
|
||||
|
||||
int_to_scsilun(cmnd->device->lun, &lun);
|
||||
tmf_task.tmf = TMF_ABORT_TASK;
|
||||
tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
|
||||
|
||||
hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun, &tmf_task);
|
||||
out:
|
||||
/* Do cleanup for this task */
|
||||
hisi_sas_slot_task_free(hisi_hba, task, abort_slot);
|
||||
if (task->task_done)
|
||||
task->task_done(task);
|
||||
}
|
||||
|
||||
static int hisi_sas_task_prep(struct sas_task *task,
|
||||
struct hisi_sas_dq **dq_pointer,
|
||||
bool is_tmf, struct hisi_sas_tmf_task *tmf,
|
||||
@ -334,8 +296,8 @@ static int hisi_sas_task_prep(struct sas_task *task,
|
||||
struct device *dev = hisi_hba->dev;
|
||||
int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
|
||||
int n_elem = 0, n_elem_req = 0, n_elem_resp = 0;
|
||||
unsigned long flags, flags_dq;
|
||||
struct hisi_sas_dq *dq;
|
||||
unsigned long flags;
|
||||
int wr_q_index;
|
||||
|
||||
if (!sas_port) {
|
||||
@ -430,30 +392,22 @@ static int hisi_sas_task_prep(struct sas_task *task,
|
||||
goto err_out_dma_unmap;
|
||||
|
||||
slot = &hisi_hba->slot_info[slot_idx];
|
||||
memset(slot, 0, sizeof(struct hisi_sas_slot));
|
||||
|
||||
slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
|
||||
GFP_ATOMIC, &slot->buf_dma);
|
||||
if (!slot->buf) {
|
||||
rc = -ENOMEM;
|
||||
spin_lock_irqsave(&dq->lock, flags);
|
||||
wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
|
||||
if (wr_q_index < 0) {
|
||||
spin_unlock_irqrestore(&dq->lock, flags);
|
||||
rc = -EAGAIN;
|
||||
goto err_out_tag;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&dq->lock, flags_dq);
|
||||
wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
|
||||
if (wr_q_index < 0) {
|
||||
spin_unlock_irqrestore(&dq->lock, flags_dq);
|
||||
rc = -EAGAIN;
|
||||
goto err_out_buf;
|
||||
}
|
||||
|
||||
list_add_tail(&slot->delivery, &dq->list);
|
||||
spin_unlock_irqrestore(&dq->lock, flags_dq);
|
||||
list_add_tail(&slot->entry, &sas_dev->list);
|
||||
spin_unlock_irqrestore(&dq->lock, flags);
|
||||
|
||||
dlvry_queue = dq->id;
|
||||
dlvry_queue_slot = wr_q_index;
|
||||
|
||||
slot->idx = slot_idx;
|
||||
slot->n_elem = n_elem;
|
||||
slot->dlvry_queue = dlvry_queue;
|
||||
slot->dlvry_queue_slot = dlvry_queue_slot;
|
||||
@ -464,7 +418,6 @@ static int hisi_sas_task_prep(struct sas_task *task,
|
||||
slot->tmf = tmf;
|
||||
slot->is_internal = is_tmf;
|
||||
task->lldd_task = slot;
|
||||
INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);
|
||||
|
||||
memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
|
||||
memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
|
||||
@ -488,21 +441,15 @@ static int hisi_sas_task_prep(struct sas_task *task,
|
||||
break;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&dq->lock, flags);
|
||||
list_add_tail(&slot->entry, &sas_dev->list);
|
||||
spin_unlock_irqrestore(&dq->lock, flags);
|
||||
spin_lock_irqsave(&task->task_state_lock, flags);
|
||||
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
|
||||
spin_unlock_irqrestore(&task->task_state_lock, flags);
|
||||
|
||||
++(*pass);
|
||||
slot->ready = 1;
|
||||
WRITE_ONCE(slot->ready, 1);
|
||||
|
||||
return 0;
|
||||
|
||||
err_out_buf:
|
||||
dma_pool_free(hisi_hba->buffer_pool, slot->buf,
|
||||
slot->buf_dma);
|
||||
err_out_tag:
|
||||
spin_lock_irqsave(&hisi_hba->lock, flags);
|
||||
hisi_sas_slot_index_free(hisi_hba, slot_idx);
|
||||
@ -536,8 +483,13 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
|
||||
struct device *dev = hisi_hba->dev;
|
||||
struct hisi_sas_dq *dq = NULL;
|
||||
|
||||
if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
|
||||
return -EINVAL;
|
||||
if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
|
||||
if (in_softirq())
|
||||
return -EINVAL;
|
||||
|
||||
down(&hisi_hba->sem);
|
||||
up(&hisi_hba->sem);
|
||||
}
|
||||
|
||||
/* protect task_prep and start_delivery sequence */
|
||||
rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass);
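The hunk above replaces an outright -EINVAL during controller reset with a wait: the reset path holds hisi_hba->sem, so a submitter that may sleep does down() followed immediately by up(), blocking until the reset releases the semaphore and then letting the command proceed; only softirq context, where sleeping is forbidden, still fails fast. A sketch of that "wait for the current holder" idiom:

#include <linux/semaphore.h>

static void wait_for_holder(struct semaphore *sem)
{
	down(sem);	/* blocks while the reset path holds the semaphore */
	up(sem);	/* release immediately; we only wanted to wait, not to own it */
}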
|
||||
@ -819,6 +771,8 @@ static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
|
||||
|
||||
for (i = 0; i < HISI_PHYES_NUM; i++)
|
||||
INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);
|
||||
|
||||
spin_lock_init(&phy->lock);
|
||||
}
|
||||
|
||||
static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
|
||||
@ -862,7 +816,6 @@ static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task
|
||||
hisi_sas_slot_task_free(hisi_hba, task, slot);
|
||||
}
|
||||
|
||||
/* hisi_hba.lock should be locked */
|
||||
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
|
||||
struct domain_device *device)
|
||||
{
|
||||
@ -914,7 +867,9 @@ static void hisi_sas_dev_gone(struct domain_device *device)
|
||||
|
||||
hisi_sas_dereg_device(hisi_hba, device);
|
||||
|
||||
down(&hisi_hba->sem);
|
||||
hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
|
||||
up(&hisi_hba->sem);
|
||||
device->lldd_dev = NULL;
|
||||
}
|
||||
|
||||
@ -1351,11 +1306,50 @@ static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
|
||||
}
|
||||
}
|
||||
|
||||
void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
|
||||
{
|
||||
struct Scsi_Host *shost = hisi_hba->shost;
|
||||
|
||||
down(&hisi_hba->sem);
|
||||
hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);
|
||||
|
||||
scsi_block_requests(shost);
|
||||
hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);
|
||||
|
||||
if (timer_pending(&hisi_hba->timer))
|
||||
del_timer_sync(&hisi_hba->timer);
|
||||
|
||||
set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare);
|
||||
|
||||
void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
|
||||
{
|
||||
struct Scsi_Host *shost = hisi_hba->shost;
|
||||
u32 state;
|
||||
|
||||
/* Init and wait for PHYs to come up and all libsas event finished. */
|
||||
hisi_hba->hw->phys_init(hisi_hba);
|
||||
msleep(1000);
|
||||
hisi_sas_refresh_port_id(hisi_hba);
|
||||
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
|
||||
up(&hisi_hba->sem);
|
||||
|
||||
if (hisi_hba->reject_stp_links_msk)
|
||||
hisi_sas_terminate_stp_reject(hisi_hba);
|
||||
hisi_sas_reset_init_all_devices(hisi_hba);
|
||||
scsi_unblock_requests(shost);
|
||||
clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
|
||||
|
||||
state = hisi_hba->hw->get_phys_state(hisi_hba);
|
||||
hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state, state);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);
|
||||
|
||||
static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
struct device *dev = hisi_hba->dev;
struct Scsi_Host *shost = hisi_hba->shost;
u32 old_state, state;
int rc;

if (!hisi_hba->hw->soft_reset)
@@ -1365,43 +1359,22 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
return -1;

dev_info(dev, "controller resetting...\n");
old_state = hisi_hba->hw->get_phys_state(hisi_hba);
hisi_sas_controller_reset_prepare(hisi_hba);

scsi_block_requests(shost);
hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);

if (timer_pending(&hisi_hba->timer))
del_timer_sync(&hisi_hba->timer);

set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
rc = hisi_hba->hw->soft_reset(hisi_hba);
if (rc) {
dev_warn(dev, "controller reset failed (%d)\n", rc);
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
up(&hisi_hba->sem);
scsi_unblock_requests(shost);
goto out;
clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
return rc;
}

clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

/* Init and wait for PHYs to come up and all libsas event finished. */
hisi_hba->hw->phys_init(hisi_hba);
msleep(1000);
hisi_sas_refresh_port_id(hisi_hba);

if (hisi_hba->reject_stp_links_msk)
hisi_sas_terminate_stp_reject(hisi_hba);
hisi_sas_reset_init_all_devices(hisi_hba);
scsi_unblock_requests(shost);

state = hisi_hba->hw->get_phys_state(hisi_hba);
hisi_sas_rescan_topology(hisi_hba, old_state, state);
hisi_sas_controller_reset_done(hisi_hba);
dev_info(dev, "controller reset complete\n");

out:
clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

return rc;
return 0;
}

static int hisi_sas_abort_task(struct sas_task *task)
@@ -1644,14 +1617,32 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
struct device *dev = hisi_hba->dev;
HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
int rc, i;

queue_work(hisi_hba->wq, &r.work);
wait_for_completion(r.completion);
if (r.done)
return TMF_RESP_FUNC_COMPLETE;
if (!r.done)
return TMF_RESP_FUNC_FAILED;

return TMF_RESP_FUNC_FAILED;
for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
struct domain_device *device = sas_dev->sas_device;

if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device ||
DEV_IS_EXPANDER(device->dev_type))
continue;

rc = hisi_sas_debug_I_T_nexus_reset(device);
if (rc != TMF_RESP_FUNC_COMPLETE)
dev_info(dev, "clear nexus ha: for device[%d] rc=%d\n",
sas_dev->device_id, rc);
}

hisi_sas_release_tasks(hisi_hba);

return TMF_RESP_FUNC_COMPLETE;
}

static int hisi_sas_query_task(struct sas_task *task)
@@ -1723,21 +1714,13 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
spin_unlock_irqrestore(&hisi_hba->lock, flags);

slot = &hisi_hba->slot_info[slot_idx];
memset(slot, 0, sizeof(struct hisi_sas_slot));

slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
GFP_ATOMIC, &slot->buf_dma);
if (!slot->buf) {
rc = -ENOMEM;
goto err_out_tag;
}

spin_lock_irqsave(&dq->lock, flags_dq);
wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq);
if (wr_q_index < 0) {
spin_unlock_irqrestore(&dq->lock, flags_dq);
rc = -EAGAIN;
goto err_out_buf;
goto err_out_tag;
}
list_add_tail(&slot->delivery, &dq->list);
spin_unlock_irqrestore(&dq->lock, flags_dq);
@@ -1745,7 +1728,6 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
dlvry_queue = dq->id;
dlvry_queue_slot = wr_q_index;

slot->idx = slot_idx;
slot->n_elem = n_elem;
slot->dlvry_queue = dlvry_queue;
slot->dlvry_queue_slot = dlvry_queue_slot;
@@ -1767,7 +1749,7 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);

slot->ready = 1;
WRITE_ONCE(slot->ready, 1);
/* send abort command to the chip */
spin_lock_irqsave(&dq->lock, flags);
list_add_tail(&slot->entry, &sas_dev->list);
@@ -1776,9 +1758,6 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,

return 0;

err_out_buf:
dma_pool_free(hisi_hba->buffer_pool, slot->buf,
slot->buf_dma);
err_out_tag:
spin_lock_irqsave(&hisi_hba->lock, flags);
hisi_sas_slot_index_free(hisi_hba, slot_idx);
@@ -1919,7 +1898,8 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
} else {
struct hisi_sas_port *port = phy->port;

if (phy->in_reset) {
if (test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags) ||
phy->in_reset) {
dev_info(dev, "ignore flutter phy%d down\n", phy_no);
return;
}
@@ -2014,8 +1994,11 @@ EXPORT_SYMBOL_GPL(hisi_sas_init_mem);
int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
{
struct device *dev = hisi_hba->dev;
int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
int i, j, s, max_command_entries = hisi_hba->hw->max_command_entries;
int max_command_entries_ru, sz_slot_buf_ru;
int blk_cnt, slots_per_blk;

sema_init(&hisi_hba->sem, 1);
spin_lock_init(&hisi_hba->lock);
for (i = 0; i < hisi_hba->n_phy; i++) {
hisi_sas_phy_init(hisi_hba, i);
@ -2045,29 +2028,27 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
|
||||
|
||||
/* Delivery queue */
|
||||
s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
|
||||
hisi_hba->cmd_hdr[i] = dma_alloc_coherent(dev, s,
|
||||
&hisi_hba->cmd_hdr_dma[i], GFP_KERNEL);
|
||||
hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s,
|
||||
&hisi_hba->cmd_hdr_dma[i],
|
||||
GFP_KERNEL);
|
||||
if (!hisi_hba->cmd_hdr[i])
|
||||
goto err_out;
|
||||
|
||||
/* Completion queue */
|
||||
s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
|
||||
hisi_hba->complete_hdr[i] = dma_alloc_coherent(dev, s,
|
||||
&hisi_hba->complete_hdr_dma[i], GFP_KERNEL);
|
||||
hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s,
|
||||
&hisi_hba->complete_hdr_dma[i],
|
||||
GFP_KERNEL);
|
||||
if (!hisi_hba->complete_hdr[i])
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
s = sizeof(struct hisi_sas_slot_buf_table);
|
||||
hisi_hba->buffer_pool = dma_pool_create("dma_buffer", dev, s, 16, 0);
|
||||
if (!hisi_hba->buffer_pool)
|
||||
goto err_out;
|
||||
|
||||
s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
|
||||
hisi_hba->itct = dma_zalloc_coherent(dev, s, &hisi_hba->itct_dma,
|
||||
GFP_KERNEL);
|
||||
hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma,
|
||||
GFP_KERNEL);
|
||||
if (!hisi_hba->itct)
|
||||
goto err_out;
|
||||
memset(hisi_hba->itct, 0, s);
|
||||
|
||||
hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
|
||||
sizeof(struct hisi_sas_slot),
|
||||
@ -2075,15 +2056,45 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
|
||||
if (!hisi_hba->slot_info)
|
||||
goto err_out;
|
||||
|
||||
/* roundup to avoid overly large block size */
|
||||
max_command_entries_ru = roundup(max_command_entries, 64);
|
||||
sz_slot_buf_ru = roundup(sizeof(struct hisi_sas_slot_buf_table), 64);
|
||||
s = lcm(max_command_entries_ru, sz_slot_buf_ru);
|
||||
blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
|
||||
slots_per_blk = s / sz_slot_buf_ru;
|
||||
for (i = 0; i < blk_cnt; i++) {
|
||||
struct hisi_sas_slot_buf_table *buf;
|
||||
dma_addr_t buf_dma;
|
||||
int slot_index = i * slots_per_blk;
|
||||
|
||||
buf = dmam_alloc_coherent(dev, s, &buf_dma, GFP_KERNEL);
|
||||
if (!buf)
|
||||
goto err_out;
|
||||
memset(buf, 0, s);
|
||||
|
||||
for (j = 0; j < slots_per_blk; j++, slot_index++) {
|
||||
struct hisi_sas_slot *slot;
|
||||
|
||||
slot = &hisi_hba->slot_info[slot_index];
|
||||
slot->buf = buf;
|
||||
slot->buf_dma = buf_dma;
|
||||
slot->idx = slot_index;
|
||||
|
||||
buf++;
|
||||
buf_dma += sizeof(*buf);
|
||||
}
|
||||
}
|
||||
|
||||
s = max_command_entries * sizeof(struct hisi_sas_iost);
|
||||
hisi_hba->iost = dma_alloc_coherent(dev, s, &hisi_hba->iost_dma,
|
||||
GFP_KERNEL);
|
||||
hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma,
|
||||
GFP_KERNEL);
|
||||
if (!hisi_hba->iost)
|
||||
goto err_out;
|
||||
|
||||
s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
|
||||
hisi_hba->breakpoint = dma_alloc_coherent(dev, s,
|
||||
&hisi_hba->breakpoint_dma, GFP_KERNEL);
|
||||
hisi_hba->breakpoint = dmam_alloc_coherent(dev, s,
|
||||
&hisi_hba->breakpoint_dma,
|
||||
GFP_KERNEL);
|
||||
if (!hisi_hba->breakpoint)
|
||||
goto err_out;
|
||||
|
||||
@ -2094,14 +2105,16 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
|
||||
goto err_out;
|
||||
|
||||
s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
|
||||
hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
|
||||
&hisi_hba->initial_fis_dma, GFP_KERNEL);
|
||||
hisi_hba->initial_fis = dmam_alloc_coherent(dev, s,
|
||||
&hisi_hba->initial_fis_dma,
|
||||
GFP_KERNEL);
|
||||
if (!hisi_hba->initial_fis)
|
||||
goto err_out;
|
||||
|
||||
s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
|
||||
hisi_hba->sata_breakpoint = dma_alloc_coherent(dev, s,
|
||||
&hisi_hba->sata_breakpoint_dma, GFP_KERNEL);
|
||||
hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s,
|
||||
&hisi_hba->sata_breakpoint_dma,
|
||||
GFP_KERNEL);
|
||||
if (!hisi_hba->sata_breakpoint)
|
||||
goto err_out;
|
||||
hisi_sas_init_mem(hisi_hba);
|
||||
@ -2122,54 +2135,6 @@ EXPORT_SYMBOL_GPL(hisi_sas_alloc);
|
||||
|
||||
void hisi_sas_free(struct hisi_hba *hisi_hba)
|
||||
{
|
||||
struct device *dev = hisi_hba->dev;
|
||||
int i, s, max_command_entries = hisi_hba->hw->max_command_entries;
|
||||
|
||||
for (i = 0; i < hisi_hba->queue_count; i++) {
|
||||
s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
|
||||
if (hisi_hba->cmd_hdr[i])
|
||||
dma_free_coherent(dev, s,
|
||||
hisi_hba->cmd_hdr[i],
|
||||
hisi_hba->cmd_hdr_dma[i]);
|
||||
|
||||
s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
|
||||
if (hisi_hba->complete_hdr[i])
|
||||
dma_free_coherent(dev, s,
|
||||
hisi_hba->complete_hdr[i],
|
||||
hisi_hba->complete_hdr_dma[i]);
|
||||
}
|
||||
|
||||
dma_pool_destroy(hisi_hba->buffer_pool);
|
||||
|
||||
s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
|
||||
if (hisi_hba->itct)
|
||||
dma_free_coherent(dev, s,
|
||||
hisi_hba->itct, hisi_hba->itct_dma);
|
||||
|
||||
s = max_command_entries * sizeof(struct hisi_sas_iost);
|
||||
if (hisi_hba->iost)
|
||||
dma_free_coherent(dev, s,
|
||||
hisi_hba->iost, hisi_hba->iost_dma);
|
||||
|
||||
s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
|
||||
if (hisi_hba->breakpoint)
|
||||
dma_free_coherent(dev, s,
|
||||
hisi_hba->breakpoint,
|
||||
hisi_hba->breakpoint_dma);
|
||||
|
||||
|
||||
s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
|
||||
if (hisi_hba->initial_fis)
|
||||
dma_free_coherent(dev, s,
|
||||
hisi_hba->initial_fis,
|
||||
hisi_hba->initial_fis_dma);
|
||||
|
||||
s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
|
||||
if (hisi_hba->sata_breakpoint)
|
||||
dma_free_coherent(dev, s,
|
||||
hisi_hba->sata_breakpoint,
|
||||
hisi_hba->sata_breakpoint_dma);
|
||||
|
||||
if (hisi_hba->wq)
|
||||
destroy_workqueue(hisi_hba->wq);
|
||||
}
|
||||
@@ -903,23 +903,28 @@ get_free_slot_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
static void start_delivery_v1_hw(struct hisi_sas_dq *dq)
{
struct hisi_hba *hisi_hba = dq->hisi_hba;
struct hisi_sas_slot *s, *s1;
struct hisi_sas_slot *s, *s1, *s2 = NULL;
struct list_head *dq_list;
int dlvry_queue = dq->id;
int wp, count = 0;
int wp;

dq_list = &dq->list;
list_for_each_entry_safe(s, s1, &dq->list, delivery) {
if (!s->ready)
break;
count++;
wp = (s->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
s2 = s;
list_del(&s->delivery);
}

if (!count)
if (!s2)
return;

/*
* Ensure that memories for slots built on other CPUs is observed.
*/
smp_rmb();
wp = (s2->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;

hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp);
}

@ -1296,11 +1301,8 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
|
||||
!(cmplt_hdr_data & CMPLT_HDR_RSPNS_XFRD_MSK)) {
|
||||
|
||||
slot_err_v1_hw(hisi_hba, task, slot);
|
||||
if (unlikely(slot->abort)) {
|
||||
queue_work(hisi_hba->wq, &slot->abort_slot);
|
||||
/* immediately return and do not complete */
|
||||
if (unlikely(slot->abort))
|
||||
return ts->stat;
|
||||
}
|
||||
goto out;
|
||||
}
|
||||
|
||||
@ -1469,7 +1471,8 @@ static irqreturn_t int_bcast_v1_hw(int irq, void *p)
|
||||
goto end;
|
||||
}
|
||||
|
||||
sha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
|
||||
if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
|
||||
sha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
|
||||
|
||||
end:
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2,
|
||||
|
@ -1665,23 +1665,28 @@ get_free_slot_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
|
||||
static void start_delivery_v2_hw(struct hisi_sas_dq *dq)
|
||||
{
|
||||
struct hisi_hba *hisi_hba = dq->hisi_hba;
|
||||
struct hisi_sas_slot *s, *s1;
|
||||
struct hisi_sas_slot *s, *s1, *s2 = NULL;
|
||||
struct list_head *dq_list;
|
||||
int dlvry_queue = dq->id;
|
||||
int wp, count = 0;
|
||||
int wp;
|
||||
|
||||
dq_list = &dq->list;
|
||||
list_for_each_entry_safe(s, s1, &dq->list, delivery) {
|
||||
if (!s->ready)
|
||||
break;
|
||||
count++;
|
||||
wp = (s->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
|
||||
s2 = s;
|
||||
list_del(&s->delivery);
|
||||
}
|
||||
|
||||
if (!count)
|
||||
if (!s2)
|
||||
return;
|
||||
|
||||
/*
|
||||
* Ensure that memories for slots built on other CPUs is observed.
|
||||
*/
|
||||
smp_rmb();
|
||||
wp = (s2->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
|
||||
|
||||
hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp);
|
||||
}
|
||||
|
||||
@ -2840,7 +2845,8 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
|
||||
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
|
||||
bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
|
||||
if (bcast_status & RX_BCAST_CHG_MSK)
|
||||
if ((bcast_status & RX_BCAST_CHG_MSK) &&
|
||||
!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
|
||||
sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
|
||||
CHL_INT0_SL_RX_BCST_ACK_MSK);
|
||||
@ -3234,8 +3240,7 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
|
||||
if (fis->status & ATA_ERR) {
|
||||
dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n", phy_no,
|
||||
fis->status);
|
||||
disable_phy_v2_hw(hisi_hba, phy_no);
|
||||
enable_phy_v2_hw(hisi_hba, phy_no);
|
||||
hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
|
||||
res = IRQ_NONE;
|
||||
goto end;
|
||||
}
|
||||
|
@ -51,7 +51,6 @@
|
||||
#define CFG_ABT_SET_IPTT_DONE 0xd8
|
||||
#define CFG_ABT_SET_IPTT_DONE_OFF 0
|
||||
#define HGC_IOMB_PROC1_STATUS 0x104
|
||||
#define CFG_1US_TIMER_TRSH 0xcc
|
||||
#define CHNL_INT_STATUS 0x148
|
||||
#define HGC_AXI_FIFO_ERR_INFO 0x154
|
||||
#define AXI_ERR_INFO_OFF 0
|
||||
@ -121,6 +120,8 @@
|
||||
#define PHY_CFG_ENA_MSK (0x1 << PHY_CFG_ENA_OFF)
|
||||
#define PHY_CFG_DC_OPT_OFF 2
|
||||
#define PHY_CFG_DC_OPT_MSK (0x1 << PHY_CFG_DC_OPT_OFF)
|
||||
#define PHY_CFG_PHY_RST_OFF 3
|
||||
#define PHY_CFG_PHY_RST_MSK (0x1 << PHY_CFG_PHY_RST_OFF)
|
||||
#define PROG_PHY_LINK_RATE (PORT_BASE + 0x8)
|
||||
#define PHY_CTRL (PORT_BASE + 0x14)
|
||||
#define PHY_CTRL_RESET_OFF 0
|
||||
@ -131,6 +132,9 @@
|
||||
#define SL_CONTROL_NOTIFY_EN_MSK (0x1 << SL_CONTROL_NOTIFY_EN_OFF)
|
||||
#define SL_CTA_OFF 17
|
||||
#define SL_CTA_MSK (0x1 << SL_CTA_OFF)
|
||||
#define RX_PRIMS_STATUS (PORT_BASE + 0x98)
|
||||
#define RX_BCAST_CHG_OFF 1
|
||||
#define RX_BCAST_CHG_MSK (0x1 << RX_BCAST_CHG_OFF)
|
||||
#define TX_ID_DWORD0 (PORT_BASE + 0x9c)
|
||||
#define TX_ID_DWORD1 (PORT_BASE + 0xa0)
|
||||
#define TX_ID_DWORD2 (PORT_BASE + 0xa4)
|
||||
@ -206,6 +210,8 @@
|
||||
|
||||
#define AXI_MASTER_CFG_BASE (0x5000)
|
||||
#define AM_CTRL_GLOBAL (0x0)
|
||||
#define AM_CTRL_SHUTDOWN_REQ_OFF 0
|
||||
#define AM_CTRL_SHUTDOWN_REQ_MSK (0x1 << AM_CTRL_SHUTDOWN_REQ_OFF)
|
||||
#define AM_CURR_TRANS_RETURN (0x150)
|
||||
|
||||
#define AM_CFG_MAX_TRANS (0x5010)
|
||||
@ -425,7 +431,6 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
|
||||
(u32)((1ULL << hisi_hba->queue_count) - 1));
|
||||
hisi_sas_write32(hisi_hba, CFG_MAX_TAG, 0xfff0400);
|
||||
hisi_sas_write32(hisi_hba, HGC_SAS_TXFAIL_RETRY_CTRL, 0x108);
|
||||
hisi_sas_write32(hisi_hba, CFG_1US_TIMER_TRSH, 0xd);
|
||||
hisi_sas_write32(hisi_hba, INT_COAL_EN, 0x1);
|
||||
hisi_sas_write32(hisi_hba, OQ_INT_COAL_TIME, 0x1);
|
||||
hisi_sas_write32(hisi_hba, OQ_INT_COAL_CNT, 0x1);
|
||||
@ -486,6 +491,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
|
||||
hisi_sas_phy_write32(hisi_hba, i, SL_RX_BCAST_CHK_MSK, 0x0);
|
||||
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1);
|
||||
hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120);
|
||||
hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01);
|
||||
|
||||
/* used for 12G negotiate */
|
||||
hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
|
||||
@ -758,15 +764,25 @@ static void enable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
|
||||
u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
|
||||
|
||||
cfg |= PHY_CFG_ENA_MSK;
|
||||
cfg &= ~PHY_CFG_PHY_RST_MSK;
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
|
||||
}
|
||||
|
||||
static void disable_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
|
||||
{
|
||||
u32 cfg = hisi_sas_phy_read32(hisi_hba, phy_no, PHY_CFG);
|
||||
u32 state;
|
||||
|
||||
cfg &= ~PHY_CFG_ENA_MSK;
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
|
||||
|
||||
mdelay(50);
|
||||
|
||||
state = hisi_sas_read32(hisi_hba, PHY_STATE);
|
||||
if (state & BIT(phy_no)) {
|
||||
cfg |= PHY_CFG_PHY_RST_MSK;
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no, PHY_CFG, cfg);
|
||||
}
|
||||
}
|
||||
|
||||
static void start_phy_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
|
||||
@ -866,23 +882,28 @@ get_free_slot_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
|
||||
static void start_delivery_v3_hw(struct hisi_sas_dq *dq)
|
||||
{
|
||||
struct hisi_hba *hisi_hba = dq->hisi_hba;
|
||||
struct hisi_sas_slot *s, *s1;
|
||||
struct hisi_sas_slot *s, *s1, *s2 = NULL;
|
||||
struct list_head *dq_list;
|
||||
int dlvry_queue = dq->id;
|
||||
int wp, count = 0;
|
||||
int wp;
|
||||
|
||||
dq_list = &dq->list;
|
||||
list_for_each_entry_safe(s, s1, &dq->list, delivery) {
|
||||
if (!s->ready)
|
||||
break;
|
||||
count++;
|
||||
wp = (s->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
|
||||
s2 = s;
|
||||
list_del(&s->delivery);
|
||||
}
|
||||
|
||||
if (!count)
|
||||
if (!s2)
|
||||
return;
|
||||
|
||||
/*
|
||||
* Ensure that memories for slots built on other CPUs is observed.
|
||||
*/
|
||||
smp_rmb();
|
||||
wp = (s2->dlvry_queue_slot + 1) % HISI_SAS_QUEUE_SLOTS;
|
||||
|
||||
hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14), wp);
|
||||
}
|
||||
|
||||
@ -1170,6 +1191,16 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
|
||||
dev_info(dev, "phyup: phy%d link_rate=%d(sata)\n", phy_no, link_rate);
|
||||
initial_fis = &hisi_hba->initial_fis[phy_no];
|
||||
fis = &initial_fis->fis;
|
||||
|
||||
/* check ERR bit of Status Register */
|
||||
if (fis->status & ATA_ERR) {
|
||||
dev_warn(dev, "sata int: phy%d FIS status: 0x%x\n",
|
||||
phy_no, fis->status);
|
||||
hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
|
||||
res = IRQ_NONE;
|
||||
goto end;
|
||||
}
|
||||
|
||||
sas_phy->oob_mode = SATA_OOB_MODE;
|
||||
attached_sas_addr[0] = 0x50;
|
||||
attached_sas_addr[7] = phy_no;
|
||||
@ -1256,9 +1287,13 @@ static irqreturn_t phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
|
||||
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
|
||||
struct asd_sas_phy *sas_phy = &phy->sas_phy;
|
||||
struct sas_ha_struct *sas_ha = &hisi_hba->sha;
|
||||
u32 bcast_status;
|
||||
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
|
||||
sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
|
||||
bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
|
||||
if ((bcast_status & RX_BCAST_CHG_MSK) &&
|
||||
!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
|
||||
sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
|
||||
CHL_INT0_SL_RX_BCST_ACK_MSK);
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
|
||||
@ -1327,11 +1362,77 @@ static const struct hisi_sas_hw_error port_axi_error[] = {
|
||||
},
|
||||
};
|
||||
|
||||
static void handle_chl_int1_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
|
||||
{
|
||||
u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT1);
|
||||
u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT1_MSK);
|
||||
struct device *dev = hisi_hba->dev;
|
||||
int i;
|
||||
|
||||
irq_value &= ~irq_msk;
|
||||
if (!irq_value)
|
||||
return;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(port_axi_error); i++) {
|
||||
const struct hisi_sas_hw_error *error = &port_axi_error[i];
|
||||
|
||||
if (!(irq_value & error->irq_msk))
|
||||
continue;
|
||||
|
||||
dev_err(dev, "%s error (phy%d 0x%x) found!\n",
|
||||
error->msg, phy_no, irq_value);
|
||||
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
|
||||
}
|
||||
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT1, irq_value);
|
||||
}
|
||||
|
||||
static void handle_chl_int2_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
|
||||
{
|
||||
u32 irq_msk = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2_MSK);
|
||||
u32 irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2);
|
||||
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
|
||||
struct pci_dev *pci_dev = hisi_hba->pci_dev;
|
||||
struct device *dev = hisi_hba->dev;
|
||||
|
||||
irq_value &= ~irq_msk;
|
||||
if (!irq_value)
|
||||
return;
|
||||
|
||||
if (irq_value & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
|
||||
dev_warn(dev, "phy%d identify timeout\n", phy_no);
|
||||
hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
|
||||
}
|
||||
|
||||
if (irq_value & BIT(CHL_INT2_STP_LINK_TIMEOUT_OFF)) {
|
||||
u32 reg_value = hisi_sas_phy_read32(hisi_hba, phy_no,
|
||||
STP_LINK_TIMEOUT_STATE);
|
||||
|
||||
dev_warn(dev, "phy%d stp link timeout (0x%x)\n",
|
||||
phy_no, reg_value);
|
||||
if (reg_value & BIT(4))
|
||||
hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
|
||||
}
|
||||
|
||||
if ((irq_value & BIT(CHL_INT2_RX_INVLD_DW_OFF)) &&
|
||||
(pci_dev->revision == 0x20)) {
|
||||
u32 reg_value;
|
||||
int rc;
|
||||
|
||||
rc = hisi_sas_read32_poll_timeout_atomic(
|
||||
HILINK_ERR_DFX, reg_value,
|
||||
!((reg_value >> 8) & BIT(phy_no)),
|
||||
1000, 10000);
|
||||
if (rc)
|
||||
hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
|
||||
}
|
||||
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2, irq_value);
|
||||
}
|
||||
|
||||
static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
|
||||
{
|
||||
struct hisi_hba *hisi_hba = p;
|
||||
struct device *dev = hisi_hba->dev;
|
||||
struct pci_dev *pci_dev = hisi_hba->pci_dev;
|
||||
u32 irq_msk;
|
||||
int phy_no = 0;
|
||||
|
||||
@ -1341,84 +1442,12 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p)
|
||||
while (irq_msk) {
|
||||
u32 irq_value0 = hisi_sas_phy_read32(hisi_hba, phy_no,
|
||||
CHL_INT0);
|
||||
u32 irq_value1 = hisi_sas_phy_read32(hisi_hba, phy_no,
|
||||
CHL_INT1);
|
||||
u32 irq_value2 = hisi_sas_phy_read32(hisi_hba, phy_no,
|
||||
CHL_INT2);
|
||||
u32 irq_msk1 = hisi_sas_phy_read32(hisi_hba, phy_no,
|
||||
CHL_INT1_MSK);
|
||||
u32 irq_msk2 = hisi_sas_phy_read32(hisi_hba, phy_no,
|
||||
CHL_INT2_MSK);
|
||||
|
||||
irq_value1 &= ~irq_msk1;
|
||||
irq_value2 &= ~irq_msk2;
|
||||
if (irq_msk & (4 << (phy_no * 4)))
|
||||
handle_chl_int1_v3_hw(hisi_hba, phy_no);
|
||||
|
||||
if ((irq_msk & (4 << (phy_no * 4))) &&
|
||||
irq_value1) {
|
||||
int i;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(port_axi_error); i++) {
|
||||
const struct hisi_sas_hw_error *error =
|
||||
&port_axi_error[i];
|
||||
|
||||
if (!(irq_value1 & error->irq_msk))
|
||||
continue;
|
||||
|
||||
dev_err(dev, "%s error (phy%d 0x%x) found!\n",
|
||||
error->msg, phy_no, irq_value1);
|
||||
queue_work(hisi_hba->wq, &hisi_hba->rst_work);
|
||||
}
|
||||
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no,
|
||||
CHL_INT1, irq_value1);
|
||||
}
|
||||
|
||||
if (irq_msk & (8 << (phy_no * 4)) && irq_value2) {
|
||||
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
|
||||
|
||||
if (irq_value2 & BIT(CHL_INT2_SL_IDAF_TOUT_CONF_OFF)) {
|
||||
dev_warn(dev, "phy%d identify timeout\n",
|
||||
phy_no);
|
||||
hisi_sas_notify_phy_event(phy,
|
||||
HISI_PHYE_LINK_RESET);
|
||||
|
||||
}
|
||||
|
||||
if (irq_value2 & BIT(CHL_INT2_STP_LINK_TIMEOUT_OFF)) {
|
||||
u32 reg_value = hisi_sas_phy_read32(hisi_hba,
|
||||
phy_no, STP_LINK_TIMEOUT_STATE);
|
||||
|
||||
dev_warn(dev, "phy%d stp link timeout (0x%x)\n",
|
||||
phy_no, reg_value);
|
||||
if (reg_value & BIT(4))
|
||||
hisi_sas_notify_phy_event(phy,
|
||||
HISI_PHYE_LINK_RESET);
|
||||
}
|
||||
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no,
|
||||
CHL_INT2, irq_value2);
|
||||
|
||||
if ((irq_value2 & BIT(CHL_INT2_RX_INVLD_DW_OFF)) &&
|
||||
(pci_dev->revision == 0x20)) {
|
||||
u32 reg_value;
|
||||
int rc;
|
||||
|
||||
rc = hisi_sas_read32_poll_timeout_atomic(
|
||||
HILINK_ERR_DFX, reg_value,
|
||||
!((reg_value >> 8) & BIT(phy_no)),
|
||||
1000, 10000);
|
||||
if (rc) {
|
||||
disable_phy_v3_hw(hisi_hba, phy_no);
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no,
|
||||
CHL_INT2,
|
||||
BIT(CHL_INT2_RX_INVLD_DW_OFF));
|
||||
hisi_sas_phy_read32(hisi_hba, phy_no,
|
||||
ERR_CNT_INVLD_DW);
|
||||
mdelay(1);
|
||||
enable_phy_v3_hw(hisi_hba, phy_no);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (irq_msk & (8 << (phy_no * 4)))
|
||||
handle_chl_int2_v3_hw(hisi_hba, phy_no);
|
||||
|
||||
if (irq_msk & (2 << (phy_no * 4)) && irq_value0) {
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no,
|
||||
@ -1964,11 +1993,11 @@ static void phy_get_events_v3_hw(struct hisi_hba *hisi_hba, int phy_no)
|
||||
|
||||
}
|
||||
|
||||
static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
|
||||
static int disable_host_v3_hw(struct hisi_hba *hisi_hba)
|
||||
{
|
||||
struct device *dev = hisi_hba->dev;
|
||||
u32 status, reg_val;
|
||||
int rc;
|
||||
u32 status;
|
||||
|
||||
interrupt_disable_v3_hw(hisi_hba);
|
||||
hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
|
||||
@ -1978,14 +2007,32 @@ static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
|
||||
|
||||
mdelay(10);
|
||||
|
||||
hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE + AM_CTRL_GLOBAL, 0x1);
|
||||
reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE +
|
||||
AM_CTRL_GLOBAL);
|
||||
reg_val |= AM_CTRL_SHUTDOWN_REQ_MSK;
|
||||
hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
|
||||
AM_CTRL_GLOBAL, reg_val);
|
||||
|
||||
/* wait until bus idle */
|
||||
rc = hisi_sas_read32_poll_timeout(AXI_MASTER_CFG_BASE +
|
||||
AM_CURR_TRANS_RETURN, status,
|
||||
status == 0x3, 10, 100);
|
||||
if (rc) {
|
||||
dev_err(dev, "axi bus is not idle, rc = %d\n", rc);
|
||||
dev_err(dev, "axi bus is not idle, rc=%d\n", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int soft_reset_v3_hw(struct hisi_hba *hisi_hba)
|
||||
{
|
||||
struct device *dev = hisi_hba->dev;
|
||||
int rc;
|
||||
|
||||
rc = disable_host_v3_hw(hisi_hba);
|
||||
if (rc) {
|
||||
dev_err(dev, "soft reset: disable host failed rc=%d\n", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
@ -2433,6 +2480,41 @@ static pci_ers_result_t hisi_sas_slot_reset_v3_hw(struct pci_dev *pdev)
|
||||
return PCI_ERS_RESULT_DISCONNECT;
|
||||
}
|
||||

static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev)
{
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
struct hisi_hba *hisi_hba = sha->lldd_ha;
struct device *dev = hisi_hba->dev;
int rc;

dev_info(dev, "FLR prepare\n");
set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
hisi_sas_controller_reset_prepare(hisi_hba);

rc = disable_host_v3_hw(hisi_hba);
if (rc)
dev_err(dev, "FLR: disable host failed rc=%d\n", rc);
}

static void hisi_sas_reset_done_v3_hw(struct pci_dev *pdev)
{
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
struct hisi_hba *hisi_hba = sha->lldd_ha;
struct device *dev = hisi_hba->dev;
int rc;

hisi_sas_init_mem(hisi_hba);

rc = hw_init_v3_hw(hisi_hba);
if (rc) {
dev_err(dev, "FLR: hw init failed rc=%d\n", rc);
return;
}

hisi_sas_controller_reset_done(hisi_hba);
dev_info(dev, "FLR done\n");
}

enum {
|
||||
/* instances of the controller */
|
||||
hip08,
|
||||
@ -2444,38 +2526,24 @@ static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
|
||||
struct hisi_hba *hisi_hba = sha->lldd_ha;
|
||||
struct device *dev = hisi_hba->dev;
|
||||
struct Scsi_Host *shost = hisi_hba->shost;
|
||||
u32 device_state, status;
|
||||
u32 device_state;
|
||||
int rc;
|
||||
u32 reg_val;
|
||||
|
||||
if (!pdev->pm_cap) {
|
||||
dev_err(dev, "PCI PM not supported\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
|
||||
if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
|
||||
return -1;
|
||||
|
||||
scsi_block_requests(shost);
|
||||
set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
|
||||
flush_workqueue(hisi_hba->wq);
|
||||
/* disable DQ/PHY/bus */
|
||||
interrupt_disable_v3_hw(hisi_hba);
|
||||
hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0);
|
||||
hisi_sas_kill_tasklets(hisi_hba);
|
||||
|
||||
hisi_sas_stop_phys(hisi_hba);
|
||||
|
||||
reg_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE +
|
||||
AM_CTRL_GLOBAL);
|
||||
reg_val |= 0x1;
|
||||
hisi_sas_write32(hisi_hba, AXI_MASTER_CFG_BASE +
|
||||
AM_CTRL_GLOBAL, reg_val);
|
||||
|
||||
/* wait until bus idle */
|
||||
rc = hisi_sas_read32_poll_timeout(AXI_MASTER_CFG_BASE +
|
||||
AM_CURR_TRANS_RETURN, status,
|
||||
status == 0x3, 10, 100);
|
||||
rc = disable_host_v3_hw(hisi_hba);
|
||||
if (rc) {
|
||||
dev_err(dev, "axi bus is not idle, rc = %d\n", rc);
|
||||
dev_err(dev, "PM suspend: disable host failed rc=%d\n", rc);
|
||||
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
|
||||
clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
|
||||
scsi_unblock_requests(shost);
|
||||
@ -2538,6 +2606,8 @@ static const struct pci_error_handlers hisi_sas_err_handler = {
|
||||
.error_detected = hisi_sas_error_detected_v3_hw,
|
||||
.mmio_enabled = hisi_sas_mmio_enabled_v3_hw,
|
||||
.slot_reset = hisi_sas_slot_reset_v3_hw,
|
||||
.reset_prepare = hisi_sas_reset_prepare_v3_hw,
|
||||
.reset_done = hisi_sas_reset_done_v3_hw,
|
||||
};
|
||||
|
||||
static struct pci_driver sas_v3_pci_driver = {
|
||||
@@ -563,6 +563,38 @@ struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost)
}
EXPORT_SYMBOL(scsi_host_get);

struct scsi_host_mq_in_flight {
int cnt;
};

static void scsi_host_check_in_flight(struct request *rq, void *data,
bool reserved)
{
struct scsi_host_mq_in_flight *in_flight = data;

if (blk_mq_request_started(rq))
in_flight->cnt++;
}

/**
* scsi_host_busy - Return the host busy counter
* @shost: Pointer to Scsi_Host to inc.
**/
int scsi_host_busy(struct Scsi_Host *shost)
{
struct scsi_host_mq_in_flight in_flight = {
.cnt = 0,
};

if (!shost->use_blk_mq)
return atomic_read(&shost->host_busy);

blk_mq_tagset_busy_iter(&shost->tag_set, scsi_host_check_in_flight,
&in_flight);
return in_flight.cnt;
}
EXPORT_SYMBOL(scsi_host_busy);

/**
* scsi_host_put - dec a Scsi_Host ref count
* @shost: Pointer to Scsi_Host to dec.
@@ -1322,7 +1322,7 @@ static void ibmvfc_map_sg_list(struct scsi_cmnd *scmd, int nseg,

/**
* ibmvfc_map_sg_data - Maps dma for a scatterlist and initializes decriptor fields
* @scmd: Scsi_Cmnd with the scatterlist
* @scmd: struct scsi_cmnd with the scatterlist
* @evt: ibmvfc event struct
* @vfc_cmd: vfc_cmd that contains the memory descriptor
* @dev: device for which to map dma memory
@@ -93,7 +93,7 @@ static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
static int fast_fail = 1;
static int client_reserve = 1;
static char partition_name[97] = "UNKNOWN";
static char partition_name[96] = "UNKNOWN";
static unsigned int partition_number = -1;
static LIST_HEAD(ibmvscsi_head);

@@ -262,7 +262,7 @@ static void gather_partition_info(void)

ppartition_name = of_get_property(of_root, "ibm,partition-name", NULL);
if (ppartition_name)
strncpy(partition_name, ppartition_name,
strlcpy(partition_name, ppartition_name,
sizeof(partition_name));
p_number_ptr = of_get_property(of_root, "ibm,partition-no", NULL);
if (p_number_ptr)
@@ -681,7 +681,7 @@ static int map_sg_list(struct scsi_cmnd *cmd, int nseg,

/**
* map_sg_data: - Maps dma for a scatterlist and initializes decriptor fields
* @cmd: Scsi_Cmnd with the scatterlist
* @cmd: struct scsi_cmnd with the scatterlist
* @srp_cmd: srp_cmd that contains the memory descriptor
* @dev: device for which to map dma memory
*
@@ -1274,14 +1274,12 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
if (hostdata->client_migrated)
hostdata->caps.flags |= cpu_to_be32(CLIENT_MIGRATED);

strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
strlcpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
sizeof(hostdata->caps.name));
hostdata->caps.name[sizeof(hostdata->caps.name) - 1] = '\0';

location = of_get_property(of_node, "ibm,loc-code", NULL);
location = location ? location : dev_name(hostdata->dev);
strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));
hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0';
strlcpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));

req->common.type = cpu_to_be32(VIOSRP_CAPABILITIES_TYPE);
req->buffer = cpu_to_be64(hostdata->caps_addr);
@@ -2233,7 +2233,7 @@ static int ibmvscsis_make_nexus(struct ibmvscsis_tport *tport)
return -ENOMEM;
}

nexus->se_sess = target_alloc_session(&tport->se_tpg, 0, 0,
nexus->se_sess = target_setup_session(&tport->se_tpg, 0, 0,
TARGET_PROT_NORMAL, name, nexus,
NULL);
if (IS_ERR(nexus->se_sess)) {
@@ -2267,8 +2267,7 @@ static int ibmvscsis_drop_nexus(struct ibmvscsis_tport *tport)
* Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port
*/
target_wait_for_sess_cmds(se_sess);
transport_deregister_session_configfs(se_sess);
transport_deregister_session(se_sess);
target_remove_session(se_sess);
tport->ibmv_nexus = NULL;
kfree(nexus);

@@ -3928,7 +3927,6 @@ static void ibmvscsis_drop_tport(struct se_wwn *wwn)
}

static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn,
struct config_group *group,
const char *name)
{
struct ibmvscsis_tport *tport =
@@ -892,7 +892,7 @@ static int imm_engine(imm_struct *dev, struct scsi_cmnd *cmd)
/* Check for optional message byte */
if (imm_wait(dev) == (unsigned char) 0xb8)
imm_in(dev, &h, 1);
cmd->result = (DID_OK << 16) + (l & STATUS_MASK);
cmd->result = (DID_OK << 16) | (l & STATUS_MASK);
}
if ((dev->mode == IMM_NIBBLE) || (dev->mode == IMM_PS2)) {
w_ctr(ppb, 0x4);
@@ -2411,6 +2411,28 @@ static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
}

/**
* ipr_log_sis64_service_required_error - Log a sis64 service required error.
* @ioa_cfg: ioa config struct
* @hostrcb: hostrcb struct
*
* Return value:
* none
**/
static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg *ioa_cfg,
struct ipr_hostrcb *hostrcb)
{
struct ipr_hostrcb_type_41_error *error;

error = &hostrcb->hcam.u.error64.u.type_41_error;

error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
ipr_err("Primary Failure Reason: %s\n", error->failure_reason);
ipr_log_hex_data(ioa_cfg, error->data,
be32_to_cpu(hostrcb->hcam.length) -
(offsetof(struct ipr_hostrcb_error, u) +
offsetof(struct ipr_hostrcb_type_41_error, data)));
}
/**
* ipr_log_generic_error - Log an adapter error.
* @ioa_cfg: ioa config struct
@@ -2586,6 +2608,9 @@ static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
case IPR_HOST_RCB_OVERLAY_ID_30:
ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
break;
case IPR_HOST_RCB_OVERLAY_ID_41:
ipr_log_sis64_service_required_error(ioa_cfg, hostrcb);
break;
case IPR_HOST_RCB_OVERLAY_ID_1:
case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
default:
@@ -1135,6 +1135,11 @@ struct ipr_hostrcb_type_30_error {
struct ipr_hostrcb64_fabric_desc desc[1];
}__attribute__((packed, aligned (4)));

struct ipr_hostrcb_type_41_error {
u8 failure_reason[64];
__be32 data[200];
}__attribute__((packed, aligned (4)));

struct ipr_hostrcb_error {
__be32 fd_ioasc;
struct ipr_res_addr fd_res_addr;
@@ -1173,6 +1178,7 @@ struct ipr_hostrcb64_error {
struct ipr_hostrcb_type_23_error type_23_error;
struct ipr_hostrcb_type_24_error type_24_error;
struct ipr_hostrcb_type_30_error type_30_error;
struct ipr_hostrcb_type_41_error type_41_error;
} u;
}__attribute__((packed, aligned (8)));

@@ -1218,6 +1224,7 @@ struct ipr_hcam {
#define IPR_HOST_RCB_OVERLAY_ID_24 0x24
#define IPR_HOST_RCB_OVERLAY_ID_26 0x26
#define IPR_HOST_RCB_OVERLAY_ID_30 0x30
#define IPR_HOST_RCB_OVERLAY_ID_41 0x41
#define IPR_HOST_RCB_OVERLAY_ID_DEFAULT 0xFF

u8 reserved1[3];
|
@ -59,34 +59,25 @@ static void fc_disc_restart(struct fc_disc *);
|
||||
/**
|
||||
* fc_disc_stop_rports() - Delete all the remote ports associated with the lport
|
||||
* @disc: The discovery job to stop remote ports on
|
||||
*
|
||||
* Locking Note: This function expects that the lport mutex is locked before
|
||||
* calling it.
|
||||
*/
|
||||
static void fc_disc_stop_rports(struct fc_disc *disc)
|
||||
{
|
||||
struct fc_lport *lport;
|
||||
struct fc_rport_priv *rdata;
|
||||
|
||||
lport = fc_disc_lport(disc);
|
||||
lockdep_assert_held(&disc->disc_mutex);
|
||||
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(rdata, &disc->rports, peers) {
|
||||
list_for_each_entry(rdata, &disc->rports, peers) {
|
||||
if (kref_get_unless_zero(&rdata->kref)) {
|
||||
fc_rport_logoff(rdata);
|
||||
kref_put(&rdata->kref, fc_rport_destroy);
|
||||
}
|
||||
}
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
/**
|
||||
* fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN)
|
||||
* @disc: The discovery object to which the RSCN applies
|
||||
* @fp: The RSCN frame
|
||||
*
|
||||
* Locking Note: This function expects that the disc_mutex is locked
|
||||
* before it is called.
|
||||
*/
|
||||
static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp)
|
||||
{
|
||||
@ -101,6 +92,8 @@ static void fc_disc_recv_rscn_req(struct fc_disc *disc, struct fc_frame *fp)
|
||||
LIST_HEAD(disc_ports);
|
||||
struct fc_disc_port *dp, *next;
|
||||
|
||||
lockdep_assert_held(&disc->disc_mutex);
|
||||
|
||||
lport = fc_disc_lport(disc);
|
||||
|
||||
FC_DISC_DBG(disc, "Received an RSCN event\n");
|
||||
@ -220,12 +213,11 @@ static void fc_disc_recv_req(struct fc_lport *lport, struct fc_frame *fp)
|
||||
/**
|
||||
* fc_disc_restart() - Restart discovery
|
||||
* @disc: The discovery object to be restarted
|
||||
*
|
||||
* Locking Note: This function expects that the disc mutex
|
||||
* is already locked.
|
||||
*/
|
||||
static void fc_disc_restart(struct fc_disc *disc)
|
||||
{
|
||||
lockdep_assert_held(&disc->disc_mutex);
|
||||
|
||||
if (!disc->disc_callback)
|
||||
return;
|
||||
|
||||
@ -271,16 +263,13 @@ static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
|
||||
* fc_disc_done() - Discovery has been completed
|
||||
* @disc: The discovery context
|
||||
* @event: The discovery completion status
|
||||
*
|
||||
* Locking Note: This function expects that the disc mutex is locked before
|
||||
* it is called. The discovery callback is then made with the lock released,
|
||||
* and the lock is re-taken before returning from this function
|
||||
*/
|
||||
static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
|
||||
{
|
||||
struct fc_lport *lport = fc_disc_lport(disc);
|
||||
struct fc_rport_priv *rdata;
|
||||
|
||||
lockdep_assert_held(&disc->disc_mutex);
|
||||
FC_DISC_DBG(disc, "Discovery complete\n");
|
||||
|
||||
disc->pending = 0;
|
||||
@ -294,9 +283,11 @@ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
|
||||
* discovery, reverify or log them in. Otherwise, log them out.
|
||||
* Skip ports which were never discovered. These are the dNS port
|
||||
* and ports which were created by PLOGI.
|
||||
*
|
||||
* We don't need to use the _rcu variant here as the rport list
|
||||
* is protected by the disc mutex which is already held on entry.
|
||||
*/
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(rdata, &disc->rports, peers) {
|
||||
list_for_each_entry(rdata, &disc->rports, peers) {
|
||||
if (!kref_get_unless_zero(&rdata->kref))
|
||||
continue;
|
||||
if (rdata->disc_id) {
|
||||
@ -307,7 +298,6 @@ static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
|
||||
}
|
||||
kref_put(&rdata->kref, fc_rport_destroy);
|
||||
}
|
||||
rcu_read_unlock();
|
||||
mutex_unlock(&disc->disc_mutex);
|
||||
disc->disc_callback(lport, event);
|
||||
mutex_lock(&disc->disc_mutex);
|
||||
@ -360,15 +350,14 @@ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
|
||||
/**
|
||||
* fc_disc_gpn_ft_req() - Send Get Port Names by FC-4 type (GPN_FT) request
|
||||
* @lport: The discovery context
|
||||
*
|
||||
* Locking Note: This function expects that the disc_mutex is locked
|
||||
* before it is called.
|
||||
*/
|
||||
static void fc_disc_gpn_ft_req(struct fc_disc *disc)
|
||||
{
|
||||
struct fc_frame *fp;
|
||||
struct fc_lport *lport = fc_disc_lport(disc);
|
||||
|
||||
lockdep_assert_held(&disc->disc_mutex);
|
||||
|
||||
WARN_ON(!fc_lport_test_ready(lport));
|
||||
|
||||
disc->pending = 1;
|
||||
@ -658,8 +647,6 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
|
||||
* @lport: The local port to initiate discovery on
|
||||
* @rdata: remote port private data
|
||||
*
|
||||
* Locking Note: This function expects that the disc_mutex is locked
|
||||
* before it is called.
|
||||
* On failure, an error code is returned.
|
||||
*/
|
||||
static int fc_disc_gpn_id_req(struct fc_lport *lport,
|
||||
@ -667,6 +654,7 @@ static int fc_disc_gpn_id_req(struct fc_lport *lport,
|
||||
{
|
||||
struct fc_frame *fp;
|
||||
|
||||
lockdep_assert_held(&lport->disc.disc_mutex);
|
||||
fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
|
||||
sizeof(struct fc_ns_fid));
|
||||
if (!fp)
|
||||
@ -683,14 +671,13 @@ static int fc_disc_gpn_id_req(struct fc_lport *lport,
|
||||
* fc_disc_single() - Discover the directory information for a single target
|
||||
* @lport: The local port the remote port is associated with
|
||||
* @dp: The port to rediscover
|
||||
*
|
||||
* Locking Note: This function expects that the disc_mutex is locked
|
||||
* before it is called.
|
||||
*/
|
||||
static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp)
|
||||
{
|
||||
struct fc_rport_priv *rdata;
|
||||
|
||||
lockdep_assert_held(&lport->disc.disc_mutex);
|
||||
|
||||
rdata = fc_rport_create(lport, dp->port_id);
|
||||
if (!rdata)
|
||||
return -ENOMEM;
|
||||
@ -708,7 +695,9 @@ static void fc_disc_stop(struct fc_lport *lport)
|
||||
|
||||
if (disc->pending)
|
||||
cancel_delayed_work_sync(&disc->disc_work);
|
||||
mutex_lock(&disc->disc_mutex);
|
||||
fc_disc_stop_rports(disc);
|
||||
mutex_unlock(&disc->disc_mutex);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -237,14 +237,13 @@ static const char *fc_lport_state(struct fc_lport *lport)
|
||||
* @remote_fid: The FID of the ptp rport
|
||||
* @remote_wwpn: The WWPN of the ptp rport
|
||||
* @remote_wwnn: The WWNN of the ptp rport
|
||||
*
|
||||
* Locking Note: The lport lock is expected to be held before calling
|
||||
* this routine.
|
||||
*/
|
||||
static void fc_lport_ptp_setup(struct fc_lport *lport,
|
||||
u32 remote_fid, u64 remote_wwpn,
|
||||
u64 remote_wwnn)
|
||||
{
|
||||
lockdep_assert_held(&lport->lp_mutex);
|
||||
|
||||
if (lport->ptp_rdata) {
|
||||
fc_rport_logoff(lport->ptp_rdata);
|
||||
kref_put(&lport->ptp_rdata->kref, fc_rport_destroy);
|
||||
@ -403,12 +402,11 @@ static void fc_lport_add_fc4_type(struct fc_lport *lport, enum fc_fh_type type)
|
||||
* fc_lport_recv_rlir_req() - Handle received Registered Link Incident Report.
|
||||
* @lport: Fibre Channel local port receiving the RLIR
|
||||
* @fp: The RLIR request frame
|
||||
*
|
||||
* Locking Note: The lport lock is expected to be held before calling
|
||||
* this function.
|
||||
*/
|
||||
static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp)
|
||||
{
|
||||
lockdep_assert_held(&lport->lp_mutex);
|
||||
|
||||
FC_LPORT_DBG(lport, "Received RLIR request while in state %s\n",
|
||||
fc_lport_state(lport));
|
||||
|
||||
@ -420,9 +418,6 @@ static void fc_lport_recv_rlir_req(struct fc_lport *lport, struct fc_frame *fp)
|
||||
* fc_lport_recv_echo_req() - Handle received ECHO request
|
||||
* @lport: The local port receiving the ECHO
|
||||
* @fp: ECHO request frame
|
||||
*
|
||||
* Locking Note: The lport lock is expected to be held before calling
|
||||
* this function.
|
||||
*/
|
||||
static void fc_lport_recv_echo_req(struct fc_lport *lport,
|
||||
struct fc_frame *in_fp)
|
||||
@ -432,6 +427,8 @@ static void fc_lport_recv_echo_req(struct fc_lport *lport,
|
||||
void *pp;
|
||||
void *dp;
|
||||
|
||||
lockdep_assert_held(&lport->lp_mutex);
|
||||
|
||||
FC_LPORT_DBG(lport, "Received ECHO request while in state %s\n",
|
||||
fc_lport_state(lport));
|
||||
|
||||
@ -456,9 +453,6 @@ static void fc_lport_recv_echo_req(struct fc_lport *lport,
|
||||
* fc_lport_recv_rnid_req() - Handle received Request Node ID data request
|
||||
* @lport: The local port receiving the RNID
|
||||
* @fp: The RNID request frame
|
||||
*
|
||||
* Locking Note: The lport lock is expected to be held before calling
|
||||
* this function.
|
||||
*/
|
||||
static void fc_lport_recv_rnid_req(struct fc_lport *lport,
|
||||
struct fc_frame *in_fp)
|
||||
@ -474,6 +468,8 @@ static void fc_lport_recv_rnid_req(struct fc_lport *lport,
|
||||
u8 fmt;
|
||||
size_t len;
|
||||
|
||||
lockdep_assert_held(&lport->lp_mutex);
|
||||
|
||||
FC_LPORT_DBG(lport, "Received RNID request while in state %s\n",
|
||||
fc_lport_state(lport));
|
||||
|
||||
@ -515,12 +511,11 @@ static void fc_lport_recv_rnid_req(struct fc_lport *lport,
|
||||
* fc_lport_recv_logo_req() - Handle received fabric LOGO request
|
||||
* @lport: The local port receiving the LOGO
|
||||
* @fp: The LOGO request frame
|
||||
*
|
||||
* Locking Note: The lport lock is expected to be held before calling
|
||||
* this function.
|
||||
*/
|
||||
static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
|
||||
{
|
||||
lockdep_assert_held(&lport->lp_mutex);
|
||||
|
||||
fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);
|
||||
fc_lport_enter_reset(lport);
|
||||
fc_frame_free(fp);
|
||||
@ -553,11 +548,11 @@ EXPORT_SYMBOL(fc_fabric_login);
|
||||
/**
|
||||
* __fc_linkup() - Handler for transport linkup events
|
||||
* @lport: The lport whose link is up
|
||||
*
|
||||
* Locking: must be called with the lp_mutex held
|
||||
*/
|
||||
void __fc_linkup(struct fc_lport *lport)
|
||||
{
|
||||
lockdep_assert_held(&lport->lp_mutex);
|
||||
|
||||
if (!lport->link_up) {
|
||||
lport->link_up = 1;
|
||||
|
||||
@ -584,11 +579,11 @@ EXPORT_SYMBOL(fc_linkup);
|
||||
/**
|
||||
* __fc_linkdown() - Handler for transport linkdown events
|
||||
* @lport: The lport whose link is down
|
||||
*
|
||||
* Locking: must be called with the lp_mutex held
|
||||
*/
|
||||
void __fc_linkdown(struct fc_lport *lport)
|
||||
{
|
||||
lockdep_assert_held(&lport->lp_mutex);
|
||||
|
||||
if (lport->link_up) {
|
||||
lport->link_up = 0;
|
||||
fc_lport_enter_reset(lport);
|
||||
@ -722,12 +717,11 @@ static void fc_lport_disc_callback(struct fc_lport *lport,
|
||||
/**
|
||||
* fc_rport_enter_ready() - Enter the ready state and start discovery
|
||||
* @lport: The local port that is ready
|
||||
*
|
||||
* Locking Note: The lport lock is expected to be held before calling
|
||||
* this routine.
|
||||
*/
|
||||
static void fc_lport_enter_ready(struct fc_lport *lport)
|
||||
{
|
||||
lockdep_assert_held(&lport->lp_mutex);
|
||||
|
||||
FC_LPORT_DBG(lport, "Entered READY from state %s\n",
|
||||
fc_lport_state(lport));
|
||||
|
||||
@ -745,13 +739,12 @@ static void fc_lport_enter_ready(struct fc_lport *lport)
|
||||
* @lport: The local port which will have its Port ID set.
|
||||
* @port_id: The new port ID.
|
||||
* @fp: The frame containing the incoming request, or NULL.
|
||||
*
|
||||
* Locking Note: The lport lock is expected to be held before calling
|
||||
* this function.
|
||||
*/
|
||||
static void fc_lport_set_port_id(struct fc_lport *lport, u32 port_id,
|
||||
struct fc_frame *fp)
|
||||
{
|
||||
lockdep_assert_held(&lport->lp_mutex);
|
||||
|
||||
if (port_id)
|
||||
printk(KERN_INFO "host%d: Assigned Port ID %6.6x\n",
|
||||
lport->host->host_no, port_id);
|
||||
@ -801,9 +794,6 @@ EXPORT_SYMBOL(fc_lport_set_local_id);
|
||||
* A received FLOGI request indicates a point-to-point connection.
|
||||
* Accept it with the common service parameters indicating our N port.
|
||||
* Set up to do a PLOGI if we have the higher-number WWPN.
|
||||
*
|
||||
* Locking Note: The lport lock is expected to be held before calling
|
||||
* this function.
|
||||
*/
|
||||
static void fc_lport_recv_flogi_req(struct fc_lport *lport,
|
||||
struct fc_frame *rx_fp)
|
||||
@ -816,6 +806,8 @@ static void fc_lport_recv_flogi_req(struct fc_lport *lport,
|
||||
u32 remote_fid;
|
||||
u32 local_fid;
|
||||
|
||||
lockdep_assert_held(&lport->lp_mutex);
|
||||
|
||||
FC_LPORT_DBG(lport, "Received FLOGI request while in state %s\n",
|
||||
fc_lport_state(lport));
|
||||
|
||||
@ -1006,12 +998,11 @@ EXPORT_SYMBOL(fc_lport_reset);
|
||||
/**
|
||||
* fc_lport_reset_locked() - Reset the local port w/ the lport lock held
|
||||
* @lport: The local port to be reset
|
||||
*
|
||||
* Locking Note: The lport lock is expected to be held before calling
|
||||
* this routine.
|
||||
*/
|
||||
static void fc_lport_reset_locked(struct fc_lport *lport)
|
||||
{
|
||||
lockdep_assert_held(&lport->lp_mutex);
|
||||
|
||||
if (lport->dns_rdata) {
|
||||
fc_rport_logoff(lport->dns_rdata);
|
||||
lport->dns_rdata = NULL;
|
||||
@ -1035,12 +1026,11 @@ static void fc_lport_reset_locked(struct fc_lport *lport)
|
||||
/**
|
||||
* fc_lport_enter_reset() - Reset the local port
|
||||
* @lport: The local port to be reset
|
||||
*
|
||||
* Locking Note: The lport lock is expected to be held before calling
|
||||
* this routine.
|
||||
*/
|
||||
static void fc_lport_enter_reset(struct fc_lport *lport)
|
||||
{
|
||||
lockdep_assert_held(&lport->lp_mutex);
|
||||
|
||||
FC_LPORT_DBG(lport, "Entered RESET state from %s state\n",
|
||||
fc_lport_state(lport));
|
||||
|
||||
@ -1065,12 +1055,11 @@ static void fc_lport_enter_reset(struct fc_lport *lport)
/**
* fc_lport_enter_disabled() - Disable the local port
* @lport: The local port to be reset
*
* Locking Note: The lport lock is expected to be held before calling
* this routine.
*/
static void fc_lport_enter_disabled(struct fc_lport *lport)
{
lockdep_assert_held(&lport->lp_mutex);

FC_LPORT_DBG(lport, "Entered disabled state from %s state\n",
fc_lport_state(lport));

@ -1321,14 +1310,13 @@ static void fc_lport_scr_resp(struct fc_seq *sp, struct fc_frame *fp,
/**
* fc_lport_enter_scr() - Send a SCR (State Change Register) request
* @lport: The local port to register for state changes
*
* Locking Note: The lport lock is expected to be held before calling
* this routine.
*/
static void fc_lport_enter_scr(struct fc_lport *lport)
{
struct fc_frame *fp;

lockdep_assert_held(&lport->lp_mutex);

FC_LPORT_DBG(lport, "Entered SCR state from %s state\n",
fc_lport_state(lport));

@ -1349,9 +1337,6 @@ static void fc_lport_enter_scr(struct fc_lport *lport)
/**
* fc_lport_enter_ns() - register some object with the name server
* @lport: Fibre Channel local port to register
*
* Locking Note: The lport lock is expected to be held before calling
* this routine.
*/
static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state)
{
@ -1360,6 +1345,8 @@ static void fc_lport_enter_ns(struct fc_lport *lport, enum fc_lport_state state)
int size = sizeof(struct fc_ct_hdr);
size_t len;

lockdep_assert_held(&lport->lp_mutex);

FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
fc_lport_state_names[state],
fc_lport_state(lport));
@ -1419,14 +1406,13 @@ static struct fc_rport_operations fc_lport_rport_ops = {
/**
* fc_rport_enter_dns() - Create a fc_rport for the name server
* @lport: The local port requesting a remote port for the name server
*
* Locking Note: The lport lock is expected to be held before calling
* this routine.
*/
static void fc_lport_enter_dns(struct fc_lport *lport)
{
struct fc_rport_priv *rdata;

lockdep_assert_held(&lport->lp_mutex);

FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
fc_lport_state(lport));

@ -1449,9 +1435,6 @@ static void fc_lport_enter_dns(struct fc_lport *lport)
/**
* fc_lport_enter_ms() - management server commands
* @lport: Fibre Channel local port to register
*
* Locking Note: The lport lock is expected to be held before calling
* this routine.
*/
static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
{
@ -1461,6 +1444,8 @@ static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
size_t len;
int numattrs;

lockdep_assert_held(&lport->lp_mutex);

FC_LPORT_DBG(lport, "Entered %s state from %s state\n",
fc_lport_state_names[state],
fc_lport_state(lport));
@ -1536,14 +1521,13 @@ static void fc_lport_enter_ms(struct fc_lport *lport, enum fc_lport_state state)
/**
* fc_rport_enter_fdmi() - Create a fc_rport for the management server
* @lport: The local port requesting a remote port for the management server
*
* Locking Note: The lport lock is expected to be held before calling
* this routine.
*/
static void fc_lport_enter_fdmi(struct fc_lport *lport)
{
struct fc_rport_priv *rdata;

lockdep_assert_held(&lport->lp_mutex);

FC_LPORT_DBG(lport, "Entered FDMI state from %s state\n",
fc_lport_state(lport));

@ -1668,15 +1652,14 @@ EXPORT_SYMBOL(fc_lport_logo_resp);
/**
* fc_rport_enter_logo() - Logout of the fabric
* @lport: The local port to be logged out
*
* Locking Note: The lport lock is expected to be held before calling
* this routine.
*/
static void fc_lport_enter_logo(struct fc_lport *lport)
{
struct fc_frame *fp;
struct fc_els_logo *logo;

lockdep_assert_held(&lport->lp_mutex);

FC_LPORT_DBG(lport, "Entered LOGO state from %s state\n",
fc_lport_state(lport));

@ -1811,14 +1794,13 @@ EXPORT_SYMBOL(fc_lport_flogi_resp);
/**
* fc_rport_enter_flogi() - Send a FLOGI request to the fabric manager
* @lport: Fibre Channel local port to be logged in to the fabric
*
* Locking Note: The lport lock is expected to be held before calling
* this routine.
*/
static void fc_lport_enter_flogi(struct fc_lport *lport)
{
struct fc_frame *fp;

lockdep_assert_held(&lport->lp_mutex);

FC_LPORT_DBG(lport, "Entered FLOGI state from %s state\n",
fc_lport_state(lport));

@ -1962,9 +1944,6 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
* @job: The BSG Passthrough job
* @lport: The local port sending the request
* @did: The destination port id
*
* Locking Note: The lport lock is expected to be held before calling
* this routine.
*/
static int fc_lport_els_request(struct bsg_job *job,
struct fc_lport *lport,
@ -1976,6 +1955,8 @@ static int fc_lport_els_request(struct bsg_job *job,
char *pp;
int len;

lockdep_assert_held(&lport->lp_mutex);

fp = fc_frame_alloc(lport, job->request_payload.payload_len);
if (!fp)
return -ENOMEM;
@ -2023,9 +2004,6 @@ static int fc_lport_els_request(struct bsg_job *job,
* @lport: The local port sending the request
* @did: The destination FC-ID
* @tov: The timeout period to wait for the response
*
* Locking Note: The lport lock is expected to be held before calling
* this routine.
*/
static int fc_lport_ct_request(struct bsg_job *job,
struct fc_lport *lport, u32 did, u32 tov)
@ -2036,6 +2014,8 @@ static int fc_lport_ct_request(struct bsg_job *job,
struct fc_ct_req *ct;
size_t len;

lockdep_assert_held(&lport->lp_mutex);

fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
job->request_payload.payload_len);
if (!fp)
@ -136,13 +136,13 @@ EXPORT_SYMBOL(fc_rport_lookup);
* @ids: The identifiers for the new remote port
*
* The remote port will start in the INIT state.
*
* Locking note: must be called with the disc_mutex held.
*/
struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
{
struct fc_rport_priv *rdata;

lockdep_assert_held(&lport->disc.disc_mutex);

rdata = fc_rport_lookup(lport, port_id);
if (rdata)
return rdata;
@ -184,6 +184,7 @@ void fc_rport_destroy(struct kref *kref)
struct fc_rport_priv *rdata;

rdata = container_of(kref, struct fc_rport_priv, kref);
WARN_ON(!list_empty(&rdata->peers));
kfree_rcu(rdata, rcu);
}
EXPORT_SYMBOL(fc_rport_destroy);
@ -245,12 +246,12 @@ static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp,
* fc_rport_state_enter() - Change the state of a remote port
* @rdata: The remote port whose state should change
* @new: The new state
*
* Locking Note: Called with the rport lock held
*/
static void fc_rport_state_enter(struct fc_rport_priv *rdata,
enum fc_rport_state new)
{
lockdep_assert_held(&rdata->rp_mutex);

if (rdata->rp_state != new)
rdata->retries = 0;
rdata->rp_state = new;
@ -469,8 +470,6 @@ EXPORT_SYMBOL(fc_rport_login);
* @rdata: The remote port to be deleted
* @event: The event to report as the reason for deletion
*
* Locking Note: Called with the rport lock held.
*
* Allow state change into DELETE only once.
*
* Call queue_work only if there's no event already pending.
@ -483,6 +482,8 @@ EXPORT_SYMBOL(fc_rport_login);
static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
enum fc_rport_event event)
{
lockdep_assert_held(&rdata->rp_mutex);

if (rdata->rp_state == RPORT_ST_DELETE)
return;

@ -546,13 +547,12 @@ EXPORT_SYMBOL(fc_rport_logoff);
* fc_rport_enter_ready() - Transition to the RPORT_ST_READY state
* @rdata: The remote port that is ready
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
*
* Reference counting: schedules workqueue, does not modify kref
*/
static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
{
lockdep_assert_held(&rdata->rp_mutex);

fc_rport_state_enter(rdata, RPORT_ST_READY);

FC_RPORT_DBG(rdata, "Port is Ready\n");
@ -615,15 +615,14 @@ static void fc_rport_timeout(struct work_struct *work)
* @rdata: The remote port the error is happened on
* @err: The error code
*
* Locking Note: The rport lock is expected to be held before
* calling this routine
*
* Reference counting: does not modify kref
*/
static void fc_rport_error(struct fc_rport_priv *rdata, int err)
{
struct fc_lport *lport = rdata->local_port;

lockdep_assert_held(&rdata->rp_mutex);

FC_RPORT_DBG(rdata, "Error %d in state %s, retries %d\n",
-err, fc_rport_state(rdata), rdata->retries);

@ -662,15 +661,14 @@ static void fc_rport_error(struct fc_rport_priv *rdata, int err)
* If the error was an exchange timeout retry immediately,
* otherwise wait for E_D_TOV.
*
* Locking Note: The rport lock is expected to be held before
* calling this routine
*
* Reference counting: increments kref when scheduling retry_work
*/
static void fc_rport_error_retry(struct fc_rport_priv *rdata, int err)
{
unsigned long delay = msecs_to_jiffies(rdata->e_d_tov);

lockdep_assert_held(&rdata->rp_mutex);

/* make sure this isn't an FC_EX_CLOSED error, never retry those */
if (err == -FC_EX_CLOSED)
goto out;
@ -822,9 +820,6 @@ static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
* fc_rport_enter_flogi() - Send a FLOGI request to the remote port for p-mp
* @rdata: The remote port to send a FLOGI to
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
*
* Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_flogi(struct fc_rport_priv *rdata)
@ -832,6 +827,8 @@ static void fc_rport_enter_flogi(struct fc_rport_priv *rdata)
struct fc_lport *lport = rdata->local_port;
struct fc_frame *fp;

lockdep_assert_held(&rdata->rp_mutex);

if (!lport->point_to_multipoint)
return fc_rport_enter_plogi(rdata);

@ -1071,9 +1068,6 @@ fc_rport_compatible_roles(struct fc_lport *lport, struct fc_rport_priv *rdata)
* fc_rport_enter_plogi() - Send Port Login (PLOGI) request
* @rdata: The remote port to send a PLOGI to
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
*
* Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
@ -1081,6 +1075,8 @@ static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
struct fc_lport *lport = rdata->local_port;
struct fc_frame *fp;

lockdep_assert_held(&rdata->rp_mutex);

if (!fc_rport_compatible_roles(lport, rdata)) {
FC_RPORT_DBG(rdata, "PLOGI suppressed for incompatible role\n");
fc_rport_state_enter(rdata, RPORT_ST_PLOGI_WAIT);
@ -1232,9 +1228,6 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
* fc_rport_enter_prli() - Send Process Login (PRLI) request
* @rdata: The remote port to send the PRLI request to
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
*
* Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
@ -1247,6 +1240,8 @@ static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
struct fc_frame *fp;
struct fc4_prov *prov;

lockdep_assert_held(&rdata->rp_mutex);

/*
* If the rport is one of the well known addresses
* we skip PRLI and RTV and go straight to READY.
@ -1372,9 +1367,6 @@ static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
* fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request
* @rdata: The remote port to send the RTV request to
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
*
* Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
@ -1382,6 +1374,8 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
struct fc_frame *fp;
struct fc_lport *lport = rdata->local_port;

lockdep_assert_held(&rdata->rp_mutex);

FC_RPORT_DBG(rdata, "Port entered RTV state from %s state\n",
fc_rport_state(rdata));

@ -1406,8 +1400,6 @@ static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
* fc_rport_recv_rtv_req() - Handler for Read Timeout Value (RTV) requests
* @rdata: The remote port that sent the RTV request
* @in_fp: The RTV request frame
*
* Locking Note: Called with the lport and rport locks held.
*/
static void fc_rport_recv_rtv_req(struct fc_rport_priv *rdata,
struct fc_frame *in_fp)
@ -1417,6 +1409,9 @@ static void fc_rport_recv_rtv_req(struct fc_rport_priv *rdata,
struct fc_els_rtv_acc *rtv;
struct fc_seq_els_data rjt_data;

lockdep_assert_held(&rdata->rp_mutex);
lockdep_assert_held(&lport->lp_mutex);

FC_RPORT_DBG(rdata, "Received RTV request\n");

fp = fc_frame_alloc(lport, sizeof(*rtv));
@ -1460,9 +1455,6 @@ static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
* fc_rport_enter_logo() - Send a logout (LOGO) request
* @rdata: The remote port to send the LOGO request to
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
*
* Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
@ -1470,6 +1462,8 @@ static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
struct fc_lport *lport = rdata->local_port;
struct fc_frame *fp;

lockdep_assert_held(&rdata->rp_mutex);

FC_RPORT_DBG(rdata, "Port sending LOGO from %s state\n",
fc_rport_state(rdata));

@ -1548,9 +1542,6 @@ static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp,
* fc_rport_enter_adisc() - Send Address Discover (ADISC) request
* @rdata: The remote port to send the ADISC request to
*
* Locking Note: The rport lock is expected to be held before calling
* this routine.
*
* Reference counting: increments kref when sending ELS
*/
static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
@ -1558,6 +1549,8 @@ static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
struct fc_lport *lport = rdata->local_port;
struct fc_frame *fp;

lockdep_assert_held(&rdata->rp_mutex);

FC_RPORT_DBG(rdata, "sending ADISC from %s state\n",
fc_rport_state(rdata));

@ -1581,8 +1574,6 @@ static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
* fc_rport_recv_adisc_req() - Handler for Address Discovery (ADISC) requests
* @rdata: The remote port that sent the ADISC request
* @in_fp: The ADISC request frame
*
* Locking Note: Called with the lport and rport locks held.
*/
static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata,
struct fc_frame *in_fp)
@ -1592,6 +1583,9 @@ static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata,
struct fc_els_adisc *adisc;
struct fc_seq_els_data rjt_data;

lockdep_assert_held(&rdata->rp_mutex);
lockdep_assert_held(&lport->lp_mutex);

FC_RPORT_DBG(rdata, "Received ADISC request\n");

adisc = fc_frame_payload_get(in_fp, sizeof(*adisc));
@ -1618,9 +1612,6 @@ static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata,
* fc_rport_recv_rls_req() - Handle received Read Link Status request
* @rdata: The remote port that sent the RLS request
* @rx_fp: The PRLI request frame
*
* Locking Note: The rport lock is expected to be held before calling
* this function.
*/
static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata,
struct fc_frame *rx_fp)
@ -1634,6 +1625,8 @@ static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata,
struct fc_seq_els_data rjt_data;
struct fc_host_statistics *hst;

lockdep_assert_held(&rdata->rp_mutex);

FC_RPORT_DBG(rdata, "Received RLS request while in state %s\n",
fc_rport_state(rdata));

@ -1687,8 +1680,6 @@ static void fc_rport_recv_rls_req(struct fc_rport_priv *rdata,
* Handle incoming ELS requests that require port login.
* The ELS opcode has already been validated by the caller.
*
* Locking Note: Called with the lport lock held.
*
* Reference counting: does not modify kref
*/
static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
@ -1696,6 +1687,8 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
struct fc_rport_priv *rdata;
struct fc_seq_els_data els_data;

lockdep_assert_held(&lport->lp_mutex);

rdata = fc_rport_lookup(lport, fc_frame_sid(fp));
if (!rdata) {
FC_RPORT_ID_DBG(lport, fc_frame_sid(fp),
@ -1783,14 +1776,14 @@ static void fc_rport_recv_els_req(struct fc_lport *lport, struct fc_frame *fp)
* @lport: The local port that received the request
* @fp: The request frame
*
* Locking Note: Called with the lport lock held.
*
* Reference counting: does not modify kref
*/
void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_seq_els_data els_data;

lockdep_assert_held(&lport->lp_mutex);

/*
* Handle FLOGI, PLOGI and LOGO requests separately, since they
* don't require prior login.
@ -1831,8 +1824,6 @@ EXPORT_SYMBOL(fc_rport_recv_req);
* @lport: The local port that received the PLOGI request
* @rx_fp: The PLOGI request frame
*
* Locking Note: The rport lock is held before calling this function.
*
* Reference counting: increments kref on return
*/
static void fc_rport_recv_plogi_req(struct fc_lport *lport,
@ -1845,6 +1836,8 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport,
struct fc_seq_els_data rjt_data;
u32 sid;

lockdep_assert_held(&lport->lp_mutex);

sid = fc_frame_sid(fp);

FC_RPORT_ID_DBG(lport, sid, "Received PLOGI request\n");
@ -1955,9 +1948,6 @@ static void fc_rport_recv_plogi_req(struct fc_lport *lport,
* fc_rport_recv_prli_req() - Handler for process login (PRLI) requests
* @rdata: The remote port that sent the PRLI request
* @rx_fp: The PRLI request frame
*
* Locking Note: The rport lock is expected to be held before calling
* this function.
*/
static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
struct fc_frame *rx_fp)
@ -1976,6 +1966,8 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
struct fc_seq_els_data rjt_data;
struct fc4_prov *prov;

lockdep_assert_held(&rdata->rp_mutex);

FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
fc_rport_state(rdata));

@ -2072,9 +2064,6 @@ static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
* fc_rport_recv_prlo_req() - Handler for process logout (PRLO) requests
* @rdata: The remote port that sent the PRLO request
* @rx_fp: The PRLO request frame
*
* Locking Note: The rport lock is expected to be held before calling
* this function.
*/
static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
struct fc_frame *rx_fp)
@ -2091,6 +2080,8 @@ static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
unsigned int plen;
struct fc_seq_els_data rjt_data;

lockdep_assert_held(&rdata->rp_mutex);

FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n",
fc_rport_state(rdata));

@ -2144,9 +2135,6 @@ static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
* @lport: The local port that received the LOGO request
* @fp: The LOGO request frame
*
* Locking Note: The rport lock is expected to be held before calling
* this function.
*
* Reference counting: drops kref on return
*/
static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
@ -2154,6 +2142,8 @@ static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
struct fc_rport_priv *rdata;
u32 sid;

lockdep_assert_held(&lport->lp_mutex);

fc_seq_els_rsp_send(fp, ELS_LS_ACC, NULL);

sid = fc_frame_sid(fp);
@ -1705,6 +1705,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
sc->result = DID_NO_CONNECT << 16;
break;
}
/* fall through */
case ISCSI_STATE_IN_RECOVERY:
reason = FAILURE_SESSION_IN_RECOVERY;
sc->result = DID_IMM_RETRY << 16;
@ -1832,6 +1833,7 @@ static void iscsi_tmf_timedout(struct timer_list *t)
static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
struct iscsi_tm *hdr, int age,
int timeout)
__must_hold(&session->frwd_lock)
{
struct iscsi_session *session = conn->session;
struct iscsi_task *task;
@ -695,7 +695,7 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
struct scsi_data_buffer *sdb = scsi_in(task->sc);

/*
* Setup copy of Data-In into the Scsi_Cmnd
* Setup copy of Data-In into the struct scsi_cmnd
* Scatterlist case:
* We set up the iscsi_segment to point to the next
* scatterlist entry to copy to. As we go along,
@ -176,7 +176,6 @@ static void sas_ata_task_done(struct sas_task *task)

static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
{
unsigned long flags;
struct sas_task *task;
struct scatterlist *sg;
int ret = AC_ERR_SYSTEM;
@ -187,10 +186,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
struct Scsi_Host *host = sas_ha->core.shost;
struct sas_internal *i = to_sas_internal(host->transportt);

/* TODO: audit callers to ensure they are ready for qc_issue to
* unconditionally re-enable interrupts
*/
local_irq_save(flags);
/* TODO: we should try to remove that unlock */
spin_unlock(ap->lock);

/* If the device fell off, no sense in issuing commands */
@ -252,7 +248,6 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)

out:
spin_lock(ap->lock);
local_irq_restore(flags);
return ret;
}

@ -557,34 +552,46 @@ int sas_ata_init(struct domain_device *found_dev)
{
struct sas_ha_struct *ha = found_dev->port->ha;
struct Scsi_Host *shost = ha->core.shost;
struct ata_host *ata_host;
struct ata_port *ap;
int rc;

ata_host_init(&found_dev->sata_dev.ata_host, ha->dev, &sas_sata_ops);
ap = ata_sas_port_alloc(&found_dev->sata_dev.ata_host,
&sata_port_info,
shost);
ata_host = kzalloc(sizeof(*ata_host), GFP_KERNEL);
if (!ata_host) {
SAS_DPRINTK("ata host alloc failed.\n");
return -ENOMEM;
}

ata_host_init(ata_host, ha->dev, &sas_sata_ops);

ap = ata_sas_port_alloc(ata_host, &sata_port_info, shost);
if (!ap) {
SAS_DPRINTK("ata_sas_port_alloc failed.\n");
return -ENODEV;
rc = -ENODEV;
goto free_host;
}

ap->private_data = found_dev;
ap->cbl = ATA_CBL_SATA;
ap->scsi_host = shost;
rc = ata_sas_port_init(ap);
if (rc) {
ata_sas_port_destroy(ap);
return rc;
}
rc = ata_sas_tport_add(found_dev->sata_dev.ata_host.dev, ap);
if (rc) {
ata_sas_port_destroy(ap);
return rc;
}
if (rc)
goto destroy_port;

rc = ata_sas_tport_add(ata_host->dev, ap);
if (rc)
goto destroy_port;

found_dev->sata_dev.ata_host = ata_host;
found_dev->sata_dev.ap = ap;

return 0;

destroy_port:
ata_sas_port_destroy(ap);
free_host:
ata_host_put(ata_host);
return rc;
}

void sas_ata_task_abort(struct sas_task *task)
@ -316,6 +316,8 @@ void sas_free_device(struct kref *kref)
if (dev_is_sata(dev) && dev->sata_dev.ap) {
ata_sas_tport_delete(dev->sata_dev.ap);
ata_sas_port_destroy(dev->sata_dev.ap);
ata_host_put(dev->sata_dev.ata_host);
dev->sata_dev.ata_host = NULL;
dev->sata_dev.ap = NULL;
}

@ -759,7 +759,7 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
spin_unlock_irq(shost->host_lock);

SAS_DPRINTK("Enter %s busy: %d failed: %d\n",
__func__, atomic_read(&shost->host_busy), shost->host_failed);
__func__, scsi_host_busy(shost), shost->host_failed);
/*
* Deal with commands that still have SAS tasks (i.e. they didn't
* complete via the normal sas_task completion mechanism),
@ -801,7 +801,7 @@ void sas_scsi_recover_host(struct Scsi_Host *shost)
goto retry;

SAS_DPRINTK("--- Exit %s: busy: %d failed: %d tries: %d\n",
__func__, atomic_read(&shost->host_busy),
__func__, scsi_host_busy(shost),
shost->host_failed, tries);
}

@ -1,8 +1,8 @@
#/*******************************************************************
# * This file is part of the Emulex Linux Device Driver for *
# * Fibre Channel Host Bus Adapters. *
# * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
# * “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
# * Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
# * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
# * Copyright (C) 2004-2012 Emulex. All rights reserved. *
# * EMULEX and SLI are trademarks of Emulex. *
# * www.broadcom.com *
@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@ -840,8 +840,7 @@ struct lpfc_hba {
#define LPFC_ENABLE_FCP 1
#define LPFC_ENABLE_NVME 2
#define LPFC_ENABLE_BOTH 3
uint32_t nvme_embed_pbde;
uint32_t fcp_embed_pbde;
uint32_t cfg_enable_pbde;
uint32_t io_channel_irqs; /* number of irqs for io channels */
struct nvmet_fc_target_port *targetport;
lpfc_vpd_t vpd; /* vital product data */
@ -64,6 +64,9 @@
#define LPFC_MIN_MRQ_POST 512
#define LPFC_MAX_MRQ_POST 2048

#define LPFC_MAX_NVME_INFO_TMP_LEN 100
#define LPFC_NVME_INFO_MORE_STR "\nCould be more info...\n"

/*
* Write key size should be multiple of 4. If write key is changed
* make sure that library write key is also changed.
@ -158,14 +161,15 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
char *statep;
int i;
int len = 0;
char tmp[LPFC_MAX_NVME_INFO_TMP_LEN] = {0};

if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
len += snprintf(buf, PAGE_SIZE, "NVME Disabled\n");
len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n");
return len;
}
if (phba->nvmet_support) {
if (!phba->targetport) {
len = snprintf(buf, PAGE_SIZE,
len = scnprintf(buf, PAGE_SIZE,
"NVME Target: x%llx is not allocated\n",
wwn_to_u64(vport->fc_portname.u.wwn));
return len;
@ -175,135 +179,169 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
statep = "REGISTERED";
else
statep = "INIT";
len += snprintf(buf + len, PAGE_SIZE - len,
"NVME Target Enabled State %s\n",
statep);
len += snprintf(buf + len, PAGE_SIZE - len,
"%s%d WWPN x%llx WWNN x%llx DID x%06x\n",
"NVME Target: lpfc",
phba->brd_no,
wwn_to_u64(vport->fc_portname.u.wwn),
wwn_to_u64(vport->fc_nodename.u.wwn),
phba->targetport->port_id);
scnprintf(tmp, sizeof(tmp),
"NVME Target Enabled State %s\n",
statep);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;

scnprintf(tmp, sizeof(tmp),
"%s%d WWPN x%llx WWNN x%llx DID x%06x\n",
"NVME Target: lpfc",
phba->brd_no,
wwn_to_u64(vport->fc_portname.u.wwn),
wwn_to_u64(vport->fc_nodename.u.wwn),
phba->targetport->port_id);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;

if (strlcat(buf, "\nNVME Target: Statistics\n", PAGE_SIZE)
>= PAGE_SIZE)
goto buffer_done;

len += snprintf(buf + len, PAGE_SIZE - len,
"\nNVME Target: Statistics\n");
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
len += snprintf(buf+len, PAGE_SIZE-len,
"LS: Rcv %08x Drop %08x Abort %08x\n",
atomic_read(&tgtp->rcv_ls_req_in),
atomic_read(&tgtp->rcv_ls_req_drop),
atomic_read(&tgtp->xmt_ls_abort));
scnprintf(tmp, sizeof(tmp),
"LS: Rcv %08x Drop %08x Abort %08x\n",
atomic_read(&tgtp->rcv_ls_req_in),
atomic_read(&tgtp->rcv_ls_req_drop),
atomic_read(&tgtp->xmt_ls_abort));
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;

if (atomic_read(&tgtp->rcv_ls_req_in) !=
atomic_read(&tgtp->rcv_ls_req_out)) {
len += snprintf(buf+len, PAGE_SIZE-len,
"Rcv LS: in %08x != out %08x\n",
atomic_read(&tgtp->rcv_ls_req_in),
atomic_read(&tgtp->rcv_ls_req_out));
scnprintf(tmp, sizeof(tmp),
"Rcv LS: in %08x != out %08x\n",
atomic_read(&tgtp->rcv_ls_req_in),
atomic_read(&tgtp->rcv_ls_req_out));
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;
}

len += snprintf(buf+len, PAGE_SIZE-len,
"LS: Xmt %08x Drop %08x Cmpl %08x\n",
atomic_read(&tgtp->xmt_ls_rsp),
atomic_read(&tgtp->xmt_ls_drop),
atomic_read(&tgtp->xmt_ls_rsp_cmpl));
scnprintf(tmp, sizeof(tmp),
"LS: Xmt %08x Drop %08x Cmpl %08x\n",
atomic_read(&tgtp->xmt_ls_rsp),
atomic_read(&tgtp->xmt_ls_drop),
atomic_read(&tgtp->xmt_ls_rsp_cmpl));
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;

len += snprintf(buf + len, PAGE_SIZE - len,
"LS: RSP Abort %08x xb %08x Err %08x\n",
atomic_read(&tgtp->xmt_ls_rsp_aborted),
atomic_read(&tgtp->xmt_ls_rsp_xb_set),
atomic_read(&tgtp->xmt_ls_rsp_error));
scnprintf(tmp, sizeof(tmp),
"LS: RSP Abort %08x xb %08x Err %08x\n",
atomic_read(&tgtp->xmt_ls_rsp_aborted),
atomic_read(&tgtp->xmt_ls_rsp_xb_set),
atomic_read(&tgtp->xmt_ls_rsp_error));
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;

len += snprintf(buf+len, PAGE_SIZE-len,
"FCP: Rcv %08x Defer %08x Release %08x "
"Drop %08x\n",
atomic_read(&tgtp->rcv_fcp_cmd_in),
atomic_read(&tgtp->rcv_fcp_cmd_defer),
atomic_read(&tgtp->xmt_fcp_release),
atomic_read(&tgtp->rcv_fcp_cmd_drop));
scnprintf(tmp, sizeof(tmp),
"FCP: Rcv %08x Defer %08x Release %08x "
"Drop %08x\n",
atomic_read(&tgtp->rcv_fcp_cmd_in),
atomic_read(&tgtp->rcv_fcp_cmd_defer),
atomic_read(&tgtp->xmt_fcp_release),
atomic_read(&tgtp->rcv_fcp_cmd_drop));
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;

if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
atomic_read(&tgtp->rcv_fcp_cmd_out)) {
len += snprintf(buf+len, PAGE_SIZE-len,
"Rcv FCP: in %08x != out %08x\n",
atomic_read(&tgtp->rcv_fcp_cmd_in),
atomic_read(&tgtp->rcv_fcp_cmd_out));
scnprintf(tmp, sizeof(tmp),
"Rcv FCP: in %08x != out %08x\n",
atomic_read(&tgtp->rcv_fcp_cmd_in),
atomic_read(&tgtp->rcv_fcp_cmd_out));
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;
}

len += snprintf(buf+len, PAGE_SIZE-len,
"FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x "
"drop %08x\n",
atomic_read(&tgtp->xmt_fcp_read),
atomic_read(&tgtp->xmt_fcp_read_rsp),
atomic_read(&tgtp->xmt_fcp_write),
atomic_read(&tgtp->xmt_fcp_rsp),
atomic_read(&tgtp->xmt_fcp_drop));
scnprintf(tmp, sizeof(tmp),
"FCP Rsp: RD %08x rsp %08x WR %08x rsp %08x "
"drop %08x\n",
atomic_read(&tgtp->xmt_fcp_read),
atomic_read(&tgtp->xmt_fcp_read_rsp),
atomic_read(&tgtp->xmt_fcp_write),
atomic_read(&tgtp->xmt_fcp_rsp),
atomic_read(&tgtp->xmt_fcp_drop));
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;

len += snprintf(buf+len, PAGE_SIZE-len,
"FCP Rsp Cmpl: %08x err %08x drop %08x\n",
atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
atomic_read(&tgtp->xmt_fcp_rsp_error),
atomic_read(&tgtp->xmt_fcp_rsp_drop));
scnprintf(tmp, sizeof(tmp),
"FCP Rsp Cmpl: %08x err %08x drop %08x\n",
atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
atomic_read(&tgtp->xmt_fcp_rsp_error),
atomic_read(&tgtp->xmt_fcp_rsp_drop));
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;

len += snprintf(buf+len, PAGE_SIZE-len,
"FCP Rsp Abort: %08x xb %08x xricqe %08x\n",
atomic_read(&tgtp->xmt_fcp_rsp_aborted),
atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
scnprintf(tmp, sizeof(tmp),
"FCP Rsp Abort: %08x xb %08x xricqe %08x\n",
atomic_read(&tgtp->xmt_fcp_rsp_aborted),
atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;

len += snprintf(buf + len, PAGE_SIZE - len,
"ABORT: Xmt %08x Cmpl %08x\n",
atomic_read(&tgtp->xmt_fcp_abort),
atomic_read(&tgtp->xmt_fcp_abort_cmpl));
scnprintf(tmp, sizeof(tmp),
"ABORT: Xmt %08x Cmpl %08x\n",
atomic_read(&tgtp->xmt_fcp_abort),
atomic_read(&tgtp->xmt_fcp_abort_cmpl));
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;

len += snprintf(buf + len, PAGE_SIZE - len,
"ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x",
atomic_read(&tgtp->xmt_abort_sol),
atomic_read(&tgtp->xmt_abort_unsol),
atomic_read(&tgtp->xmt_abort_rsp),
atomic_read(&tgtp->xmt_abort_rsp_error));
scnprintf(tmp, sizeof(tmp),
"ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x\n",
atomic_read(&tgtp->xmt_abort_sol),
atomic_read(&tgtp->xmt_abort_unsol),
atomic_read(&tgtp->xmt_abort_rsp),
atomic_read(&tgtp->xmt_abort_rsp_error));
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;

len += snprintf(buf + len, PAGE_SIZE - len,
"DELAY: ctx %08x fod %08x wqfull %08x\n",
atomic_read(&tgtp->defer_ctx),
atomic_read(&tgtp->defer_fod),
atomic_read(&tgtp->defer_wqfull));
scnprintf(tmp, sizeof(tmp),
"DELAY: ctx %08x fod %08x wqfull %08x\n",
atomic_read(&tgtp->defer_ctx),
atomic_read(&tgtp->defer_fod),
atomic_read(&tgtp->defer_wqfull));
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;

/* Calculate outstanding IOs */
tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
tot += atomic_read(&tgtp->xmt_fcp_release);
tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;

len += snprintf(buf + len, PAGE_SIZE - len,
"IO_CTX: %08x WAIT: cur %08x tot %08x\n"
"CTX Outstanding %08llx\n",
phba->sli4_hba.nvmet_xri_cnt,
phba->sli4_hba.nvmet_io_wait_cnt,
phba->sli4_hba.nvmet_io_wait_total,
tot);

len += snprintf(buf+len, PAGE_SIZE-len, "\n");
return len;
scnprintf(tmp, sizeof(tmp),
"IO_CTX: %08x WAIT: cur %08x tot %08x\n"
"CTX Outstanding %08llx\n\n",
phba->sli4_hba.nvmet_xri_cnt,
phba->sli4_hba.nvmet_io_wait_cnt,
phba->sli4_hba.nvmet_io_wait_total,
tot);
strlcat(buf, tmp, PAGE_SIZE);
goto buffer_done;
}

localport = vport->localport;
if (!localport) {
len = snprintf(buf, PAGE_SIZE,
len = scnprintf(buf, PAGE_SIZE,
"NVME Initiator x%llx is not allocated\n",
wwn_to_u64(vport->fc_portname.u.wwn));
return len;
}
lport = (struct lpfc_nvme_lport *)localport->private;
len = snprintf(buf, PAGE_SIZE, "NVME Initiator Enabled\n");
if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;

spin_lock_irq(shost->host_lock);
len += snprintf(buf + len, PAGE_SIZE - len,
"XRI Dist lpfc%d Total %d NVME %d SCSI %d ELS %d\n",
phba->brd_no,
phba->sli4_hba.max_cfg_param.max_xri,
phba->sli4_hba.nvme_xri_max,
phba->sli4_hba.scsi_xri_max,
lpfc_sli4_get_els_iocb_cnt(phba));
rcu_read_lock();
scnprintf(tmp, sizeof(tmp),
"XRI Dist lpfc%d Total %d NVME %d SCSI %d ELS %d\n",
phba->brd_no,
phba->sli4_hba.max_cfg_param.max_xri,
phba->sli4_hba.nvme_xri_max,
phba->sli4_hba.scsi_xri_max,
lpfc_sli4_get_els_iocb_cnt(phba));
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;

/* Port state is only one of two values for now. */
if (localport->port_id)
@ -311,13 +349,15 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
else
statep = "UNKNOWN ";

len += snprintf(buf + len, PAGE_SIZE - len,
"%s%d WWPN x%llx WWNN x%llx DID x%06x %s\n",
"NVME LPORT lpfc",
phba->brd_no,
wwn_to_u64(vport->fc_portname.u.wwn),
wwn_to_u64(vport->fc_nodename.u.wwn),
localport->port_id, statep);
scnprintf(tmp, sizeof(tmp),
"%s%d WWPN x%llx WWNN x%llx DID x%06x %s\n",
"NVME LPORT lpfc",
phba->brd_no,
wwn_to_u64(vport->fc_portname.u.wwn),
wwn_to_u64(vport->fc_nodename.u.wwn),
localport->port_id, statep);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;

list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
rport = lpfc_ndlp_get_nrport(ndlp);
@ -343,56 +383,77 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
}

/* Tab in to show lport ownership. */
len += snprintf(buf + len, PAGE_SIZE - len,
"NVME RPORT ");
if (phba->brd_no >= 10)
len += snprintf(buf + len, PAGE_SIZE - len, " ");
if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;
if (phba->brd_no >= 10) {
if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;
}

len += snprintf(buf + len, PAGE_SIZE - len, "WWPN x%llx ",
nrport->port_name);
len += snprintf(buf + len, PAGE_SIZE - len, "WWNN x%llx ",
nrport->node_name);
len += snprintf(buf + len, PAGE_SIZE - len, "DID x%06x ",
nrport->port_id);
scnprintf(tmp, sizeof(tmp), "WWPN x%llx ",
nrport->port_name);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;

scnprintf(tmp, sizeof(tmp), "WWNN x%llx ",
nrport->node_name);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;

scnprintf(tmp, sizeof(tmp), "DID x%06x ",
nrport->port_id);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;

/* An NVME rport can have multiple roles. */
if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR)
len += snprintf(buf + len, PAGE_SIZE - len,
"INITIATOR ");
if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET)
len += snprintf(buf + len, PAGE_SIZE - len,
"TARGET ");
if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY)
len += snprintf(buf + len, PAGE_SIZE - len,
"DISCSRVC ");
if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) {
if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;
}
if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) {
if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;
}
if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) {
if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;
}
if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
FC_PORT_ROLE_NVME_TARGET |
FC_PORT_ROLE_NVME_DISCOVERY))
len += snprintf(buf + len, PAGE_SIZE - len,
"UNKNOWN ROLE x%x",
nrport->port_role);
FC_PORT_ROLE_NVME_DISCOVERY)) {
scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x",
nrport->port_role);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;
}

len += snprintf(buf + len, PAGE_SIZE - len, "%s ", statep);
/* Terminate the string. */
len += snprintf(buf + len, PAGE_SIZE - len, "\n");
scnprintf(tmp, sizeof(tmp), "%s\n", statep);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;
}
spin_unlock_irq(shost->host_lock);
rcu_read_unlock();

if (!lport)
return len;
goto buffer_done;

len += snprintf(buf + len, PAGE_SIZE - len, "\nNVME Statistics\n");
len += snprintf(buf+len, PAGE_SIZE-len,
"LS: Xmt %010x Cmpl %010x Abort %08x\n",
atomic_read(&lport->fc4NvmeLsRequests),
atomic_read(&lport->fc4NvmeLsCmpls),
atomic_read(&lport->xmt_ls_abort));
if (strlcat(buf, "\nNVME Statistics\n", PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;

len += snprintf(buf + len, PAGE_SIZE - len,
"LS XMIT: Err %08x CMPL: xb %08x Err %08x\n",
atomic_read(&lport->xmt_ls_err),
atomic_read(&lport->cmpl_ls_xb),
atomic_read(&lport->cmpl_ls_err));
scnprintf(tmp, sizeof(tmp),
"LS: Xmt %010x Cmpl %010x Abort %08x\n",
atomic_read(&lport->fc4NvmeLsRequests),
atomic_read(&lport->fc4NvmeLsCmpls),
atomic_read(&lport->xmt_ls_abort));
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;

scnprintf(tmp, sizeof(tmp),
"LS XMIT: Err %08x CMPL: xb %08x Err %08x\n",
atomic_read(&lport->xmt_ls_err),
atomic_read(&lport->cmpl_ls_xb),
atomic_read(&lport->cmpl_ls_err));
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;

totin = 0;
totout = 0;
@ -405,25 +466,46 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
data3 = atomic_read(&cstat->fc4NvmeControlRequests);
totout += (data1 + data2 + data3);
}
len += snprintf(buf+len, PAGE_SIZE-len,
"Total FCP Cmpl %016llx Issue %016llx "
"OutIO %016llx\n",
totin, totout, totout - totin);
scnprintf(tmp, sizeof(tmp),
"Total FCP Cmpl %016llx Issue %016llx "
"OutIO %016llx\n",
totin, totout, totout - totin);
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;

len += snprintf(buf+len, PAGE_SIZE-len,
" abort %08x noxri %08x nondlp %08x qdepth %08x "
"wqerr %08x err %08x\n",
atomic_read(&lport->xmt_fcp_abort),
atomic_read(&lport->xmt_fcp_noxri),
atomic_read(&lport->xmt_fcp_bad_ndlp),
atomic_read(&lport->xmt_fcp_qdepth),
atomic_read(&lport->xmt_fcp_err),
atomic_read(&lport->xmt_fcp_wqerr));
scnprintf(tmp, sizeof(tmp),
"\tabort %08x noxri %08x nondlp %08x qdepth %08x "
"wqerr %08x err %08x\n",
atomic_read(&lport->xmt_fcp_abort),
atomic_read(&lport->xmt_fcp_noxri),
atomic_read(&lport->xmt_fcp_bad_ndlp),
atomic_read(&lport->xmt_fcp_qdepth),
atomic_read(&lport->xmt_fcp_err),
atomic_read(&lport->xmt_fcp_wqerr));
if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
goto buffer_done;

scnprintf(tmp, sizeof(tmp),
"FCP CMPL: xb %08x Err %08x\n",
atomic_read(&lport->cmpl_fcp_xb),
atomic_read(&lport->cmpl_fcp_err));
strlcat(buf, tmp, PAGE_SIZE);

buffer_done:
len = strnlen(buf, PAGE_SIZE);

if (unlikely(len >= (PAGE_SIZE - 1))) {
lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
"6314 Catching potential buffer "
"overflow > PAGE_SIZE = %lu bytes\n",
PAGE_SIZE);
strlcpy(buf + PAGE_SIZE - 1 -
strnlen(LPFC_NVME_INFO_MORE_STR, PAGE_SIZE - 1),
LPFC_NVME_INFO_MORE_STR,
strnlen(LPFC_NVME_INFO_MORE_STR, PAGE_SIZE - 1)
+ 1);
}

len += snprintf(buf + len, PAGE_SIZE - len,
"FCP CMPL: xb %08x Err %08x\n",
atomic_read(&lport->cmpl_fcp_xb),
atomic_read(&lport->cmpl_fcp_err));
return len;
}

@ -5836,6 +5918,24 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
break;
}
} else if (lpfc_is_link_up(phba) && (phba->hba_flag & HBA_FCOE_MODE)) {
switch (phba->fc_linkspeed) {
case LPFC_ASYNC_LINK_SPEED_10GBPS:
fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
break;
case LPFC_ASYNC_LINK_SPEED_25GBPS:
fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
break;
case LPFC_ASYNC_LINK_SPEED_40GBPS:
fc_host_speed(shost) = FC_PORTSPEED_40GBIT;
break;
case LPFC_ASYNC_LINK_SPEED_100GBPS:
fc_host_speed(shost) = FC_PORTSPEED_100GBIT;
break;
default:
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
break;
}
} else
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;

@ -5891,7 +5991,6 @@ lpfc_get_stats(struct Scsi_Host *shost)
struct lpfc_lnk_stat * lso = &psli->lnk_stat_offsets;
LPFC_MBOXQ_t *pmboxq;
MAILBOX_t *pmb;
unsigned long seconds;
int rc = 0;

/*
@ -5992,12 +6091,7 @@ lpfc_get_stats(struct Scsi_Host *shost)

hs->dumped_frames = -1;

seconds = get_seconds();
if (seconds < psli->stats_start)
hs->seconds_since_last_reset = seconds +
((unsigned long)-1 - psli->stats_start);
else
hs->seconds_since_last_reset = seconds - psli->stats_start;
hs->seconds_since_last_reset = ktime_get_seconds() - psli->stats_start;

mempool_free(pmboxq, phba->mbox_mem_pool);

@ -6076,7 +6170,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
else
lso->link_events = (phba->fc_eventTag >> 1);

psli->stats_start = get_seconds();
psli->stats_start = ktime_get_seconds();

mempool_free(pmboxq, phba->mbox_mem_pool);

@ -6454,6 +6548,8 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
phba->cfg_auto_imax = 0;
phba->initial_imax = phba->cfg_fcp_imax;

phba->cfg_enable_pbde = 0;

/* A value of 0 means use the number of CPUs found in the system */
if (phba->cfg_fcp_io_channel == 0)
phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu;
@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2009-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2010-2015 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@ -469,7 +469,6 @@ int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
void lpfc_start_fdiscs(struct lpfc_hba *phba);
struct lpfc_vport *lpfc_find_vport_by_vpid(struct lpfc_hba *, uint16_t);
struct lpfc_sglq *__lpfc_get_active_sglq(struct lpfc_hba *, uint16_t);
#define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
#define HBA_EVENT_RSCN 5
#define HBA_EVENT_LINK_UP 2
#define HBA_EVENT_LINK_DOWN 3
@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2007-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2013 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@ -150,6 +150,9 @@ struct lpfc_node_rrq {
unsigned long rrq_stop_time;
};

#define lpfc_ndlp_check_qdepth(phba, ndlp) \
(ndlp->cmd_qdepth < phba->sli4_hba.max_cfg_param.max_xri)

/* Defines for nlp_flag (uint32) */
#define NLP_IGNR_REG_CMPL 0x00000001 /* Rcvd rscn before we cmpl reg login */
#define NLP_REG_LOGIN_SEND 0x00000002 /* sent reglogin to adapter */
@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@ -5640,8 +5640,9 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
" mbx status x%x\n",
shdr_status, shdr_add_status, mb->mbxStatus);

if (mb->mbxStatus && !(shdr_status &&
shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)) {
if ((mb->mbxStatus != MBX_SUCCESS) || shdr_status ||
(shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE) ||
(shdr_add_status == ADD_STATUS_INVALID_REQUEST)) {
mempool_free(pmb, phba->mbox_mem_pool);
goto error;
}
@ -5661,6 +5662,7 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
lcb_res = (struct fc_lcb_res_frame *)
(((struct lpfc_dmabuf *)elsiocb->context2)->virt);

memset(lcb_res, 0, sizeof(struct fc_lcb_res_frame));
icmd = &elsiocb->iocb;
icmd->ulpContext = lcb_context->rx_id;
icmd->unsli3.rcvsli3.ox_id = lcb_context->ox_id;
@ -5669,7 +5671,9 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
*((uint32_t *)(pcmd)) = ELS_CMD_ACC;
lcb_res->lcb_sub_command = lcb_context->sub_command;
lcb_res->lcb_type = lcb_context->type;
lcb_res->capability = lcb_context->capability;
lcb_res->lcb_frequency = lcb_context->frequency;
lcb_res->lcb_duration = lcb_context->duration;
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitACC++;
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
@ -5712,6 +5716,7 @@ lpfc_sli4_set_beacon(struct lpfc_vport *vport,
uint32_t beacon_state)
{
struct lpfc_hba *phba = vport->phba;
union lpfc_sli4_cfg_shdr *cfg_shdr;
LPFC_MBOXQ_t *mbox = NULL;
uint32_t len;
int rc;
@ -5720,6 +5725,7 @@ lpfc_sli4_set_beacon(struct lpfc_vport *vport,
if (!mbox)
return 1;

cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
len = sizeof(struct lpfc_mbx_set_beacon_config) -
sizeof(struct lpfc_sli4_cfg_mhdr);
lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
@ -5732,8 +5738,40 @@ lpfc_sli4_set_beacon(struct lpfc_vport *vport,
phba->sli4_hba.physical_port);
bf_set(lpfc_mbx_set_beacon_state, &mbox->u.mqe.un.beacon_config,
beacon_state);
bf_set(lpfc_mbx_set_beacon_port_type, &mbox->u.mqe.un.beacon_config, 1);
bf_set(lpfc_mbx_set_beacon_duration, &mbox->u.mqe.un.beacon_config, 0);
mbox->u.mqe.un.beacon_config.word5 = 0; /* Reserved */

/*
* Check bv1s bit before issuing the mailbox
* if bv1s == 1, LCB V1 supported
* else, LCB V0 supported
*/

if (phba->sli4_hba.pc_sli4_params.bv1s) {
/* COMMON_SET_BEACON_CONFIG_V1 */
cfg_shdr->request.word9 = BEACON_VERSION_V1;
lcb_context->capability |= LCB_CAPABILITY_DURATION;
bf_set(lpfc_mbx_set_beacon_port_type,
&mbox->u.mqe.un.beacon_config, 0);
bf_set(lpfc_mbx_set_beacon_duration_v1,
&mbox->u.mqe.un.beacon_config,
be16_to_cpu(lcb_context->duration));
} else {
/* COMMON_SET_BEACON_CONFIG_V0 */
if (be16_to_cpu(lcb_context->duration) != 0) {
mempool_free(mbox, phba->mbox_mem_pool);
return 1;
}
cfg_shdr->request.word9 = BEACON_VERSION_V0;
lcb_context->capability &= ~(LCB_CAPABILITY_DURATION);
bf_set(lpfc_mbx_set_beacon_state,
&mbox->u.mqe.un.beacon_config, beacon_state);
bf_set(lpfc_mbx_set_beacon_port_type,
&mbox->u.mqe.un.beacon_config, 1);
bf_set(lpfc_mbx_set_beacon_duration,
&mbox->u.mqe.un.beacon_config,
be16_to_cpu(lcb_context->duration));
}

rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
mempool_free(mbox, phba->mbox_mem_pool);
@ -5784,24 +5822,16 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
beacon->lcb_frequency,
be16_to_cpu(beacon->lcb_duration));

if (phba->sli_rev < LPFC_SLI_REV4 ||
(bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
LPFC_SLI_INTF_IF_TYPE_2)) {
rjt_err = LSRJT_CMD_UNSUPPORTED;
goto rjt;
}

if (phba->hba_flag & HBA_FCOE_MODE) {
rjt_err = LSRJT_CMD_UNSUPPORTED;
goto rjt;
}
if (beacon->lcb_sub_command != LPFC_LCB_ON &&
beacon->lcb_sub_command != LPFC_LCB_OFF) {
rjt_err = LSRJT_CMD_UNSUPPORTED;
goto rjt;
}
if (beacon->lcb_sub_command == LPFC_LCB_ON &&
be16_to_cpu(beacon->lcb_duration) != 0) {

if (phba->sli_rev < LPFC_SLI_REV4 ||
phba->hba_flag & HBA_FCOE_MODE ||
(bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) <
LPFC_SLI_INTF_IF_TYPE_2)) {
rjt_err = LSRJT_CMD_UNSUPPORTED;
goto rjt;
}
@ -5814,8 +5844,10 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,

state = (beacon->lcb_sub_command == LPFC_LCB_ON) ? 1 : 0;
lcb_context->sub_command = beacon->lcb_sub_command;
lcb_context->capability = 0;
lcb_context->type = beacon->lcb_type;
lcb_context->frequency = beacon->lcb_frequency;
lcb_context->duration = beacon->lcb_duration;
lcb_context->ox_id = cmdiocb->iocb.unsli3.rcvsli3.ox_id;
lcb_context->rx_id = cmdiocb->iocb.ulpContext;
lcb_context->ndlp = lpfc_nlp_get(ndlp);
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *

@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -1065,14 +1065,17 @@ typedef struct _ELS_PKT { /* Structure is in Big Endian format */
struct fc_lcb_request_frame {
uint32_t lcb_command; /* ELS command opcode (0x81) */
uint8_t lcb_sub_command;/* LCB Payload Word 1, bit 24:31 */
#define LPFC_LCB_ON 0x1
#define LPFC_LCB_OFF 0x2
uint8_t reserved[3];

#define LPFC_LCB_ON 0x1
#define LPFC_LCB_OFF 0x2
uint8_t reserved[2];
uint8_t capability; /* LCB Payload Word 1, bit 0:7 */
uint8_t lcb_type; /* LCB Payload Word 2, bit 24:31 */
#define LPFC_LCB_GREEN 0x1
#define LPFC_LCB_AMBER 0x2
#define LPFC_LCB_GREEN 0x1
#define LPFC_LCB_AMBER 0x2
uint8_t lcb_frequency; /* LCB Payload Word 2, bit 16:23 */
#define LCB_CAPABILITY_DURATION 1
#define BEACON_VERSION_V1 1
#define BEACON_VERSION_V0 0
uint16_t lcb_duration; /* LCB Payload Word 2, bit 15:0 */
};

@@ -1082,7 +1085,8 @@ struct fc_lcb_request_frame {
struct fc_lcb_res_frame {
uint32_t lcb_ls_acc; /* Acceptance of LCB request (0x02) */
uint8_t lcb_sub_command;/* LCB Payload Word 1, bit 24:31 */
uint8_t reserved[3];
uint8_t reserved[2];
uint8_t capability; /* LCB Payload Word 1, bit 0:7 */
uint8_t lcb_type; /* LCB Payload Word 2, bit 24:31 */
uint8_t lcb_frequency; /* LCB Payload Word 2, bit 16:23 */
uint16_t lcb_duration; /* LCB Payload Word 2, bit 15:0 */
@@ -1790,9 +1790,12 @@ struct lpfc_mbx_set_beacon_config {
#define lpfc_mbx_set_beacon_duration_SHIFT 16
#define lpfc_mbx_set_beacon_duration_MASK 0x000000FF
#define lpfc_mbx_set_beacon_duration_WORD word4
#define lpfc_mbx_set_beacon_status_duration_SHIFT 24
#define lpfc_mbx_set_beacon_status_duration_MASK 0x000000FF
#define lpfc_mbx_set_beacon_status_duration_WORD word4

/* COMMON_SET_BEACON_CONFIG_V1 */
#define lpfc_mbx_set_beacon_duration_v1_SHIFT 16
#define lpfc_mbx_set_beacon_duration_v1_MASK 0x0000FFFF
#define lpfc_mbx_set_beacon_duration_v1_WORD word4
uint32_t word5; /* RESERVED */
};

struct lpfc_id_range {
@@ -2243,6 +2246,7 @@ struct lpfc_mbx_redisc_fcf_tbl {
*/
#define ADD_STATUS_OPERATION_ALREADY_ACTIVE 0x67
#define ADD_STATUS_FW_NOT_SUPPORTED 0xEB
#define ADD_STATUS_INVALID_REQUEST 0x4B

struct lpfc_mbx_sli4_config {
struct mbox_header header;
@@ -3392,7 +3396,41 @@ struct lpfc_sli4_parameters {
#define cfg_nosr_SHIFT 9
#define cfg_nosr_MASK 0x00000001
#define cfg_nosr_WORD word19
#define LPFC_NODELAY_MAX_IO 32

#define cfg_bv1s_SHIFT 10
#define cfg_bv1s_MASK 0x00000001
#define cfg_bv1s_WORD word19

uint32_t word20;
#define cfg_max_tow_xri_SHIFT 0
#define cfg_max_tow_xri_MASK 0x0000ffff
#define cfg_max_tow_xri_WORD word20

uint32_t word21; /* RESERVED */
uint32_t word22; /* RESERVED */
uint32_t word23; /* RESERVED */

uint32_t word24;
#define cfg_frag_field_offset_SHIFT 0
#define cfg_frag_field_offset_MASK 0x0000ffff
#define cfg_frag_field_offset_WORD word24

#define cfg_frag_field_size_SHIFT 16
#define cfg_frag_field_size_MASK 0x0000ffff
#define cfg_frag_field_size_WORD word24

uint32_t word25;
#define cfg_sgl_field_offset_SHIFT 0
#define cfg_sgl_field_offset_MASK 0x0000ffff
#define cfg_sgl_field_offset_WORD word25

#define cfg_sgl_field_size_SHIFT 16
#define cfg_sgl_field_size_MASK 0x0000ffff
#define cfg_sgl_field_size_WORD word25

uint32_t word26; /* Chain SGE initial value LOW */
uint32_t word27; /* Chain SGE initial value HIGH */
#define LPFC_NODELAY_MAX_IO 32
};

#define LPFC_SET_UE_RECOVERY 0x10
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -10387,6 +10387,11 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl ||
!nvmet_xri_cmpl) {
if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
if (!nvmet_xri_cmpl)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"6424 NVMET XRI exchange busy "
"wait time: %d seconds.\n",
wait_time/1000);
if (!nvme_xri_cmpl)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"6100 NVME XRI exchange busy "
@@ -10639,6 +10644,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
sli4_params->eqav = bf_get(cfg_eqav, mbx_sli4_parameters);
sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters);
sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters);
sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
mbx_sli4_parameters);
sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
@@ -10668,18 +10674,10 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
}

/* Only embed PBDE for if_type 6 */
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
LPFC_SLI_INTF_IF_TYPE_6) {
phba->fcp_embed_pbde = 1;
phba->nvme_embed_pbde = 1;
}

/* PBDE support requires xib be set */
if (!bf_get(cfg_xib, mbx_sli4_parameters)) {
phba->fcp_embed_pbde = 0;
phba->nvme_embed_pbde = 0;
}
/* Only embed PBDE for if_type 6, PBDE support requires xib be set */
if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters)))
phba->cfg_enable_pbde = 0;

/*
* To support Suppress Response feature we must satisfy 3 conditions.
@@ -10713,10 +10711,10 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
phba->fcp_embed_io = 0;

lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
"6422 XIB %d: FCP %d %d NVME %d %d %d %d\n",
"6422 XIB %d PBDE %d: FCP %d NVME %d %d %d\n",
bf_get(cfg_xib, mbx_sli4_parameters),
phba->fcp_embed_pbde, phba->fcp_embed_io,
phba->nvme_support, phba->nvme_embed_pbde,
phba->cfg_enable_pbde,
phba->fcp_embed_io, phba->nvme_support,
phba->cfg_nvme_embed_cmd, phba->cfg_suppress_rsp);

if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
@@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2009 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *

@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *

@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2014 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *

@@ -1,8 +1,8 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2010 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *

@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@@ -1062,6 +1062,9 @@ lpfc_rcv_logo_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg;

/* Retrieve RPI from LOGO IOCB. RPI is used for CMD_ABORT_XRI_CN */
if (vport->phba->sli_rev == LPFC_SLI_REV3)
ndlp->nlp_rpi = cmdiocb->iocb.ulpIoTag;
/* software abort outstanding PLOGI */
lpfc_els_abort(vport->phba, ndlp);

@@ -1982,12 +1985,6 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (bf_get_be32(prli_disc, nvpr))
ndlp->nlp_type |= NLP_NVME_DISCOVERY;

/* This node is an NVME target. Adjust the command
* queue depth on this node to not exceed the available
* xris.
*/
ndlp->cmd_qdepth = phba->sli4_hba.nvme_xri_max;

/*
* If prli_fba is set, the Target supports FirstBurst.
* If prli_fb_sz is 0, the FirstBurst size is unlimited,
@ -1135,9 +1135,6 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
|
||||
else
|
||||
lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
|
||||
|
||||
if (ndlp && NLP_CHK_NODE_ACT(ndlp))
|
||||
atomic_dec(&ndlp->cmd_pending);
|
||||
|
||||
/* Update stats and complete the IO. There is
|
||||
* no need for dma unprep because the nvme_transport
|
||||
* owns the dma address.
|
||||
@ -1279,6 +1276,8 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
|
||||
/* Word 9 */
|
||||
bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);
|
||||
|
||||
/* Words 13 14 15 are for PBDE support */
|
||||
|
||||
pwqeq->vport = vport;
|
||||
return 0;
|
||||
}
|
||||
@ -1378,7 +1377,7 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
|
||||
data_sg = sg_next(data_sg);
|
||||
sgl++;
|
||||
}
|
||||
if (phba->nvme_embed_pbde) {
|
||||
if (phba->cfg_enable_pbde) {
|
||||
/* Use PBDE support for first SGL only, offset == 0 */
|
||||
/* Words 13-15 */
|
||||
bde = (struct ulp_bde64 *)
|
||||
@ -1394,10 +1393,8 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
|
||||
memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
|
||||
bf_set(wqe_pbde, &wqe->generic.wqe_com, 0);
|
||||
}
|
||||
} else {
|
||||
bf_set(wqe_pbde, &wqe->generic.wqe_com, 0);
|
||||
memset(&wqe->words[13], 0, (sizeof(uint32_t) * 3));
|
||||
|
||||
} else {
|
||||
/* For this clause to be valid, the payload_length
|
||||
* and sg_cnt must zero.
|
||||
*/
|
||||
@ -1546,17 +1543,19 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
|
||||
/* The node is shared with FCP IO, make sure the IO pending count does
|
||||
* not exceed the programmed depth.
|
||||
*/
|
||||
if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
|
||||
!expedite) {
|
||||
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
|
||||
"6174 Fail IO, ndlp qdepth exceeded: "
|
||||
"idx %d DID %x pend %d qdepth %d\n",
|
||||
lpfc_queue_info->index, ndlp->nlp_DID,
|
||||
atomic_read(&ndlp->cmd_pending),
|
||||
ndlp->cmd_qdepth);
|
||||
atomic_inc(&lport->xmt_fcp_qdepth);
|
||||
ret = -EBUSY;
|
||||
goto out_fail;
|
||||
if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
|
||||
if ((atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) &&
|
||||
!expedite) {
|
||||
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
|
||||
"6174 Fail IO, ndlp qdepth exceeded: "
|
||||
"idx %d DID %x pend %d qdepth %d\n",
|
||||
lpfc_queue_info->index, ndlp->nlp_DID,
|
||||
atomic_read(&ndlp->cmd_pending),
|
||||
ndlp->cmd_qdepth);
|
||||
atomic_inc(&lport->xmt_fcp_qdepth);
|
||||
ret = -EBUSY;
|
||||
goto out_fail;
|
||||
}
|
||||
}
|
||||
|
||||
lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp, expedite);
|
||||
@ -1614,8 +1613,6 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
|
||||
goto out_free_nvme_buf;
|
||||
}
|
||||
|
||||
atomic_inc(&ndlp->cmd_pending);
|
||||
|
||||
lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
|
||||
lpfc_ncmd->cur_iocbq.sli4_xritag,
|
||||
lpfc_queue_info->index, ndlp->nlp_DID);
|
||||
@ -1623,7 +1620,6 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
|
||||
ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq);
|
||||
if (ret) {
|
||||
atomic_inc(&lport->xmt_fcp_wqerr);
|
||||
atomic_dec(&ndlp->cmd_pending);
|
||||
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
|
||||
"6113 Fail IO, Could not issue WQE err %x "
|
||||
"sid: x%x did: x%x oxid: x%x\n",
|
||||
@ -2378,6 +2374,11 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
|
||||
lpfc_ncmd = lpfc_nvme_buf(phba);
|
||||
}
|
||||
spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag);
|
||||
|
||||
if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_ncmd) {
|
||||
atomic_inc(&ndlp->cmd_pending);
|
||||
lpfc_ncmd->flags |= LPFC_BUMP_QDEPTH;
|
||||
}
|
||||
return lpfc_ncmd;
|
||||
}
|
||||
|
||||
@ -2396,7 +2397,13 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
|
||||
{
|
||||
unsigned long iflag = 0;
|
||||
|
||||
if ((lpfc_ncmd->flags & LPFC_BUMP_QDEPTH) && lpfc_ncmd->ndlp)
|
||||
atomic_dec(&lpfc_ncmd->ndlp->cmd_pending);
|
||||
|
||||
lpfc_ncmd->nonsg_phys = 0;
|
||||
lpfc_ncmd->ndlp = NULL;
|
||||
lpfc_ncmd->flags &= ~LPFC_BUMP_QDEPTH;
|
||||
|
||||
if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
|
||||
"6310 XB release deferred for "
|
||||
@ -2687,7 +2694,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
|
||||
struct lpfc_nvme_rport *oldrport;
|
||||
struct nvme_fc_remote_port *remote_port;
|
||||
struct nvme_fc_port_info rpinfo;
|
||||
struct lpfc_nodelist *prev_ndlp;
|
||||
struct lpfc_nodelist *prev_ndlp = NULL;
|
||||
|
||||
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
|
||||
"6006 Register NVME PORT. DID x%06x nlptype x%x\n",
|
||||
@ -2736,23 +2743,29 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
|
||||
spin_unlock_irq(&vport->phba->hbalock);
|
||||
rport = remote_port->private;
|
||||
if (oldrport) {
|
||||
/* New remoteport record does not guarantee valid
|
||||
* host private memory area.
|
||||
*/
|
||||
prev_ndlp = oldrport->ndlp;
|
||||
if (oldrport == remote_port->private) {
|
||||
/* Same remoteport. Just reuse. */
|
||||
/* Same remoteport - ndlp should match.
|
||||
* Just reuse.
|
||||
*/
|
||||
lpfc_printf_vlog(ndlp->vport, KERN_INFO,
|
||||
LOG_NVME_DISC,
|
||||
"6014 Rebinding lport to "
|
||||
"remoteport %p wwpn 0x%llx, "
|
||||
"Data: x%x x%x %p x%x x%06x\n",
|
||||
"Data: x%x x%x %p %p x%x x%06x\n",
|
||||
remote_port,
|
||||
remote_port->port_name,
|
||||
remote_port->port_id,
|
||||
remote_port->port_role,
|
||||
prev_ndlp,
|
||||
ndlp,
|
||||
ndlp->nlp_type,
|
||||
ndlp->nlp_DID);
|
||||
return 0;
|
||||
}
|
||||
prev_ndlp = rport->ndlp;
|
||||
|
||||
/* Sever the ndlp<->rport association
|
||||
* before dropping the ndlp ref from
|
||||
@ -2786,13 +2799,13 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
|
||||
lpfc_printf_vlog(vport, KERN_INFO,
|
||||
LOG_NVME_DISC | LOG_NODE,
|
||||
"6022 Binding new rport to "
|
||||
"lport %p Remoteport %p WWNN 0x%llx, "
|
||||
"lport %p Remoteport %p rport %p WWNN 0x%llx, "
|
||||
"Rport WWPN 0x%llx DID "
|
||||
"x%06x Role x%x, ndlp %p\n",
|
||||
lport, remote_port,
|
||||
"x%06x Role x%x, ndlp %p prev_ndlp %p\n",
|
||||
lport, remote_port, rport,
|
||||
rpinfo.node_name, rpinfo.port_name,
|
||||
rpinfo.port_id, rpinfo.port_role,
|
||||
ndlp);
|
||||
ndlp, prev_ndlp);
|
||||
} else {
|
||||
lpfc_printf_vlog(vport, KERN_ERR,
|
||||
LOG_NVME_DISC | LOG_NODE,
|
||||
@ -2970,7 +2983,7 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
|
||||
struct lpfc_sli_ring *pring;
|
||||
u32 i, wait_cnt = 0;
|
||||
|
||||
if (phba->sli_rev < LPFC_SLI_REV4)
|
||||
if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.nvme_wq)
|
||||
return;
|
||||
|
||||
/* Cycle through all NVME rings and make sure all outstanding
|
||||
@ -2979,6 +2992,9 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
|
||||
for (i = 0; i < phba->cfg_nvme_io_channel; i++) {
|
||||
pring = phba->sli4_hba.nvme_wq[i]->pring;
|
||||
|
||||
if (!pring)
|
||||
continue;
|
||||
|
||||
/* Retrieve everything on the txcmplq */
|
||||
while (!list_empty(&pring->txcmplq)) {
|
||||
msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
|
||||
|
@@ -86,6 +86,7 @@ struct lpfc_nvme_buf {

uint16_t flags; /* TBD convert exch_busy to flags */
#define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */
#define LPFC_BUMP_QDEPTH 0x2 /* bumped queue depth counter */
uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */
uint16_t status; /* From IOCB Word 7- ulpStatus */
uint16_t cpu;

@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channsel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@ -402,6 +402,7 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
|
||||
|
||||
/* Process FCP command */
|
||||
if (rc == 0) {
|
||||
ctxp->rqb_buffer = NULL;
|
||||
atomic_inc(&tgtp->rcv_fcp_cmd_out);
|
||||
nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
|
||||
return;
|
||||
@ -1116,8 +1117,17 @@ lpfc_nvmet_defer_rcv(struct nvmet_fc_target_port *tgtport,
|
||||
lpfc_nvmeio_data(phba, "NVMET DEFERRCV: xri x%x sz %d CPU %02x\n",
|
||||
ctxp->oxid, ctxp->size, smp_processor_id());
|
||||
|
||||
if (!nvmebuf) {
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
|
||||
"6425 Defer rcv: no buffer xri x%x: "
|
||||
"flg %x ste %x\n",
|
||||
ctxp->oxid, ctxp->flag, ctxp->state);
|
||||
return;
|
||||
}
|
||||
|
||||
tgtp = phba->targetport->private;
|
||||
atomic_inc(&tgtp->rcv_fcp_cmd_defer);
|
||||
if (tgtp)
|
||||
atomic_inc(&tgtp->rcv_fcp_cmd_defer);
|
||||
|
||||
/* Free the nvmebuf since a new buffer already replaced it */
|
||||
nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
|
||||
@ -1732,9 +1742,12 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
||||
uint32_t *payload;
|
||||
uint32_t size, oxid, sid, rc;
|
||||
|
||||
if (!nvmebuf || !phba->targetport) {
|
||||
fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
|
||||
oxid = be16_to_cpu(fc_hdr->fh_ox_id);
|
||||
|
||||
if (!phba->targetport) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
|
||||
"6154 LS Drop IO\n");
|
||||
"6154 LS Drop IO x%x\n", oxid);
|
||||
oxid = 0;
|
||||
size = 0;
|
||||
sid = 0;
|
||||
@ -1744,9 +1757,7 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
||||
|
||||
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
|
||||
payload = (uint32_t *)(nvmebuf->dbuf.virt);
|
||||
fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
|
||||
size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
|
||||
oxid = be16_to_cpu(fc_hdr->fh_ox_id);
|
||||
sid = sli4_sid_from_fc_hdr(fc_hdr);
|
||||
|
||||
ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
|
||||
@ -1759,8 +1770,7 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
||||
lpfc_nvmeio_data(phba, "NVMET LS DROP: "
|
||||
"xri x%x sz %d from %06x\n",
|
||||
oxid, size, sid);
|
||||
if (nvmebuf)
|
||||
lpfc_in_buf_free(phba, &nvmebuf->dbuf);
|
||||
lpfc_in_buf_free(phba, &nvmebuf->dbuf);
|
||||
return;
|
||||
}
|
||||
ctxp->phba = phba;
|
||||
@ -1803,8 +1813,7 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
||||
ctxp->oxid, rc);
|
||||
|
||||
/* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
|
||||
if (nvmebuf)
|
||||
lpfc_in_buf_free(phba, &nvmebuf->dbuf);
|
||||
lpfc_in_buf_free(phba, &nvmebuf->dbuf);
|
||||
|
||||
atomic_inc(&tgtp->xmt_ls_abort);
|
||||
lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
|
||||
@ -2492,7 +2501,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
|
||||
bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, 0);
|
||||
|
||||
/* Word 11 - set pbde later */
|
||||
if (phba->nvme_embed_pbde) {
|
||||
if (phba->cfg_enable_pbde) {
|
||||
do_pbde = 1;
|
||||
} else {
|
||||
bf_set(wqe_pbde, &wqe->fcp_treceive.wqe_com, 0);
|
||||
@ -2607,16 +2616,19 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
|
||||
bf_set(lpfc_sli4_sge_last, sgl, 1);
|
||||
sgl->word2 = cpu_to_le32(sgl->word2);
|
||||
sgl->sge_len = cpu_to_le32(cnt);
|
||||
if (do_pbde && i == 0) {
|
||||
if (i == 0) {
|
||||
bde = (struct ulp_bde64 *)&wqe->words[13];
|
||||
memset(bde, 0, sizeof(struct ulp_bde64));
|
||||
/* Words 13-15 (PBDE)*/
|
||||
bde->addrLow = sgl->addr_lo;
|
||||
bde->addrHigh = sgl->addr_hi;
|
||||
bde->tus.f.bdeSize =
|
||||
le32_to_cpu(sgl->sge_len);
|
||||
bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
|
||||
bde->tus.w = cpu_to_le32(bde->tus.w);
|
||||
if (do_pbde) {
|
||||
/* Words 13-15 (PBDE) */
|
||||
bde->addrLow = sgl->addr_lo;
|
||||
bde->addrHigh = sgl->addr_hi;
|
||||
bde->tus.f.bdeSize =
|
||||
le32_to_cpu(sgl->sge_len);
|
||||
bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
|
||||
bde->tus.w = cpu_to_le32(bde->tus.w);
|
||||
} else {
|
||||
memset(bde, 0, sizeof(struct ulp_bde64));
|
||||
}
|
||||
}
|
||||
sgl++;
|
||||
ctxp->offset += cnt;
|
||||
@ -3105,11 +3117,17 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
|
||||
}
|
||||
|
||||
aerr:
|
||||
ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
|
||||
spin_lock_irqsave(&ctxp->ctxlock, flags);
|
||||
if (ctxp->flag & LPFC_NVMET_CTX_RLS)
|
||||
list_del(&ctxp->list);
|
||||
ctxp->flag &= ~(LPFC_NVMET_ABORT_OP | LPFC_NVMET_CTX_RLS);
|
||||
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
|
||||
|
||||
atomic_inc(&tgtp->xmt_abort_rsp_error);
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
|
||||
"6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
|
||||
ctxp->oxid, rc);
|
||||
lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
@@ -2,7 +2,7 @@
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2018 Broadcom. All Rights Reserved. The term *
* “Broadcom” refers to Broadcom Limited and/or its subsidiaries. *
* “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.broadcom.com *
@ -995,6 +995,11 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
|
||||
spin_unlock(&phba->scsi_buf_list_put_lock);
|
||||
}
|
||||
spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
|
||||
|
||||
if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
|
||||
atomic_inc(&ndlp->cmd_pending);
|
||||
lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
|
||||
}
|
||||
return lpfc_cmd;
|
||||
}
|
||||
/**
|
||||
@ -1044,6 +1049,11 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
|
||||
spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag);
|
||||
if (!found)
|
||||
return NULL;
|
||||
|
||||
if (lpfc_ndlp_check_qdepth(phba, ndlp) && lpfc_cmd) {
|
||||
atomic_inc(&ndlp->cmd_pending);
|
||||
lpfc_cmd->flags |= LPFC_SBUF_BUMP_QDEPTH;
|
||||
}
|
||||
return lpfc_cmd;
|
||||
}
|
||||
/**
|
||||
@ -1134,7 +1144,10 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
|
||||
static void
|
||||
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
|
||||
{
|
||||
if ((psb->flags & LPFC_SBUF_BUMP_QDEPTH) && psb->ndlp)
|
||||
atomic_dec(&psb->ndlp->cmd_pending);
|
||||
|
||||
psb->flags &= ~LPFC_SBUF_BUMP_QDEPTH;
|
||||
phba->lpfc_release_scsi_buf(phba, psb);
|
||||
}
|
||||
|
||||
@ -3017,8 +3030,8 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
|
||||
if (err_type == BGS_GUARD_ERR_MASK) {
|
||||
scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
|
||||
0x10, 0x1);
|
||||
cmd->result = DRIVER_SENSE << 24
|
||||
| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
|
||||
cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
|
||||
SAM_STAT_CHECK_CONDITION;
|
||||
phba->bg_guard_err_cnt++;
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
|
||||
"9069 BLKGRD: LBA %lx grd_tag error %x != %x\n",
|
||||
@ -3028,8 +3041,8 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
|
||||
} else if (err_type == BGS_REFTAG_ERR_MASK) {
|
||||
scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
|
||||
0x10, 0x3);
|
||||
cmd->result = DRIVER_SENSE << 24
|
||||
| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
|
||||
cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
|
||||
SAM_STAT_CHECK_CONDITION;
|
||||
|
||||
phba->bg_reftag_err_cnt++;
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
|
||||
@ -3040,8 +3053,8 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
|
||||
} else if (err_type == BGS_APPTAG_ERR_MASK) {
|
||||
scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
|
||||
0x10, 0x2);
|
||||
cmd->result = DRIVER_SENSE << 24
|
||||
| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
|
||||
cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
|
||||
SAM_STAT_CHECK_CONDITION;
|
||||
|
||||
phba->bg_apptag_err_cnt++;
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
|
||||
@ -3096,7 +3109,7 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
|
||||
spin_unlock(&_dump_buf_lock);
|
||||
|
||||
if (lpfc_bgs_get_invalid_prof(bgstat)) {
|
||||
cmd->result = ScsiResult(DID_ERROR, 0);
|
||||
cmd->result = DID_ERROR << 16;
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
|
||||
"9072 BLKGRD: Invalid BG Profile in cmd"
|
||||
" 0x%x lba 0x%llx blk cnt 0x%x "
|
||||
@ -3108,7 +3121,7 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
|
||||
}
|
||||
|
||||
if (lpfc_bgs_get_uninit_dif_block(bgstat)) {
|
||||
cmd->result = ScsiResult(DID_ERROR, 0);
|
||||
cmd->result = DID_ERROR << 16;
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
|
||||
"9073 BLKGRD: Invalid BG PDIF Block in cmd"
|
||||
" 0x%x lba 0x%llx blk cnt 0x%x "
|
||||
@ -3124,8 +3137,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
|
||||
|
||||
scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
|
||||
0x10, 0x1);
|
||||
cmd->result = DRIVER_SENSE << 24
|
||||
| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
|
||||
cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
|
||||
SAM_STAT_CHECK_CONDITION;
|
||||
phba->bg_guard_err_cnt++;
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
|
||||
"9055 BLKGRD: Guard Tag error in cmd"
|
||||
@ -3140,8 +3153,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
|
||||
|
||||
scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
|
||||
0x10, 0x3);
|
||||
cmd->result = DRIVER_SENSE << 24
|
||||
| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
|
||||
cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
|
||||
SAM_STAT_CHECK_CONDITION;
|
||||
|
||||
phba->bg_reftag_err_cnt++;
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
|
||||
@ -3157,8 +3170,8 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd,
|
||||
|
||||
scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
|
||||
0x10, 0x2);
|
||||
cmd->result = DRIVER_SENSE << 24
|
||||
| ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION);
|
||||
cmd->result = DRIVER_SENSE << 24 | DID_ABORT << 16 |
|
||||
SAM_STAT_CHECK_CONDITION;
|
||||
|
||||
phba->bg_apptag_err_cnt++;
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG,
|
||||
@ -3311,12 +3324,13 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
|
||||
}
|
||||
/*
|
||||
* Setup the first Payload BDE. For FCoE we just key off
|
||||
* Performance Hints, for FC we utilize fcp_embed_pbde.
|
||||
* Performance Hints, for FC we use lpfc_enable_pbde.
|
||||
* We populate words 13-15 of IOCB/WQE.
|
||||
*/
|
||||
if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
|
||||
phba->fcp_embed_pbde) {
|
||||
phba->cfg_enable_pbde) {
|
||||
bde = (struct ulp_bde64 *)
|
||||
&(iocb_cmd->unsli3.sli3Words[5]);
|
||||
&(iocb_cmd->unsli3.sli3Words[5]);
|
||||
bde->addrLow = first_data_sgl->addr_lo;
|
||||
bde->addrHigh = first_data_sgl->addr_hi;
|
||||
bde->tus.f.bdeSize =
|
||||
@ -3330,6 +3344,13 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
|
||||
sgl->word2 = le32_to_cpu(sgl->word2);
|
||||
bf_set(lpfc_sli4_sge_last, sgl, 1);
|
||||
sgl->word2 = cpu_to_le32(sgl->word2);
|
||||
|
||||
if ((phba->sli3_options & LPFC_SLI4_PERFH_ENABLED) ||
|
||||
phba->cfg_enable_pbde) {
|
||||
bde = (struct ulp_bde64 *)
|
||||
&(iocb_cmd->unsli3.sli3Words[5]);
|
||||
memset(bde, 0, (sizeof(uint32_t) * 3));
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
@ -3866,7 +3887,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
|
||||
}
|
||||
|
||||
out:
|
||||
cmnd->result = ScsiResult(host_status, scsi_status);
|
||||
cmnd->result = host_status << 16 | scsi_status;
|
||||
lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
|
||||
}
|
||||
|
||||
@ -4019,7 +4040,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
|
||||
break;
|
||||
case IOSTAT_NPORT_BSY:
|
||||
case IOSTAT_FABRIC_BSY:
|
||||
cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
|
||||
cmd->result = DID_TRANSPORT_DISRUPTED << 16;
|
||||
fast_path_evt = lpfc_alloc_fast_evt(phba);
|
||||
if (!fast_path_evt)
|
||||
break;
|
||||
@ -4053,14 +4074,14 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
|
||||
lpfc_cmd->result == IOERR_ELXSEC_CRYPTO_ERROR ||
|
||||
lpfc_cmd->result ==
|
||||
IOERR_ELXSEC_CRYPTO_COMPARE_ERROR) {
|
||||
cmd->result = ScsiResult(DID_NO_CONNECT, 0);
|
||||
cmd->result = DID_NO_CONNECT << 16;
|
||||
break;
|
||||
}
|
||||
if (lpfc_cmd->result == IOERR_INVALID_RPI ||
|
||||
lpfc_cmd->result == IOERR_NO_RESOURCES ||
|
||||
lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
|
||||
lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
|
||||
cmd->result = ScsiResult(DID_REQUEUE, 0);
|
||||
cmd->result = DID_REQUEUE << 16;
|
||||
break;
|
||||
}
|
||||
if ((lpfc_cmd->result == IOERR_RX_DMA_FAILED ||
|
||||
@ -4094,16 +4115,16 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
|
||||
}
|
||||
/* else: fall through */
|
||||
default:
|
||||
cmd->result = ScsiResult(DID_ERROR, 0);
|
||||
cmd->result = DID_ERROR << 16;
|
||||
break;
|
||||
}
|
||||
|
||||
if (!pnode || !NLP_CHK_NODE_ACT(pnode)
|
||||
|| (pnode->nlp_state != NLP_STE_MAPPED_NODE))
|
||||
cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
|
||||
SAM_STAT_BUSY);
|
||||
cmd->result = DID_TRANSPORT_DISRUPTED << 16 |
|
||||
SAM_STAT_BUSY;
|
||||
} else
|
||||
cmd->result = ScsiResult(DID_OK, 0);
|
||||
cmd->result = DID_OK << 16;
|
||||
|
||||
if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
|
||||
uint32_t *lp = (uint32_t *)cmd->sense_buffer;
|
||||
@ -4122,7 +4143,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
|
||||
msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
|
||||
spin_lock_irqsave(shost->host_lock, flags);
|
||||
if (pnode && NLP_CHK_NODE_ACT(pnode)) {
|
||||
atomic_dec(&pnode->cmd_pending);
|
||||
if (pnode->cmd_qdepth >
|
||||
atomic_read(&pnode->cmd_pending) &&
|
||||
(atomic_read(&pnode->cmd_pending) >
|
||||
@ -4135,8 +4155,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
|
||||
pnode->last_change_time = jiffies;
|
||||
}
|
||||
spin_unlock_irqrestore(shost->host_lock, flags);
|
||||
} else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
|
||||
atomic_dec(&pnode->cmd_pending);
|
||||
}
|
||||
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
|
||||
|
||||
@ -4530,6 +4548,11 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
|
||||
int err;
|
||||
|
||||
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
|
||||
|
||||
/* sanity check on references */
|
||||
if (unlikely(!rdata) || unlikely(!rport))
|
||||
goto out_fail_command;
|
||||
|
||||
err = fc_remote_port_chkready(rport);
|
||||
if (err) {
|
||||
cmnd->result = err;
|
||||
@ -4555,33 +4578,36 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
|
||||
*/
|
||||
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
|
||||
goto out_tgt_busy;
|
||||
if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
|
||||
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
|
||||
"3377 Target Queue Full, scsi Id:%d Qdepth:%d"
|
||||
" Pending command:%d"
|
||||
" WWNN:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, "
|
||||
" WWPN:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
|
||||
ndlp->nlp_sid, ndlp->cmd_qdepth,
|
||||
atomic_read(&ndlp->cmd_pending),
|
||||
ndlp->nlp_nodename.u.wwn[0],
|
||||
ndlp->nlp_nodename.u.wwn[1],
|
||||
ndlp->nlp_nodename.u.wwn[2],
|
||||
ndlp->nlp_nodename.u.wwn[3],
|
||||
ndlp->nlp_nodename.u.wwn[4],
|
||||
ndlp->nlp_nodename.u.wwn[5],
|
||||
ndlp->nlp_nodename.u.wwn[6],
|
||||
ndlp->nlp_nodename.u.wwn[7],
|
||||
ndlp->nlp_portname.u.wwn[0],
|
||||
ndlp->nlp_portname.u.wwn[1],
|
||||
ndlp->nlp_portname.u.wwn[2],
|
||||
ndlp->nlp_portname.u.wwn[3],
|
||||
ndlp->nlp_portname.u.wwn[4],
|
||||
ndlp->nlp_portname.u.wwn[5],
|
||||
ndlp->nlp_portname.u.wwn[6],
|
||||
ndlp->nlp_portname.u.wwn[7]);
|
||||
goto out_tgt_busy;
|
||||
if (lpfc_ndlp_check_qdepth(phba, ndlp)) {
|
||||
if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
|
||||
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_ERROR,
|
||||
"3377 Target Queue Full, scsi Id:%d "
|
||||
"Qdepth:%d Pending command:%d"
|
||||
" WWNN:%02x:%02x:%02x:%02x:"
|
||||
"%02x:%02x:%02x:%02x, "
|
||||
" WWPN:%02x:%02x:%02x:%02x:"
|
||||
"%02x:%02x:%02x:%02x",
|
||||
ndlp->nlp_sid, ndlp->cmd_qdepth,
|
||||
atomic_read(&ndlp->cmd_pending),
|
||||
ndlp->nlp_nodename.u.wwn[0],
|
||||
ndlp->nlp_nodename.u.wwn[1],
|
||||
ndlp->nlp_nodename.u.wwn[2],
|
||||
ndlp->nlp_nodename.u.wwn[3],
|
||||
ndlp->nlp_nodename.u.wwn[4],
|
||||
ndlp->nlp_nodename.u.wwn[5],
|
||||
ndlp->nlp_nodename.u.wwn[6],
|
||||
ndlp->nlp_nodename.u.wwn[7],
|
||||
ndlp->nlp_portname.u.wwn[0],
|
||||
ndlp->nlp_portname.u.wwn[1],
|
||||
ndlp->nlp_portname.u.wwn[2],
|
||||
ndlp->nlp_portname.u.wwn[3],
|
||||
ndlp->nlp_portname.u.wwn[4],
|
||||
ndlp->nlp_portname.u.wwn[5],
|
||||
ndlp->nlp_portname.u.wwn[6],
|
||||
ndlp->nlp_portname.u.wwn[7]);
|
||||
goto out_tgt_busy;
|
||||
}
|
||||
}
|
||||
atomic_inc(&ndlp->cmd_pending);
|
||||
|
||||
lpfc_cmd = lpfc_get_scsi_buf(phba, ndlp);
|
||||
if (lpfc_cmd == NULL) {
|
||||
@ -4599,6 +4625,7 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
|
||||
*/
|
||||
lpfc_cmd->pCmd = cmnd;
|
||||
lpfc_cmd->rdata = rdata;
|
||||
lpfc_cmd->ndlp = ndlp;
|
||||
lpfc_cmd->timeout = 0;
|
||||
lpfc_cmd->start_time = jiffies;
|
||||
cmnd->host_scribble = (unsigned char *)lpfc_cmd;
|
||||
@ -4681,7 +4708,6 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
|
||||
lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
|
||||
lpfc_release_scsi_buf(phba, lpfc_cmd);
|
||||
out_host_busy:
|
||||
atomic_dec(&ndlp->cmd_pending);
|
||||
return SCSI_MLQUEUE_HOST_BUSY;
|
||||
|
||||
out_tgt_busy:
|
||||
@ -4714,7 +4740,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
|
||||
struct lpfc_scsi_buf *lpfc_cmd;
|
||||
IOCB_t *cmd, *icmd;
|
||||
int ret = SUCCESS, status = 0;
|
||||
struct lpfc_sli_ring *pring_s4;
|
||||
struct lpfc_sli_ring *pring_s4 = NULL;
|
||||
int ret_val;
|
||||
unsigned long flags;
|
||||
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
|
||||
@ -4744,8 +4770,25 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
|
||||
}
|
||||
|
||||
iocb = &lpfc_cmd->cur_iocbq;
|
||||
if (phba->sli_rev == LPFC_SLI_REV4) {
|
||||
if (!(phba->cfg_fof) ||
|
||||
(!(iocb->iocb_flag & LPFC_IO_FOF))) {
|
||||
pring_s4 =
|
||||
phba->sli4_hba.fcp_wq[iocb->hba_wqidx]->pring;
|
||||
} else {
|
||||
iocb->hba_wqidx = 0;
|
||||
pring_s4 = phba->sli4_hba.oas_wq->pring;
|
||||
}
|
||||
if (!pring_s4) {
|
||||
ret = FAILED;
|
||||
goto out_unlock;
|
||||
}
|
||||
spin_lock(&pring_s4->ring_lock);
|
||||
}
|
||||
/* the command is in process of being cancelled */
|
||||
if (!(iocb->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
|
||||
if (phba->sli_rev == LPFC_SLI_REV4)
|
||||
spin_unlock(&pring_s4->ring_lock);
|
||||
spin_unlock_irqrestore(&phba->hbalock, flags);
|
||||
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
|
||||
"3169 SCSI Layer abort requested I/O has been "
|
||||
@ -4759,6 +4802,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
|
||||
* see the completion before the eh fired. Just return SUCCESS.
|
||||
*/
|
||||
if (lpfc_cmd->pCmd != cmnd) {
|
||||
if (phba->sli_rev == LPFC_SLI_REV4)
|
||||
spin_unlock(&pring_s4->ring_lock);
|
||||
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
|
||||
"3170 SCSI Layer abort requested I/O has been "
|
||||
"completed by LLD.\n");
|
||||
@ -4771,6 +4816,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
|
||||
if (iocb->iocb_flag & LPFC_DRIVER_ABORTED) {
|
||||
lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
|
||||
"3389 SCSI Layer I/O Abort Request is pending\n");
|
||||
if (phba->sli_rev == LPFC_SLI_REV4)
|
||||
spin_unlock(&pring_s4->ring_lock);
|
||||
spin_unlock_irqrestore(&phba->hbalock, flags);
|
||||
goto wait_for_cmpl;
|
||||
}
|
||||
@ -4778,6 +4825,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
|
||||
abtsiocb = __lpfc_sli_get_iocbq(phba);
|
||||
if (abtsiocb == NULL) {
|
||||
ret = FAILED;
|
||||
if (phba->sli_rev == LPFC_SLI_REV4)
|
||||
spin_unlock(&pring_s4->ring_lock);
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
@ -4815,14 +4864,9 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
|
||||
|
||||
abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
|
||||
abtsiocb->vport = vport;
|
||||
lpfc_cmd->waitq = &waitq;
|
||||
if (phba->sli_rev == LPFC_SLI_REV4) {
|
||||
pring_s4 = lpfc_sli4_calc_ring(phba, abtsiocb);
|
||||
if (pring_s4 == NULL) {
|
||||
ret = FAILED;
|
||||
goto out_unlock;
|
||||
}
|
||||
/* Note: both hbalock and ring_lock must be set here */
|
||||
spin_lock(&pring_s4->ring_lock);
|
||||
ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
|
||||
abtsiocb, 0);
|
||||
spin_unlock(&pring_s4->ring_lock);
|
||||
@ -4835,6 +4879,17 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
|
||||
|
||||
|
||||
if (ret_val == IOCB_ERROR) {
|
||||
if (phba->sli_rev == LPFC_SLI_REV4)
|
||||
spin_lock_irqsave(&pring_s4->ring_lock, flags);
|
||||
else
|
||||
spin_lock_irqsave(&phba->hbalock, flags);
|
||||
/* Indicate the IO is not being aborted by the driver. */
|
||||
iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
|
||||
lpfc_cmd->waitq = NULL;
|
||||
if (phba->sli_rev == LPFC_SLI_REV4)
|
||||
spin_unlock_irqrestore(&pring_s4->ring_lock, flags);
|
||||
else
|
||||
spin_unlock_irqrestore(&phba->hbalock, flags);
|
||||
lpfc_sli_release_iocbq(phba, abtsiocb);
|
||||
ret = FAILED;
|
||||
goto out;
|
||||
@ -4845,7 +4900,6 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
|
||||
&phba->sli.sli3_ring[LPFC_FCP_RING], HA_R0RE_REQ);
|
||||
|
||||
wait_for_cmpl:
|
||||
lpfc_cmd->waitq = &waitq;
|
||||
/* Wait for abort to complete */
|
||||
wait_event_timeout(waitq,
|
||||
(lpfc_cmd->pCmd != cmnd),
|
||||
@ -5006,6 +5060,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct scsi_cmnd *cmnd,
|
||||
lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo;
|
||||
lpfc_cmd->rdata = rdata;
|
||||
lpfc_cmd->pCmd = cmnd;
|
||||
lpfc_cmd->ndlp = pnode;
|
||||
|
||||
status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id,
|
||||
task_mgmt_cmd);
|