SCSI misc on 20170704
This is mostly updates of the usual suspects: lpfc, qla2xxx, bnx2fc,
qedf, hpsa, hisi_sas, smartpqi, cxlflash, aacraid, csiostor along with
a host of minor and miscellaneous changes.

Signed-off-by: James E.J. Bottomley <jejb@linux.vnet.ibm.com>

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2

iQIcBAABAgAGBQJZW7JMAAoJEAVr7HOZEZN4IM4P/AqBtvH+6Lo1Eb+3A/HnHskK
hIxVxgBxaw3fhW5AegDfVCvrdVTTEkCB/g5CIKN8NCWEx6OGmCX0Lu6lnjld9BOZ
cTlPtzNwFGlgHrz34LwCc3vlc5ovMpTQBrpGAQpGGWoAZIP+c3ilEihIYTEMNCsN
dmjI71AigDE5g6X1OT361IJ1gydkjfG41IcRe/jlMtEgRNdy3B2JVIdATL89Pw4b
0uZO3uUTn8EGEKUdyJZCNpie7sGZv8u2LhA+Znby2C4h3bwWNV/d0p7ped4xrQY5
yVpZEUbYVdcOOYBgeBJlfwOhvjRQTdxeK4d7W9XTb+AQf30F3DgSepdMCdf3BjVt
KnQvBOTxyidB8xsCL46wlxxNew3qoUtaKoY88WUOOnnJwU5U7hlRtPkf/eO2i5QF
+k7fxUpFfkBTS4I2gXnyGWpmSoxwJerd0knojSOjrjJcAlcgM65+pocUAea/0Dpr
SsfL2sTb12gk6bkF9UlRv8/4aSsWYb92WW1nbTt2nFRXncPNN5Qzc3lGj//36O+b
2bka+aSKVAFoNAnQ1pUE8EJxSboy5q7y4509iZzO/Fom+pVuzBROm5fmrpcOE5dP
IjW7gqSFB6578tnNiK049rrrPja5wkUa+Ptc8s0FjPAVyIDrp2RN+f2nljOBBhW8
3Z1nXMG0eFqvb5taLtfZ
=D9QX
-----END PGP SIGNATURE-----

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
 "This is mostly updates of the usual suspects: lpfc, qla2xxx, bnx2fc,
  qedf, hpsa, hisi_sas, smartpqi, cxlflash, aacraid, csiostor along
  with a host of minor and miscellaneous changes"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (276 commits)
  qla2xxx: Fix NVMe entry_type for iocb packet on BE system
  scsi: qla2xxx: avoid unused-function warning
  scsi: snic: fix a couple of spelling mistakes/typos
  scsi: qla2xxx: fix a bunch of typos and spelling mistakes
  scsi: lpfc: don't double count abort errors
  scsi: lpfc: spin_lock_irq() is not nestable
  scsi: hisi_sas: optimise DMA slot memory
  scsi: ibmvfc: constify dev_pm_ops structures.
  scsi: ibmvscsi: constify dev_pm_ops structures.
  scsi: cxlflash: Update debug prints in reset handlers
  scsi: cxlflash: Update send_tmf() parameters
  scsi: cxlflash: Avoid double free of character device
  scsi: Add STARGET_CREATED_REMOVE state to scsi_target_state
  scsi: ses: do not add a device to an enclosure if enclosure_add_links() fails.
  scsi: ufs: flush eh_work when eh_work scheduled.
  scsi: qla2xxx: Protect access to qpair members with qpair->qp_lock
  scsi: sun_esp: fix device reference leaks
  scsi: fnic: changing queue command to return result DID_IMM_RETRY when rport is init
  scsi: fnic: correct speed display and add support for 25,40 and 100G
  scsi: fnic: added timestamp reporting in fnic debug stats
  ...
@@ -326,7 +326,7 @@ Code  Seq#(hex)  Include File  Comments
 0xB5  00-0F  uapi/linux/rpmsg.h  <mailto:linux-remoteproc@vger.kernel.org>
 0xC0  00-0F  linux/usb/iowarrior.h
 0xCA  00-0F  uapi/misc/cxl.h
-0xCA  80-8F  uapi/scsi/cxlflash_ioctl.h
+0xCA  80-BF  uapi/scsi/cxlflash_ioctl.h
 0xCB  00-1F  CBM serial IEC bus  in development:
                                  <mailto:michael.klein@puffin.lb.shuttle.de>
 0xCD  01     linux/reiserfs_fs.h
@@ -124,8 +124,8 @@ Block library API
     http://github.com/open-power/capiflash

-CXL Flash Driver IOCTLs
-=======================
+CXL Flash Driver LUN IOCTLs
+===========================

 Users, such as the block library, that wish to interface with a flash
 device (LUN) via user space access need to use the services provided
@@ -257,6 +257,12 @@ DK_CXLFLASH_VLUN_RESIZE
 operating in the virtual mode and used to program a LUN translation
 table that the AFU references when provided with a resource handle.

+This ioctl can return -EAGAIN if an AFU sync operation takes too long.
+In addition to returning a failure to user, cxlflash will also schedule
+an asynchronous AFU reset. Should the user choose to retry the operation,
+it is expected to succeed. If this ioctl fails with -EAGAIN, the user
+can either retry the operation or treat it as a failure.
+
 DK_CXLFLASH_RELEASE
 -------------------
 This ioctl is responsible for releasing a previously obtained
@@ -309,6 +315,12 @@ DK_CXLFLASH_VLUN_CLONE
 clone. This is to avoid a stale entry in the file descriptor table of the
 child process.

+This ioctl can return -EAGAIN if an AFU sync operation takes too long.
+In addition to returning a failure to user, cxlflash will also schedule
+an asynchronous AFU reset. Should the user choose to retry the operation,
+it is expected to succeed. If this ioctl fails with -EAGAIN, the user
+can either retry the operation or treat it as a failure.
+
 DK_CXLFLASH_VERIFY
 ------------------
 This ioctl is used to detect various changes such as the capacity of
@@ -355,3 +367,63 @@ DK_CXLFLASH_MANAGE_LUN
 exclusive user space access (superpipe). In case a LUN is visible
 across multiple ports and adapters, this ioctl is used to uniquely
 identify each LUN by its World Wide Node Name (WWNN).
+
+
+CXL Flash Driver Host IOCTLs
+============================
+
+Each host adapter instance that is supported by the cxlflash driver
+has a special character device associated with it to enable a set of
+host management functions. These character devices are hosted in a
+class dedicated for cxlflash and can be accessed via /dev/cxlflash/*.
+
+Applications can be written to perform various functions using the
+host ioctl APIs below.
+
+The structure definitions for these IOCTLs are available in:
+uapi/scsi/cxlflash_ioctl.h
+
+HT_CXLFLASH_LUN_PROVISION
+-------------------------
+This ioctl is used to create and delete persistent LUNs on cxlflash
+devices that lack an external LUN management interface. It is only
+valid when used with AFUs that support the LUN provision capability.
+
+When sufficient space is available, LUNs can be created by specifying
+the target port to host the LUN and a desired size in 4K blocks. Upon
+success, the LUN ID and WWID of the created LUN will be returned and
+the SCSI bus can be scanned to detect the change in LUN topology. Note
+that partial allocations are not supported. Should a creation fail due
+to a space issue, the target port can be queried for its current LUN
+geometry.
+
+To remove a LUN, the device must first be disassociated from the Linux
+SCSI subsystem. The LUN deletion can then be initiated by specifying a
+target port and LUN ID. Upon success, the LUN geometry associated with
+the port will be updated to reflect the new number of provisioned LUNs
+and available capacity.
+
+To query the LUN geometry of a port, the target port is specified and
+upon success, the following information is presented:
+
+ - Maximum number of provisioned LUNs allowed for the port
+ - Current number of provisioned LUNs for the port
+ - Maximum total capacity of provisioned LUNs for the port (4K blocks)
+ - Current total capacity of provisioned LUNs for the port (4K blocks)
+
+With this information, the number of available LUNs and capacity can
+be calculated.
+
+HT_CXLFLASH_AFU_DEBUG
+---------------------
+This ioctl is used to debug AFUs by supporting a command pass-through
+interface. It is only valid when used with AFUs that support the AFU
+debug capability.
+
+With the exception of buffer management, AFU debug commands are opaque to
+cxlflash and treated as pass-through. For debug commands that do require
+data transfer, the user supplies an adequately sized data buffer and must
+specify the data transfer direction with respect to the host. There is a
+maximum transfer size of 256K imposed. Note that partial read completions
+are not supported - when errors are experienced with a host read data
+transfer, the data buffer is not copied back to the user.
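For user-space consumers, the -EAGAIN contract documented above reduces to a bounded retry loop around the ioctl. Below is a minimal sketch of that pattern; DK_CXLFLASH_VLUN_RESIZE and struct dk_cxlflash_resize come from uapi/scsi/cxlflash_ioctl.h as named in the text, while the device path, field values, and retry budget are illustrative assumptions rather than values taken from this commit.

    #include <errno.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/types.h>
    #include <scsi/cxlflash_ioctl.h>  /* DK_CXLFLASH_VLUN_RESIZE, struct dk_cxlflash_resize */

    /* Retry a virtual LUN resize a few times on -EAGAIN, per the documented semantics. */
    static int resize_vlun(int fd, __u64 ctx_id, __u64 rsrc, __u64 req_size)
    {
        struct dk_cxlflash_resize resize;
        int attempts = 3;                     /* illustrative retry budget */

        memset(&resize, 0, sizeof(resize));
        resize.hdr.version = DK_CXLFLASH_VERSION_0;  /* assumption: version-0 header */
        resize.context_id = ctx_id;
        resize.rsrc_handle = rsrc;
        resize.req_size = req_size;

        while (attempts--) {
            if (ioctl(fd, DK_CXLFLASH_VLUN_RESIZE, &resize) == 0)
                return 0;                     /* resize.last_lba reflects the new size */
            if (errno != EAGAIN)              /* only -EAGAIN is worth retrying */
                break;
            sleep(1);                         /* give the scheduled AFU reset time to land */
        }
        return -1;
    }

A caller that gets -1 back treats the resize as failed, exactly as the documentation allows.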
@@ -332,7 +332,6 @@ static struct vmbus_channel *alloc_channel(void)
 	if (!channel)
 		return NULL;

-	spin_lock_init(&channel->inbound_lock);
 	spin_lock_init(&channel->lock);

 	INIT_LIST_HEAD(&channel->sc_list);
@@ -375,6 +375,7 @@ int enclosure_add_device(struct enclosure_device *edev, int component,
 		      struct device *dev)
 {
 	struct enclosure_component *cdev;
+	int err;

 	if (!edev || component >= edev->components)
 		return -EINVAL;
@@ -384,12 +385,17 @@ int enclosure_add_device(struct enclosure_device *edev, int component,
 	if (cdev->dev == dev)
 		return -EEXIST;

-	if (cdev->dev)
+	if (cdev->dev) {
 		enclosure_remove_links(cdev);
-
-	put_device(cdev->dev);
+		put_device(cdev->dev);
+	}
 	cdev->dev = get_device(dev);
-	return enclosure_add_links(cdev);
+	err = enclosure_add_links(cdev);
+	if (err) {
+		put_device(cdev->dev);
+		cdev->dev = NULL;
+	}
+	return err;
 }
 EXPORT_SYMBOL_GPL(enclosure_add_device);
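The ses fix above is an instance of a common kernel idiom: take the device reference, and on a later failure undo exactly that reference and reset the slot so the operation can be retried cleanly. A compressed sketch of the idiom with hypothetical names (component_attach() and make_sysfs_links() are not from this commit):

    #include <linux/device.h>

    struct component {
        struct device *dev;
    };

    extern int make_sysfs_links(struct component *c);  /* hypothetical step that may fail */

    static int component_attach(struct component *c, struct device *dev)
    {
        int err;

        c->dev = get_device(dev);       /* take the reference first */
        err = make_sysfs_links(c);
        if (err) {
            put_device(c->dev);         /* undo exactly what was taken */
            c->dev = NULL;              /* leave the slot reusable */
        }
        return err;
    }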
@@ -296,8 +296,8 @@ NCR_700_detect(struct scsi_host_template *tpnt,
 	if(tpnt->sdev_attrs == NULL)
 		tpnt->sdev_attrs = NCR_700_dev_attrs;

-	memory = dma_alloc_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
-				       &pScript, GFP_KERNEL);
+	memory = dma_alloc_attrs(hostdata->dev, TOTAL_MEM_SIZE, &pScript,
+				 GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
 	if(memory == NULL) {
 		printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
 		return NULL;
@@ -410,8 +410,8 @@ NCR_700_release(struct Scsi_Host *host)
 	struct NCR_700_Host_Parameters *hostdata =
 		(struct NCR_700_Host_Parameters *)host->hostdata[0];

-	dma_free_noncoherent(hostdata->dev, TOTAL_MEM_SIZE,
-			     hostdata->script, hostdata->pScript);
+	dma_free_attrs(hostdata->dev, TOTAL_MEM_SIZE, hostdata->script,
+		       hostdata->pScript, DMA_ATTR_NON_CONSISTENT);
 	return 1;
 }
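These two hunks swap the dedicated dma_alloc_noncoherent()/dma_free_noncoherent() pair for dma_alloc_attrs()/dma_free_attrs() with DMA_ATTR_NON_CONSISTENT, which expressed the same non-coherent request through the generic attrs API of that kernel era. A minimal sketch of the pairing, with a hypothetical buffer size:

    #include <linux/dma-mapping.h>

    #define DRV_BUF_SIZE	PAGE_SIZE	/* illustrative size */

    static void *drv_alloc_script(struct device *dev, dma_addr_t *dma)
    {
        /*
         * DMA_ATTR_NON_CONSISTENT asks for memory the driver will
         * synchronize explicitly, matching old dma_alloc_noncoherent().
         */
        return dma_alloc_attrs(dev, DRV_BUF_SIZE, dma, GFP_KERNEL,
                               DMA_ATTR_NON_CONSISTENT);
    }

    static void drv_free_script(struct device *dev, void *cpu, dma_addr_t dma)
    {
        /* Free with the same attrs that were used at allocation time. */
        dma_free_attrs(dev, DRV_BUF_SIZE, cpu, dma, DMA_ATTR_NON_CONSISTENT);
    }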
@@ -47,17 +47,6 @@ config SCSI_NETLINK
 	default	n
 	depends on NET

-config SCSI_MQ_DEFAULT
-	bool "SCSI: use blk-mq I/O path by default"
-	depends on SCSI
-	---help---
-	  This option enables the new blk-mq based I/O path for SCSI
-	  devices by default.  With the option the scsi_mod.use_blk_mq
-	  module/boot option defaults to Y, without it to N, but it can
-	  still be overridden either way.
-
-	  If unsure say N.
-
 config SCSI_PROC_FS
 	bool "legacy /proc/scsi/ support"
 	depends on SCSI && PROC_FS
@@ -2071,20 +2071,15 @@ int aac_get_adapter_info(struct aac_dev* dev)
 		expose_physicals = 0;
 	}

-	if(dev->dac_support != 0) {
-		if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(64)) &&
-			!pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(64))) {
+	if (dev->dac_support) {
+		if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(64))) {
 			if (!dev->in_reset)
-				printk(KERN_INFO"%s%d: 64 Bit DAC enabled\n",
-					dev->name, dev->id);
-		} else if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(32)) &&
-			!pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32))) {
-			printk(KERN_INFO"%s%d: DMA mask set failed, 64 Bit DAC disabled\n",
-				dev->name, dev->id);
+				dev_info(&dev->pdev->dev, "64 Bit DAC enabled\n");
+		} else if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(32))) {
+			dev_info(&dev->pdev->dev, "DMA mask set failed, 64 Bit DAC disabled\n");
 			dev->dac_support = 0;
 		} else {
-			printk(KERN_WARNING"%s%d: No suitable DMA available.\n",
-				dev->name, dev->id);
+			dev_info(&dev->pdev->dev, "No suitable DMA available\n");
 			rcode = -ENOMEM;
 		}
 	}
@@ -97,7 +97,7 @@ enum {
 #define PMC_GLOBAL_INT_BIT0		0x00000001

 #ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 50792
+# define AAC_DRIVER_BUILD 50834
 # define AAC_DRIVER_BRANCH "-custom"
 #endif
 #define MAXIMUM_NUM_CONTAINERS	32
@@ -415,6 +415,7 @@ struct aac_ciss_identify_pd {
  * These macros convert from physical channels to virtual channels
  */
 #define CONTAINER_CHANNEL		(0)
+#define NATIVE_CHANNEL			(1)
 #define CONTAINER_TO_CHANNEL(cont)	(CONTAINER_CHANNEL)
 #define CONTAINER_TO_ID(cont)		(cont)
 #define CONTAINER_TO_LUN(cont)		(0)
@@ -423,7 +424,6 @@ struct aac_ciss_identify_pd {
 #define PMC_DEVICE_S6			0x28b
 #define PMC_DEVICE_S7			0x28c
 #define PMC_DEVICE_S8			0x28d
-#define PMC_DEVICE_S9			0x28f

 #define aac_phys_to_logical(x)  ((x)+1)
 #define aac_logical_to_phys(x)  ((x)?(x)-1:0)
@@ -2377,6 +2377,7 @@ struct revision
 #define SOFT_RESET_TIME			60

+

 struct aac_common
 {
 	/*
@@ -2489,6 +2490,8 @@ struct aac_hba_info {
 #define IOP_RESET_ALWAYS		0x00001001
 #define RE_INIT_ADAPTER			0x000000ee

+#define IOP_SRC_RESET_MASK		0x00000100
+
 /*
  *	Adapter Status Register
  *
@@ -2512,6 +2515,7 @@ struct aac_hba_info {

 #define SELF_TEST_FAILED		0x00000004
 #define MONITOR_PANIC			0x00000020
+#define KERNEL_BOOTING			0x00000040
 #define KERNEL_UP_AND_RUNNING		0x00000080
 #define KERNEL_PANIC			0x00000100
 #define FLASH_UPD_PENDING		0x00002000
@@ -2684,6 +2688,18 @@ int aac_probe_container(struct aac_dev *dev, int cid);
 int _aac_rx_init(struct aac_dev *dev);
 int aac_rx_select_comm(struct aac_dev *dev, int comm);
 int aac_rx_deliver_producer(struct fib * fib);
+
+static inline int aac_is_src(struct aac_dev *dev)
+{
+	u16 device = dev->pdev->device;
+
+	if (device == PMC_DEVICE_S6 ||
+		device == PMC_DEVICE_S7 ||
+		device == PMC_DEVICE_S8)
+		return 1;
+	return 0;
+}
+
 char * get_container_type(unsigned type);
 extern int numacb;
 extern char aac_driver_version[];
@@ -668,7 +668,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 			goto cleanup;
 		}

-		p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA);
+		p = kmalloc(sg_count[i], GFP_KERNEL);
 		if (!p) {
 			rcode = -ENOMEM;
 			goto cleanup;
@@ -732,8 +732,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 				rcode = -EINVAL;
 				goto cleanup;
 			}
-			/* Does this really need to be GFP_DMA? */
-			p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA);
+
+			p = kmalloc(sg_count[i], GFP_KERNEL);
 			if(!p) {
 				dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
 					  sg_count[i], i, upsg->count));
@@ -788,8 +788,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 				rcode = -EINVAL;
 				goto cleanup;
 			}
-			/* Does this really need to be GFP_DMA? */
-			p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA);
+
+			p = kmalloc(sg_count[i], GFP_KERNEL);
 			if(!p) {
 				dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
 					  sg_count[i], i, usg->count));
@@ -845,8 +845,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 				rcode = -EINVAL;
 				goto cleanup;
 			}
-			/* Does this really need to be GFP_DMA? */
-			p = kmalloc(sg_count[i], GFP_KERNEL|__GFP_DMA);
+			p = kmalloc(sg_count[i], GFP_KERNEL|GFP_DMA32);
 			if (!p) {
 				dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
 					  sg_count[i], i, usg->count));
@@ -887,7 +886,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 				rcode = -EINVAL;
 				goto cleanup;
 			}
-			p = kmalloc(sg_count[i], GFP_KERNEL);
+			p = kmalloc(sg_count[i], GFP_KERNEL|GFP_DMA32);
 			if (!p) {
 				dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
 					  sg_count[i], i, upsg->count));
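The pattern across these kmalloc hunks is worth noting: __GFP_DMA restricts an allocation to the legacy 16MB ISA DMA zone, far tighter than this hardware needs, while GFP_DMA32 merely keeps the buffer below 4GiB for paths limited to 32-bit DMA. A hedged sketch of the distinction (hypothetical helper, not from this commit):

    #include <linux/slab.h>
    #include <linux/types.h>

    /*
     * Hypothetical illustration: pick an allocation constraint that matches
     * what the device can actually address, and no tighter.
     */
    static void *alloc_dma_buf(size_t len, bool dev_is_32bit)
    {
        if (dev_is_32bit)
            return kmalloc(len, GFP_KERNEL | GFP_DMA32);  /* below 4 GiB */

        return kmalloc(len, GFP_KERNEL);  /* no zone restriction needed */
    }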
@@ -950,12 +949,15 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 			&((struct aac_native_hba *)srbfib->hw_fib_va)->resp.err;
 		struct aac_srb_reply reply;

+		memset(&reply, 0, sizeof(reply));
 		reply.status = ST_OK;
 		if (srbfib->flags & FIB_CONTEXT_FLAG_FASTRESP) {
 			/* fast response */
 			reply.srb_status = SRB_STATUS_SUCCESS;
 			reply.scsi_status = 0;
 			reply.data_xfer_length = byte_count;
+			reply.sense_data_size = 0;
+			memset(reply.sense_data, 0, AAC_SENSE_BUFFERSIZE);
 		} else {
 			reply.srb_status = err->service_response;
 			reply.scsi_status = err->status;
@@ -1019,6 +1021,7 @@ static int aac_get_hba_info(struct aac_dev *dev, void __user *arg)
 {
 	struct aac_hba_info hbainfo;

+	memset(&hbainfo, 0, sizeof(hbainfo));
 	hbainfo.adapter_number		= (u8) dev->id;
 	hbainfo.system_io_bus_number	= dev->pdev->bus->number;
 	hbainfo.device_number		= (dev->pdev->devfn >> 3);
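Both memset() additions close the same class of bug: a struct assembled on the kernel stack contains padding bytes and possibly unwritten fields, and copying it to user space leaks stack contents. A minimal sketch of the rule, with a hypothetical reply struct and handler:

    #include <linux/string.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    /* Hypothetical reply layout with implicit padding after 'flag'. */
    struct drv_reply {
        u8  flag;
        u64 value;   /* compiler inserts 7 bytes of padding before this */
    };

    static long drv_fill_reply(void __user *arg)
    {
        struct drv_reply r;

        memset(&r, 0, sizeof(r));   /* zero padding and any skipped fields */
        r.flag = 1;
        r.value = 42;

        return copy_to_user(arg, &r, sizeof(r)) ? -EFAULT : 0;
    }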
@@ -53,11 +53,8 @@ static inline int aac_is_msix_mode(struct aac_dev *dev)
 {
 	u32 status = 0;

-	if (dev->pdev->device == PMC_DEVICE_S6 ||
-		dev->pdev->device == PMC_DEVICE_S7 ||
-		dev->pdev->device == PMC_DEVICE_S8) {
+	if (aac_is_src(dev))
 		status = src_readl(dev, MUnit.OMR);
-	}
 	return (status & AAC_INT_MODE_MSIX);
 }
@@ -325,9 +322,7 @@ int aac_send_shutdown(struct aac_dev * dev)
 	/* FIB should be freed only after getting the response from the F/W */
 	if (status != -ERESTARTSYS)
 		aac_fib_free(fibctx);
-	if ((dev->pdev->device == PMC_DEVICE_S7 ||
-	     dev->pdev->device == PMC_DEVICE_S8 ||
-	     dev->pdev->device == PMC_DEVICE_S9) &&
+	if (aac_is_src(dev) &&
 	    dev->msi_enabled)
 		aac_set_intx_mode(dev);
 	return status;
@@ -583,9 +578,7 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
 		dev->max_fib_size = status[1] & 0xFFE0;
 		host->sg_tablesize = status[2] >> 16;
 		dev->sg_tablesize = status[2] & 0xFFFF;
-		if (dev->pdev->device == PMC_DEVICE_S7 ||
-		    dev->pdev->device == PMC_DEVICE_S8 ||
-		    dev->pdev->device == PMC_DEVICE_S9) {
+		if (aac_is_src(dev)) {
 			if (host->can_queue > (status[3] >> 16) -
 					AAC_NUM_MGT_FIB)
 				host->can_queue = (status[3] >> 16) -
|
||||
pr_warn("numacb=%d ignored\n", numacb);
|
||||
}
|
||||
|
||||
if (dev->pdev->device == PMC_DEVICE_S6 ||
|
||||
dev->pdev->device == PMC_DEVICE_S7 ||
|
||||
dev->pdev->device == PMC_DEVICE_S8 ||
|
||||
dev->pdev->device == PMC_DEVICE_S9)
|
||||
if (aac_is_src(dev))
|
||||
aac_define_int_mode(dev);
|
||||
/*
|
||||
* Ok now init the communication subsystem
|
||||
|
@@ -803,11 +803,11 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
 	if (aac_check_eeh_failure(dev))
 		return -EFAULT;

-	/* Only set for first known interruptable command */
-	if (down_interruptible(&fibptr->event_wait)) {
+	fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
+	if (down_interruptible(&fibptr->event_wait))
 		fibptr->done = 2;
-		up(&fibptr->event_wait);
-	}
+	fibptr->flags &= ~(FIB_CONTEXT_FLAG_WAIT);

 	spin_lock_irqsave(&fibptr->event_lock, flags);
 	if ((fibptr->done == 0) || (fibptr->done == 2)) {
 		fibptr->done = 2; /* Tell interrupt we aborted */
@@ -1513,6 +1513,8 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
 	struct scsi_cmnd *command_list;
 	int jafo = 0;
 	int bled;
+	u64 dmamask;
+	int num_of_fibs = 0;

 	/*
 	 *	Assumptions:
@@ -1546,10 +1548,20 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
 	/*
 	 *	Loop through the fibs, close the synchronous FIBS
 	 */
-	for (retval = 1, index = 0; index < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); index++) {
+	retval = 1;
+	num_of_fibs = aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB;
+	for (index = 0; index < num_of_fibs; index++) {
+
 		struct fib *fib = &aac->fibs[index];
-		if (!(fib->hw_fib_va->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
-		   (fib->hw_fib_va->header.XferState & cpu_to_le32(ResponseExpected))) {
+		__le32 XferState = fib->hw_fib_va->header.XferState;
+		bool is_response_expected = false;
+
+		if (!(XferState & cpu_to_le32(NoResponseExpected | Async)) &&
+		   (XferState & cpu_to_le32(ResponseExpected)))
+			is_response_expected = true;
+
+		if (is_response_expected
+		  || fib->flags & FIB_CONTEXT_FLAG_WAIT) {
 			unsigned long flagv;
 			spin_lock_irqsave(&fib->event_lock, flagv);
 			up(&fib->event_wait);
@@ -1580,21 +1592,27 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
 	aac_free_irq(aac);
 	kfree(aac->fsa_dev);
 	aac->fsa_dev = NULL;

+	dmamask = DMA_BIT_MASK(32);
 	quirks = aac_get_driver_ident(index)->quirks;
-	if (quirks & AAC_QUIRK_31BIT) {
-		if (((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(31)))) ||
-		  ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_BIT_MASK(31)))))
-			goto out;
-	} else {
-		if (((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(32)))) ||
-		  ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_BIT_MASK(32)))))
-			goto out;
-	}
+	if (quirks & AAC_QUIRK_31BIT)
+		retval = pci_set_dma_mask(aac->pdev, dmamask);
+	else if (!(quirks & AAC_QUIRK_SRC))
+		retval = pci_set_dma_mask(aac->pdev, dmamask);
+	else
+		retval = pci_set_consistent_dma_mask(aac->pdev, dmamask);
+
+	if (quirks & AAC_QUIRK_31BIT && !retval) {
+		dmamask = DMA_BIT_MASK(31);
+		retval = pci_set_consistent_dma_mask(aac->pdev, dmamask);
+	}
+
+	if (retval)
+		goto out;
+
 	if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
 		goto out;
+
 	if (quirks & AAC_QUIRK_31BIT)
 		if ((retval = pci_set_dma_mask(aac->pdev, DMA_BIT_MASK(32))))
 			goto out;
+
 	if (jafo) {
 		aac->thread = kthread_run(aac_command_thread, aac, "%s",
 					  aac->name);
@@ -1768,8 +1786,6 @@ int aac_check_health(struct aac_dev * aac)
 	int BlinkLED;
 	unsigned long time_now, flagv = 0;
 	struct list_head * entry;
-	struct Scsi_Host * host;
-	int bled;

 	/* Extending the scope of fib_lock slightly to protect aac->in_reset */
 	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
@@ -1881,19 +1897,6 @@ int aac_check_health(struct aac_dev * aac)

 	printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);

-	if (!aac_check_reset || ((aac_check_reset == 1) &&
-		(aac->supplement_adapter_info.supported_options2 &
-			AAC_OPTION_IGNORE_RESET)))
-		goto out;
-	host = aac->scsi_host_ptr;
-	if (aac->thread->pid != current->pid)
-		spin_lock_irqsave(host->host_lock, flagv);
-	bled = aac_check_reset != 1 ? 1 : 0;
-	_aac_reset_adapter(aac, bled, IOP_HWSOFT_RESET);
-	if (aac->thread->pid != current->pid)
-		spin_unlock_irqrestore(host->host_lock, flagv);
-	return BlinkLED;
-
 out:
 	aac->in_reset = 0;
 	return BlinkLED;
@@ -2483,7 +2486,7 @@ int aac_command_thread(void *data)
 			if ((time_before(next_check_jiffies,next_jiffies))
 			 && ((difference = next_check_jiffies - jiffies) <= 0)) {
 				next_check_jiffies = next_jiffies;
-				if (aac_check_health(dev) == 0) {
+				if (aac_adapter_check_health(dev) == 0) {
 					difference = ((long)(unsigned)check_interval)
 						   * HZ;
 					next_check_jiffies = jiffies + difference;
@@ -2496,7 +2499,7 @@ int aac_command_thread(void *data)
 				int ret;

 				/* Don't even try to talk to adapter if its sick */
-				ret = aac_check_health(dev);
+				ret = aac_adapter_check_health(dev);
 				if (ret || !dev->queues)
 					break;
 				next_check_jiffies = jiffies
@@ -2588,10 +2591,7 @@ void aac_free_irq(struct aac_dev *dev)
 	int cpu;

 	cpu = cpumask_first(cpu_online_mask);
-	if (dev->pdev->device == PMC_DEVICE_S6 ||
-	    dev->pdev->device == PMC_DEVICE_S7 ||
-	    dev->pdev->device == PMC_DEVICE_S8 ||
-	    dev->pdev->device == PMC_DEVICE_S9) {
+	if (aac_is_src(dev)) {
 		if (dev->max_msix > 1) {
 			for (i = 0; i < dev->max_msix; i++)
 				free_irq(pci_irq_vector(dev->pdev, i),
@@ -405,17 +405,23 @@ static int aac_slave_configure(struct scsi_device *sdev)
 	int chn, tid;
 	unsigned int depth = 0;
 	unsigned int set_timeout = 0;
+	bool set_qd_dev_type = false;
+	u8 devtype = 0;

 	chn = aac_logical_to_phys(sdev_channel(sdev));
 	tid = sdev_id(sdev);
-	if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS &&
-		aac->hba_map[chn][tid].devtype == AAC_DEVTYPE_NATIVE_RAW) {
-		depth = aac->hba_map[chn][tid].qd_limit;
+	if (chn < AAC_MAX_BUSES && tid < AAC_MAX_TARGETS && aac->sa_firmware) {
+		devtype = aac->hba_map[chn][tid].devtype;
+
+		if (devtype == AAC_DEVTYPE_NATIVE_RAW)
+			depth = aac->hba_map[chn][tid].qd_limit;
+		else if (devtype == AAC_DEVTYPE_ARC_RAW)
+			set_qd_dev_type = true;
+
 		set_timeout = 1;
 		goto common_config;
 	}


 	if (aac->jbod && (sdev->type == TYPE_DISK))
 		sdev->removable = 1;
@@ -466,9 +472,26 @@ static int aac_slave_configure(struct scsi_device *sdev)
 			++num_lsu;

 		depth = (host->can_queue - num_one) / num_lsu;
+
+		if (sdev_channel(sdev) != NATIVE_CHANNEL)
+			goto common_config;
+
+		set_qd_dev_type = true;
+
 	}
+
+common_config:
+
+	/*
+	 * Check if SATA drive
+	 */
+	if (set_qd_dev_type) {
+		if (strncmp(sdev->vendor, "ATA", 3) == 0)
+			depth = 32;
+		else
+			depth = 64;
+	}
+
 	/*
 	 * Firmware has an individual device recovery time typically
 	 * of 35 seconds, give us a margin.
|
||||
return aac_do_ioctl(dev, cmd, arg);
|
||||
}
|
||||
|
||||
static int get_num_of_incomplete_fibs(struct aac_dev *aac)
|
||||
{
|
||||
|
||||
unsigned long flags;
|
||||
struct scsi_device *sdev = NULL;
|
||||
struct Scsi_Host *shost = aac->scsi_host_ptr;
|
||||
struct scsi_cmnd *scmnd = NULL;
|
||||
struct device *ctrl_dev;
|
||||
|
||||
int mlcnt = 0;
|
||||
int llcnt = 0;
|
||||
int ehcnt = 0;
|
||||
int fwcnt = 0;
|
||||
int krlcnt = 0;
|
||||
|
||||
__shost_for_each_device(sdev, shost) {
|
||||
spin_lock_irqsave(&sdev->list_lock, flags);
|
||||
list_for_each_entry(scmnd, &sdev->cmd_list, list) {
|
||||
switch (scmnd->SCp.phase) {
|
||||
case AAC_OWNER_FIRMWARE:
|
||||
fwcnt++;
|
||||
break;
|
||||
case AAC_OWNER_ERROR_HANDLER:
|
||||
ehcnt++;
|
||||
break;
|
||||
case AAC_OWNER_LOWLEVEL:
|
||||
llcnt++;
|
||||
break;
|
||||
case AAC_OWNER_MIDLEVEL:
|
||||
mlcnt++;
|
||||
break;
|
||||
default:
|
||||
krlcnt++;
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&sdev->list_lock, flags);
|
||||
}
|
||||
|
||||
ctrl_dev = &aac->pdev->dev;
|
||||
|
||||
dev_info(ctrl_dev, "outstanding cmd: midlevel-%d\n", mlcnt);
|
||||
dev_info(ctrl_dev, "outstanding cmd: lowlevel-%d\n", llcnt);
|
||||
dev_info(ctrl_dev, "outstanding cmd: error handler-%d\n", ehcnt);
|
||||
dev_info(ctrl_dev, "outstanding cmd: firmware-%d\n", fwcnt);
|
||||
dev_info(ctrl_dev, "outstanding cmd: kernel-%d\n", krlcnt);
|
||||
|
||||
return mlcnt + llcnt + ehcnt + fwcnt;
|
||||
}
|
||||
|
||||
static int aac_eh_abort(struct scsi_cmnd* cmd)
|
||||
{
|
||||
struct scsi_device * dev = cmd->device;
|
||||
@@ -661,8 +734,8 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
 				  (fib_callback) aac_hba_callback,
 				  (void *) cmd);

-		/* Wait up to 2 minutes for completion */
-		for (count = 0; count < 120; ++count) {
+		/* Wait up to 15 secs for completion */
+		for (count = 0; count < 15; ++count) {
 			if (cmd->SCp.sent_command) {
 				ret = SUCCESS;
 				break;
@@ -754,6 +827,12 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
 	int count;
 	u32 bus, cid;
 	int ret = FAILED;
+	int status = 0;
+	__le32 supported_options2 = 0;
+	bool is_mu_reset;
+	bool is_ignore_reset;
+	bool is_doorbell_reset;
+

 	bus = aac_logical_to_phys(scmd_channel(cmd));
 	cid = scmd_id(cmd);
@@ -817,8 +896,8 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
 				  (fib_callback) aac_hba_callback,
 				  (void *) cmd);

-		/* Wait up to 2 minutes for completion */
-		for (count = 0; count < 120; ++count) {
+		/* Wait up to 15 seconds for completion */
+		for (count = 0; count < 15; ++count) {
 			if (cmd->SCp.sent_command) {
 				ret = SUCCESS;
 				break;
@@ -826,12 +905,10 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
 			msleep(1000);
 		}

-		if (ret != SUCCESS)
-			pr_err("%s: Host adapter reset request timed out\n",
-			AAC_DRIVERNAME);
+		if (ret == SUCCESS)
+			goto out;
+
 	} else {
 		struct scsi_cmnd *command;
 		unsigned long flags;

 		/* Mark the assoc. FIB to not complete, eh handler does this */
 		for (count = 0;
@@ -846,68 +923,42 @@ static int aac_eh_reset(struct scsi_cmnd* cmd)
 				cmd->SCp.phase = AAC_OWNER_ERROR_HANDLER;
 			}
 		}
 	}

-	pr_err("%s: Host adapter reset request. SCSI hang ?\n",
-					AAC_DRIVERNAME);
+	pr_err("%s: Host adapter reset request. SCSI hang ?\n", AAC_DRIVERNAME);

-	count = aac_check_health(aac);
-	if (count)
-		return count;
 	/*
-	 * Wait for all commands to complete to this specific
-	 * target (block maximum 60 seconds).
+	 * Check the health of the controller
 	 */
-	for (count = 60; count; --count) {
-		int active = aac->in_reset;
-
-		if (active == 0)
-		__shost_for_each_device(dev, host) {
-			spin_lock_irqsave(&dev->list_lock, flags);
-			list_for_each_entry(command, &dev->cmd_list,
-				list) {
-				if ((command != cmd) &&
-				    (command->SCp.phase ==
-				    AAC_OWNER_FIRMWARE)) {
-					active++;
-					break;
-				}
-			}
-			spin_unlock_irqrestore(&dev->list_lock, flags);
-			if (active)
-				break;
-
-		}
-		/*
-		 * We can exit If all the commands are complete
-		 */
-		if (active == 0)
-			return SUCCESS;
-		ssleep(1);
-	}
-	pr_err("%s: SCSI bus appears hung\n", AAC_DRIVERNAME);
+	status = aac_adapter_check_health(aac);
+	if (status)
+		dev_err(&aac->pdev->dev, "Adapter health - %d\n", status);
+
+	count = get_num_of_incomplete_fibs(aac);
+	if (count == 0)
+		return SUCCESS;
+
+	pr_err("%s: SCSI bus appears hung\n", AAC_DRIVERNAME);

 	/*
+	 * Check if reset is supported by the firmware
+	 */
+	supported_options2 = aac->supplement_adapter_info.supported_options2;
+	is_mu_reset = supported_options2 & AAC_OPTION_MU_RESET;
+	is_doorbell_reset = supported_options2 & AAC_OPTION_DOORBELL_RESET;
+	is_ignore_reset = supported_options2 & AAC_OPTION_IGNORE_RESET;
+	/*
 	 * This adapter needs a blind reset, only do so for
 	 * Adapters that support a register, instead of a commanded,
 	 * reset.
 	 */
-	if (((aac->supplement_adapter_info.supported_options2 &
-	  AAC_OPTION_MU_RESET) ||
-	  (aac->supplement_adapter_info.supported_options2 &
-	  AAC_OPTION_DOORBELL_RESET)) &&
-	  aac_check_reset &&
-	  ((aac_check_reset != 1) ||
-	   !(aac->supplement_adapter_info.supported_options2 &
-	    AAC_OPTION_IGNORE_RESET))) {
+	if ((is_mu_reset || is_doorbell_reset)
+	 && aac_check_reset
+	 && (aac_check_reset != -1 || !is_ignore_reset)) {
 		/* Bypass wait for command quiesce */
 		aac_reset_adapter(aac, 2, IOP_HWSOFT_RESET);
 	}
 	ret = SUCCESS;
-	}

 	/*
 	 * Cause an immediate retry of the command with a ten second delay
 	 * after successful tur
 	 */
+
+out:
 	return ret;
 }
@@ -1365,10 +1416,7 @@ static void __aac_shutdown(struct aac_dev * aac)
 		kthread_stop(aac->thread);
 	}
 	aac_adapter_disable_int(aac);
-	if (aac->pdev->device == PMC_DEVICE_S6 ||
-	    aac->pdev->device == PMC_DEVICE_S7 ||
-	    aac->pdev->device == PMC_DEVICE_S8 ||
-	    aac->pdev->device == PMC_DEVICE_S9) {
+	if (aac_is_src(aac)) {
 		if (aac->max_msix > 1) {
 			for (i = 0; i < aac->max_msix; i++) {
 				free_irq(pci_irq_vector(aac->pdev, i),
@@ -1403,6 +1451,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	int error = -ENODEV;
 	int unique_id = 0;
 	u64 dmamask;
+	int mask_bits = 0;
 	extern int aac_sync_mode;

 	/*
@@ -1426,18 +1475,32 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto out;
 	error = -ENODEV;

+	if (!(aac_drivers[index].quirks & AAC_QUIRK_SRC)) {
+		error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (error) {
+			dev_err(&pdev->dev, "PCI 32 BIT dma mask set failed");
+			goto out_disable_pdev;
+		}
+	}
+
 	/*
 	 * If the quirk31 bit is set, the adapter needs adapter
 	 * to driver communication memory to be allocated below 2gig
 	 */
-	if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
+	if (aac_drivers[index].quirks & AAC_QUIRK_31BIT) {
 		dmamask = DMA_BIT_MASK(31);
-	else
+		mask_bits = 31;
+	} else {
 		dmamask = DMA_BIT_MASK(32);
+		mask_bits = 32;
+	}

-	if (pci_set_dma_mask(pdev, dmamask) ||
-			pci_set_consistent_dma_mask(pdev, dmamask))
+	error = pci_set_consistent_dma_mask(pdev, dmamask);
+	if (error) {
+		dev_err(&pdev->dev, "PCI %d B consistent dma mask set failed\n"
+				, mask_bits);
 		goto out_disable_pdev;
+	}

 	pci_set_master(pdev);
@@ -1501,15 +1564,6 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto out_deinit;
 	}

-	/*
-	 * If we had set a smaller DMA mask earlier, set it to 4gig
-	 * now since the adapter can dma data to at least a 4gig
-	 * address space.
-	 */
-	if (aac_drivers[index].quirks & AAC_QUIRK_31BIT)
-		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
-			goto out_deinit;
-
 	aac->maximum_num_channels = aac_drivers[index].channels;
 	error = aac_get_adapter_info(aac);
 	if (error < 0)
@@ -1627,9 +1681,7 @@ static int aac_acquire_resources(struct aac_dev *dev)
 	aac_adapter_enable_int(dev);


-	if ((dev->pdev->device == PMC_DEVICE_S7 ||
-	     dev->pdev->device == PMC_DEVICE_S8 ||
-	     dev->pdev->device == PMC_DEVICE_S9))
+	if (aac_is_src(dev))
 		aac_define_int_mode(dev);

 	if (dev->msi_enabled)
@@ -694,33 +694,52 @@ static void aac_dump_fw_fib_iop_reset(struct aac_dev *dev)
 			0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
 }

-static void aac_send_iop_reset(struct aac_dev *dev, int bled)
+static bool aac_is_ctrl_up_and_running(struct aac_dev *dev)
 {
-	u32 var, reset_mask;
+	bool ctrl_up = true;
+	unsigned long status, start;
+	bool is_up = false;
+
+	start = jiffies;
+	do {
+		schedule();
+		status = src_readl(dev, MUnit.OMR);
+
+		if (status == 0xffffffff)
+			status = 0;
+
+		if (status & KERNEL_BOOTING) {
+			start = jiffies;
+			continue;
+		}
+
+		if (time_after(jiffies, start+HZ*SOFT_RESET_TIME)) {
+			ctrl_up = false;
+			break;
+		}
+
+		is_up = status & KERNEL_UP_AND_RUNNING;
+
+	} while (!is_up);
+
+	return ctrl_up;
+}
+
+static void aac_notify_fw_of_iop_reset(struct aac_dev *dev)
+{
+	aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 0, 0, 0, 0, 0, 0, NULL,
+			     NULL, NULL, NULL, NULL);
+}
+
+static void aac_send_iop_reset(struct aac_dev *dev)
+{
 	aac_dump_fw_fib_iop_reset(dev);

-	bled = aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS,
-				    0, 0, 0, 0, 0, 0, &var,
-				    &reset_mask, NULL, NULL, NULL);
-
-	if ((bled || var != 0x00000001) && !dev->doorbell_mask)
-		bled = -EINVAL;
-	else if (dev->doorbell_mask) {
-		reset_mask = dev->doorbell_mask;
-		bled = 0;
-		var = 0x00000001;
-	}
+	aac_notify_fw_of_iop_reset(dev);

 	aac_set_intx_mode(dev);

-	if (!bled && (dev->supplement_adapter_info.supported_options2 &
-	    AAC_OPTION_DOORBELL_RESET)) {
-		src_writel(dev, MUnit.IDR, reset_mask);
-	} else {
-		src_writel(dev, MUnit.IDR, 0x100);
-	}
-	msleep(30000);
+	src_writel(dev, MUnit.IDR, IOP_SRC_RESET_MASK);
 }

 static void aac_send_hardware_soft_reset(struct aac_dev *dev)
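aac_is_ctrl_up_and_running() above is a classic jiffies-based poll: the deadline is restarted while the firmware reports it is still booting, and the wait fails once SOFT_RESET_TIME seconds pass without progress. The same loop shape, reduced to a sketch with a hypothetical readiness register (none of these names are from the driver):

    #include <linux/jiffies.h>
    #include <linux/sched.h>
    #include <linux/types.h>

    #define POLL_TIMEOUT_SECS	60	/* illustrative budget */

    #define STATUS_BOOTING	0x1	/* hypothetical status bits */
    #define STATUS_READY	0x2
    extern u32 read_status_reg(void);	/* hypothetical register reader */

    static bool wait_until_ready(void)
    {
        unsigned long start = jiffies;
        u32 status;

        do {
            schedule();		/* be a polite poller */
            status = read_status_reg();

            if (status & STATUS_BOOTING) {
                start = jiffies;	/* progress: restart the clock */
                continue;
            }
            if (time_after(jiffies, start + POLL_TIMEOUT_SECS * HZ))
                return false;	/* no progress within the budget */
        } while (!(status & STATUS_READY));

        return true;
    }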
@@ -735,14 +754,14 @@ static void aac_send_hardware_soft_reset(struct aac_dev *dev)

 static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)
 {
-	unsigned long status, start;
+	bool is_ctrl_up;
+	int ret = 0;

 	if (bled < 0)
 		goto invalid_out;

 	if (bled)
-		pr_err("%s%d: adapter kernel panic'd %x.\n",
-		       dev->name, dev->id, bled);
+		dev_err(&dev->pdev->dev, "adapter kernel panic'd %x.\n", bled);

 	/*
	 * When there is a BlinkLED, IOP_RESET has not effect
@@ -752,48 +771,55 @@ static int aac_src_restart_adapter(struct aac_dev *dev, int bled, u8 reset_type)

 	dev->a_ops.adapter_enable_int = aac_src_disable_interrupt;

-	switch (reset_type) {
-	case IOP_HWSOFT_RESET:
-		aac_send_iop_reset(dev, bled);
+	dev_err(&dev->pdev->dev, "Controller reset type is %d\n", reset_type);
+
+	if (reset_type & HW_IOP_RESET) {
+		dev_info(&dev->pdev->dev, "Issuing IOP reset\n");
+		aac_send_iop_reset(dev);
+
 		/*
-		 * Check to see if KERNEL_UP_AND_RUNNING
-		 * If !KERNEL_UP_AND_RUNNING issue HW Soft Reset
+		 * Wait for the adapter to be up and running.
+		 * Creates a delay or wait till up and running comes thru
 		 */
-		status = src_readl(dev, MUnit.OMR);
-		if (dev->sa_firmware
-		 && !(status & KERNEL_UP_AND_RUNNING)) {
-			start = jiffies;
-			do {
-				status = src_readl(dev, MUnit.OMR);
-				if (time_after(jiffies,
-				  start+HZ*SOFT_RESET_TIME)) {
-					aac_send_hardware_soft_reset(dev);
-					start = jiffies;
-				}
-			} while (!(status & KERNEL_UP_AND_RUNNING));
+		is_ctrl_up = aac_is_ctrl_up_and_running(dev);
+		if (!is_ctrl_up)
+			dev_err(&dev->pdev->dev, "IOP reset failed\n");
+		else {
+			dev_info(&dev->pdev->dev, "IOP reset succeded\n");
+			goto set_startup;
 		}
-		break;
-	case HW_SOFT_RESET:
-		if (dev->sa_firmware) {
-			aac_send_hardware_soft_reset(dev);
-			aac_set_intx_mode(dev);
-		}
-		break;
-	default:
-		aac_send_iop_reset(dev, bled);
-		break;
 	}

-invalid_out:
-
-	if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC)
-		return -ENODEV;
+	if (!dev->sa_firmware) {
+		dev_err(&dev->pdev->dev, "ARC Reset attempt failed\n");
+		ret = -ENODEV;
+		goto out;
+	}

+	if (reset_type & HW_SOFT_RESET) {
+		dev_info(&dev->pdev->dev, "Issuing SOFT reset\n");
+		aac_send_hardware_soft_reset(dev);
+		dev->msi_enabled = 0;
+
+		is_ctrl_up = aac_is_ctrl_up_and_running(dev);
+		if (!is_ctrl_up) {
+			dev_err(&dev->pdev->dev, "SOFT reset failed\n");
+			ret = -ENODEV;
+			goto out;
+		} else
+			dev_info(&dev->pdev->dev, "SOFT reset succeded\n");
+	}
+
+set_startup:
 	if (startup_timeout < 300)
 		startup_timeout = 300;

-	return 0;
+	return ret;
+out:
+	return ret;
+
+invalid_out:
+	if (src_readl(dev, MUnit.OMR) & KERNEL_PANIC)
+		ret = -ENODEV;
+	goto out;
 }

 /**
@@ -776,7 +776,7 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
 	 * from/to alternative Ram.
 	 */
 	if (ATARIHW_PRESENT(ST_SCSI) && !ATARIHW_PRESENT(EXTD_DMA) &&
-	    m68k_num_memory > 1) {
+	    m68k_realnum_memory > 1) {
 		atari_dma_buffer = atari_stram_alloc(STRAM_BUFFER_SIZE, "SCSI");
 		if (!atari_dma_buffer) {
 			pr_err(PFX "can't allocate ST-RAM double buffer\n");
@@ -3,7 +3,8 @@
  * session resources such as connection id and qp resources.
  *
  * Copyright (c) 2008-2013 Broadcom Corporation
- * Copyright (c) 2014-2015 QLogic Corporation
+ * Copyright (c) 2014-2016 QLogic Corporation
+ * Copyright (c) 2016-2017 Cavium Inc.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -1,7 +1,8 @@
 /* bnx2fc.h: QLogic Linux FCoE offload driver.
  *
  * Copyright (c) 2008-2013 Broadcom Corporation
- * Copyright (c) 2014-2015 QLogic Corporation
+ * Copyright (c) 2014-2016 QLogic Corporation
+ * Copyright (c) 2016-2017 Cavium Inc.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -65,7 +66,7 @@
 #include "bnx2fc_constants.h"

 #define BNX2FC_NAME		"bnx2fc"
-#define BNX2FC_VERSION		"2.10.3"
+#define BNX2FC_VERSION		"2.11.8"

 #define PFX			"bnx2fc: "
@@ -3,7 +3,8 @@
  * session resources such as connection id and qp resources.
  *
  * Copyright (c) 2008-2013 Broadcom Corporation
- * Copyright (c) 2014-2015 QLogic Corporation
+ * Copyright (c) 2014-2016 QLogic Corporation
+ * Copyright (c) 2016-2017 Cavium Inc.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -3,7 +3,8 @@
  * session resources such as connection id and qp resources.
  *
  * Copyright (c) 2008-2013 Broadcom Corporation
- * Copyright (c) 2014-2015 QLogic Corporation
+ * Copyright (c) 2014-2016 QLogic Corporation
+ * Copyright (c) 2016-2017 Cavium Inc.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -3,7 +3,8 @@
  * session resources such as connection id and qp resources.
  *
  * Copyright (c) 2008-2013 Broadcom Corporation
- * Copyright (c) 2014-2015 QLogic Corporation
+ * Copyright (c) 2014-2016 QLogic Corporation
+ * Copyright (c) 2016-2017 Cavium Inc.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -4,7 +4,8 @@
  * and responses.
  *
  * Copyright (c) 2008-2013 Broadcom Corporation
- * Copyright (c) 2014-2015 QLogic Corporation
+ * Copyright (c) 2014-2016 QLogic Corporation
+ * Copyright (c) 2016-2017 Cavium Inc.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -61,13 +62,20 @@ int bnx2fc_send_rrq(struct bnx2fc_cmd *aborted_io_req)

 	struct fc_els_rrq rrq;
 	struct bnx2fc_rport *tgt = aborted_io_req->tgt;
-	struct fc_lport *lport = tgt->rdata->local_port;
+	struct fc_lport *lport = NULL;
 	struct bnx2fc_els_cb_arg *cb_arg = NULL;
-	u32 sid = tgt->sid;
-	u32 r_a_tov = lport->r_a_tov;
+	u32 sid = 0;
+	u32 r_a_tov = 0;
 	unsigned long start = jiffies;
 	int rc;

+	if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags))
+		return -EINVAL;
+
+	lport = tgt->rdata->local_port;
+	sid = tgt->sid;
+	r_a_tov = lport->r_a_tov;
+
 	BNX2FC_ELS_DBG("Sending RRQ orig_xid = 0x%x\n",
 		   aborted_io_req->xid);
 	memset(&rrq, 0, sizeof(rrq));
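The bnx2fc_send_rrq() change moves the pointer-chasing initializers (tgt->rdata->local_port and friends) out of the declaration list so that nothing is dereferenced before the session-ready check; on an unready session those pointers must not be trusted. The shape of the bug and the fix, reduced to a hypothetical sketch (none of these names are from the driver):

    #include <linux/errno.h>

    struct peer { int timeout; };
    struct session { int ready; struct peer *peer; };

    extern int do_send(int timeout);	/* hypothetical transmit helper */

    static int send_request(struct session *s)
    {
        struct peer *p;		/* do NOT initialize from s->peer yet */
        int timeout;

        if (!s->ready)		/* validate before dereferencing */
            return -EINVAL;

        p = s->peer;		/* safe: session is known to be ready */
        timeout = p->timeout;

        return do_send(timeout);
    }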
@@ -4,7 +4,8 @@
  * FIP/FCoE packets, listen to link events etc.
  *
  * Copyright (c) 2008-2013 Broadcom Corporation
- * Copyright (c) 2014-2015 QLogic Corporation
+ * Copyright (c) 2014-2016 QLogic Corporation
+ * Copyright (c) 2016-2017 Cavium Inc.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -522,10 +523,12 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
 	struct fcoe_crc_eof crc_eof;
 	struct fc_frame *fp;
 	struct fc_lport *vn_port;
-	struct fcoe_port *port;
+	struct fcoe_port *port, *phys_port;
 	u8 *mac = NULL;
+	u8 *dest_mac = NULL;
 	struct fcoe_hdr *hp;
+	struct bnx2fc_interface *interface;
+	struct fcoe_ctlr *ctlr;

 	fr = fcoe_dev_from_skb(skb);
 	lport = fr->fr_dev;
@@ -561,8 +564,19 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
 		return;
 	}

+	phys_port = lport_priv(lport);
+	interface = phys_port->priv;
+	ctlr = bnx2fc_to_ctlr(interface);
+
 	fh = fc_frame_header_get(fp);

+	if (ntoh24(&dest_mac[3]) != ntoh24(fh->fh_d_id)) {
+		BNX2FC_HBA_DBG(lport, "FC frame d_id mismatch with MAC %pM.\n",
+			       dest_mac);
+		kfree_skb(skb);
+		return;
+	}
+
 	vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
 	if (vn_port) {
 		port = lport_priv(vn_port);
@@ -572,6 +586,14 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
 			return;
 		}
 	}
+	if (ctlr->state) {
+		if (!ether_addr_equal(mac, ctlr->dest_addr)) {
+			BNX2FC_HBA_DBG(lport, "Wrong source address: mac:%pM dest_addr:%pM.\n",
+				       mac, ctlr->dest_addr);
+			kfree_skb(skb);
+			return;
+		}
+	}
 	if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
 	    fh->fh_type == FC_TYPE_FCP) {
 		/* Drop FCP data. We dont this in L2 path */
@@ -597,6 +619,18 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
 		return;
 	}

+	/*
+	 * If the destination ID from the frame header does not match what we
+	 * have on record for lport and the search for a NPIV port came up
+	 * empty then this is not addressed to our port so simply drop it.
+	 */
+	if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) {
+		BNX2FC_HBA_DBG(lport, "Dropping frame due to destination mismatch: lport->port_id=%x fh->d_id=%x.\n",
+			       lport->port_id, ntoh24(fh->fh_d_id));
+		kfree_skb(skb);
+		return;
+	}
+
 	stats = per_cpu_ptr(lport->stats, smp_processor_id());
 	stats->RxFrames++;
 	stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
@@ -2105,6 +2139,9 @@ static uint bnx2fc_npiv_create_vports(struct fc_lport *lport,
 {
 	struct fc_vport_identifiers vpid;
 	uint i, created = 0;
+	u64 wwnn = 0;
+	char wwpn_str[32];
+	char wwnn_str[32];

 	if (npiv_tbl->count > MAX_NPIV_ENTRIES) {
 		BNX2FC_HBA_DBG(lport, "Exceeded count max of npiv table\n");
@@ -2123,11 +2160,23 @@ static uint bnx2fc_npiv_create_vports(struct fc_lport *lport,
 	vpid.disable = false;

 	for (i = 0; i < npiv_tbl->count; i++) {
-		vpid.node_name = wwn_to_u64(npiv_tbl->wwnn[i]);
+		wwnn = wwn_to_u64(npiv_tbl->wwnn[i]);
+		if (wwnn == 0) {
+			/*
+			 * If we get a 0 element from for the WWNN then assume
+			 * the WWNN should be the same as the physical port.
+			 */
+			wwnn = lport->wwnn;
+		}
+		vpid.node_name = wwnn;
 		vpid.port_name = wwn_to_u64(npiv_tbl->wwpn[i]);
 		scnprintf(vpid.symbolic_name, sizeof(vpid.symbolic_name),
 		    "NPIV[%u]:%016llx-%016llx",
 		    created, vpid.port_name, vpid.node_name);
+		fcoe_wwn_to_str(vpid.node_name, wwnn_str, sizeof(wwnn_str));
+		fcoe_wwn_to_str(vpid.port_name, wwpn_str, sizeof(wwpn_str));
+		BNX2FC_HBA_DBG(lport, "Creating vport %s:%s.\n", wwnn_str,
+		    wwpn_str);
 		if (fc_vport_create(lport->host, 0, &vpid))
 			created++;
 		else
|
||||
bnx2fc_hba_destroy(hba);
|
||||
}
|
||||
|
||||
static void bnx2fc_rport_terminate_io(struct fc_rport *rport)
|
||||
{
|
||||
/* This is a no-op */
|
||||
}
|
||||
|
||||
/**
|
||||
* bnx2fc_fcoe_reset - Resets the fcoe
|
||||
*
|
||||
@ -2860,7 +2914,7 @@ static struct fc_function_template bnx2fc_transport_function = {
|
||||
|
||||
.issue_fc_host_lip = bnx2fc_fcoe_reset,
|
||||
|
||||
.terminate_rport_io = fc_rport_terminate_io,
|
||||
.terminate_rport_io = bnx2fc_rport_terminate_io,
|
||||
|
||||
.vport_create = bnx2fc_vport_create,
|
||||
.vport_delete = bnx2fc_vport_destroy,
|
||||
|
@@ -3,7 +3,8 @@
  * with 57712 FCoE firmware.
  *
  * Copyright (c) 2008-2013 Broadcom Corporation
- * Copyright (c) 2014-2015 QLogic Corporation
+ * Copyright (c) 2014-2016 QLogic Corporation
+ * Copyright (c) 2016-2017 Cavium Inc.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -2,7 +2,8 @@
  * IO manager and SCSI IO processing.
  *
  * Copyright (c) 2008-2013 Broadcom Corporation
- * Copyright (c) 2014-2015 QLogic Corporation
+ * Copyright (c) 2014-2016 QLogic Corporation
+ * Copyright (c) 2016-2017 Cavium Inc.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -1166,16 +1167,11 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
 		printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
 				"not on active_q\n", io_req->xid);
 		/*
-		 * This condition can happen only due to the FW bug,
-		 * where we do not receive cleanup response from
-		 * the FW. Handle this case gracefully by erroring
-		 * back the IO request to SCSI-ml
+		 * The IO is still with the FW.
+		 * Return failure and let SCSI-ml retry eh_abort.
 		 */
-		bnx2fc_scsi_done(io_req, DID_ABORT);
-
-		kref_put(&io_req->refcount, bnx2fc_cmd_release);
 		spin_unlock_bh(&tgt->tgt_lock);
-		return SUCCESS;
+		return FAILED;
 	}

 	/*
@@ -3,7 +3,8 @@
  * session resources such as connection id and qp resources.
  *
  * Copyright (c) 2008-2013 Broadcom Corporation
- * Copyright (c) 2014-2015 QLogic Corporation
+ * Copyright (c) 2014-2016 QLogic Corporation
+ * Copyright (c) 2016-2017 Cavium Inc.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -1909,7 +1909,8 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,

 	bnx2i_ep_active_list_add(hba, bnx2i_ep);

-	if (bnx2i_map_ep_dbell_regs(bnx2i_ep))
+	rc = bnx2i_map_ep_dbell_regs(bnx2i_ep);
+	if (rc)
 		goto del_active_ep;

 	mutex_unlock(&hba->net_dev_lock);
@@ -794,18 +794,24 @@ csio_hw_dev_ready(struct csio_hw *hw)
 {
 	uint32_t reg;
 	int cnt = 6;
+	int src_pf;

 	while (((reg = csio_rd_reg32(hw, PL_WHOAMI_A)) == 0xFFFFFFFF) &&
 	       (--cnt != 0))
 		mdelay(100);

-	if ((cnt == 0) && (((int32_t)(SOURCEPF_G(reg)) < 0) ||
-			   (SOURCEPF_G(reg) >= CSIO_MAX_PFN))) {
+	if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
+		src_pf = SOURCEPF_G(reg);
+	else
+		src_pf = T6_SOURCEPF_G(reg);
+
+	if ((cnt == 0) && (((int32_t)(src_pf) < 0) ||
+			   (src_pf >= CSIO_MAX_PFN))) {
 		csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt);
 		return -EIO;
 	}

-	hw->pfn = SOURCEPF_G(reg);
+	hw->pfn = src_pf;

 	return 0;
 }
@@ -1581,10 +1587,16 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
 	unsigned int mtype = 0, maddr = 0;
 	uint32_t *cfg_data;
 	int value_to_add = 0;
+	const char *fw_cfg_file;

-	if (request_firmware(&cf, FW_CFG_NAME_T5, dev) < 0) {
+	if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK))
+		fw_cfg_file = FW_CFG_NAME_T5;
+	else
+		fw_cfg_file = FW_CFG_NAME_T6;
+
+	if (request_firmware(&cf, fw_cfg_file, dev) < 0) {
 		csio_err(hw, "could not find config file %s, err: %d\n",
-			 FW_CFG_NAME_T5, ret);
+			 fw_cfg_file, ret);
 		return -ENOENT;
 	}

@@ -1623,9 +1635,8 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
 		ret = csio_memory_write(hw, mtype, maddr + size, 4, &last.word);
 	}
 	if (ret == 0) {
-		csio_info(hw, "config file upgraded to %s\n",
-			  FW_CFG_NAME_T5);
-		snprintf(path, 64, "%s%s", "/lib/firmware/", FW_CFG_NAME_T5);
+		csio_info(hw, "config file upgraded to %s\n", fw_cfg_file);
+		snprintf(path, 64, "%s%s", "/lib/firmware/", fw_cfg_file);
 	}

 leave:
|
||||
.intfver_iscsi = FW_INTFVER(T5, ISCSI),
|
||||
.intfver_fcoe = FW_INTFVER(T5, FCOE),
|
||||
},
|
||||
}, {
|
||||
.chip = CHELSIO_T6,
|
||||
.fs_name = FW_CFG_NAME_T6,
|
||||
.fw_mod_name = FW_FNAME_T6,
|
||||
.fw_hdr = {
|
||||
.chip = FW_HDR_CHIP_T6,
|
||||
.fw_ver = __cpu_to_be32(FW_VERSION(T6)),
|
||||
.intfver_nic = FW_INTFVER(T6, NIC),
|
||||
.intfver_vnic = FW_INTFVER(T6, VNIC),
|
||||
.intfver_ri = FW_INTFVER(T6, RI),
|
||||
.intfver_iscsi = FW_INTFVER(T6, ISCSI),
|
||||
.intfver_fcoe = FW_INTFVER(T6, FCOE),
|
||||
},
|
||||
}
|
||||
};
|
||||
|
||||
@@ -2002,6 +2026,7 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset)
 	struct device *dev = &pci_dev->dev ;
 	const u8 *fw_data = NULL;
 	unsigned int fw_size = 0;
+	const char *fw_bin_file;

 	/* This is the firmware whose headers the driver was compiled
 	 * against
@@ -2014,9 +2039,14 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset)
 		return -EINVAL;
 	}

-	if (request_firmware(&fw, FW_FNAME_T5, dev) < 0) {
+	if (csio_is_t5(pci_dev->device & CSIO_HW_CHIP_MASK))
+		fw_bin_file = FW_FNAME_T5;
+	else
+		fw_bin_file = FW_FNAME_T6;
+
+	if (request_firmware(&fw, fw_bin_file, dev) < 0) {
 		csio_err(hw, "could not find firmware image %s, err: %d\n",
-			 FW_FNAME_T5, ret);
+			 fw_bin_file, ret);
 	} else {
 		fw_data = fw->data;
 		fw_size = fw->size;
@@ -2038,6 +2068,17 @@ csio_hw_flash_fw(struct csio_hw *hw, int *reset)
 	return ret;
 }

+static int csio_hw_check_fwver(struct csio_hw *hw)
+{
+	if (csio_is_t6(hw->pdev->device & CSIO_HW_CHIP_MASK) &&
+	    (hw->fwrev < CSIO_MIN_T6_FW)) {
+		csio_hw_print_fw_version(hw, "T6 unsupported fw");
+		return -1;
+	}
+
+	return 0;
+}
+
 /*
  * csio_hw_configure - Configure HW
  * @hw - HW module
@@ -2105,6 +2146,10 @@ csio_hw_configure(struct csio_hw *hw)
 	if (rv != 0)
 		goto out;

+	rv = csio_hw_check_fwver(hw);
+	if (rv < 0)
+		goto out;
+
 	/* If the firmware doesn't support Configuration Files,
 	 * return an error.
 	 */
@@ -2132,6 +2177,10 @@ csio_hw_configure(struct csio_hw *hw)
 	}

 	} else {
+		rv = csio_hw_check_fwver(hw);
+		if (rv < 0)
+			goto out;
+
 		if (hw->fw_state == CSIO_DEV_STATE_INIT) {

 			hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;
|
||||
csio_hw_intr_enable(struct csio_hw *hw)
|
||||
{
|
||||
uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw));
|
||||
uint32_t pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
|
||||
u32 pf = 0;
|
||||
uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE_A);
|
||||
|
||||
if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
|
||||
pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
|
||||
else
|
||||
pf = T6_SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
|
||||
|
||||
/*
|
||||
* Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up
|
||||
* by FW, so do nothing for INTX.
|
||||
@ -2293,7 +2347,12 @@ csio_hw_intr_enable(struct csio_hw *hw)
|
||||
void
|
||||
csio_hw_intr_disable(struct csio_hw *hw)
|
||||
{
|
||||
uint32_t pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
|
||||
u32 pf = 0;
|
||||
|
||||
if (csio_is_t5(hw->pdev->device & CSIO_HW_CHIP_MASK))
|
||||
pf = SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
|
||||
else
|
||||
pf = T6_SOURCEPF_G(csio_rd_reg32(hw, PL_WHOAMI_A));
|
||||
|
||||
if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED))
|
||||
return;
|
||||
@ -2918,6 +2977,8 @@ static void csio_cplsw_intr_handler(struct csio_hw *hw)
|
||||
*/
|
||||
static void csio_le_intr_handler(struct csio_hw *hw)
|
||||
{
|
||||
enum chip_type chip = CHELSIO_CHIP_VERSION(hw->chip_id);
|
||||
|
||||
static struct intr_info le_intr_info[] = {
|
||||
{ LIPMISS_F, "LE LIP miss", -1, 0 },
|
||||
{ LIP0_F, "LE 0 LIP error", -1, 0 },
|
||||
@ -2927,7 +2988,18 @@ static void csio_le_intr_handler(struct csio_hw *hw)
|
||||
{ 0, NULL, 0, 0 }
|
||||
};
|
||||
|
||||
if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE_A, le_intr_info))
|
||||
static struct intr_info t6_le_intr_info[] = {
|
||||
{ T6_LIPMISS_F, "LE LIP miss", -1, 0 },
|
||||
{ T6_LIP0_F, "LE 0 LIP error", -1, 0 },
|
||||
{ TCAMINTPERR_F, "LE parity error", -1, 1 },
|
||||
{ T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
|
||||
{ SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
|
||||
{ 0, NULL, 0, 0 }
|
||||
};
|
||||
|
||||
if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE_A,
|
||||
(chip == CHELSIO_T5) ?
|
||||
le_intr_info : t6_le_intr_info))
|
||||
csio_hw_fatal_err(hw);
|
||||
}
|
||||
|
||||
|
@@ -71,6 +71,7 @@
#define CSIO_MAX_CMD_PER_LUN 32
#define CSIO_MAX_DDP_BUF_SIZE (1024 * 1024)
#define CSIO_MAX_SECTOR_SIZE 128
#define CSIO_MIN_T6_FW 0x01102D00 /* FW 1.16.45.0 */

/* Interrupts */
#define CSIO_EXTRA_MSI_IQS 2 /* Extra iqs for INTX/MSI mode

@@ -39,11 +39,15 @@
/* Define MACRO values */
#define CSIO_HW_T5 0x5000
#define CSIO_T5_FCOE_ASIC 0x5600
#define CSIO_HW_T6 0x6000
#define CSIO_T6_FCOE_ASIC 0x6600
#define CSIO_HW_CHIP_MASK 0xF000

#define T5_REGMAP_SIZE (332 * 1024)
#define FW_FNAME_T5 "cxgb4/t5fw.bin"
#define FW_CFG_NAME_T5 "cxgb4/t5-config.txt"
#define FW_FNAME_T6 "cxgb4/t6fw.bin"
#define FW_CFG_NAME_T6 "cxgb4/t6-config.txt"

#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
#define CHELSIO_CHIP_FPGA 0x100
@@ -51,12 +55,17 @@
#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)

#define CHELSIO_T5 0x5
#define CHELSIO_T6 0x6

enum chip_type {
T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
T5_FIRST_REV = T5_A0,
T5_LAST_REV = T5_A1,

T6_A0 = CHELSIO_CHIP_CODE(CHELSIO_T6, 0),
T6_FIRST_REV = T6_A0,
T6_LAST_REV = T6_A0,
};

static inline int csio_is_t5(uint16_t chip)
@@ -64,6 +73,11 @@ static inline int csio_is_t5(uint16_t chip)
return (chip == CSIO_HW_T5);
}

static inline int csio_is_t6(uint16_t chip)
{
return (chip == CSIO_HW_T6);
}

/* Define MACRO DEFINITIONS */
#define CSIO_DEVICE(devid, idx) \
{ PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) }

@@ -71,27 +71,6 @@ csio_t5_set_mem_win(struct csio_hw *hw, uint32_t win)
static void
csio_t5_pcie_intr_handler(struct csio_hw *hw)
{
static struct intr_info sysbus_intr_info[] = {
{ RNPP_F, "RXNP array parity error", -1, 1 },
{ RPCP_F, "RXPC array parity error", -1, 1 },
{ RCIP_F, "RXCIF array parity error", -1, 1 },
{ RCCP_F, "Rx completions control array parity error", -1, 1 },
{ RFTP_F, "RXFT array parity error", -1, 1 },
{ 0, NULL, 0, 0 }
};
static struct intr_info pcie_port_intr_info[] = {
{ TPCP_F, "TXPC array parity error", -1, 1 },
{ TNPP_F, "TXNP array parity error", -1, 1 },
{ TFTP_F, "TXFT array parity error", -1, 1 },
{ TCAP_F, "TXCA array parity error", -1, 1 },
{ TCIP_F, "TXCIF array parity error", -1, 1 },
{ RCAP_F, "RXCA array parity error", -1, 1 },
{ OTDD_F, "outbound request TLP discarded", -1, 1 },
{ RDPE_F, "Rx data parity error", -1, 1 },
{ TDUE_F, "Tx uncorrectable data error", -1, 1 },
{ 0, NULL, 0, 0 }
};

static struct intr_info pcie_intr_info[] = {
{ MSTGRPPERR_F, "Master Response Read Queue parity error",
-1, 1 },
@@ -133,13 +112,7 @@ csio_t5_pcie_intr_handler(struct csio_hw *hw)
};

int fat;
fat = csio_handle_intr_status(hw,
PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
sysbus_intr_info) +
csio_handle_intr_status(hw,
PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
pcie_port_intr_info) +
csio_handle_intr_status(hw, PCIE_INT_CAUSE_A, pcie_intr_info);
fat = csio_handle_intr_status(hw, PCIE_INT_CAUSE_A, pcie_intr_info);
if (fat)
csio_hw_fatal_err(hw);
}

@@ -952,8 +952,9 @@ static int csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
struct csio_hw *hw;
struct csio_lnode *ln;

/* probe only T5 cards */
if (!csio_is_t5((pdev->device & CSIO_HW_CHIP_MASK)))
/* probe only T5 and T6 cards */
if (!csio_is_t5((pdev->device & CSIO_HW_CHIP_MASK)) &&
!csio_is_t6((pdev->device & CSIO_HW_CHIP_MASK)))
return -ENODEV;

rv = csio_pci_init(pdev, &bars);
@@ -1253,3 +1254,4 @@ MODULE_LICENSE(CSIO_DRV_LICENSE);
MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
MODULE_VERSION(CSIO_DRV_VERSION);
MODULE_FIRMWARE(FW_FNAME_T5);
MODULE_FIRMWARE(FW_FNAME_T6);

@@ -50,7 +50,7 @@
#define CSIO_DRV_AUTHOR "Chelsio Communications"
#define CSIO_DRV_LICENSE "Dual BSD/GPL"
#define CSIO_DRV_DESC "Chelsio FCoE driver"
#define CSIO_DRV_VERSION "1.0.0"
#define CSIO_DRV_VERSION "1.0.0-ko"

extern struct fc_function_template csio_fc_transport_funcs;
extern struct fc_function_template csio_fc_transport_vport_funcs;

@@ -238,14 +238,23 @@ csio_osname(uint8_t *buf, size_t buf_len)
}

static inline void
csio_append_attrib(uint8_t **ptr, uint16_t type, uint8_t *val, uint16_t len)
csio_append_attrib(uint8_t **ptr, uint16_t type, void *val, size_t val_len)
{
uint16_t len;
struct fc_fdmi_attr_entry *ae = (struct fc_fdmi_attr_entry *)*ptr;

if (WARN_ON(val_len > U16_MAX))
return;

len = val_len;

ae->type = htons(type);
len += 4; /* includes attribute type and length */
len = (len + 3) & ~3; /* should be multiple of 4 bytes */
ae->len = htons(len);
memcpy(ae->value, val, len);
memcpy(ae->value, val, val_len);
if (len > val_len)
memset(ae->value + val_len, 0, len - val_len);
*ptr += len;
}

@@ -335,7 +344,7 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
numattrs++;
val = htonl(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_SUPPORTEDSPEED,
(uint8_t *)&val,
&val,
FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN);
numattrs++;

@@ -346,23 +355,22 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
else
val = htonl(CSIO_HBA_PORTSPEED_UNKNOWN);
csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED,
(uint8_t *)&val,
FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN);
&val, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN);
numattrs++;

mfs = ln->ln_sparm.csp.sp_bb_data;
csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_MAXFRAMESIZE,
(uint8_t *)&mfs, FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN);
&mfs, sizeof(mfs));
numattrs++;

strcpy(buf, "csiostor");
csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_OSDEVICENAME, buf,
(uint16_t)strlen(buf));
strlen(buf));
numattrs++;

if (!csio_hostname(buf, sizeof(buf))) {
csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_HOSTNAME,
buf, (uint16_t)strlen(buf));
buf, strlen(buf));
numattrs++;
}
attrib_blk->numattrs = htonl(numattrs);
@@ -444,33 +452,32 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)

strcpy(buf, "Chelsio Communications");
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MANUFACTURER, buf,
(uint16_t)strlen(buf));
strlen(buf));
numattrs++;
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_SERIALNUMBER,
hw->vpd.sn, (uint16_t)sizeof(hw->vpd.sn));
hw->vpd.sn, sizeof(hw->vpd.sn));
numattrs++;
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODEL, hw->vpd.id,
(uint16_t)sizeof(hw->vpd.id));
sizeof(hw->vpd.id));
numattrs++;
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODELDESCRIPTION,
hw->model_desc, (uint16_t)strlen(hw->model_desc));
hw->model_desc, strlen(hw->model_desc));
numattrs++;
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_HARDWAREVERSION,
hw->hw_ver, (uint16_t)sizeof(hw->hw_ver));
hw->hw_ver, sizeof(hw->hw_ver));
numattrs++;
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_FIRMWAREVERSION,
hw->fwrev_str, (uint16_t)strlen(hw->fwrev_str));
hw->fwrev_str, strlen(hw->fwrev_str));
numattrs++;

if (!csio_osname(buf, sizeof(buf))) {
csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_OSNAMEVERSION,
buf, (uint16_t)strlen(buf));
buf, strlen(buf));
numattrs++;
}

csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD,
(uint8_t *)&maxpayload,
FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN);
&maxpayload, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN);
len = (uint32_t)(pld - (uint8_t *)cmd);
numattrs++;
attrib_blk->numattrs = htonl(numattrs);
@@ -1794,6 +1801,8 @@ csio_ln_mgmt_submit_req(struct csio_ioreq *io_req,
struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
int rv;

BUG_ON(pld_len > pld->len);

io_req->io_cbfn = io_cbfn; /* Upper layer callback handler */
io_req->fw_handle = (uintptr_t) (io_req);
io_req->eq_idx = mgmtm->eq_idx;

@@ -480,12 +480,14 @@ csio_wr_iq_create(struct csio_hw *hw, void *priv, int iq_idx,

flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
if (flq_idx != -1) {
enum chip_type chip = CHELSIO_CHIP_VERSION(hw->chip_id);
struct csio_q *flq = hw->wrm.q_arr[flq_idx];

iqp.fl0paden = 1;
iqp.fl0packen = flq->un.fl.packen ? 1 : 0;
iqp.fl0fbmin = X_FETCHBURSTMIN_64B;
iqp.fl0fbmax = X_FETCHBURSTMAX_512B;
iqp.fl0fbmax = ((chip == CHELSIO_T5) ?
X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B);
iqp.fl0size = csio_q_size(hw, flq_idx) / CSIO_QCREDIT_SZ;
iqp.fl0addr = csio_q_pstart(hw, flq_idx);
}

@@ -1608,6 +1608,7 @@ static int init_act_open(struct cxgbi_sock *csk)
struct neighbour *n = NULL;
void *daddr;
unsigned int step;
unsigned int rxq_idx;
unsigned int size, size6;
unsigned int linkspeed;
unsigned int rcv_winf, snd_winf;
@@ -1686,7 +1687,9 @@ static int init_act_open(struct cxgbi_sock *csk)
step = lldi->ntxq / lldi->nchan;
csk->txq_idx = cxgb4_port_idx(ndev) * step;
step = lldi->nrxq / lldi->nchan;
csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step];
rxq_idx = (cxgb4_port_idx(ndev) * step) + (cdev->rxq_idx_cntr % step);
cdev->rxq_idx_cntr++;
csk->rss_qid = lldi->rxq_ids[rxq_idx];
linkspeed = ((struct port_info *)netdev_priv(ndev))->link_cfg.speed;
csk->snd_win = cxgb4i_snd_win;
csk->rcv_win = cxgb4i_rcv_win;

@@ -477,6 +477,7 @@ struct cxgbi_device {
unsigned int skb_rx_extra; /* for msg coalesced mode */
unsigned int tx_max_size;
unsigned int rx_max_size;
unsigned int rxq_idx_cntr;
struct cxgbi_ports_map pmap;

void (*dev_ddp_cleanup)(struct cxgbi_device *);

@@ -15,6 +15,8 @@
#ifndef _CXLFLASH_COMMON_H
#define _CXLFLASH_COMMON_H

#include <linux/async.h>
#include <linux/cdev.h>
#include <linux/irq_poll.h>
#include <linux/list.h>
#include <linux/rwsem.h>
@@ -85,7 +87,8 @@ enum cxlflash_init_state {
INIT_STATE_NONE,
INIT_STATE_PCI,
INIT_STATE_AFU,
INIT_STATE_SCSI
INIT_STATE_SCSI,
INIT_STATE_CDEV
};

enum cxlflash_state {
@@ -115,6 +118,8 @@ struct cxlflash_cfg {
struct pci_device_id *dev_id;
struct Scsi_Host *host;
int num_fc_ports;
struct cdev cdev;
struct device *chardev;

ulong cxlflash_regs_pci;

@@ -142,8 +147,10 @@ struct cxlflash_cfg {
wait_queue_head_t tmf_waitq;
spinlock_t tmf_slock;
bool tmf_active;
bool ws_unmap; /* Write-same unmap supported */
wait_queue_head_t reset_waitq;
enum cxlflash_state state;
async_cookie_t async_reset_cookie;
};

struct afu_cmd {
@@ -155,7 +162,10 @@ struct afu_cmd {
struct list_head queue;
u32 hwq_index;

u8 cmd_tmf:1;
u8 cmd_tmf:1,
cmd_aborted:1;

struct list_head list; /* Pending commands link */

/* As per the SISLITE spec the IOARCB EA has to be 16-byte aligned.
* However for performance reasons the IOARCB/IOASA should be
@@ -168,12 +178,20 @@ static inline struct afu_cmd *sc_to_afuc(struct scsi_cmnd *sc)
return PTR_ALIGN(scsi_cmd_priv(sc), __alignof__(struct afu_cmd));
}

static inline struct afu_cmd *sc_to_afuci(struct scsi_cmnd *sc)
{
struct afu_cmd *afuc = sc_to_afuc(sc);

INIT_LIST_HEAD(&afuc->queue);
return afuc;
}

static inline struct afu_cmd *sc_to_afucz(struct scsi_cmnd *sc)
{
struct afu_cmd *afuc = sc_to_afuc(sc);

memset(afuc, 0, sizeof(*afuc));
return afuc;
return sc_to_afuci(sc);
}

struct hwq {
@@ -191,9 +209,10 @@ struct hwq {
struct sisl_ctrl_map __iomem *ctrl_map; /* MC control map */
ctx_hndl_t ctx_hndl; /* master's context handle */
u32 index; /* Index of this hwq */
struct list_head pending_cmds; /* Commands pending completion */

atomic_t hsq_credits;
spinlock_t hsq_slock;
spinlock_t hsq_slock; /* Hardware send queue lock */
struct sisl_ioarcb *hsq_start;
struct sisl_ioarcb *hsq_end;
struct sisl_ioarcb *hsq_curr;
@@ -204,7 +223,6 @@ struct hwq {
bool toggle;

s64 room;
spinlock_t rrin_slock; /* Lock to rrin queuing and cmd_room updates */

struct irq_poll irqpoll;
} __aligned(cache_line_size());
@@ -212,7 +230,7 @@ struct hwq {
struct afu {
struct hwq hwqs[CXLFLASH_MAX_HWQS];
int (*send_cmd)(struct afu *, struct afu_cmd *);
void (*context_reset)(struct afu_cmd *);
int (*context_reset)(struct hwq *);

/* AFU HW */
struct cxlflash_afu_map __iomem *afu_map; /* entire MMIO map */
@@ -245,21 +263,31 @@ static inline bool afu_is_irqpoll_enabled(struct afu *afu)
return !!afu->irqpoll_weight;
}

static inline bool afu_is_cmd_mode(struct afu *afu, u64 cmd_mode)
static inline bool afu_has_cap(struct afu *afu, u64 cap)
{
u64 afu_cap = afu->interface_version >> SISL_INTVER_CAP_SHIFT;

return afu_cap & cmd_mode;
return afu_cap & cap;
}

static inline bool afu_is_afu_debug(struct afu *afu)
{
return afu_has_cap(afu, SISL_INTVER_CAP_AFU_DEBUG);
}

static inline bool afu_is_lun_provision(struct afu *afu)
{
return afu_has_cap(afu, SISL_INTVER_CAP_LUN_PROVISION);
}

static inline bool afu_is_sq_cmd_mode(struct afu *afu)
{
return afu_is_cmd_mode(afu, SISL_INTVER_CAP_SQ_CMD_MODE);
return afu_has_cap(afu, SISL_INTVER_CAP_SQ_CMD_MODE);
}

static inline bool afu_is_ioarrin_cmd_mode(struct afu *afu)
{
return afu_is_cmd_mode(afu, SISL_INTVER_CAP_IOARRIN_CMD_MODE);
return afu_has_cap(afu, SISL_INTVER_CAP_IOARRIN_CMD_MODE);
}

static inline u64 lun_to_lunid(u64 lun)

File diff suppressed because it is too large

@@ -22,6 +22,7 @@

#define CXLFLASH_NAME "cxlflash"
#define CXLFLASH_ADAPTER_NAME "IBM POWER CXL Flash Adapter"
#define CXLFLASH_MAX_ADAPTERS 32

#define PCI_DEVICE_ID_IBM_CORSA 0x04F0
#define PCI_DEVICE_ID_IBM_FLASH_GT 0x0600
@@ -40,6 +41,10 @@
/* FC defines */
#define FC_MTIP_CMDCONFIG 0x010
#define FC_MTIP_STATUS 0x018
#define FC_MAX_NUM_LUNS 0x080 /* Max LUNs host can provision for port */
#define FC_CUR_NUM_LUNS 0x088 /* Cur number LUNs provisioned for port */
#define FC_MAX_CAP_PORT 0x090 /* Max capacity all LUNs for port (4K blocks) */
#define FC_CUR_CAP_PORT 0x098 /* Cur capacity all LUNs for port (4K blocks) */

#define FC_PNAME 0x300
#define FC_CONFIG 0x320
@@ -62,6 +67,8 @@

/* AFU command timeout values */
#define MC_AFU_SYNC_TIMEOUT 5 /* 5 secs */
#define MC_LUN_PROV_TIMEOUT 5 /* 5 secs */
#define MC_AFU_DEBUG_TIMEOUT 5 /* 5 secs */

/* AFU command room retry limit */
#define MC_ROOM_RETRY_CNT 10

@@ -72,6 +72,13 @@ struct sisl_ioarcb {
u16 timeout; /* in units specified by req_flags */
u32 rsvd1;
u8 cdb[16]; /* must be in big endian */
#define SISL_AFU_CMD_SYNC 0xC0 /* AFU sync command */
#define SISL_AFU_CMD_LUN_PROVISION 0xD0 /* AFU LUN provision command */
#define SISL_AFU_CMD_DEBUG 0xE0 /* AFU debug command */

#define SISL_AFU_LUN_PROVISION_CREATE 0x00 /* LUN provision create type */
#define SISL_AFU_LUN_PROVISION_DELETE 0x01 /* LUN provision delete type */

union {
u64 reserved; /* Reserved for IOARRIN mode */
struct sisl_ioasa *ioasa; /* IOASA EA for SQ Mode */
@@ -156,6 +163,7 @@ struct sisl_rc {
};

#define SISL_SENSE_DATA_LEN 20 /* Sense data length */
#define SISL_WWID_DATA_LEN 16 /* WWID data length */

/*
* IOASA: 64 bytes & must follow IOARCB, min 16 byte alignment required,
@@ -167,7 +175,12 @@ struct sisl_ioasa {
u32 ioasc;
#define SISL_IOASC_GOOD_COMPLETION 0x00000000U
};

union {
u32 resid;
u32 lunid_hi;
};

u8 port;
u8 afu_extra;
/* when afu_rc=0x04, 0x14, 0x31 (_xxx_DMA_ERR):
@@ -190,7 +203,14 @@ struct sisl_ioasa {

u8 scsi_extra;
u8 fc_extra;

union {
u8 sense_data[SISL_SENSE_DATA_LEN];
struct {
u32 lunid_lo;
u8 wwid[SISL_WWID_DATA_LEN];
};
};

/* These fields are defined by the SISlite architecture for the
* host to use as they see fit for their implementation.
@@ -263,6 +283,7 @@ struct sisl_host_map {
__be64 rrq_end; /* write sequence: start followed by end */
__be64 cmd_room;
__be64 ctx_ctrl; /* least significant byte or b56:63 is LISN# */
#define SISL_CTX_CTRL_UNMAP_SECTOR 0x8000000000000000ULL /* b0 */
__be64 mbox_w; /* restricted use */
__be64 sq_start; /* Submission Queue (R/W): write sequence and */
__be64 sq_end; /* inclusion semantics are the same as RRQ */
@@ -392,6 +413,8 @@ struct sisl_global_regs {
#define SISL_INTVER_CAP_SQ_CMD_MODE 0x400000000000ULL
#define SISL_INTVER_CAP_RESERVED_CMD_MODE_A 0x200000000000ULL
#define SISL_INTVER_CAP_RESERVED_CMD_MODE_B 0x100000000000ULL
#define SISL_INTVER_CAP_LUN_PROVISION 0x080000000000ULL
#define SISL_INTVER_CAP_AFU_DEBUG 0x040000000000ULL
};

#define CXLFLASH_NUM_FC_PORTS_PER_BANK 2 /* fixed # of ports per bank */

@@ -56,6 +56,19 @@ static void marshal_det_to_rele(struct dk_cxlflash_detach *detach,
release->context_id = detach->context_id;
}

/**
* marshal_udir_to_rele() - translate udirect to release structure
* @udirect: Source structure from which to translate/copy.
* @release: Destination structure for the translate/copy.
*/
static void marshal_udir_to_rele(struct dk_cxlflash_udirect *udirect,
struct dk_cxlflash_release *release)
{
release->hdr = udirect->hdr;
release->context_id = udirect->context_id;
release->rsrc_handle = udirect->rsrc_handle;
}

/**
* cxlflash_free_errpage() - frees resources associated with global error page
*/
@@ -622,6 +635,7 @@ int _cxlflash_disk_release(struct scsi_device *sdev,
res_hndl_t rhndl = release->rsrc_handle;

int rc = 0;
int rcr = 0;
u64 ctxid = DECODE_CTXID(release->context_id),
rctxid = release->context_id;

@@ -686,8 +700,12 @@ int _cxlflash_disk_release(struct scsi_device *sdev,
rhte_f1->dw = 0;
dma_wmb(); /* Make RHT entry bottom-half clearing visible */

if (!ctxi->err_recovery_active)
cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
if (!ctxi->err_recovery_active) {
rcr = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
if (unlikely(rcr))
dev_dbg(dev, "%s: AFU sync failed rc=%d\n",
__func__, rcr);
}
break;
default:
WARN(1, "Unsupported LUN mode!");
@@ -1929,6 +1947,7 @@ static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
struct afu *afu = cfg->afu;
struct llun_info *lli = sdev->hostdata;
struct glun_info *gli = lli->parent;
struct dk_cxlflash_release rel = { { 0 }, 0 };

struct dk_cxlflash_udirect *pphys = (struct dk_cxlflash_udirect *)arg;

@@ -1970,13 +1989,18 @@ static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
rsrc_handle = (rhte - ctxi->rht_start);

rht_format1(rhte, lli->lun_id[sdev->channel], ctxi->rht_perms, port);
cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC);

last_lba = gli->max_lba;
pphys->hdr.return_flags = 0;
pphys->last_lba = last_lba;
pphys->rsrc_handle = rsrc_handle;

rc = cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC);
if (unlikely(rc)) {
dev_dbg(dev, "%s: AFU sync failed rc=%d\n", __func__, rc);
goto err2;
}

out:
if (likely(ctxi))
put_context(ctxi);
@@ -1984,6 +2008,10 @@ static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg)
__func__, rsrc_handle, rc, last_lba);
return rc;

err2:
marshal_udir_to_rele(pphys, &rel);
_cxlflash_disk_release(sdev, ctxi, &rel);
goto out;
err1:
cxlflash_lun_detach(gli);
goto out;

@@ -446,6 +446,7 @@ static int write_same16(struct scsi_device *sdev,
while (left > 0) {

scsi_cmd[0] = WRITE_SAME_16;
scsi_cmd[1] = cfg->ws_unmap ? 0x8 : 0;
put_unaligned_be64(offset, &scsi_cmd[2]);
put_unaligned_be32(ws_limit < left ? ws_limit : left,
&scsi_cmd[10]);
@@ -594,7 +595,9 @@ static int grow_lxt(struct afu *afu,
rhte->lxt_cnt = my_new_size;
dma_wmb(); /* Make RHT entry's LXT table size update visible */

cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
if (unlikely(rc))
rc = -EAGAIN;

/* free old lxt if reallocated */
if (lxt != lxt_old)
@@ -673,8 +676,11 @@ static int shrink_lxt(struct afu *afu,
rhte->lxt_start = lxt;
dma_wmb(); /* Make RHT entry's LXT table update visible */

if (needs_sync)
cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
if (needs_sync) {
rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
if (unlikely(rc))
rc = -EAGAIN;
}

if (needs_ws) {
/*
@@ -792,6 +798,21 @@ int _cxlflash_vlun_resize(struct scsi_device *sdev,
rc = grow_lxt(afu, sdev, ctxid, rhndl, rhte, &new_size);
else if (new_size < rhte->lxt_cnt)
rc = shrink_lxt(afu, sdev, rhndl, rhte, ctxi, &new_size);
else {
/*
* Rare case where there is already sufficient space, just
* need to perform a translation sync with the AFU. This
* scenario likely follows a previous sync failure during
* a resize operation. Accordingly, perform the heavyweight
* form of translation sync as it is unknown which type of
* resize failed previously.
*/
rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC);
if (unlikely(rc)) {
rc = -EAGAIN;
goto out;
}
}

resize->hdr.return_flags = 0;
resize->last_lba = (new_size * MC_CHUNK_SIZE * gli->blk_len);
@@ -1084,10 +1105,13 @@ static int clone_lxt(struct afu *afu,
{
struct cxlflash_cfg *cfg = afu->parent;
struct device *dev = &cfg->dev->dev;
struct sisl_lxt_entry *lxt;
struct sisl_lxt_entry *lxt = NULL;
bool locked = false;
u32 ngrps;
u64 aun; /* chunk# allocated by block allocator */
int i, j;
int j;
int i = 0;
int rc = 0;

ngrps = LXT_NUM_GROUPS(rhte_src->lxt_cnt);

@@ -1095,33 +1119,29 @@ static int clone_lxt(struct afu *afu,
/* allocate new LXTs for clone */
lxt = kzalloc((sizeof(*lxt) * LXT_GROUP_SIZE * ngrps),
GFP_KERNEL);
if (unlikely(!lxt))
return -ENOMEM;
if (unlikely(!lxt)) {
rc = -ENOMEM;
goto out;
}

/* copy over */
memcpy(lxt, rhte_src->lxt_start,
(sizeof(*lxt) * rhte_src->lxt_cnt));

/* clone the LBAs in block allocator via ref_cnt */
/* clone the LBAs in block allocator via ref_cnt, note that the
* block allocator mutex must be held until it is established
* that this routine will complete without the need for a
* cleanup.
*/
mutex_lock(&blka->mutex);
locked = true;
for (i = 0; i < rhte_src->lxt_cnt; i++) {
aun = (lxt[i].rlba_base >> MC_CHUNK_SHIFT);
if (ba_clone(&blka->ba_lun, aun) == -1ULL) {
/* free the clones already made */
for (j = 0; j < i; j++) {
aun = (lxt[j].rlba_base >>
MC_CHUNK_SHIFT);
ba_free(&blka->ba_lun, aun);
}

mutex_unlock(&blka->mutex);
kfree(lxt);
return -EIO;
rc = -EIO;
goto err;
}
}
mutex_unlock(&blka->mutex);
} else {
lxt = NULL;
}

/*
@@ -1136,10 +1156,31 @@ static int clone_lxt(struct afu *afu,
rhte->lxt_cnt = rhte_src->lxt_cnt;
dma_wmb(); /* Make RHT entry's LXT table size update visible */

cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
rc = cxlflash_afu_sync(afu, ctxid, rhndl, AFU_LW_SYNC);
if (unlikely(rc)) {
rc = -EAGAIN;
goto err2;
}

dev_dbg(dev, "%s: returning\n", __func__);
return 0;
out:
if (locked)
mutex_unlock(&blka->mutex);
dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
return rc;
err2:
/* Reset the RHTE */
rhte->lxt_cnt = 0;
dma_wmb();
rhte->lxt_start = NULL;
dma_wmb();
err:
/* free the clones already made */
for (j = 0; j < i; j++) {
aun = (lxt[j].rlba_base >> MC_CHUNK_SHIFT);
ba_free(&blka->ba_lun, aun);
}
kfree(lxt);
goto out;
}

/**

@@ -57,7 +57,6 @@
/* device handler flags */
#define ALUA_OPTIMIZE_STPG 0x01
#define ALUA_RTPG_EXT_HDR_UNSUPP 0x02
#define ALUA_SYNC_STPG 0x04
/* State machine flags */
#define ALUA_PG_RUN_RTPG 0x10
#define ALUA_PG_RUN_STPG 0x20
@@ -70,7 +69,6 @@ MODULE_PARM_DESC(optimize_stpg, "Allow use of a non-optimized path, rather than
static LIST_HEAD(port_group_list);
static DEFINE_SPINLOCK(port_group_lock);
static struct workqueue_struct *kaluad_wq;
static struct workqueue_struct *kaluad_sync_wq;

struct alua_port_group {
struct kref kref;
@@ -380,8 +378,6 @@ static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h,
}

spin_lock_irqsave(&pg->lock, flags);
if (sdev->synchronous_alua)
pg->flags |= ALUA_SYNC_STPG;
if (pg_updated)
list_add_rcu(&h->node, &pg->dh_list);
spin_unlock_irqrestore(&pg->lock, flags);
@@ -785,7 +781,6 @@ static void alua_rtpg_work(struct work_struct *work)
int err = SCSI_DH_OK;
struct alua_queue_data *qdata, *tmp;
unsigned long flags;
struct workqueue_struct *alua_wq = kaluad_wq;

spin_lock_irqsave(&pg->lock, flags);
sdev = pg->rtpg_sdev;
@@ -796,8 +791,6 @@ static void alua_rtpg_work(struct work_struct *work)
kref_put(&pg->kref, release_port_group);
return;
}
if (pg->flags & ALUA_SYNC_STPG)
alua_wq = kaluad_sync_wq;
pg->flags |= ALUA_PG_RUNNING;
if (pg->flags & ALUA_PG_RUN_RTPG) {
int state = pg->state;
@@ -810,7 +803,7 @@ static void alua_rtpg_work(struct work_struct *work)
pg->flags &= ~ALUA_PG_RUNNING;
pg->flags |= ALUA_PG_RUN_RTPG;
spin_unlock_irqrestore(&pg->lock, flags);
queue_delayed_work(alua_wq, &pg->rtpg_work,
queue_delayed_work(kaluad_wq, &pg->rtpg_work,
pg->interval * HZ);
return;
}
@@ -822,7 +815,7 @@ static void alua_rtpg_work(struct work_struct *work)
pg->flags &= ~ALUA_PG_RUNNING;
pg->flags |= ALUA_PG_RUN_RTPG;
spin_unlock_irqrestore(&pg->lock, flags);
queue_delayed_work(alua_wq, &pg->rtpg_work,
queue_delayed_work(kaluad_wq, &pg->rtpg_work,
pg->interval * HZ);
return;
}
@@ -839,7 +832,7 @@ static void alua_rtpg_work(struct work_struct *work)
pg->interval = 0;
pg->flags &= ~ALUA_PG_RUNNING;
spin_unlock_irqrestore(&pg->lock, flags);
queue_delayed_work(alua_wq, &pg->rtpg_work,
queue_delayed_work(kaluad_wq, &pg->rtpg_work,
pg->interval * HZ);
return;
}
@@ -874,8 +867,6 @@ static bool alua_rtpg_queue(struct alua_port_group *pg,
{
int start_queue = 0;
unsigned long flags;
struct workqueue_struct *alua_wq = kaluad_wq;

if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev))
return false;

@@ -900,12 +891,10 @@ static bool alua_rtpg_queue(struct alua_port_group *pg,
}
}

if (pg->flags & ALUA_SYNC_STPG)
alua_wq = kaluad_sync_wq;
spin_unlock_irqrestore(&pg->lock, flags);

if (start_queue) {
if (queue_delayed_work(alua_wq, &pg->rtpg_work,
if (queue_delayed_work(kaluad_wq, &pg->rtpg_work,
msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS)))
sdev = NULL;
else
@@ -1166,16 +1155,11 @@ static int __init alua_init(void)
/* Temporary failure, bypass */
return SCSI_DH_DEV_TEMP_BUSY;
}
kaluad_sync_wq = create_workqueue("kaluad_sync");
if (!kaluad_sync_wq) {
destroy_workqueue(kaluad_wq);
return SCSI_DH_DEV_TEMP_BUSY;
}

r = scsi_register_device_handler(&alua_dh);
if (r != 0) {
printk(KERN_ERR "%s: Failed to register scsi device handler",
ALUA_DH_NAME);
destroy_workqueue(kaluad_sync_wq);
destroy_workqueue(kaluad_wq);
}
return r;
@@ -1184,7 +1168,6 @@ static int __init alua_init(void)
static void __exit alua_exit(void)
{
scsi_unregister_device_handler(&alua_dh);
destroy_workqueue(kaluad_sync_wq);
destroy_workqueue(kaluad_wq);
}

@@ -945,8 +945,8 @@ struct esas2r_adapter {
struct list_head vrq_mds_head;
struct esas2r_mem_desc *vrq_mds;
int num_vrqs;
struct semaphore fm_api_semaphore;
struct semaphore fs_api_semaphore;
struct mutex fm_api_mutex;
struct mutex fs_api_mutex;
struct semaphore nvram_semaphore;
struct atto_ioctl *local_atto_ioctl;
u8 fw_coredump_buff[ESAS2R_FWCOREDUMP_SZ];

@@ -327,8 +327,8 @@ int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
esas2r_debug("new adapter %p, name %s", a, a->name);
spin_lock_init(&a->request_lock);
spin_lock_init(&a->fw_event_lock);
sema_init(&a->fm_api_semaphore, 1);
sema_init(&a->fs_api_semaphore, 1);
mutex_init(&a->fm_api_mutex);
mutex_init(&a->fs_api_mutex);
sema_init(&a->nvram_semaphore, 1);

esas2r_fw_event_off(a);

@@ -110,7 +110,7 @@ static void do_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
{
struct esas2r_request *rq;

if (down_interruptible(&a->fm_api_semaphore)) {
if (mutex_lock_interruptible(&a->fm_api_mutex)) {
fi->status = FI_STAT_BUSY;
return;
}
@@ -173,7 +173,7 @@ static void do_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
free_req:
esas2r_free_request(a, (struct esas2r_request *)rq);
free_sem:
up(&a->fm_api_semaphore);
mutex_unlock(&a->fm_api_mutex);
return;

}
@@ -1962,7 +1962,7 @@ int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count)
(struct esas2r_ioctl_fs *)a->fs_api_buffer;

/* If another flash request is already in progress, return. */
if (down_interruptible(&a->fs_api_semaphore)) {
if (mutex_lock_interruptible(&a->fs_api_mutex)) {
busy:
fs->status = ATTO_STS_OUT_OF_RSRC;
return -EBUSY;
@@ -1978,7 +1978,7 @@ int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count)
rq = esas2r_alloc_request(a);
if (rq == NULL) {
esas2r_debug("esas2r_read_fs: out of requests");
up(&a->fs_api_semaphore);
mutex_unlock(&a->fs_api_mutex);
goto busy;
}

@@ -2006,7 +2006,7 @@ int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count)
;
dont_wait:
/* Free the request and keep going */
up(&a->fs_api_semaphore);
mutex_unlock(&a->fs_api_mutex);
esas2r_free_request(a, (struct esas2r_request *)rq);

/* Pick up possible error code from above */

@@ -519,7 +519,7 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
* @skb: The receive skb
* @netdev: The associated net device
* @ptype: The packet_type structure which was used to register this handler
* @orig_dev: The original net_device the the skb was received on.
* @orig_dev: The original net_device the skb was received on.
* (in case dev is a bond)
*
* Returns: 0 for success
@@ -542,7 +542,7 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
* @skb: The receive skb
* @netdev: The associated net device
* @ptype: The packet_type structure which was used to register this handler
* @orig_dev: The original net_device the the skb was received on.
* @orig_dev: The original net_device the skb was received on.
* (in case dev is a bond)
*
* Returns: 0 for success
@@ -2258,7 +2258,7 @@ static int _fcoe_create(struct net_device *netdev, enum fip_mode fip_mode,
fcoe_interface_cleanup(fcoe);
mutex_unlock(&fcoe_config_mutex);
fcoe_ctlr_device_delete(ctlr_dev);
goto out;
return rc;
}

/* Make this the "master" N_Port */
@@ -2299,7 +2299,7 @@ static int _fcoe_create(struct net_device *netdev, enum fip_mode fip_mode,
out_nodev:
rtnl_unlock();
mutex_unlock(&fcoe_config_mutex);
out:

return rc;
}

@@ -2590,7 +2590,7 @@ module_exit(fcoe_exit);
* fcoe_flogi_resp() - FCoE specific FLOGI and FDISC response handler
* @seq: active sequence in the FLOGI or FDISC exchange
* @fp: response frame, or error encoded in a pointer (timeout)
* @arg: pointer the the fcoe_ctlr structure
* @arg: pointer to the fcoe_ctlr structure
*
* This handles MAC address management for FCoE, then passes control on to
* the libfc FLOGI response handler.
@@ -2619,7 +2619,7 @@ static void fcoe_flogi_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
* fcoe_logo_resp() - FCoE specific LOGO response handler
* @seq: active sequence in the LOGO exchange
* @fp: response frame, or error encoded in a pointer (timeout)
* @arg: pointer the the fcoe_ctlr structure
* @arg: pointer to the fcoe_ctlr structure
*
* This handles MAC address management for FCoE, then passes control on to
* the libfc LOGO response handler.

@@ -632,6 +632,7 @@ static ssize_t fnic_reset_stats_write(struct file *file,
sizeof(struct io_path_stats) - sizeof(u64));
memset(fw_stats_p+1, 0,
sizeof(struct fw_stats) - sizeof(u64));
getnstimeofday(&stats->stats_timestamps.last_reset_time);
}

(*ppos)++;

@@ -65,6 +65,30 @@ void fnic_handle_link(struct work_struct *work)
fnic->link_status = vnic_dev_link_status(fnic->vdev);
fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);

switch (vnic_dev_port_speed(fnic->vdev)) {
case DCEM_PORTSPEED_10G:
fc_host_speed(fnic->lport->host) = FC_PORTSPEED_10GBIT;
fnic->lport->link_supported_speeds = FC_PORTSPEED_10GBIT;
break;
case DCEM_PORTSPEED_25G:
fc_host_speed(fnic->lport->host) = FC_PORTSPEED_25GBIT;
fnic->lport->link_supported_speeds = FC_PORTSPEED_25GBIT;
break;
case DCEM_PORTSPEED_40G:
case DCEM_PORTSPEED_4x10G:
fc_host_speed(fnic->lport->host) = FC_PORTSPEED_40GBIT;
fnic->lport->link_supported_speeds = FC_PORTSPEED_40GBIT;
break;
case DCEM_PORTSPEED_100G:
fc_host_speed(fnic->lport->host) = FC_PORTSPEED_100GBIT;
fnic->lport->link_supported_speeds = FC_PORTSPEED_100GBIT;
break;
default:
fc_host_speed(fnic->lport->host) = FC_PORTSPEED_UNKNOWN;
fnic->lport->link_supported_speeds = FC_PORTSPEED_UNKNOWN;
break;
}

if (old_link_status == fnic->link_status) {
if (!fnic->link_status) {
/* DOWN -> DOWN */

@@ -66,4 +66,13 @@ struct fnic_io_req {
struct completion *dr_done; /* completion for device reset */
};

enum fnic_port_speeds {
DCEM_PORTSPEED_NONE = 0,
DCEM_PORTSPEED_1G = 1000,
DCEM_PORTSPEED_10G = 10000,
DCEM_PORTSPEED_40G = 40000,
DCEM_PORTSPEED_4x10G = 41000,
DCEM_PORTSPEED_25G = 25000,
DCEM_PORTSPEED_100G = 100000,
};
#endif /* _FNIC_IO_H_ */

@@ -176,11 +176,21 @@ static void fnic_get_host_speed(struct Scsi_Host *shost)

/* Add in other values as they get defined in fw */
switch (port_speed) {
case 10000:
case DCEM_PORTSPEED_10G:
fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
break;
case DCEM_PORTSPEED_25G:
fc_host_speed(shost) = FC_PORTSPEED_25GBIT;
break;
case DCEM_PORTSPEED_40G:
case DCEM_PORTSPEED_4x10G:
fc_host_speed(shost) = FC_PORTSPEED_40GBIT;
break;
case DCEM_PORTSPEED_100G:
fc_host_speed(shost) = FC_PORTSPEED_100GBIT;
break;
default:
fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
break;
}
}

@@ -466,15 +466,27 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
}

rp = rport->dd_data;
if (!rp || rp->rp_state != RPORT_ST_READY) {
if (!rp || rp->rp_state == RPORT_ST_DELETE) {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"returning DID_NO_CONNECT for IO as rport is removed\n");
"rport 0x%x removed, returning DID_NO_CONNECT\n",
rport->port_id);

atomic64_inc(&fnic_stats->misc_stats.rport_not_ready);
sc->result = DID_NO_CONNECT<<16;
done(sc);
return 0;
}

if (rp->rp_state != RPORT_ST_READY) {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"rport 0x%x in state 0x%x, returning DID_IMM_RETRY\n",
rport->port_id, rp->rp_state);

sc->result = DID_IMM_RETRY << 16;
done(sc);
return 0;
}

if (lp->state != LPORT_ST_READY || !(lp->link_up))
return SCSI_MLQUEUE_HOST_BUSY;

@@ -633,6 +645,7 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,

atomic64_set(&fnic->fnic_stats.fw_stats.active_fw_reqs, 0);
atomic64_set(&fnic->fnic_stats.io_stats.active_ios, 0);
atomic64_set(&fnic->io_cmpl_skip, 0);

spin_lock_irqsave(&fnic->fnic_lock, flags);

@@ -16,6 +16,12 @@
*/
#ifndef _FNIC_STATS_H_
#define _FNIC_STATS_H_

struct stats_timestamps {
struct timespec last_reset_time;
struct timespec last_read_time;
};

struct io_path_stats {
atomic64_t active_ios;
atomic64_t max_active_ios;
@@ -110,6 +116,7 @@ struct misc_stats {
};

struct fnic_stats {
struct stats_timestamps stats_timestamps;
struct io_path_stats io_stats;
struct abort_stats abts_stats;
struct terminate_stats term_stats;

@@ -219,7 +219,31 @@ int fnic_get_stats_data(struct stats_debug_info *debug,
int buf_size = debug->buf_size;
struct timespec val1, val2;

getnstimeofday(&val1);
len = snprintf(debug->debug_buffer + len, buf_size - len,
"------------------------------------------\n"
"\t\tTime\n"
"------------------------------------------\n");

len += snprintf(debug->debug_buffer + len, buf_size - len,
"Current time : [%ld:%ld]\n"
"Last stats reset time: [%ld:%ld]\n"
"Last stats read time: [%ld:%ld]\n"
"delta since last reset: [%ld:%ld]\n"
"delta since last read: [%ld:%ld]\n",
val1.tv_sec, val1.tv_nsec,
stats->stats_timestamps.last_reset_time.tv_sec,
stats->stats_timestamps.last_reset_time.tv_nsec,
stats->stats_timestamps.last_read_time.tv_sec,
stats->stats_timestamps.last_read_time.tv_nsec,
timespec_sub(val1, stats->stats_timestamps.last_reset_time).tv_sec,
timespec_sub(val1, stats->stats_timestamps.last_reset_time).tv_nsec,
timespec_sub(val1, stats->stats_timestamps.last_read_time).tv_sec,
timespec_sub(val1, stats->stats_timestamps.last_read_time).tv_nsec);

stats->stats_timestamps.last_read_time = val1;

len += snprintf(debug->debug_buffer + len, buf_size - len,
"------------------------------------------\n"
"\t\tIO Statistics\n"
"------------------------------------------\n");

@@ -6,4 +6,12 @@ config SCSI_HISI_SAS
select BLK_DEV_INTEGRITY
depends on ATA
help
This driver supports HiSilicon's SAS HBA
This driver supports HiSilicon's SAS HBA, including support based
on platform device

config SCSI_HISI_SAS_PCI
tristate "HiSilicon SAS on PCI bus"
depends on SCSI_HISI_SAS
depends on PCI
help
This driver supports HiSilicon's SAS HBA based on PCI device

@@ -1,2 +1,3 @@
obj-$(CONFIG_SCSI_HISI_SAS) += hisi_sas_main.o
obj-$(CONFIG_SCSI_HISI_SAS) += hisi_sas_v1_hw.o hisi_sas_v2_hw.o
obj-$(CONFIG_SCSI_HISI_SAS_PCI) += hisi_sas_v3_hw.o

@@ -18,6 +18,7 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
@@ -33,10 +34,24 @@
#define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES
#define HISI_SAS_RESET_BIT 0

#define HISI_SAS_STATUS_BUF_SZ \
(sizeof(struct hisi_sas_err_record) + 1024)
#define HISI_SAS_COMMAND_TABLE_SZ \
(((sizeof(union hisi_sas_command_table)+3)/4)*4)
#define HISI_SAS_STATUS_BUF_SZ (sizeof(struct hisi_sas_status_buffer))
#define HISI_SAS_COMMAND_TABLE_SZ (sizeof(union hisi_sas_command_table))

#define hisi_sas_status_buf_addr(buf) \
(buf + offsetof(struct hisi_sas_slot_buf_table, status_buffer))
#define hisi_sas_status_buf_addr_mem(slot) hisi_sas_status_buf_addr(slot->buf)
#define hisi_sas_status_buf_addr_dma(slot) \
hisi_sas_status_buf_addr(slot->buf_dma)

#define hisi_sas_cmd_hdr_addr(buf) \
(buf + offsetof(struct hisi_sas_slot_buf_table, command_header))
#define hisi_sas_cmd_hdr_addr_mem(slot) hisi_sas_cmd_hdr_addr(slot->buf)
#define hisi_sas_cmd_hdr_addr_dma(slot) hisi_sas_cmd_hdr_addr(slot->buf_dma)

#define hisi_sas_sge_addr(buf) \
(buf + offsetof(struct hisi_sas_slot_buf_table, sge_page))
#define hisi_sas_sge_addr_mem(slot) hisi_sas_sge_addr(slot->buf)
#define hisi_sas_sge_addr_dma(slot) hisi_sas_sge_addr(slot->buf_dma)

#define HISI_SAS_MAX_SSP_RESP_SZ (sizeof(struct ssp_frame_hdr) + 1024)
#define HISI_SAS_MAX_SMP_RESP_SZ 1028
@@ -46,6 +61,12 @@
((type == SAS_EDGE_EXPANDER_DEVICE) || \
(type == SAS_FANOUT_EXPANDER_DEVICE))

#define HISI_SAS_SATA_PROTOCOL_NONDATA 0x1
#define HISI_SAS_SATA_PROTOCOL_PIO 0x2
#define HISI_SAS_SATA_PROTOCOL_DMA 0x4
#define HISI_SAS_SATA_PROTOCOL_FPDMA 0x8
#define HISI_SAS_SATA_PROTOCOL_ATAPI 0x10

struct hisi_hba;

enum {
@@ -78,11 +99,11 @@ struct hisi_sas_phy {
struct work_struct phyup_ws;
u64 port_id; /* from hw */
u64 dev_sas_addr;
u64 phy_type;
u64 frame_rcvd_size;
u8 frame_rcvd[32];
u8 phy_attached;
u8 reserved[3];
u32 phy_type;
enum sas_linkrate minimum_linkrate;
enum sas_linkrate maximum_linkrate;
};
@@ -102,20 +123,23 @@ struct hisi_sas_cq {

struct hisi_sas_dq {
struct hisi_hba *hisi_hba;
struct hisi_sas_slot *slot_prep;
spinlock_t lock;
int wr_point;
int id;
};

struct hisi_sas_device {
enum sas_device_type dev_type;
struct hisi_hba *hisi_hba;
struct domain_device *sas_device;
u64 attached_phy;
u64 device_id;
atomic64_t running_req;
struct hisi_sas_dq *dq;
struct list_head list;
u8 dev_status;
u64 attached_phy;
atomic64_t running_req;
enum sas_device_type dev_type;
int device_id;
int sata_idx;
u8 dev_status;
};

struct hisi_sas_slot {
@@ -129,14 +153,10 @@ struct hisi_sas_slot {
int cmplt_queue_slot;
int idx;
int abort;
void *buf;
dma_addr_t buf_dma;
void *cmd_hdr;
dma_addr_t cmd_hdr_dma;
void *status_buffer;
dma_addr_t status_buffer_dma;
void *command_table;
dma_addr_t command_table_dma;
struct hisi_sas_sge_page *sge_page;
dma_addr_t sge_page_dma;
struct work_struct abort_slot;
struct timer_list internal_abort_timer;
};
@@ -154,9 +174,8 @@ struct hisi_sas_hw {
struct domain_device *device);
struct hisi_sas_device *(*alloc_dev)(struct domain_device *device);
void (*sl_notify)(struct hisi_hba *hisi_hba, int phy_no);
int (*get_free_slot)(struct hisi_hba *hisi_hba, u32 dev_id,
int *q, int *s);
void (*start_delivery)(struct hisi_hba *hisi_hba);
int (*get_free_slot)(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq);
void (*start_delivery)(struct hisi_sas_dq *dq);
int (*prep_ssp)(struct hisi_hba *hisi_hba,
struct hisi_sas_slot *slot, int is_tmf,
struct hisi_sas_tmf_task *tmf);
@@ -179,6 +198,8 @@ struct hisi_sas_hw {
void (*free_device)(struct hisi_hba *hisi_hba,
struct hisi_sas_device *dev);
int (*get_wideport_bitmap)(struct hisi_hba *hisi_hba, int port_id);
void (*dereg_device)(struct hisi_hba *hisi_hba,
struct domain_device *device);
int (*soft_reset)(struct hisi_hba *hisi_hba);
int max_command_entries;
int complete_hdr_size;
@@ -188,7 +209,10 @@ struct hisi_hba {
/* This must be the first element, used by SHOST_TO_SAS_HA */
struct sas_ha_struct *p;

struct platform_device *pdev;
struct platform_device *platform_dev;
struct pci_dev *pci_dev;
struct device *dev;

void __iomem *regs;
struct regmap *ctrl;
u32 ctrl_reset_reg;
@@ -217,12 +241,9 @@ struct hisi_hba {
struct hisi_sas_port port[HISI_SAS_MAX_PHYS];

int queue_count;
struct hisi_sas_slot *slot_prep;

struct dma_pool *sge_page_pool;
struct dma_pool *buffer_pool;
struct hisi_sas_device devices[HISI_SAS_MAX_DEVICES];
struct dma_pool *command_table_pool;
struct dma_pool *status_buffer_pool;
struct hisi_sas_cmd_hdr *cmd_hdr[HISI_SAS_MAX_QUEUES];
dma_addr_t cmd_hdr_dma[HISI_SAS_MAX_QUEUES];
void *complete_hdr[HISI_SAS_MAX_QUEUES];
@@ -334,7 +355,7 @@ struct hisi_sas_command_table_stp {
#define HISI_SAS_SGE_PAGE_CNT SG_CHUNK_SIZE
struct hisi_sas_sge_page {
struct hisi_sas_sge sge[HISI_SAS_SGE_PAGE_CNT];
};
} __aligned(16);

struct hisi_sas_command_table_ssp {
struct ssp_frame_hdr hdr;
@@ -353,9 +374,31 @@ union hisi_sas_command_table {
struct hisi_sas_command_table_ssp ssp;
struct hisi_sas_command_table_smp smp;
struct hisi_sas_command_table_stp stp;
} __aligned(16);

struct hisi_sas_status_buffer {
struct hisi_sas_err_record err;
u8 iu[1024];
} __aligned(16);

struct hisi_sas_slot_buf_table {
struct hisi_sas_status_buffer status_buffer;
union hisi_sas_command_table command_header;
struct hisi_sas_sge_page sge_page;
};

extern struct scsi_transport_template *hisi_sas_stt;
extern struct scsi_host_template *hisi_sas_sht;

extern void hisi_sas_init_add(struct hisi_hba *hisi_hba);
extern int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost);
extern void hisi_sas_free(struct hisi_hba *hisi_hba);
extern u8 hisi_sas_get_ata_protocol(u8 cmd, int direction);
extern struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port);
extern void hisi_sas_sata_done(struct sas_task *task,
struct hisi_sas_slot *slot);
extern int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag);
extern int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba);
extern int hisi_sas_probe(struct platform_device *pdev,
const struct hisi_sas_hw *ops);
extern int hisi_sas_remove(struct platform_device *pdev);

@ -23,6 +23,97 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
|
||||
int abort_flag, int tag);
|
||||
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
|
||||
|
||||
u8 hisi_sas_get_ata_protocol(u8 cmd, int direction)
|
||||
{
|
||||
switch (cmd) {
|
||||
case ATA_CMD_FPDMA_WRITE:
|
||||
case ATA_CMD_FPDMA_READ:
|
||||
case ATA_CMD_FPDMA_RECV:
|
||||
case ATA_CMD_FPDMA_SEND:
|
||||
case ATA_CMD_NCQ_NON_DATA:
|
||||
return HISI_SAS_SATA_PROTOCOL_FPDMA;
|
||||
|
||||
case ATA_CMD_DOWNLOAD_MICRO:
|
||||
case ATA_CMD_ID_ATA:
|
||||
case ATA_CMD_PMP_READ:
|
||||
case ATA_CMD_READ_LOG_EXT:
|
||||
case ATA_CMD_PIO_READ:
|
||||
case ATA_CMD_PIO_READ_EXT:
|
||||
case ATA_CMD_PMP_WRITE:
|
||||
case ATA_CMD_WRITE_LOG_EXT:
|
||||
case ATA_CMD_PIO_WRITE:
|
||||
case ATA_CMD_PIO_WRITE_EXT:
|
||||
return HISI_SAS_SATA_PROTOCOL_PIO;
|
||||
|
||||
case ATA_CMD_DSM:
|
||||
case ATA_CMD_DOWNLOAD_MICRO_DMA:
|
||||
case ATA_CMD_PMP_READ_DMA:
|
||||
case ATA_CMD_PMP_WRITE_DMA:
|
||||
case ATA_CMD_READ:
|
||||
case ATA_CMD_READ_EXT:
|
||||
case ATA_CMD_READ_LOG_DMA_EXT:
|
||||
case ATA_CMD_READ_STREAM_DMA_EXT:
|
||||
case ATA_CMD_TRUSTED_RCV_DMA:
|
||||
case ATA_CMD_TRUSTED_SND_DMA:
|
||||
case ATA_CMD_WRITE:
|
||||
case ATA_CMD_WRITE_EXT:
|
||||
case ATA_CMD_WRITE_FUA_EXT:
|
||||
case ATA_CMD_WRITE_QUEUED:
|
||||
case ATA_CMD_WRITE_LOG_DMA_EXT:
|
||||
case ATA_CMD_WRITE_STREAM_DMA_EXT:
|
||||
return HISI_SAS_SATA_PROTOCOL_DMA;
|
||||
|
||||
case ATA_CMD_CHK_POWER:
|
||||
case ATA_CMD_DEV_RESET:
|
||||
case ATA_CMD_EDD:
|
||||
case ATA_CMD_FLUSH:
|
||||
case ATA_CMD_FLUSH_EXT:
|
||||
case ATA_CMD_VERIFY:
|
||||
case ATA_CMD_VERIFY_EXT:
|
||||
case ATA_CMD_SET_FEATURES:
|
||||
case ATA_CMD_STANDBY:
|
||||
case ATA_CMD_STANDBYNOW1:
|
||||
return HISI_SAS_SATA_PROTOCOL_NONDATA;
|
||||
default:
|
||||
if (direction == DMA_NONE)
|
||||
return HISI_SAS_SATA_PROTOCOL_NONDATA;
|
||||
return HISI_SAS_SATA_PROTOCOL_PIO;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);
|
||||
|
||||
void hisi_sas_sata_done(struct sas_task *task,
|
||||
struct hisi_sas_slot *slot)
|
||||
{
|
||||
struct task_status_struct *ts = &task->task_status;
|
||||
struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
|
||||
struct hisi_sas_status_buffer *status_buf =
|
||||
hisi_sas_status_buf_addr_mem(slot);
|
||||
u8 *iu = &status_buf->iu[0];
|
||||
struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;
|
||||
|
||||
resp->frame_len = sizeof(struct dev_to_host_fis);
|
||||
memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));
|
||||
|
||||
ts->buf_valid_size = sizeof(*resp);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hisi_sas_sata_done);
|
||||
|
||||
int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag)
|
||||
{
|
||||
struct ata_queued_cmd *qc = task->uldd_task;
|
||||
|
||||
if (qc) {
|
||||
if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
|
||||
qc->tf.command == ATA_CMD_FPDMA_READ) {
|
||||
*tag = qc->tag;
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag);
|
||||
|
||||
static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
|
||||
{
|
||||
return device->port->ha->lldd_ha;
|
||||
@ -79,7 +170,7 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
{

    if (task) {
        struct device *dev = &hisi_hba->pdev->dev;
        struct device *dev = hisi_hba->dev;
        struct domain_device *device = task->dev;
        struct hisi_sas_device *sas_dev = device->lldd_dev;

@ -94,17 +185,9 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
            atomic64_dec(&sas_dev->running_req);
    }

    if (slot->command_table)
        dma_pool_free(hisi_hba->command_table_pool,
                      slot->command_table, slot->command_table_dma);
    if (slot->buf)
        dma_pool_free(hisi_hba->buffer_pool, slot->buf, slot->buf_dma);

    if (slot->status_buffer)
        dma_pool_free(hisi_hba->status_buffer_pool,
                      slot->status_buffer, slot->status_buffer_dma);

    if (slot->sge_page)
        dma_pool_free(hisi_hba->sge_page_pool, slot->sge_page,
                      slot->sge_page_dma);

    list_del_init(&slot->entry);
    slot->task = NULL;
@ -156,7 +239,7 @@ static void hisi_sas_slot_abort(struct work_struct *work)
    struct scsi_cmnd *cmnd = task->uldd_task;
    struct hisi_sas_tmf_task tmf_task;
    struct scsi_lun lun;
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;
    int tag = abort_slot->idx;
    unsigned long flags;

@ -179,17 +262,18 @@ static void hisi_sas_slot_abort(struct work_struct *work)
    task->task_done(task);
}

static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
                              int is_tmf, struct hisi_sas_tmf_task *tmf,
static int hisi_sas_task_prep(struct sas_task *task, struct hisi_sas_dq
                              *dq, int is_tmf, struct hisi_sas_tmf_task *tmf,
                              int *pass)
{
    struct hisi_hba *hisi_hba = dq->hisi_hba;
    struct domain_device *device = task->dev;
    struct hisi_sas_device *sas_dev = device->lldd_dev;
    struct hisi_sas_port *port;
    struct hisi_sas_slot *slot;
    struct hisi_sas_cmd_hdr *cmd_hdr_base;
    struct asd_sas_port *sas_port = device->port;
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;
    int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
    unsigned long flags;

@ -209,7 +293,7 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,

    if (DEV_IS_GONE(sas_dev)) {
        if (sas_dev)
            dev_info(dev, "task prep: device %llu not ready\n",
            dev_info(dev, "task prep: device %d not ready\n",
                     sas_dev->device_id);
        else
            dev_info(dev, "task prep: device %016llx not ready\n",
@ -240,18 +324,24 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
    } else
        n_elem = task->num_scatter;

    spin_lock_irqsave(&hisi_hba->lock, flags);
    if (hisi_hba->hw->slot_index_alloc)
        rc = hisi_hba->hw->slot_index_alloc(hisi_hba, &slot_idx,
                                            device);
    else
        rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
    if (rc)
    if (rc) {
        spin_unlock_irqrestore(&hisi_hba->lock, flags);
        goto err_out;
    rc = hisi_hba->hw->get_free_slot(hisi_hba, sas_dev->device_id,
                                     &dlvry_queue, &dlvry_queue_slot);
    }
    spin_unlock_irqrestore(&hisi_hba->lock, flags);

    rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
    if (rc)
        goto err_out_tag;

    dlvry_queue = dq->id;
    dlvry_queue_slot = dq->wr_point;
    slot = &hisi_hba->slot_info[slot_idx];
    memset(slot, 0, sizeof(struct hisi_sas_slot));

@ -266,24 +356,15 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
    task->lldd_task = slot;
    INIT_WORK(&slot->abort_slot, hisi_sas_slot_abort);

    slot->status_buffer = dma_pool_alloc(hisi_hba->status_buffer_pool,
                                         GFP_ATOMIC,
                                         &slot->status_buffer_dma);
    if (!slot->status_buffer) {
    slot->buf = dma_pool_alloc(hisi_hba->buffer_pool,
                               GFP_ATOMIC, &slot->buf_dma);
    if (!slot->buf) {
        rc = -ENOMEM;
        goto err_out_slot_buf;
    }
    memset(slot->status_buffer, 0, HISI_SAS_STATUS_BUF_SZ);

    slot->command_table = dma_pool_alloc(hisi_hba->command_table_pool,
                                         GFP_ATOMIC,
                                         &slot->command_table_dma);
    if (!slot->command_table) {
        rc = -ENOMEM;
        goto err_out_status_buf;
    }
    memset(slot->command_table, 0, HISI_SAS_COMMAND_TABLE_SZ);
    memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
    memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
    memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ);

    switch (task->task_proto) {
    case SAS_PROTOCOL_SMP:
@ -306,9 +387,7 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,

    if (rc) {
        dev_err(dev, "task prep: rc = 0x%x\n", rc);
        if (slot->sge_page)
            goto err_out_sge;
        goto err_out_command_table;
        goto err_out_buf;
    }

    list_add_tail(&slot->entry, &sas_dev->list);
@ -316,26 +395,22 @@ static int hisi_sas_task_prep(struct sas_task *task, struct hisi_hba *hisi_hba,
    task->task_state_flags |= SAS_TASK_AT_INITIATOR;
    spin_unlock_irqrestore(&task->task_state_lock, flags);

    hisi_hba->slot_prep = slot;
    dq->slot_prep = slot;

    atomic64_inc(&sas_dev->running_req);
    ++(*pass);

    return 0;

err_out_sge:
    dma_pool_free(hisi_hba->sge_page_pool, slot->sge_page,
                  slot->sge_page_dma);
err_out_command_table:
    dma_pool_free(hisi_hba->command_table_pool, slot->command_table,
                  slot->command_table_dma);
err_out_status_buf:
    dma_pool_free(hisi_hba->status_buffer_pool, slot->status_buffer,
                  slot->status_buffer_dma);
err_out_buf:
    dma_pool_free(hisi_hba->buffer_pool, slot->buf,
                  slot->buf_dma);
err_out_slot_buf:
    /* Nothing to be done */
err_out_tag:
    spin_lock_irqsave(&hisi_hba->lock, flags);
    hisi_sas_slot_index_free(hisi_hba, slot_idx);
    spin_unlock_irqrestore(&hisi_hba->lock, flags);
err_out:
    dev_err(dev, "task prep: failed[%d]!\n", rc);
    if (!sas_protocol_ata(task->task_proto))
@ -353,20 +428,23 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
    u32 pass = 0;
    unsigned long flags;
    struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;
    struct domain_device *device = task->dev;
    struct hisi_sas_device *sas_dev = device->lldd_dev;
    struct hisi_sas_dq *dq = sas_dev->dq;

    if (unlikely(test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)))
        return -EINVAL;

    /* protect task_prep and start_delivery sequence */
    spin_lock_irqsave(&hisi_hba->lock, flags);
    rc = hisi_sas_task_prep(task, hisi_hba, is_tmf, tmf, &pass);
    spin_lock_irqsave(&dq->lock, flags);
    rc = hisi_sas_task_prep(task, dq, is_tmf, tmf, &pass);
    if (rc)
        dev_err(dev, "task exec: failed[%d]!\n", rc);

    if (likely(pass))
        hisi_hba->hw->start_delivery(hisi_hba);
    spin_unlock_irqrestore(&hisi_hba->lock, flags);
        hisi_hba->hw->start_delivery(dq);
    spin_unlock_irqrestore(&dq->lock, flags);

    return rc;
}
@ -421,12 +499,16 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
    spin_lock(&hisi_hba->lock);
    for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
        if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
            int queue = i % hisi_hba->queue_count;
            struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

            hisi_hba->devices[i].device_id = i;
            sas_dev = &hisi_hba->devices[i];
            sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
            sas_dev->dev_type = device->dev_type;
            sas_dev->hisi_hba = hisi_hba;
            sas_dev->sas_device = device;
            sas_dev->dq = dq;
            INIT_LIST_HEAD(&hisi_hba->devices[i].list);
            break;
        }
@ -441,7 +523,7 @@ static int hisi_sas_dev_found(struct domain_device *device)
    struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
    struct domain_device *parent_dev = device->parent;
    struct hisi_sas_device *sas_dev;
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;

    if (hisi_hba->hw->alloc_dev)
        sas_dev = hisi_hba->hw->alloc_dev(device);
@ -622,19 +704,28 @@ static void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
    }
}

static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
                                  struct domain_device *device)
{
    if (hisi_hba->hw->dereg_device)
        hisi_hba->hw->dereg_device(hisi_hba, device);
}

static void hisi_sas_dev_gone(struct domain_device *device)
{
    struct hisi_sas_device *sas_dev = device->lldd_dev;
    struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
    struct device *dev = &hisi_hba->pdev->dev;
    u64 dev_id = sas_dev->device_id;
    struct device *dev = hisi_hba->dev;
    int dev_id = sas_dev->device_id;

    dev_info(dev, "found dev[%lld:%x] is gone\n",
    dev_info(dev, "found dev[%d:%x] is gone\n",
             sas_dev->device_id, sas_dev->dev_type);

    hisi_sas_internal_task_abort(hisi_hba, device,
                                 HISI_SAS_INT_ABT_DEV, 0);

    hisi_sas_dereg_device(hisi_hba, device);

    hisi_hba->hw->free_device(hisi_hba, sas_dev);
    device->lldd_dev = NULL;
    memset(sas_dev, 0, sizeof(*sas_dev));
@ -691,8 +782,13 @@ static void hisi_sas_task_done(struct sas_task *task)
static void hisi_sas_tmf_timedout(unsigned long data)
{
    struct sas_task *task = (struct sas_task *)data;
    unsigned long flags;

    spin_lock_irqsave(&task->task_state_lock, flags);
    if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
        task->task_state_flags |= SAS_TASK_STATE_ABORTED;
    spin_unlock_irqrestore(&task->task_state_lock, flags);

    complete(&task->slow_task->completion);
}

@ -704,7 +800,7 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
{
    struct hisi_sas_device *sas_dev = device->lldd_dev;
    struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;
    struct sas_task *task;
    int res, retry;

@ -821,7 +917,7 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device)
    struct ata_link *link;
    int rc = TMF_RESP_FUNC_FAILED;
    struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;
    int s = sizeof(struct host_to_dev_fis);
    unsigned long flags;

@ -879,7 +975,7 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
        return -1;

    if (!test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
        struct device *dev = &hisi_hba->pdev->dev;
        struct device *dev = hisi_hba->dev;
        struct sas_ha_struct *sas_ha = &hisi_hba->sha;
        unsigned long flags;

@ -912,7 +1008,7 @@ static int hisi_sas_abort_task(struct sas_task *task)
    struct domain_device *device = task->dev;
    struct hisi_sas_device *sas_dev = device->lldd_dev;
    struct hisi_hba *hisi_hba = dev_to_hisi_hba(task->dev);
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;
    int rc = TMF_RESP_FUNC_FAILED;
    unsigned long flags;

@ -961,9 +1057,10 @@ static int hisi_sas_abort_task(struct sas_task *task)
        if (task->dev->dev_type == SAS_SATA_DEV) {
            hisi_sas_internal_task_abort(hisi_hba, device,
                                         HISI_SAS_INT_ABT_DEV, 0);
            hisi_sas_dereg_device(hisi_hba, device);
            rc = hisi_sas_softreset_ata_disk(device);
        }
    } else if (task->task_proto & SAS_PROTOCOL_SMP) {
    } else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
        /* SMP */
        struct hisi_sas_slot *slot = task->lldd_task;
        u32 tag = slot->idx;
@ -1027,6 +1124,10 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
        return TMF_RESP_FUNC_FAILED;
    sas_dev->dev_status = HISI_SAS_DEV_NORMAL;

    hisi_sas_internal_task_abort(hisi_hba, device,
                                 HISI_SAS_INT_ABT_DEV, 0);
    hisi_sas_dereg_device(hisi_hba, device);

    rc = hisi_sas_debug_I_T_nexus_reset(device);

    if (rc == TMF_RESP_FUNC_COMPLETE) {
@ -1041,7 +1142,7 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
    struct hisi_sas_device *sas_dev = device->lldd_dev;
    struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;
    unsigned long flags;
    int rc = TMF_RESP_FUNC_FAILED;

@ -1054,6 +1155,7 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
                                             HISI_SAS_INT_ABT_DEV, 0);
        if (rc == TMF_RESP_FUNC_FAILED)
            goto out;
        hisi_sas_dereg_device(hisi_hba, device);

        phy = sas_get_local_phy(device);

@ -1077,7 +1179,7 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
    }
out:
    if (rc != TMF_RESP_FUNC_COMPLETE)
        dev_err(dev, "lu_reset: for device[%llx]:rc= %d\n",
        dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
                sas_dev->device_id, rc);
    return rc;
}
@ -1124,19 +1226,20 @@ static int hisi_sas_query_task(struct sas_task *task)
}

static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id,
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
                                  struct sas_task *task, int abort_flag,
                                  int task_tag)
{
    struct domain_device *device = task->dev;
    struct hisi_sas_device *sas_dev = device->lldd_dev;
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;
    struct hisi_sas_port *port;
    struct hisi_sas_slot *slot;
    struct asd_sas_port *sas_port = device->port;
    struct hisi_sas_cmd_hdr *cmd_hdr_base;
    struct hisi_sas_dq *dq = sas_dev->dq;
    int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
    unsigned long flags;
    unsigned long flags, flags_dq;

    if (unlikely(test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)))
        return -EINVAL;
@ -1147,14 +1250,22 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id,
    port = to_hisi_sas_port(sas_port);

    /* simply get a slot and send abort command */
    spin_lock_irqsave(&hisi_hba->lock, flags);
    rc = hisi_sas_slot_index_alloc(hisi_hba, &slot_idx);
    if (rc)
    if (rc) {
        spin_unlock_irqrestore(&hisi_hba->lock, flags);
        goto err_out;
    rc = hisi_hba->hw->get_free_slot(hisi_hba, sas_dev->device_id,
                                     &dlvry_queue, &dlvry_queue_slot);
    }
    spin_unlock_irqrestore(&hisi_hba->lock, flags);

    spin_lock_irqsave(&dq->lock, flags_dq);
    rc = hisi_hba->hw->get_free_slot(hisi_hba, dq);
    if (rc)
        goto err_out_tag;

    dlvry_queue = dq->id;
    dlvry_queue_slot = dq->wr_point;

    slot = &hisi_hba->slot_info[slot_idx];
    memset(slot, 0, sizeof(struct hisi_sas_slot));

@ -1181,17 +1292,21 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, u64 device_id,
    task->task_state_flags |= SAS_TASK_AT_INITIATOR;
    spin_unlock_irqrestore(&task->task_state_lock, flags);

    hisi_hba->slot_prep = slot;
    dq->slot_prep = slot;

    atomic64_inc(&sas_dev->running_req);

    /* send abort command to our chip */
    hisi_hba->hw->start_delivery(hisi_hba);
    /* send abort command to the chip */
    hisi_hba->hw->start_delivery(dq);
    spin_unlock_irqrestore(&dq->lock, flags_dq);

    return 0;

err_out_tag:
    spin_lock_irqsave(&hisi_hba->lock, flags);
    hisi_sas_slot_index_free(hisi_hba, slot_idx);
    spin_unlock_irqrestore(&hisi_hba->lock, flags);
    spin_unlock_irqrestore(&dq->lock, flags_dq);
err_out:
    dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);

@ -1214,9 +1329,8 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
{
    struct sas_task *task;
    struct hisi_sas_device *sas_dev = device->lldd_dev;
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;
    int res;
    unsigned long flags;

    if (!hisi_hba->hw->prep_abort)
        return -EOPNOTSUPP;
@ -1233,11 +1347,8 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
    task->slow_task->timer.expires = jiffies + msecs_to_jiffies(110);
    add_timer(&task->slow_task->timer);

    /* Lock as we are alloc'ing a slot, which cannot be interrupted */
    spin_lock_irqsave(&hisi_hba->lock, flags);
    res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
                                            task, abort_flag, tag);
    spin_unlock_irqrestore(&hisi_hba->lock, flags);
    if (res) {
        del_timer(&task->slow_task->timer);
        dev_err(dev, "internal task abort: executing internal task failed: %d\n",
@ -1247,6 +1358,17 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
    wait_for_completion(&task->slow_task->completion);
    res = TMF_RESP_FUNC_FAILED;

    /* Internal abort timed out */
    if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
        if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
            struct hisi_sas_slot *slot = task->lldd_task;

            if (slot)
                slot->task = NULL;
            dev_err(dev, "internal task abort: timeout.\n");
        }
    }

    if (task->task_status.resp == SAS_TASK_COMPLETE &&
        task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
        res = TMF_RESP_FUNC_COMPLETE;
@ -1259,13 +1381,6 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
        goto exit;
    }

    /* Internal abort timed out */
    if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
        if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
            dev_err(dev, "internal task abort: timeout.\n");
        }
    }

exit:
    dev_dbg(dev, "internal task abort: task to dev %016llx task=%p "
            "resp: 0x%x sts 0x%x\n",
@ -1353,9 +1468,10 @@ void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state,
}
EXPORT_SYMBOL_GPL(hisi_sas_rescan_topology);

static struct scsi_transport_template *hisi_sas_stt;
struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);

static struct scsi_host_template hisi_sas_sht = {
static struct scsi_host_template _hisi_sas_sht = {
    .module = THIS_MODULE,
    .name = DRV_NAME,
    .queuecommand = sas_queuecommand,
@ -1375,6 +1491,8 @@ static struct scsi_host_template hisi_sas_sht = {
    .target_destroy = sas_target_destroy,
    .ioctl = sas_ioctl,
};
struct scsi_host_template *hisi_sas_sht = &_hisi_sas_sht;
EXPORT_SYMBOL_GPL(hisi_sas_sht);

static struct sas_domain_function_template hisi_sas_transport_ops = {
    .lldd_dev_found = hisi_sas_dev_found,
@ -1422,10 +1540,9 @@ void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
}
EXPORT_SYMBOL_GPL(hisi_sas_init_mem);

static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
{
    struct platform_device *pdev = hisi_hba->pdev;
    struct device *dev = &pdev->dev;
    struct device *dev = hisi_hba->dev;
    int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

    spin_lock_init(&hisi_hba->lock);
@ -1468,16 +1585,9 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
        goto err_out;
    }

    s = HISI_SAS_STATUS_BUF_SZ;
    hisi_hba->status_buffer_pool = dma_pool_create("status_buffer",
                                                   dev, s, 16, 0);
    if (!hisi_hba->status_buffer_pool)
        goto err_out;

    s = HISI_SAS_COMMAND_TABLE_SZ;
    hisi_hba->command_table_pool = dma_pool_create("command_table",
                                                   dev, s, 16, 0);
    if (!hisi_hba->command_table_pool)
    s = sizeof(struct hisi_sas_slot_buf_table);
    hisi_hba->buffer_pool = dma_pool_create("dma_buffer", dev, s, 16, 0);
    if (!hisi_hba->buffer_pool)
        goto err_out;

    s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
@ -1512,11 +1622,6 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
    if (!hisi_hba->slot_index_tags)
        goto err_out;

    hisi_hba->sge_page_pool = dma_pool_create("status_sge", dev,
                                              sizeof(struct hisi_sas_sge_page), 16, 0);
    if (!hisi_hba->sge_page_pool)
        goto err_out;

    s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
    hisi_hba->initial_fis = dma_alloc_coherent(dev, s,
                                               &hisi_hba->initial_fis_dma, GFP_KERNEL);
@ -1542,10 +1647,11 @@ static int hisi_sas_alloc(struct hisi_hba *hisi_hba, struct Scsi_Host *shost)
err_out:
    return -ENOMEM;
}
EXPORT_SYMBOL_GPL(hisi_sas_alloc);

static void hisi_sas_free(struct hisi_hba *hisi_hba)
void hisi_sas_free(struct hisi_hba *hisi_hba)
{
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;
    int i, s, max_command_entries = hisi_hba->hw->max_command_entries;

    for (i = 0; i < hisi_hba->queue_count; i++) {
@ -1562,9 +1668,7 @@ static void hisi_sas_free(struct hisi_hba *hisi_hba)
                          hisi_hba->complete_hdr_dma[i]);
    }

    dma_pool_destroy(hisi_hba->status_buffer_pool);
    dma_pool_destroy(hisi_hba->command_table_pool);
    dma_pool_destroy(hisi_hba->sge_page_pool);
    dma_pool_destroy(hisi_hba->buffer_pool);

    s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
    if (hisi_hba->itct)
@ -1598,6 +1702,7 @@ static void hisi_sas_free(struct hisi_hba *hisi_hba)
    if (hisi_hba->wq)
        destroy_workqueue(hisi_hba->wq);
}
EXPORT_SYMBOL_GPL(hisi_sas_free);

static void hisi_sas_rst_work_handler(struct work_struct *work)
{
@ -1607,6 +1712,74 @@ static void hisi_sas_rst_work_handler(struct work_struct *work)
    hisi_sas_controller_reset(hisi_hba);
}

int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
    struct device *dev = hisi_hba->dev;
    struct platform_device *pdev = hisi_hba->platform_dev;
    struct device_node *np = pdev ? pdev->dev.of_node : NULL;
    struct clk *refclk;

    if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
                                      SAS_ADDR_SIZE)) {
        dev_err(dev, "could not get property sas-addr\n");
        return -ENOENT;
    }

    if (np) {
        /*
         * These properties are only required for platform device-based
         * controller with DT firmware.
         */
        hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
                                                         "hisilicon,sas-syscon");
        if (IS_ERR(hisi_hba->ctrl)) {
            dev_err(dev, "could not get syscon\n");
            return -ENOENT;
        }

        if (device_property_read_u32(dev, "ctrl-reset-reg",
                                     &hisi_hba->ctrl_reset_reg)) {
            dev_err(dev,
                    "could not get property ctrl-reset-reg\n");
            return -ENOENT;
        }

        if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
                                     &hisi_hba->ctrl_reset_sts_reg)) {
            dev_err(dev,
                    "could not get property ctrl-reset-sts-reg\n");
            return -ENOENT;
        }

        if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
                                     &hisi_hba->ctrl_clock_ena_reg)) {
            dev_err(dev,
                    "could not get property ctrl-clock-ena-reg\n");
            return -ENOENT;
        }
    }

    refclk = devm_clk_get(dev, NULL);
    if (IS_ERR(refclk))
        dev_dbg(dev, "no ref clk property\n");
    else
        hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;

    if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
        dev_err(dev, "could not get property phy-count\n");
        return -ENOENT;
    }

    if (device_property_read_u32(dev, "queue-count",
                                 &hisi_hba->queue_count)) {
        dev_err(dev, "could not get property queue-count\n");
        return -ENOENT;
    }

    return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);

static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
                                              const struct hisi_sas_hw *hw)
{
@ -1614,10 +1787,8 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
    struct Scsi_Host *shost;
    struct hisi_hba *hisi_hba;
    struct device *dev = &pdev->dev;
    struct device_node *np = pdev->dev.of_node;
    struct clk *refclk;

    shost = scsi_host_alloc(&hisi_sas_sht, sizeof(*hisi_hba));
    shost = scsi_host_alloc(hisi_sas_sht, sizeof(*hisi_hba));
    if (!shost) {
        dev_err(dev, "scsi host alloc failed\n");
        return NULL;
@ -1626,46 +1797,14 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,

    INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
    hisi_hba->hw = hw;
    hisi_hba->pdev = pdev;
    hisi_hba->dev = dev;
    hisi_hba->platform_dev = pdev;
    hisi_hba->shost = shost;
    SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

    init_timer(&hisi_hba->timer);

    if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
                                      SAS_ADDR_SIZE))
        goto err_out;

    if (np) {
        hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
                                                         "hisilicon,sas-syscon");
        if (IS_ERR(hisi_hba->ctrl))
            goto err_out;

        if (device_property_read_u32(dev, "ctrl-reset-reg",
                                     &hisi_hba->ctrl_reset_reg))
            goto err_out;

        if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
                                     &hisi_hba->ctrl_reset_sts_reg))
            goto err_out;

        if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
                                     &hisi_hba->ctrl_clock_ena_reg))
            goto err_out;
    }

    refclk = devm_clk_get(&pdev->dev, NULL);
    if (IS_ERR(refclk))
        dev_dbg(dev, "no ref clk property\n");
    else
        hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;

    if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy))
        goto err_out;

    if (device_property_read_u32(dev, "queue-count",
                                 &hisi_hba->queue_count))
    if (hisi_sas_get_fw_info(hisi_hba) < 0)
        goto err_out;

    if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
@ -1691,7 +1830,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
    return NULL;
}

static void hisi_sas_init_add(struct hisi_hba *hisi_hba)
void hisi_sas_init_add(struct hisi_hba *hisi_hba)
{
    int i;

@ -1700,6 +1839,7 @@ static void hisi_sas_init_add(struct hisi_hba *hisi_hba)
             hisi_hba->sas_addr,
             SAS_ADDR_SIZE);
}
EXPORT_SYMBOL_GPL(hisi_sas_init_add);

int hisi_sas_probe(struct platform_device *pdev,
                   const struct hisi_sas_hw *hw)
@ -1743,7 +1883,7 @@ int hisi_sas_probe(struct platform_device *pdev,
    shost->cmd_per_lun = hisi_hba->hw->max_command_entries;

    sha->sas_ha_name = DRV_NAME;
    sha->dev = &hisi_hba->pdev->dev;
    sha->dev = hisi_hba->dev;
    sha->lldd_module = THIS_MODULE;
    sha->sas_addr = &hisi_hba->sas_addr[0];
    sha->num_phys = hisi_hba->n_phy;
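The libsas-layer changes above replace the single per-HBA submission path with per-device delivery queues (hisi_sas_dq): a device is bound to a queue once, at allocation time, with "i % queue_count". A minimal user-space sketch of that static binding follows; the constants are made up for illustration (the driver reads queue-count from firmware properties):

#include <stdio.h>

/* Hypothetical sizes, not the driver's. */
#define MAX_DEVICES 16
#define QUEUE_COUNT 4

int main(void)
{
    /* Same round-robin binding as hisi_sas_alloc_dev() above:
     * device i always submits through delivery queue i % QUEUE_COUNT. */
    for (int i = 0; i < MAX_DEVICES; i++)
        printf("device %2d -> dq %d\n", i, i % QUEUE_COUNT);
    return 0;
}

Binding a device to one queue up front is what lets task submission lock only dq->lock rather than hisi_hba->lock, so I/O to different devices can be queued concurrently.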
@ -505,7 +505,7 @@ static void setup_itct_v1_hw(struct hisi_hba *hisi_hba,
                             struct hisi_sas_device *sas_dev)
{
    struct domain_device *device = sas_dev->sas_device;
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;
    u64 qw0, device_id = sas_dev->device_id;
    struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
    struct asd_sas_port *sas_port = device->port;
@ -571,7 +571,7 @@ static int reset_hw_v1_hw(struct hisi_hba *hisi_hba)
    int i;
    unsigned long end_time;
    u32 val;
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;

    for (i = 0; i < hisi_hba->n_phy; i++) {
        u32 phy_ctrl = hisi_sas_phy_read32(hisi_hba, i, PHY_CTRL);
@ -756,7 +756,7 @@ static void init_reg_v1_hw(struct hisi_hba *hisi_hba)

static int hw_init_v1_hw(struct hisi_hba *hisi_hba)
{
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;
    int rc;

    rc = reset_hw_v1_hw(hisi_hba);
@ -900,22 +900,17 @@ static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id)
    return bitmap;
}

/**
 * This function allocates across all queues to load balance.
 * Slots are allocated from queues in a round-robin fashion.
 *
/*
 * The callpath to this function and upto writing the write
 * queue pointer should be safe from interruption.
 */
static int get_free_slot_v1_hw(struct hisi_hba *hisi_hba, u32 dev_id,
                               int *q, int *s)
static int
get_free_slot_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
{
    struct device *dev = &hisi_hba->pdev->dev;
    struct hisi_sas_dq *dq;
    struct device *dev = hisi_hba->dev;
    int queue = dq->id;
    u32 r, w;
    int queue = dev_id % hisi_hba->queue_count;

    dq = &hisi_hba->dq[queue];
    w = dq->wr_point;
    r = hisi_sas_read32_relaxed(hisi_hba,
                                DLVRY_Q_0_RD_PTR + (queue * 0x14));
@ -924,16 +919,14 @@ static int get_free_slot_v1_hw(struct hisi_hba *hisi_hba, u32 dev_id,
        return -EAGAIN;
    }

    *q = queue;
    *s = w;
    return 0;
}

static void start_delivery_v1_hw(struct hisi_hba *hisi_hba)
static void start_delivery_v1_hw(struct hisi_sas_dq *dq)
{
    int dlvry_queue = hisi_hba->slot_prep->dlvry_queue;
    int dlvry_queue_slot = hisi_hba->slot_prep->dlvry_queue_slot;
    struct hisi_sas_dq *dq = &hisi_hba->dq[dlvry_queue];
    struct hisi_hba *hisi_hba = dq->hisi_hba;
    int dlvry_queue = dq->slot_prep->dlvry_queue;
    int dlvry_queue_slot = dq->slot_prep->dlvry_queue_slot;

    dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS;
    hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
@ -946,7 +939,8 @@ static int prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba,
                              struct scatterlist *scatter,
                              int n_elem)
{
    struct device *dev = &hisi_hba->pdev->dev;
    struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot);
    struct device *dev = hisi_hba->dev;
    struct scatterlist *sg;
    int i;

@ -956,13 +950,8 @@ static int prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba,
        return -EINVAL;
    }

    slot->sge_page = dma_pool_alloc(hisi_hba->sge_page_pool, GFP_ATOMIC,
                                    &slot->sge_page_dma);
    if (!slot->sge_page)
        return -ENOMEM;

    for_each_sg(scatter, sg, n_elem, i) {
        struct hisi_sas_sge *entry = &slot->sge_page->sge[i];
        struct hisi_sas_sge *entry = &sge_page->sge[i];

        entry->addr = cpu_to_le64(sg_dma_address(sg));
        entry->page_ctrl_0 = entry->page_ctrl_1 = 0;
@ -970,7 +959,7 @@ static int prep_prd_sge_v1_hw(struct hisi_hba *hisi_hba,
        entry->data_off = 0;
    }

    hdr->prd_table_addr = cpu_to_le64(slot->sge_page_dma);
    hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot));

    hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);

@ -983,7 +972,7 @@ static int prep_smp_v1_hw(struct hisi_hba *hisi_hba,
    struct sas_task *task = slot->task;
    struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
    struct domain_device *device = task->dev;
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;
    struct hisi_sas_port *port = slot->port;
    struct scatterlist *sg_req, *sg_resp;
    struct hisi_sas_device *sas_dev = device->lldd_dev;
@ -1033,7 +1022,7 @@ static int prep_smp_v1_hw(struct hisi_hba *hisi_hba,
    hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF);

    hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
    hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);
    hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));

    return 0;

@ -1114,10 +1103,11 @@ static int prep_ssp_v1_hw(struct hisi_hba *hisi_hba,
    }

    hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
    hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma);
    hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);
    hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
    hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));

    buf_cmd = slot->command_table + sizeof(struct ssp_frame_hdr);
    buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) +
              sizeof(struct ssp_frame_hdr);
    if (task->ssp_task.enable_first_burst) {
        fburst = (1 << 7);
        dw2 |= 1 << CMD_HDR_FIRST_BURST_OFF;
@ -1154,8 +1144,9 @@ static void slot_err_v1_hw(struct hisi_hba *hisi_hba,
                           struct hisi_sas_slot *slot)
{
    struct task_status_struct *ts = &task->task_status;
    struct hisi_sas_err_record_v1 *err_record = slot->status_buffer;
    struct device *dev = &hisi_hba->pdev->dev;
    struct hisi_sas_err_record_v1 *err_record =
            hisi_sas_status_buf_addr_mem(slot);
    struct device *dev = hisi_hba->dev;

    switch (task->task_proto) {
    case SAS_PROTOCOL_SSP:
@ -1281,7 +1272,7 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
{
    struct sas_task *task = slot->task;
    struct hisi_sas_device *sas_dev;
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;
    struct task_status_struct *ts;
    struct domain_device *device;
    enum exec_status sts;
@ -1371,8 +1362,11 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
    switch (task->task_proto) {
    case SAS_PROTOCOL_SSP:
    {
        struct ssp_response_iu *iu = slot->status_buffer +
                sizeof(struct hisi_sas_err_record);
        struct hisi_sas_status_buffer *status_buffer =
                hisi_sas_status_buf_addr_mem(slot);
        struct ssp_response_iu *iu = (struct ssp_response_iu *)
                &status_buffer->iu[0];

        sas_ssp_task_response(dev, task, iu);
        break;
    }
@ -1389,7 +1383,7 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba,
        dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
                     DMA_TO_DEVICE);
        memcpy(to + sg_resp->offset,
               slot->status_buffer +
               hisi_sas_status_buf_addr_mem(slot) +
               sizeof(struct hisi_sas_err_record),
               sg_dma_len(sg_resp));
        kunmap_atomic(to);
@ -1430,7 +1424,7 @@ static irqreturn_t int_phyup_v1_hw(int irq_no, void *p)
{
    struct hisi_sas_phy *phy = p;
    struct hisi_hba *hisi_hba = phy->hisi_hba;
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;
    struct asd_sas_phy *sas_phy = &phy->sas_phy;
    int i, phy_no = sas_phy->id;
    u32 irq_value, context, port_id, link_rate;
@ -1511,7 +1505,7 @@ static irqreturn_t int_bcast_v1_hw(int irq, void *p)
    struct hisi_hba *hisi_hba = phy->hisi_hba;
    struct asd_sas_phy *sas_phy = &phy->sas_phy;
    struct sas_ha_struct *sha = &hisi_hba->sha;
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;
    int phy_no = sas_phy->id;
    u32 irq_value;
    irqreturn_t res = IRQ_HANDLED;
@ -1538,7 +1532,7 @@ static irqreturn_t int_abnormal_v1_hw(int irq, void *p)
{
    struct hisi_sas_phy *phy = p;
    struct hisi_hba *hisi_hba = phy->hisi_hba;
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;
    struct asd_sas_phy *sas_phy = &phy->sas_phy;
    u32 irq_value, irq_mask_old;
    int phy_no = sas_phy->id;
@ -1641,7 +1635,7 @@ static irqreturn_t cq_interrupt_v1_hw(int irq, void *p)
static irqreturn_t fatal_ecc_int_v1_hw(int irq, void *p)
{
    struct hisi_hba *hisi_hba = p;
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;
    u32 ecc_int = hisi_sas_read32(hisi_hba, SAS_ECC_INTR);

    if (ecc_int & SAS_ECC_INTR_DQ_ECC1B_MSK) {
@ -1700,7 +1694,7 @@ static irqreturn_t fatal_ecc_int_v1_hw(int irq, void *p)
static irqreturn_t fatal_axi_int_v1_hw(int irq, void *p)
{
    struct hisi_hba *hisi_hba = p;
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;
    u32 axi_int = hisi_sas_read32(hisi_hba, ENT_INT_SRC2);
    u32 axi_info = hisi_sas_read32(hisi_hba, HGC_AXI_FIFO_ERR_INFO);

@ -1738,7 +1732,7 @@ static irq_handler_t fatal_interrupts[HISI_SAS_MAX_QUEUES] = {

static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
{
    struct platform_device *pdev = hisi_hba->pdev;
    struct platform_device *pdev = hisi_hba->platform_dev;
    struct device *dev = &pdev->dev;
    int i, j, irq, rc, idx;
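In the v1 hunks above, get_free_slot_v1_hw() and start_delivery_v1_hw() manage each delivery queue as a ring: hardware advances a read pointer as it consumes entries, the driver advances a write pointer as it posts them, and the queue is full when the next write would land on the read pointer. A self-contained sketch of that test (SLOTS stands in for HISI_SAS_QUEUE_SLOTS; the value is illustrative):

#include <stdio.h>

#define SLOTS 512    /* stand-in for HISI_SAS_QUEUE_SLOTS */

/* Mirrors the -EAGAIN check in get_free_slot_v1_hw(): the ring is full
 * when advancing the write pointer would catch the read pointer. */
static int queue_full(unsigned int rd, unsigned int wr)
{
    return rd == (wr + 1) % SLOTS;
}

int main(void)
{
    unsigned int rd = 0, wr = SLOTS - 1;

    printf("full: %d\n", queue_full(rd, wr));   /* prints 1 */
    rd = 1;                     /* hardware consumed one entry */
    printf("full: %d\n", queue_full(rd, wr));   /* prints 0 */
    wr = (wr + 1) % SLOTS;      /* what the wr_point update in
                                   start_delivery_v1_hw() does */
    return 0;
}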
@ -554,12 +554,6 @@ enum {
#define DIR_TO_DEVICE 2
#define DIR_RESERVED 3

#define SATA_PROTOCOL_NONDATA 0x1
#define SATA_PROTOCOL_PIO 0x2
#define SATA_PROTOCOL_DMA 0x4
#define SATA_PROTOCOL_FPDMA 0x8
#define SATA_PROTOCOL_ATAPI 0x10

#define ERR_ON_TX_PHASE(err_phase) (err_phase == 0x2 || \
        err_phase == 0x4 || err_phase == 0x8 ||\
        err_phase == 0x6 || err_phase == 0xa)
@ -659,7 +653,7 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, int *slot_idx,
static bool sata_index_alloc_v2_hw(struct hisi_hba *hisi_hba, int *idx)
{
    unsigned int index;
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;
    void *bitmap = hisi_hba->sata_dev_bitmap;

    index = find_first_zero_bit(bitmap, HISI_MAX_SATA_SUPPORT_V2_HW);
@ -695,6 +689,9 @@ hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device)
        if (sata_dev && (i & 1))
            continue;
        if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
            int queue = i % hisi_hba->queue_count;
            struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

            hisi_hba->devices[i].device_id = i;
            sas_dev = &hisi_hba->devices[i];
            sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
@ -702,6 +699,7 @@ hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device)
            sas_dev->hisi_hba = hisi_hba;
            sas_dev->sas_device = device;
            sas_dev->sata_idx = sata_idx;
            sas_dev->dq = dq;
            INIT_LIST_HEAD(&hisi_hba->devices[i].list);
            break;
        }
@ -756,7 +754,7 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
                             struct hisi_sas_device *sas_dev)
{
    struct domain_device *device = sas_dev->sas_device;
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;
    u64 qw0, device_id = sas_dev->device_id;
    struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
    struct domain_device *parent_dev = device->parent;
@ -809,7 +807,7 @@ static void free_device_v2_hw(struct hisi_hba *hisi_hba,
                              struct hisi_sas_device *sas_dev)
{
    u64 dev_id = sas_dev->device_id;
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;
    struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id];
    u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3);
    int i;
@ -853,7 +851,7 @@ static int reset_hw_v2_hw(struct hisi_hba *hisi_hba)
    int i, reset_val;
    u32 val;
    unsigned long end_time;
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;

    /* The mask needs to be set depending on the number of phys */
    if (hisi_hba->n_phy == 9)
@ -989,7 +987,7 @@ static void phys_try_accept_stp_links_v2_hw(struct hisi_hba *hisi_hba)

static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
{
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;
    int i;

    /* Global registers init */
@ -1170,7 +1168,7 @@ static void set_link_timer_quirk(struct hisi_hba *hisi_hba)

static int hw_init_v2_hw(struct hisi_hba *hisi_hba)
{
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;
    int rc;

    rc = reset_hw_v2_hw(hisi_hba);
@ -1219,7 +1217,7 @@ static bool tx_fifo_is_empty_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
static bool axi_bus_is_idle_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
    int i, max_loop = 1000;
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;
    u32 status, axi_status, dfx_val, dfx_tx_val;

    for (i = 0; i < max_loop; i++) {
@ -1245,7 +1243,7 @@ static bool axi_bus_is_idle_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
static bool wait_io_done_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
    int i, max_loop = 1000;
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;
    u32 status, tx_dfx0;

    for (i = 0; i < max_loop; i++) {
@ -1283,7 +1281,7 @@ static bool allowed_disable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
static void disable_phy_v2_hw(struct hisi_hba *hisi_hba, int phy_no)
{
    u32 cfg, axi_val, dfx0_val, txid_auto;
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;

    /* Close axi bus. */
    axi_val = hisi_sas_read32(hisi_hba, AXI_MASTER_CFG_BASE +
@ -1454,22 +1452,17 @@ static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id)
    return bitmap;
}

/**
 * This function allocates across all queues to load balance.
 * Slots are allocated from queues in a round-robin fashion.
 *
/*
 * The callpath to this function and upto writing the write
 * queue pointer should be safe from interruption.
 */
static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, u32 dev_id,
                               int *q, int *s)
static int
get_free_slot_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq)
{
    struct device *dev = &hisi_hba->pdev->dev;
    struct hisi_sas_dq *dq;
    struct device *dev = hisi_hba->dev;
    int queue = dq->id;
    u32 r, w;
    int queue = dev_id % hisi_hba->queue_count;

    dq = &hisi_hba->dq[queue];
    w = dq->wr_point;
    r = hisi_sas_read32_relaxed(hisi_hba,
                                DLVRY_Q_0_RD_PTR + (queue * 0x14));
@ -1479,16 +1472,14 @@ static int get_free_slot_v2_hw(struct hisi_hba *hisi_hba, u32 dev_id,
        return -EAGAIN;
    }

    *q = queue;
    *s = w;
    return 0;
}

static void start_delivery_v2_hw(struct hisi_hba *hisi_hba)
static void start_delivery_v2_hw(struct hisi_sas_dq *dq)
{
    int dlvry_queue = hisi_hba->slot_prep->dlvry_queue;
    int dlvry_queue_slot = hisi_hba->slot_prep->dlvry_queue_slot;
    struct hisi_sas_dq *dq = &hisi_hba->dq[dlvry_queue];
    struct hisi_hba *hisi_hba = dq->hisi_hba;
    int dlvry_queue = dq->slot_prep->dlvry_queue;
    int dlvry_queue_slot = dq->slot_prep->dlvry_queue_slot;

    dq->wr_point = ++dlvry_queue_slot % HISI_SAS_QUEUE_SLOTS;
    hisi_sas_write32(hisi_hba, DLVRY_Q_0_WR_PTR + (dlvry_queue * 0x14),
@ -1501,7 +1492,8 @@ static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba,
                              struct scatterlist *scatter,
                              int n_elem)
{
    struct device *dev = &hisi_hba->pdev->dev;
    struct hisi_sas_sge_page *sge_page = hisi_sas_sge_addr_mem(slot);
    struct device *dev = hisi_hba->dev;
    struct scatterlist *sg;
    int i;

@ -1511,13 +1503,8 @@ static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba,
        return -EINVAL;
    }

    slot->sge_page = dma_pool_alloc(hisi_hba->sge_page_pool, GFP_ATOMIC,
                                    &slot->sge_page_dma);
    if (!slot->sge_page)
        return -ENOMEM;

    for_each_sg(scatter, sg, n_elem, i) {
        struct hisi_sas_sge *entry = &slot->sge_page->sge[i];
        struct hisi_sas_sge *entry = &sge_page->sge[i];

        entry->addr = cpu_to_le64(sg_dma_address(sg));
        entry->page_ctrl_0 = entry->page_ctrl_1 = 0;
@ -1525,7 +1512,7 @@ static int prep_prd_sge_v2_hw(struct hisi_hba *hisi_hba,
        entry->data_off = 0;
    }

    hdr->prd_table_addr = cpu_to_le64(slot->sge_page_dma);
    hdr->prd_table_addr = cpu_to_le64(hisi_sas_sge_addr_dma(slot));

    hdr->sg_len = cpu_to_le32(n_elem << CMD_HDR_DATA_SGL_LEN_OFF);

@ -1538,7 +1525,7 @@ static int prep_smp_v2_hw(struct hisi_hba *hisi_hba,
    struct sas_task *task = slot->task;
    struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
    struct domain_device *device = task->dev;
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;
    struct hisi_sas_port *port = slot->port;
    struct scatterlist *sg_req, *sg_resp;
    struct hisi_sas_device *sas_dev = device->lldd_dev;
@ -1589,7 +1576,7 @@ static int prep_smp_v2_hw(struct hisi_hba *hisi_hba,
    hdr->transfer_tags = cpu_to_le32(slot->idx << CMD_HDR_IPTT_OFF);

    hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
    hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);
    hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));

    return 0;

@ -1663,10 +1650,11 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
    }

    hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
    hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma);
    hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);
    hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
    hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));

    buf_cmd = slot->command_table + sizeof(struct ssp_frame_hdr);
    buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot) +
              sizeof(struct ssp_frame_hdr);

    memcpy(buf_cmd, &task->ssp_task.LUN, 8);
    if (!is_tmf) {
@ -1692,20 +1680,6 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba,
    return 0;
}

static void sata_done_v2_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
                            struct hisi_sas_slot *slot)
{
    struct task_status_struct *ts = &task->task_status;
    struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
    struct dev_to_host_fis *d2h = slot->status_buffer +
            sizeof(struct hisi_sas_err_record);

    resp->frame_len = sizeof(struct dev_to_host_fis);
    memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));

    ts->buf_valid_size = sizeof(*resp);
}

#define TRANS_TX_ERR 0
#define TRANS_RX_ERR 1
#define DMA_TX_ERR 2
@ -1907,7 +1881,8 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
                           int err_phase)
{
    struct task_status_struct *ts = &task->task_status;
    struct hisi_sas_err_record_v2 *err_record = slot->status_buffer;
    struct hisi_sas_err_record_v2 *err_record =
            hisi_sas_status_buf_addr_mem(slot);
    u32 trans_tx_fail_type = cpu_to_le32(err_record->trans_tx_fail_type);
    u32 trans_rx_fail_type = cpu_to_le32(err_record->trans_rx_fail_type);
    u16 dma_tx_err_type = cpu_to_le16(err_record->dma_tx_err_type);
@ -2198,7 +2173,7 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
                break;
            }
        }
        sata_done_v2_hw(hisi_hba, task, slot);
        hisi_sas_sata_done(task, slot);
    }
    break;
    default:
@ -2211,7 +2186,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
{
    struct sas_task *task = slot->task;
    struct hisi_sas_device *sas_dev;
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;
    struct task_status_struct *ts;
    struct domain_device *device;
    enum exec_status sts;
@ -2296,8 +2271,10 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
    switch (task->task_proto) {
    case SAS_PROTOCOL_SSP:
    {
        struct ssp_response_iu *iu = slot->status_buffer +
                sizeof(struct hisi_sas_err_record);
        struct hisi_sas_status_buffer *status_buffer =
                hisi_sas_status_buf_addr_mem(slot);
        struct ssp_response_iu *iu = (struct ssp_response_iu *)
                &status_buffer->iu[0];

        sas_ssp_task_response(dev, task, iu);
        break;
@ -2315,7 +2292,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
        dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
                     DMA_TO_DEVICE);
        memcpy(to + sg_resp->offset,
               slot->status_buffer +
               hisi_sas_status_buf_addr_mem(slot) +
               sizeof(struct hisi_sas_err_record),
               sg_dma_len(sg_resp));
        kunmap_atomic(to);
@ -2326,7 +2303,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
    case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
    {
        ts->stat = SAM_STAT_GOOD;
        sata_done_v2_hw(hisi_hba, task, slot);
        hisi_sas_sata_done(task, slot);
        break;
    }
    default:
@ -2344,7 +2321,9 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
    spin_lock_irqsave(&task->task_state_lock, flags);
    task->task_state_flags |= SAS_TASK_STATE_DONE;
    spin_unlock_irqrestore(&task->task_state_lock, flags);
    spin_lock_irqsave(&hisi_hba->lock, flags);
    hisi_sas_slot_task_free(hisi_hba, task, slot);
    spin_unlock_irqrestore(&hisi_hba->lock, flags);
    sts = ts->stat;

    if (task->task_done)
@ -2353,78 +2332,6 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot)
    return sts;
}

static u8 get_ata_protocol(u8 cmd, int direction)
{
    switch (cmd) {
    case ATA_CMD_FPDMA_WRITE:
    case ATA_CMD_FPDMA_READ:
    case ATA_CMD_FPDMA_RECV:
    case ATA_CMD_FPDMA_SEND:
    case ATA_CMD_NCQ_NON_DATA:
        return SATA_PROTOCOL_FPDMA;

    case ATA_CMD_DOWNLOAD_MICRO:
    case ATA_CMD_ID_ATA:
    case ATA_CMD_PMP_READ:
    case ATA_CMD_READ_LOG_EXT:
    case ATA_CMD_PIO_READ:
    case ATA_CMD_PIO_READ_EXT:
    case ATA_CMD_PMP_WRITE:
    case ATA_CMD_WRITE_LOG_EXT:
    case ATA_CMD_PIO_WRITE:
    case ATA_CMD_PIO_WRITE_EXT:
        return SATA_PROTOCOL_PIO;

    case ATA_CMD_DSM:
    case ATA_CMD_DOWNLOAD_MICRO_DMA:
    case ATA_CMD_PMP_READ_DMA:
    case ATA_CMD_PMP_WRITE_DMA:
    case ATA_CMD_READ:
    case ATA_CMD_READ_EXT:
    case ATA_CMD_READ_LOG_DMA_EXT:
    case ATA_CMD_READ_STREAM_DMA_EXT:
    case ATA_CMD_TRUSTED_RCV_DMA:
    case ATA_CMD_TRUSTED_SND_DMA:
    case ATA_CMD_WRITE:
    case ATA_CMD_WRITE_EXT:
    case ATA_CMD_WRITE_FUA_EXT:
    case ATA_CMD_WRITE_QUEUED:
    case ATA_CMD_WRITE_LOG_DMA_EXT:
    case ATA_CMD_WRITE_STREAM_DMA_EXT:
        return SATA_PROTOCOL_DMA;

    case ATA_CMD_CHK_POWER:
    case ATA_CMD_DEV_RESET:
    case ATA_CMD_EDD:
    case ATA_CMD_FLUSH:
    case ATA_CMD_FLUSH_EXT:
    case ATA_CMD_VERIFY:
    case ATA_CMD_VERIFY_EXT:
    case ATA_CMD_SET_FEATURES:
    case ATA_CMD_STANDBY:
    case ATA_CMD_STANDBYNOW1:
        return SATA_PROTOCOL_NONDATA;
    default:
        if (direction == DMA_NONE)
            return SATA_PROTOCOL_NONDATA;
        return SATA_PROTOCOL_PIO;
    }
}

static int get_ncq_tag_v2_hw(struct sas_task *task, u32 *tag)
{
    struct ata_queued_cmd *qc = task->uldd_task;

    if (qc) {
        if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
            qc->tf.command == ATA_CMD_FPDMA_READ) {
            *tag = qc->tag;
            return 1;
        }
    }
    return 0;
}

static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
                          struct hisi_sas_slot *slot)
{
@ -2465,13 +2372,14 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
        (task->ata_task.fis.control & ATA_SRST))
        dw1 |= 1 << CMD_HDR_RESET_OFF;

    dw1 |= (get_ata_protocol(task->ata_task.fis.command, task->data_dir))
    dw1 |= (hisi_sas_get_ata_protocol(
            task->ata_task.fis.command, task->data_dir))
            << CMD_HDR_FRAME_TYPE_OFF;
    dw1 |= sas_dev->device_id << CMD_HDR_DEV_ID_OFF;
    hdr->dw1 = cpu_to_le32(dw1);

    /* dw2 */
    if (task->ata_task.use_ncq && get_ncq_tag_v2_hw(task, &hdr_tag)) {
    if (task->ata_task.use_ncq && hisi_sas_get_ncq_tag(task, &hdr_tag)) {
        task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
        dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF;
    }
@ -2490,12 +2398,11 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba,
        return rc;
    }


    hdr->data_transfer_len = cpu_to_le32(task->total_xfer_len);
    hdr->cmd_table_addr = cpu_to_le64(slot->command_table_dma);
    hdr->sts_buffer_addr = cpu_to_le64(slot->status_buffer_dma);
    hdr->cmd_table_addr = cpu_to_le64(hisi_sas_cmd_hdr_addr_dma(slot));
    hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));

    buf_cmd = slot->command_table;
    buf_cmd = hisi_sas_cmd_hdr_addr_mem(slot);

    if (likely(!task->ata_task.device_control_reg_update))
        task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
@ -2578,7 +2485,7 @@ static int phy_up_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
    u32 port_id, link_rate, hard_phy_linkrate;
    struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
    struct asd_sas_phy *sas_phy = &phy->sas_phy;
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;
    u32 *frame_rcvd = (u32 *)sas_phy->frame_rcvd;
    struct sas_identify_frame *id = (struct sas_identify_frame *)frame_rcvd;

@ -2765,7 +2672,7 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
{
    struct hisi_hba *hisi_hba = p;
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;
    u32 ent_msk, ent_tmp, irq_msk;
    int phy_no = 0;

@ -2825,7 +2732,7 @@ static irqreturn_t int_chnl_int_v2_hw(int irq_no, void *p)
static void
one_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba, u32 irq_value)
{
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;
    u32 reg_val;

    if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_1B_OFF)) {
@ -2914,7 +2821,7 @@ static void multi_bit_ecc_error_process_v2_hw(struct hisi_hba *hisi_hba,
                                              u32 irq_value)
{
    u32 reg_val;
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;

    if (irq_value & BIT(SAS_ECC_INTR_DQE_ECC_MB_OFF)) {
        reg_val = hisi_sas_read32(hisi_hba, HGC_DQE_ECC_ADDR);
@ -3064,7 +2971,7 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p)
{
    struct hisi_hba *hisi_hba = p;
    u32 irq_value, irq_msk, err_value;
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;

    irq_msk = hisi_sas_read32(hisi_hba, ENT_INT_SRC_MSK3);
    hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, irq_msk | 0xfffffffe);
@ -3162,13 +3069,14 @@ static void cq_tasklet_v2_hw(unsigned long val)
    struct hisi_sas_complete_v2_hdr *complete_queue;
    u32 rd_point = cq->rd_point, wr_point, dev_id;
    int queue = cq->id;
    struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

    if (unlikely(hisi_hba->reject_stp_links_msk))
        phys_try_accept_stp_links_v2_hw(hisi_hba);

    complete_queue = hisi_hba->complete_hdr[queue];

    spin_lock(&hisi_hba->lock);
    spin_lock(&dq->lock);
    wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR +
                               (0x14 * queue));

@ -3218,7 +3126,7 @@ static void cq_tasklet_v2_hw(unsigned long val)
    /* update rd_point */
    cq->rd_point = rd_point;
    hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point);
    spin_unlock(&hisi_hba->lock);
    spin_unlock(&dq->lock);
}

static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p)
@ -3239,7 +3147,7 @@ static irqreturn_t sata_int_v2_hw(int irq_no, void *p)
    struct hisi_sas_phy *phy = p;
    struct hisi_hba *hisi_hba = phy->hisi_hba;
    struct asd_sas_phy *sas_phy = &phy->sas_phy;
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;
    struct hisi_sas_initial_fis *initial_fis;
    struct dev_to_host_fis *fis;
    u32 ent_tmp, ent_msk, ent_int, port_id, link_rate, hard_phy_linkrate;
@ -3341,7 +3249,7 @@ static irq_handler_t fatal_interrupts[HISI_SAS_FATAL_INT_NR] = {
 */
static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
{
    struct platform_device *pdev = hisi_hba->pdev;
    struct platform_device *pdev = hisi_hba->platform_dev;
    struct device *dev = &pdev->dev;
    int i, irq, rc, irq_map[128];

@ -3455,7 +3363,7 @@ static int hisi_sas_v2_init(struct hisi_hba *hisi_hba)

static void interrupt_disable_v2_hw(struct hisi_hba *hisi_hba)
{
    struct platform_device *pdev = hisi_hba->pdev;
    struct platform_device *pdev = hisi_hba->platform_dev;
    int i;

    for (i = 0; i < hisi_hba->queue_count; i++)
@ -3477,7 +3385,7 @@ static void interrupt_disable_v2_hw(struct hisi_hba *hisi_hba)

static int soft_reset_v2_hw(struct hisi_hba *hisi_hba)
{
    struct device *dev = &hisi_hba->pdev->dev;
    struct device *dev = hisi_hba->dev;
    u32 old_state, state;
    int rc, cnt;
    int phy_no;
drivers/scsi/hisi_sas/hisi_sas_v3_hw.c (new file, 1846 lines)
File diff suppressed because it is too large
@@ -57,6 +57,7 @@ struct hpsa_sas_phy {
 	bool added_to_port;
 };
 
+#define EXTERNAL_QD 7
 struct hpsa_scsi_dev_t {
 	unsigned int devtype;
 	int bus, target, lun;		/* as presented to the OS */
@@ -244,6 +245,7 @@ struct ctlr_info {
 	u32 __percpu *lockup_detected;
 	struct delayed_work monitor_ctlr_work;
 	struct delayed_work rescan_ctlr_work;
+	struct delayed_work event_monitor_work;
 	int remove_in_progress;
 	/* Address of h->q[x] is passed to intr handler to know which queue */
 	u8 q[MAX_REPLY_QUEUES];
@@ -296,11 +298,11 @@ struct ctlr_info {
 	struct workqueue_struct *resubmit_wq;
 	struct workqueue_struct *rescan_ctlr_wq;
 	atomic_t abort_cmds_available;
-	wait_queue_head_t abort_cmd_wait_queue;
 	wait_queue_head_t event_sync_wait_queue;
 	struct mutex reset_mutex;
 	u8 reset_in_progress;
 	struct hpsa_sas_node *sas_host;
+	spinlock_t reset_lock;
 };
 
 struct offline_device_entry {
@@ -809,10 +809,7 @@ struct bmic_identify_physical_device {
 	u8 max_temperature_degreesC;
 	u8 logical_blocks_per_phys_block_exp; /* phyblocksize = 512*2^exp */
 	__le16 current_queue_depth_limit;
-	u8 switch_name[10];
-	__le16 switch_port;
-	u8 alternate_paths_switch_name[40];
-	u8 alternate_paths_switch_port[8];
+	u8 reserved_switch_stuff[60];
 	__le16 power_on_hours; /* valid only if gas gauge supported */
 	__le16 percent_endurance_used; /* valid only if gas gauge supported. */
 #define BMIC_PHYS_DRIVE_SSD_WEAROUT(idphydrv) \
@@ -828,11 +825,22 @@ struct bmic_identify_physical_device {
 	(idphydrv->smart_carrier_authentication == 0x01)
 	u8 smart_carrier_app_fw_version;
 	u8 smart_carrier_bootloader_fw_version;
+	u8 sanitize_support_flags;
+	u8 drive_key_flags;
 	u8 encryption_key_name[64];
 	__le32 misc_drive_flags;
 	__le16 dek_index;
-	u8 padding[112];
-};
+	__le16 hba_drive_encryption_flags;
+	__le16 max_overwrite_time;
+	__le16 max_block_erase_time;
+	__le16 max_crypto_erase_time;
+	u8 device_connector_info[5];
+	u8 connector_name[8][8];
+	u8 page_83_id[16];
+	u8 max_link_rate[256];
+	u8 neg_phys_link_rate[256];
+	u8 box_conn_name[8];
+} __attribute((aligned(512)));
 
 struct bmic_sense_subsystem_info {
 	u8 primary_slot_number;
@@ -800,7 +800,7 @@ static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag)
 	hptiop_finish_scsi_req(hba, tag, req);
 }
 
-void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
+static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
 {
 	struct hpt_iop_request_header __iomem *req;
 	struct hpt_iop_request_ioctl_command __iomem *p;
@@ -4935,7 +4935,7 @@ static struct vio_device_id ibmvfc_device_table[] = {
 };
 MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);
 
-static struct dev_pm_ops ibmvfc_pm_ops = {
+static const struct dev_pm_ops ibmvfc_pm_ops = {
 	.resume = ibmvfc_resume
 };
 
@@ -2336,7 +2336,7 @@ static struct vio_device_id ibmvscsi_device_table[] = {
 };
 MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table);
 
-static struct dev_pm_ops ibmvscsi_pm_ops = {
+static const struct dev_pm_ops ibmvscsi_pm_ops = {
 	.resume = ibmvscsi_resume
 };
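The ibmvfc and ibmvscsi hunks above apply the same constification pattern: a dev_pm_ops table that is never written at runtime can be declared const so the compiler places it in read-only memory. A minimal sketch of the pattern, outside either driver (the demo_* names are illustrative, not from the diff):

	#include <linux/pm.h>

	/* Illustrative resume hook with nothing to restore. */
	static int demo_resume(struct device *dev)
	{
		return 0;
	}

	/* const moves the table into .rodata; the PM core only reads it. */
	static const struct dev_pm_ops demo_pm_ops = {
		.resume = demo_resume,
	};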
@@ -2556,7 +2556,7 @@ iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
 	 * the array. */
 	if (items)
 		num_arrays++;
-	q->pool = kzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL);
+	q->pool = kvzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL);
 	if (q->pool == NULL)
 		return -ENOMEM;
 
@@ -2590,7 +2590,7 @@ void iscsi_pool_free(struct iscsi_pool *q)
 
 	for (i = 0; i < q->max; i++)
 		kfree(q->pool[i]);
-	kfree(q->pool);
+	kvfree(q->pool);
 }
 EXPORT_SYMBOL_GPL(iscsi_pool_free);
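The libiscsi change swaps kzalloc()/kfree() for kvzalloc()/kvfree() because the pool array only needs to be virtually contiguous, and a large max could make a physically contiguous allocation fail. A small sketch of the idiom (demo_* names are illustrative):

	#include <linux/mm.h>
	#include <linux/slab.h>

	/* kvzalloc() tries kmalloc() first and falls back to vzalloc()
	 * when the request is too large for physically contiguous memory.
	 */
	static void **demo_alloc_pool(int num_arrays, int max)
	{
		return kvzalloc(num_arrays * max * sizeof(void *), GFP_KERNEL);
	}

	/* kvfree() releases either allocation path correctly. */
	static void demo_free_pool(void **pool)
	{
		kvfree(pool);
	}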
@@ -27,30 +27,38 @@
 #include "sas_internal.h"
 #include "sas_dump.h"
 
-void sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
+int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
 {
+	int rc = 0;
+
 	if (!test_bit(SAS_HA_REGISTERED, &ha->state))
-		return;
+		return 0;
 
 	if (test_bit(SAS_HA_DRAINING, &ha->state)) {
 		/* add it to the defer list, if not already pending */
 		if (list_empty(&sw->drain_node))
 			list_add(&sw->drain_node, &ha->defer_q);
 	} else
-		scsi_queue_work(ha->core.shost, &sw->work);
+		rc = scsi_queue_work(ha->core.shost, &sw->work);
+
+	return rc;
 }
 
-static void sas_queue_event(int event, unsigned long *pending,
+static int sas_queue_event(int event, unsigned long *pending,
 			    struct sas_work *work,
 			    struct sas_ha_struct *ha)
 {
+	int rc = 0;
+
 	if (!test_and_set_bit(event, pending)) {
 		unsigned long flags;
 
 		spin_lock_irqsave(&ha->lock, flags);
-		sas_queue_work(ha, work);
+		rc = sas_queue_work(ha, work);
 		spin_unlock_irqrestore(&ha->lock, flags);
 	}
+
+	return rc;
 }
 
@@ -116,31 +124,31 @@ void sas_enable_revalidation(struct sas_ha_struct *ha)
 	mutex_unlock(&ha->disco_mutex);
 }
 
-static void notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event)
+static int notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event)
 {
 	BUG_ON(event >= HA_NUM_EVENTS);
 
-	sas_queue_event(event, &sas_ha->pending,
-			&sas_ha->ha_events[event].work, sas_ha);
+	return sas_queue_event(event, &sas_ha->pending,
+			       &sas_ha->ha_events[event].work, sas_ha);
 }
 
-static void notify_port_event(struct asd_sas_phy *phy, enum port_event event)
+static int notify_port_event(struct asd_sas_phy *phy, enum port_event event)
 {
 	struct sas_ha_struct *ha = phy->ha;
 
 	BUG_ON(event >= PORT_NUM_EVENTS);
 
-	sas_queue_event(event, &phy->port_events_pending,
-			&phy->port_events[event].work, ha);
+	return sas_queue_event(event, &phy->port_events_pending,
+			       &phy->port_events[event].work, ha);
 }
 
-void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
+int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
 {
 	struct sas_ha_struct *ha = phy->ha;
 
 	BUG_ON(event >= PHY_NUM_EVENTS);
 
-	sas_queue_event(event, &phy->phy_events_pending,
-			&phy->phy_events[event].work, ha);
+	return sas_queue_event(event, &phy->phy_events_pending,
+			       &phy->phy_events[event].work, ha);
 }
 
@@ -76,7 +76,7 @@ void sas_porte_broadcast_rcvd(struct work_struct *work);
 void sas_porte_link_reset_err(struct work_struct *work);
 void sas_porte_timer_event(struct work_struct *work);
 void sas_porte_hard_reset(struct work_struct *work);
-void sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw);
+int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw);
 
 int sas_notify_lldd_dev_found(struct domain_device *);
 void sas_notify_lldd_dev_gone(struct domain_device *);
@@ -85,7 +85,7 @@ int sas_smp_phy_control(struct domain_device *dev, int phy_id,
 			enum phy_func phy_func, struct sas_phy_linkrates *);
 int sas_smp_get_phy_events(struct sas_phy *phy);
 
-void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event);
+int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event);
 void sas_device_set_phy(struct domain_device *dev, struct sas_port *port);
 struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
 struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id);
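The libsas hunks above convert the event-notification path from void to int so low-level drivers can see whether an event was actually queued. The core of the pattern is a per-event pending bit plus a propagated queue status; a condensed, generic sketch of that pattern (not the libsas code itself; names are illustrative):

	#include <linux/bitops.h>
	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	/* Dedup via a pending bit; nonzero return means the work was queued. */
	static int demo_queue_event(int event, unsigned long *pending,
				    struct work_struct *work,
				    struct workqueue_struct *wq,
				    spinlock_t *lock)
	{
		int rc = 0;
		unsigned long flags;

		/* Only the first requester queues; repeats are deduplicated. */
		if (!test_and_set_bit(event, pending)) {
			spin_lock_irqsave(lock, flags);
			rc = queue_work(wq, work);
			spin_unlock_irqrestore(lock, flags);
		}
		return rc;
	}

A caller can now log or retry when the return value is 0, which happens when the event is already pending or, in the libsas version, when the HA is unregistered.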
@@ -756,6 +756,7 @@ struct lpfc_hba {
 	uint8_t  nvmet_support;	/* driver supports NVMET */
 #define LPFC_NVMET_MAX_PORTS	32
 	uint8_t  mds_diags_support;
+	uint32_t initial_imax;
 
 	/* HBA Config Parameters */
 	uint32_t cfg_ack0;
@@ -777,6 +778,7 @@ struct lpfc_hba {
 	uint32_t cfg_poll_tmo;
 	uint32_t cfg_task_mgmt_tmo;
 	uint32_t cfg_use_msi;
+	uint32_t cfg_auto_imax;
 	uint32_t cfg_fcp_imax;
 	uint32_t cfg_fcp_cpu_map;
 	uint32_t cfg_fcp_io_channel;
@@ -913,16 +915,16 @@ struct lpfc_hba {
 	/*
 	 * stat counters
 	 */
-	uint64_t fc4ScsiInputRequests;
-	uint64_t fc4ScsiOutputRequests;
-	uint64_t fc4ScsiControlRequests;
-	uint64_t fc4ScsiIoCmpls;
-	uint64_t fc4NvmeInputRequests;
-	uint64_t fc4NvmeOutputRequests;
-	uint64_t fc4NvmeControlRequests;
-	uint64_t fc4NvmeIoCmpls;
-	uint64_t fc4NvmeLsRequests;
-	uint64_t fc4NvmeLsCmpls;
+	atomic_t fc4ScsiInputRequests;
+	atomic_t fc4ScsiOutputRequests;
+	atomic_t fc4ScsiControlRequests;
+	atomic_t fc4ScsiIoCmpls;
+	atomic_t fc4NvmeInputRequests;
+	atomic_t fc4NvmeOutputRequests;
+	atomic_t fc4NvmeControlRequests;
+	atomic_t fc4NvmeIoCmpls;
+	atomic_t fc4NvmeLsRequests;
+	atomic_t fc4NvmeLsCmpls;
 
 	uint64_t bg_guard_err_cnt;
 	uint64_t bg_apptag_err_cnt;
@@ -1050,6 +1052,7 @@ struct lpfc_hba {
 
 	uint8_t temp_sensor_support;
 	/* Fields used for heart beat. */
+	unsigned long last_eqdelay_time;
 	unsigned long last_completion_time;
 	unsigned long skipped_hb;
 	struct timer_list hb_tmofunc;
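The counter conversion above trades plain uint64_t fields for atomic_t so concurrent I/O submission and completion paths can bump statistics without a lock. A minimal sketch of the pattern (counter name illustrative):

	#include <linux/atomic.h>

	static atomic_t demo_io_cmpls = ATOMIC_INIT(0);

	/* Hot path: safe from interrupt or process context, no lock needed. */
	static void demo_complete_io(void)
	{
		atomic_inc(&demo_io_cmpls);
	}

	/* Reporting path: a point-in-time snapshot, as the sysfs and
	 * debugfs hunks below take with atomic_read().
	 */
	static int demo_read_stat(void)
	{
		return atomic_read(&demo_io_cmpls);
	}

Note the trade-off: atomic_t is 32-bit, which is why the LS counter format strings in the hunks below change from %016llx to %016x.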
@@ -148,9 +148,9 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 	struct lpfc_hba   *phba = vport->phba;
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct nvme_fc_local_port *localport;
 	struct lpfc_nvme_lport *lport;
-	struct lpfc_nvme_rport *rport;
+	struct lpfc_nodelist *ndlp;
 	struct nvme_fc_remote_port *nrport;
 	uint64_t data1, data2, data3, tot;
 	char *statep;
 	int len = 0;
 
@@ -171,7 +171,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 	else
 		statep = "INIT";
 	len += snprintf(buf + len, PAGE_SIZE - len,
-			"NVME Target: Enabled  State %s\n",
+			"NVME Target Enabled  State %s\n",
 			statep);
 	len += snprintf(buf + len, PAGE_SIZE - len,
 			"%s%d WWPN x%llx WWNN x%llx DID x%06x\n",
@@ -245,11 +245,21 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 			atomic_read(&tgtp->xmt_abort_rsp),
 			atomic_read(&tgtp->xmt_abort_rsp_error));
 
+	spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
+	spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
+	tot = phba->sli4_hba.nvmet_xri_cnt -
+		(phba->sli4_hba.nvmet_ctx_get_cnt +
+		phba->sli4_hba.nvmet_ctx_put_cnt);
+	spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
+	spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
+
 	len += snprintf(buf + len, PAGE_SIZE - len,
-			"IO_CTX: %08x outstanding %08x total %x",
-			phba->sli4_hba.nvmet_ctx_cnt,
+			"IO_CTX: %08x  WAIT: cur %08x tot %08x\n"
+			"CTX Outstanding %08llx\n",
+			phba->sli4_hba.nvmet_xri_cnt,
 			phba->sli4_hba.nvmet_io_wait_cnt,
-			phba->sli4_hba.nvmet_io_wait_total);
+			phba->sli4_hba.nvmet_io_wait_total,
+			tot);
 
 	len += snprintf(buf+len, PAGE_SIZE-len, "\n");
 	return len;
@@ -265,7 +275,6 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 	len = snprintf(buf, PAGE_SIZE, "NVME Initiator Enabled\n");
 
 	spin_lock_irq(shost->host_lock);
-	lport = (struct lpfc_nvme_lport *)localport->private;
 
 	/* Port state is only one of two values for now. */
 	if (localport->port_id)
@@ -281,9 +290,12 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 			wwn_to_u64(vport->fc_nodename.u.wwn),
 			localport->port_id, statep);
 
-	list_for_each_entry(rport, &lport->rport_list, list) {
+	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!ndlp->nrport)
+			continue;
+
 		/* local short-hand pointer. */
-		nrport = rport->remoteport;
+		nrport = ndlp->nrport->remoteport;
 
 		/* Port state is only one of two values for now. */
 		switch (nrport->port_state) {
@@ -311,25 +323,23 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 		len += snprintf(buf + len, PAGE_SIZE - len, "DID x%06x ",
 				nrport->port_id);
 
-		switch (nrport->port_role) {
-		case FC_PORT_ROLE_NVME_INITIATOR:
+		/* An NVME rport can have multiple roles. */
+		if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR)
 			len += snprintf(buf + len, PAGE_SIZE - len,
 					"INITIATOR ");
-			break;
-		case FC_PORT_ROLE_NVME_TARGET:
+		if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET)
 			len += snprintf(buf + len, PAGE_SIZE - len,
 					"TARGET ");
-			break;
-		case FC_PORT_ROLE_NVME_DISCOVERY:
+		if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY)
 			len += snprintf(buf + len, PAGE_SIZE - len,
-					"DISCOVERY ");
-			break;
-		default:
+					"DISCSRVC ");
+		if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
+					  FC_PORT_ROLE_NVME_TARGET |
+					  FC_PORT_ROLE_NVME_DISCOVERY))
 			len += snprintf(buf + len, PAGE_SIZE - len,
-					"UNKNOWN_ROLE x%x",
+					"UNKNOWN ROLE x%x",
 					nrport->port_role);
-			break;
-		}
+
 		len += snprintf(buf + len, PAGE_SIZE - len, "%s  ", statep);
 		/* Terminate the string. */
 		len += snprintf(buf + len, PAGE_SIZE - len, "\n");
@@ -338,19 +348,21 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
 
 	len += snprintf(buf + len, PAGE_SIZE - len, "\nNVME Statistics\n");
 	len += snprintf(buf+len, PAGE_SIZE-len,
-			"LS: Xmt %016llx Cmpl %016llx\n",
-			phba->fc4NvmeLsRequests,
-			phba->fc4NvmeLsCmpls);
+			"LS: Xmt %016x Cmpl %016x\n",
+			atomic_read(&phba->fc4NvmeLsRequests),
+			atomic_read(&phba->fc4NvmeLsCmpls));
+
+	tot = atomic_read(&phba->fc4NvmeIoCmpls);
+	data1 = atomic_read(&phba->fc4NvmeInputRequests);
+	data2 = atomic_read(&phba->fc4NvmeOutputRequests);
+	data3 = atomic_read(&phba->fc4NvmeControlRequests);
 	len += snprintf(buf+len, PAGE_SIZE-len,
 			"FCP: Rd %016llx Wr %016llx IO %016llx\n",
-			phba->fc4NvmeInputRequests,
-			phba->fc4NvmeOutputRequests,
-			phba->fc4NvmeControlRequests);
+			data1, data2, data3);
 
 	len += snprintf(buf+len, PAGE_SIZE-len,
-			" Cmpl %016llx\n", phba->fc4NvmeIoCmpls);
+			" Cmpl %016llx Outstanding %016llx\n",
+			tot, (data1 + data2 + data3) - tot);
 	return len;
 }
 
@@ -1342,6 +1354,8 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
 			goto board_mode_out;
 		}
 		wait_for_completion(&online_compl);
+		if (status)
+			status = -EIO;
 	} else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
 		status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
 	else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0)
@@ -3198,9 +3212,12 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport)
 
 	shost = lpfc_shost_from_vport(vport);
 	spin_lock_irq(shost->host_lock);
-	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp)
-		if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport)
+	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+		if (!NLP_CHK_NODE_ACT(ndlp))
+			continue;
+		if (ndlp->rport)
 			ndlp->rport->dev_loss_tmo = vport->cfg_devloss_tmo;
+	}
 	spin_unlock_irq(shost->host_lock);
 }
 
@@ -4467,9 +4484,11 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
 		return -EINVAL;
 
 	phba->cfg_fcp_imax = (uint32_t)val;
+	phba->initial_imax = phba->cfg_fcp_imax;
 
 	for (i = 0; i < phba->io_channel_irqs; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
-		lpfc_modify_hba_eq_delay(phba, i);
+		lpfc_modify_hba_eq_delay(phba, i, LPFC_MAX_EQ_DELAY_EQID_CNT,
+					 val);
 
 	return strlen(buf);
 }
@@ -4524,6 +4543,16 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val)
 static DEVICE_ATTR(lpfc_fcp_imax, S_IRUGO | S_IWUSR,
 		   lpfc_fcp_imax_show, lpfc_fcp_imax_store);
 
+/*
+ * lpfc_auto_imax: Controls Auto-interrupt coalescing values support.
+ *       0       No auto_imax support
+ *       1       auto imax on
+ * Auto imax will change the value of fcp_imax on a per EQ basis, using
+ * the EQ Delay Multiplier, depending on the activity for that EQ.
+ * Value range [0,1]. Default value is 1.
+ */
+LPFC_ATTR_RW(auto_imax, 1, 0, 1, "Enable Auto imax");
+
 /**
  * lpfc_state_show - Display current driver CPU affinity
  * @dev: class converted to a Scsi_host structure.
@@ -5150,6 +5179,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
 	&dev_attr_lpfc_task_mgmt_tmo,
 	&dev_attr_lpfc_use_msi,
 	&dev_attr_lpfc_nvme_oas,
+	&dev_attr_lpfc_auto_imax,
 	&dev_attr_lpfc_fcp_imax,
 	&dev_attr_lpfc_fcp_cpu_map,
 	&dev_attr_lpfc_fcp_io_channel,
@@ -6168,6 +6198,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 	lpfc_enable_SmartSAN_init(phba, lpfc_enable_SmartSAN);
 	lpfc_use_msi_init(phba, lpfc_use_msi);
 	lpfc_nvme_oas_init(phba, lpfc_nvme_oas);
+	lpfc_auto_imax_init(phba, lpfc_auto_imax);
 	lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
 	lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
 	lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
@@ -6212,6 +6243,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
 		phba->cfg_enable_fc4_type |= LPFC_ENABLE_FCP;
 	}
 
+	if (phba->cfg_auto_imax && !phba->cfg_fcp_imax)
+		phba->cfg_auto_imax = 0;
+	phba->initial_imax = phba->cfg_fcp_imax;
+
 	/* A value of 0 means use the number of CPUs found in the system */
 	if (phba->cfg_fcp_io_channel == 0)
 		phba->cfg_fcp_io_channel = phba->sli4_hba.num_present_cpu;
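The role-printing hunk above (and its twin in the debugfs section below) replaces a switch with an if-chain because the nvme_fc port_role is a bitmask: one remote port can be initiator, target, and discovery service at once, and a switch can only match one exact value. A standalone sketch of the logic (flag values illustrative, not the transport's constants):

	#include <stdio.h>

	#define ROLE_INITIATOR 0x1
	#define ROLE_TARGET    0x2
	#define ROLE_DISCOVERY 0x4

	static void demo_print_roles(unsigned int role)
	{
		if (role & ROLE_INITIATOR)
			printf("INITIATOR ");
		if (role & ROLE_TARGET)
			printf("TARGET ");
		if (role & ROLE_DISCOVERY)
			printf("DISCSRVC ");
		/* Anything outside the known flags is reported, not dropped. */
		if (role & ~(ROLE_INITIATOR | ROLE_TARGET | ROLE_DISCOVERY))
			printf("UNKNOWN ROLE x%x", role);
		printf("\n");
	}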
@@ -503,26 +503,23 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type)
 			Did, vport->fc_flag, vport->fc_rscn_id_cnt);
 
 		/*
-		 * This NPortID was previously a FCP target,
+		 * This NPortID was previously a FCP/NVMe target,
 		 * Don't even bother to send GFF_ID.
 		 */
 		ndlp = lpfc_findnode_did(vport, Did);
-		if (ndlp && NLP_CHK_NODE_ACT(ndlp))
-			ndlp->nlp_fc4_type = fc4_type;
-
-		if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
-			ndlp->nlp_fc4_type = fc4_type;
-
-			if (ndlp->nlp_type & NLP_FCP_TARGET)
+		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
+		    (ndlp->nlp_type &
+		    (NLP_FCP_TARGET | NLP_NVME_TARGET))) {
+			if (fc4_type == FC_TYPE_FCP)
+				ndlp->nlp_fc4_type |= NLP_FC4_FCP;
+			if (fc4_type == FC_TYPE_NVME)
+				ndlp->nlp_fc4_type |= NLP_FC4_NVME;
 			lpfc_setup_disc_node(vport, Did);
-
-			else if (lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID,
+		} else if (lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID,
 				   0, Did) == 0)
 			vport->num_disc_nodes++;
-
-			else
-				lpfc_setup_disc_node(vport, Did);
-		}
+		else
+			lpfc_setup_disc_node(vport, Did);
 	} else {
 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
 			"Skip2 GID_FTrsp: did:x%x flg:x%x cnt:%d",
@@ -323,7 +323,7 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
 	raw_index = phba->hbq_get[i];
 	getidx = le32_to_cpu(raw_index);
 	len +=  snprintf(buf+len, size-len,
-		"entrys:%d bufcnt:%d Put:%d nPut:%d localGet:%d hbaGet:%d\n",
+		"entries:%d bufcnt:%d Put:%d nPut:%d localGet:%d hbaGet:%d\n",
 		hbqs->entry_count, hbqs->buffer_count, hbqs->hbqPutIdx,
 		hbqs->next_hbqPutIdx, hbqs->local_hbqGetIdx, getidx);
 
@@ -550,8 +550,6 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
 	struct lpfc_nodelist *ndlp;
 	unsigned char *statep;
 	struct nvme_fc_local_port *localport;
-	struct lpfc_nvme_lport *lport;
-	struct lpfc_nvme_rport *rport;
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct nvme_fc_remote_port *nrport;
 
@@ -623,6 +621,13 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
 				ndlp->nlp_sid);
 		if (ndlp->nlp_type & NLP_FCP_INITIATOR)
 			len += snprintf(buf+len, size-len, "FCP_INITIATOR ");
+		if (ndlp->nlp_type & NLP_NVME_TARGET)
+			len += snprintf(buf + len,
+					size - len, "NVME_TGT sid:%d ",
+					NLP_NO_SID);
+		if (ndlp->nlp_type & NLP_NVME_INITIATOR)
+			len += snprintf(buf + len,
+					size - len, "NVME_INITIATOR ");
 		len += snprintf(buf+len, size-len, "usgmap:%x ",
 			ndlp->nlp_usg_map);
 		len += snprintf(buf+len, size-len, "refcnt:%x",
@@ -660,7 +665,6 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
 		goto out_exit;
 
 	spin_lock_irq(shost->host_lock);
-	lport = (struct lpfc_nvme_lport *)localport->private;
 
 	/* Port state is only one of two values for now. */
 	if (localport->port_id)
@@ -673,9 +677,12 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
 			localport->port_id, statep);
 
 	len += snprintf(buf + len, size - len, "\tRport List:\n");
-	list_for_each_entry(rport, &lport->rport_list, list) {
+	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
 		/* local short-hand pointer. */
-		nrport = rport->remoteport;
+		if (!ndlp->nrport)
+			continue;
+
+		nrport = ndlp->nrport->remoteport;
 
 		/* Port state is only one of two values for now. */
 		switch (nrport->port_state) {
@@ -698,26 +705,23 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
 				nrport->port_name);
 		len += snprintf(buf + len, size - len, "WWNN x%llx ",
 				nrport->node_name);
-		switch (nrport->port_role) {
-		case FC_PORT_ROLE_NVME_INITIATOR:
+
+		/* An NVME rport can have multiple roles. */
+		if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR)
 			len += snprintf(buf + len, size - len,
-					"NVME INITIATOR ");
-			break;
-		case FC_PORT_ROLE_NVME_TARGET:
+					"INITIATOR ");
+		if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET)
 			len += snprintf(buf + len, size - len,
-					"NVME TARGET ");
-			break;
-		case FC_PORT_ROLE_NVME_DISCOVERY:
+					"TARGET ");
+		if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY)
 			len += snprintf(buf + len, size - len,
-					"NVME DISCOVERY ");
-			break;
-		default:
+					"DISCSRVC ");
+		if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
+					  FC_PORT_ROLE_NVME_TARGET |
+					  FC_PORT_ROLE_NVME_DISCOVERY))
 			len += snprintf(buf + len, size - len,
 					"UNKNOWN ROLE x%x",
 					nrport->port_role);
-			break;
-		}
 
 		/* Terminate the string. */
 		len += snprintf(buf + len, size - len, "\n");
 	}
@@ -746,6 +750,7 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
 	struct lpfc_hba   *phba = vport->phba;
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
+	uint64_t tot, data1, data2, data3;
 	int len = 0;
 	int cnt;
 
@@ -843,11 +848,21 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
 			spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
 		}
 
+		spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
+		spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
+		tot = phba->sli4_hba.nvmet_xri_cnt -
+			(phba->sli4_hba.nvmet_ctx_get_cnt +
+			phba->sli4_hba.nvmet_ctx_put_cnt);
+		spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
+		spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
+
 		len += snprintf(buf + len, size - len,
-				"IO_CTX: %08x outstanding %08x total %08x\n",
-				phba->sli4_hba.nvmet_ctx_cnt,
+				"IO_CTX: %08x  WAIT: cur %08x tot %08x\n"
+				"CTX Outstanding %08llx\n",
+				phba->sli4_hba.nvmet_xri_cnt,
 				phba->sli4_hba.nvmet_io_wait_cnt,
-				phba->sli4_hba.nvmet_io_wait_total);
+				phba->sli4_hba.nvmet_io_wait_total,
+				tot);
 	} else {
 		if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
 			return len;
@@ -856,18 +871,22 @@ lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
 				"\nNVME Lport Statistics\n");
 
 		len += snprintf(buf + len, size - len,
-				"LS: Xmt %016llx Cmpl %016llx\n",
-				phba->fc4NvmeLsRequests,
-				phba->fc4NvmeLsCmpls);
+				"LS: Xmt %016x Cmpl %016x\n",
+				atomic_read(&phba->fc4NvmeLsRequests),
+				atomic_read(&phba->fc4NvmeLsCmpls));
+
+		tot = atomic_read(&phba->fc4NvmeIoCmpls);
+		data1 = atomic_read(&phba->fc4NvmeInputRequests);
+		data2 = atomic_read(&phba->fc4NvmeOutputRequests);
+		data3 = atomic_read(&phba->fc4NvmeControlRequests);
 
 		len += snprintf(buf + len, size - len,
 				"FCP: Rd %016llx Wr %016llx IO %016llx\n",
-				phba->fc4NvmeInputRequests,
-				phba->fc4NvmeOutputRequests,
-				phba->fc4NvmeControlRequests);
+				data1, data2, data3);
 
 		len += snprintf(buf + len, size - len,
-				" Cmpl %016llx\n", phba->fc4NvmeIoCmpls);
+				" Cmpl %016llx Outstanding %016llx\n",
+				tot, (data1 + data2 + data3) - tot);
 	}
 
 	return len;
@@ -3229,9 +3248,9 @@ __lpfc_idiag_print_eq(struct lpfc_queue *qp, char *eqtype,
 
 	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
 			"\n%s EQ info: EQ-STAT[max:x%x noE:x%x "
-			"bs:x%x proc:x%llx]\n",
+			"bs:x%x proc:x%llx eqd %d]\n",
 			eqtype, qp->q_cnt_1, qp->q_cnt_2, qp->q_cnt_3,
-			(unsigned long long)qp->q_cnt_4);
+			(unsigned long long)qp->q_cnt_4, qp->q_mode);
 	len += snprintf(pbuffer + len, LPFC_QUE_INFO_GET_BUF_SIZE - len,
 			"EQID[%02d], QE-CNT[%04d], QE-SZ[%04d], "
 			"HST-IDX[%04d], PRT-IDX[%04d], PST[%03d]",
@@ -2168,6 +2168,19 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 			 ndlp->nlp_fc4_type, ndlp->nlp_DID);
 		return 1;
 	}
 
+	/* SLI3 ports don't support NVME.  If this rport is a strict NVME
+	 * FC4 type, implicitly LOGO.
+	 */
+	if (phba->sli_rev == LPFC_SLI_REV3 &&
+	    ndlp->nlp_fc4_type == NLP_FC4_NVME) {
+		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
+				 "3088 Rport fc4 type 0x%x not supported by SLI3 adapter\n",
+				 ndlp->nlp_type);
+		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
+		return 1;
+	}
+
 	elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
 				     ndlp->nlp_DID, elscmd);
 	if (!elsiocb)
@@ -2268,7 +2281,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 	/* The driver supports 2 FC4 types.  Make sure
 	 * a PRLI is issued for all types before exiting.
 	 */
-	if (local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME))
+	if (phba->sli_rev == LPFC_SLI_REV4 &&
+	    local_nlp_type & (NLP_FC4_FCP | NLP_FC4_NVME))
 		goto send_next_prli;
 
 	return 0;
@@ -3332,6 +3346,19 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		 */
 		switch (stat.un.b.lsRjtRsnCode) {
 		case LSRJT_UNABLE_TPC:
+			/* The driver has a VALID PLOGI but the rport has
+			 * rejected the PRLI - can't do it now.  Delay
+			 * for 1 second and try again - don't care about
+			 * the explanation.
+			 */
+			if (cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) {
+				delay = 1000;
+				maxretry = lpfc_max_els_tries + 1;
+				retry = 1;
+				break;
+			}
+
+			/* Legacy bug fix code for targets with PLOGI delays. */
 			if (stat.un.b.lsRjtRsnCodeExp ==
 			    LSEXP_CMD_IN_PROGRESS) {
 				if (cmd == ELS_CMD_PLOGI) {
@@ -3350,9 +3377,7 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 				retry = 1;
 				break;
 			}
-			if ((cmd == ELS_CMD_PLOGI) ||
-			    (cmd == ELS_CMD_PRLI) ||
-			    (cmd == ELS_CMD_NVMEPRLI)) {
+			if (cmd == ELS_CMD_PLOGI) {
 				delay = 1000;
 				maxretry = lpfc_max_els_tries + 1;
 				retry = 1;
@@ -5678,27 +5703,13 @@ lpfc_els_rcv_lcb(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 		rjt_err = LSRJT_CMD_UNSUPPORTED;
 		goto rjt;
 	}
-	if (beacon->lcb_frequency == 0) {
+	if (beacon->lcb_sub_command != LPFC_LCB_ON &&
+	    beacon->lcb_sub_command != LPFC_LCB_OFF) {
 		rjt_err = LSRJT_CMD_UNSUPPORTED;
 		goto rjt;
 	}
-	if ((beacon->lcb_type != LPFC_LCB_GREEN) &&
-	    (beacon->lcb_type != LPFC_LCB_AMBER)) {
-		rjt_err = LSRJT_CMD_UNSUPPORTED;
-		goto rjt;
-	}
-	if ((beacon->lcb_sub_command != LPFC_LCB_ON) &&
-	    (beacon->lcb_sub_command != LPFC_LCB_OFF)) {
-		rjt_err = LSRJT_CMD_UNSUPPORTED;
-		goto rjt;
-	}
-	if ((beacon->lcb_sub_command == LPFC_LCB_ON) &&
+	if (beacon->lcb_sub_command == LPFC_LCB_ON &&
 	    (beacon->lcb_type != LPFC_LCB_GREEN) &&
 	    (beacon->lcb_type != LPFC_LCB_AMBER)) {
 		rjt_err = LSRJT_CMD_UNSUPPORTED;
 		goto rjt;
 	}
-	if (be16_to_cpu(beacon->lcb_duration) != 0) {
+	if (beacon->lcb_sub_command == LPFC_LCB_ON &&
+	    be16_to_cpu(beacon->lcb_duration) != 0) {
 		rjt_err = LSRJT_CMD_UNSUPPORTED;
 		goto rjt;
 	}
@@ -4167,14 +4167,14 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 			lpfc_unregister_remote_port(ndlp);
 		}
 
+		/* Notify the NVME transport of this rport's loss on the
+		 * Initiator.  For NVME Target, should upcall transport
+		 * in the else clause when API available.
+		 */
 		if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
 			vport->phba->nport_event_cnt++;
 			if (vport->phba->nvmet_support == 0)
+				/* Start devloss */
 				lpfc_nvme_unregister_port(vport, ndlp);
+			else
+				/* NVMET has no upcall. */
+				lpfc_nlp_put(ndlp);
 		}
 	}
 
@@ -4182,8 +4182,10 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 
 	if (new_state ==  NLP_STE_MAPPED_NODE ||
 	    new_state == NLP_STE_UNMAPPED_NODE) {
-		if ((ndlp->nlp_fc4_type & NLP_FC4_FCP) ||
-		    (ndlp->nlp_DID == Fabric_DID)) {
+		if (ndlp->nlp_fc4_type & NLP_FC4_FCP ||
+		    ndlp->nlp_DID == Fabric_DID ||
+		    ndlp->nlp_DID == NameServer_DID ||
+		    ndlp->nlp_DID == FDMI_DID) {
 			vport->phba->nport_event_cnt++;
 			/*
 			 * Tell the fc transport about the port, if we haven't
@@ -4192,7 +4194,8 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 			lpfc_register_remote_port(vport, ndlp);
 		}
 		/* Notify the NVME transport of this new rport. */
-		if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
+		if (vport->phba->sli_rev >= LPFC_SLI_REV4 &&
+		    ndlp->nlp_fc4_type & NLP_FC4_NVME) {
 			if (vport->phba->nvmet_support == 0) {
 				/* Register this rport with the transport.
 				 * Initiators take the NDLP ref count in
@@ -197,6 +197,7 @@ struct lpfc_sli_intf {
 
 /* Delay Multiplier constant */
 #define LPFC_DMULT_CONST       651042
+#define LPFC_DMULT_MAX         1023
 
 /* Configuration of Interrupts / sec for entire HBA port */
 #define LPFC_MIN_IMAX          5000
@@ -657,6 +658,15 @@ struct lpfc_register {
 #define LPFC_CTL_PORT_ER1_OFFSET	0x40C
 #define LPFC_CTL_PORT_ER2_OFFSET	0x410
 
+#define LPFC_CTL_PORT_EQ_DELAY_OFFSET	0x418
+#define lpfc_sliport_eqdelay_delay_SHIFT	16
+#define lpfc_sliport_eqdelay_delay_MASK		0xffff
+#define lpfc_sliport_eqdelay_delay_WORD		word0
+#define lpfc_sliport_eqdelay_id_SHIFT		0
+#define lpfc_sliport_eqdelay_id_MASK		0xfff
+#define lpfc_sliport_eqdelay_id_WORD		word0
+#define LPFC_SEC_TO_USEC		1000000
+
 /* The following Registers apply to SLI4 if_type 0 UCNAs. They typically
  * reside in BAR 2.
  */
@@ -3258,6 +3268,10 @@ struct lpfc_sli4_parameters {
 #define cfg_xib_SHIFT			4
 #define cfg_xib_MASK			0x00000001
 #define cfg_xib_WORD			word19
+#define cfg_eqdr_SHIFT			8
+#define cfg_eqdr_MASK			0x00000001
+#define cfg_eqdr_WORD			word19
+#define LPFC_NODELAY_MAX_IO		32
 };
 
 #define LPFC_SET_UE_RECOVERY		0x10
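The new LPFC_CTL_PORT_EQ_DELAY_OFFSET register packs two fields into one 32-bit word, described by the SHIFT/MASK/WORD triplets that lpfc's bf_set() macro consumes. In plain C the composition looks roughly like this (a sketch, not the driver's macro machinery):

	#include <stdint.h>

	/* Pack delay (bits 31:16) and EQ id (bits 11:0) into word0. */
	static uint32_t demo_pack_eq_delay(uint32_t eq_id, uint32_t delay_us)
	{
		uint32_t word0 = 0;

		word0 |= (delay_us & 0xffff) << 16; /* lpfc_sliport_eqdelay_delay */
		word0 |= (eq_id & 0xfff) << 0;      /* lpfc_sliport_eqdelay_id */
		return word0;
	}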
@@ -1249,6 +1249,12 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 	int retval, i;
 	struct lpfc_sli *psli = &phba->sli;
 	LIST_HEAD(completions);
+	struct lpfc_queue *qp;
+	unsigned long time_elapsed;
+	uint32_t tick_cqe, max_cqe, val;
+	uint64_t tot, data1, data2, data3;
+	struct lpfc_register reg_data;
+	void __iomem *eqdreg = phba->sli4_hba.u.if_type2.EQDregaddr;
 
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL)
@@ -1263,6 +1269,98 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 	    (phba->pport->fc_flag & FC_OFFLINE_MODE))
 		return;
 
+	if (phba->cfg_auto_imax) {
+		if (!phba->last_eqdelay_time) {
+			phba->last_eqdelay_time = jiffies;
+			goto skip_eqdelay;
+		}
+		time_elapsed = jiffies - phba->last_eqdelay_time;
+		phba->last_eqdelay_time = jiffies;
+
+		tot = 0xffff;
+		/* Check outstanding IO count */
+		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+			if (phba->nvmet_support) {
+				spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
+				spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
+				tot = phba->sli4_hba.nvmet_xri_cnt -
+					(phba->sli4_hba.nvmet_ctx_get_cnt +
+					phba->sli4_hba.nvmet_ctx_put_cnt);
+				spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
+				spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
+			} else {
+				tot = atomic_read(&phba->fc4NvmeIoCmpls);
+				data1 = atomic_read(
+					&phba->fc4NvmeInputRequests);
+				data2 = atomic_read(
+					&phba->fc4NvmeOutputRequests);
+				data3 = atomic_read(
+					&phba->fc4NvmeControlRequests);
+				tot = (data1 + data2 + data3) - tot;
+			}
+		}
+
+		/* Interrupts per sec per EQ */
+		val = phba->cfg_fcp_imax / phba->io_channel_irqs;
+		tick_cqe = val / CONFIG_HZ; /* Per tick per EQ */
+
+		/* Assume 1 CQE/ISR, calc max CQEs allowed for time duration */
+		max_cqe = time_elapsed * tick_cqe;
+
+		for (i = 0; i < phba->io_channel_irqs; i++) {
+			/* Fast-path EQ */
+			qp = phba->sli4_hba.hba_eq[i];
+			if (!qp)
+				continue;
+
+			/* Use no EQ delay if we don't have many outstanding
+			 * IOs, or if we are only processing 1 CQE/ISR or less.
+			 * Otherwise, assume we can process up to lpfc_fcp_imax
+			 * interrupts per HBA.
+			 */
+			if (tot < LPFC_NODELAY_MAX_IO ||
+			    qp->EQ_cqe_cnt <= max_cqe)
+				val = 0;
+			else
+				val = phba->cfg_fcp_imax;
+
+			if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
+				/* Use EQ Delay Register method */
+
+				/* Convert for EQ Delay register */
+				if (val) {
+					/* First, interrupts per sec per EQ */
+					val = phba->cfg_fcp_imax /
+						phba->io_channel_irqs;
+
+					/* us delay between each interrupt */
+					val = LPFC_SEC_TO_USEC / val;
+				}
+				if (val != qp->q_mode) {
+					reg_data.word0 = 0;
+					bf_set(lpfc_sliport_eqdelay_id,
+					       &reg_data, qp->queue_id);
+					bf_set(lpfc_sliport_eqdelay_delay,
+					       &reg_data, val);
+					writel(reg_data.word0, eqdreg);
+				}
+			} else {
+				/* Use mbox command method */
+				if (val != qp->q_mode)
+					lpfc_modify_hba_eq_delay(phba, i,
+								 1, val);
+			}
+
+			/*
+			 * val is cfg_fcp_imax or 0 for mbox delay or us delay
+			 * between interrupts for EQDR.
+			 */
+			qp->q_mode = val;
+			qp->EQ_cqe_cnt = 0;
+		}
+	}
+
+skip_eqdelay:
 	spin_lock_irq(&phba->pport->work_port_lock);
 
 	if (time_after(phba->last_completion_time +
@@ -2707,13 +2805,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
 		lpfc_disc_state_machine(vport, ndlp, NULL,
 					NLP_EVT_DEVICE_RECOVERY);
 
-		if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
-			/* Remove the NVME transport reference now and
-			 * continue to remove the node.
-			 */
-			lpfc_nlp_put(ndlp);
-		}
-
 		lpfc_disc_state_machine(vport, ndlp, NULL,
 					NLP_EVT_DEVICE_RM);
 	}
@@ -3392,7 +3483,6 @@ lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
 
 	/* For NVMET, ALL remaining XRIs are dedicated for IO processing */
 	nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
-
 	if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
 		/* els xri-sgl expanded */
 		xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
@@ -3596,14 +3686,6 @@ lpfc_get_wwpn(struct lpfc_hba *phba)
 	LPFC_MBOXQ_t *mboxq;
 	MAILBOX_t *mb;
 
-	if (phba->sli_rev < LPFC_SLI_REV4) {
-		/* Reset the port first */
-		lpfc_sli_brdrestart(phba);
-		rc = lpfc_sli_chipset_init(phba);
-		if (rc)
-			return (uint64_t)-1;
-	}
-
 	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
 						GFP_KERNEL);
 	if (!mboxq)
@@ -3757,8 +3839,19 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
 	int i;
 	uint64_t wwn;
 	bool use_no_reset_hba = false;
+	int rc;
 
-	if (lpfc_no_hba_reset_cnt)
+	if (lpfc_no_hba_reset_cnt) {
+		if (phba->sli_rev < LPFC_SLI_REV4 &&
+		    dev == &phba->pcidev->dev) {
+			/* Reset the port first */
+			lpfc_sli_brdrestart(phba);
+			rc = lpfc_sli_chipset_init(phba);
+			if (rc)
+				return NULL;
+		}
 		wwn = lpfc_get_wwpn(phba);
+	}
 
 	for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
 		if (wwn == lpfc_no_hba_reset[i]) {
@@ -5837,7 +5930,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
 		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
 		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
-		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_list);
+		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_get_list);
+		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_put_list);
 		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
 
 	/* Fast-path XRI aborted CQ Event work queue list */
@@ -5846,7 +5940,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 
 	/* This abort list used by worker thread */
 	spin_lock_init(&phba->sli4_hba.sgl_list_lock);
-	spin_lock_init(&phba->sli4_hba.nvmet_io_lock);
+	spin_lock_init(&phba->sli4_hba.nvmet_ctx_get_lock);
+	spin_lock_init(&phba->sli4_hba.nvmet_ctx_put_lock);
 	spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);
 
 	/*
@@ -6731,6 +6826,16 @@ lpfc_create_shost(struct lpfc_hba *phba)
 	phba->fc_arbtov = FF_DEF_ARBTOV;
 
 	atomic_set(&phba->sdev_cnt, 0);
+	atomic_set(&phba->fc4ScsiInputRequests, 0);
+	atomic_set(&phba->fc4ScsiOutputRequests, 0);
+	atomic_set(&phba->fc4ScsiControlRequests, 0);
+	atomic_set(&phba->fc4ScsiIoCmpls, 0);
+	atomic_set(&phba->fc4NvmeInputRequests, 0);
+	atomic_set(&phba->fc4NvmeOutputRequests, 0);
+	atomic_set(&phba->fc4NvmeControlRequests, 0);
+	atomic_set(&phba->fc4NvmeIoCmpls, 0);
+	atomic_set(&phba->fc4NvmeLsRequests, 0);
+	atomic_set(&phba->fc4NvmeLsCmpls, 0);
 	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
 	if (!vport)
 		return -ENODEV;
@@ -7247,6 +7352,9 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
 			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
 		break;
 	case LPFC_SLI_INTF_IF_TYPE_2:
+		phba->sli4_hba.u.if_type2.EQDregaddr =
+			phba->sli4_hba.conf_regs_memmap_p +
+			LPFC_CTL_PORT_EQ_DELAY_OFFSET;
 		phba->sli4_hba.u.if_type2.ERR1regaddr =
 			phba->sli4_hba.conf_regs_memmap_p +
 			LPFC_CTL_PORT_ER1_OFFSET;
@@ -8773,7 +8881,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 	}
 
 	for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
-		lpfc_modify_hba_eq_delay(phba, qidx);
+		lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
+					 phba->cfg_fcp_imax);
 
 	return 0;
 
@@ -9655,6 +9764,7 @@ static int
 lpfc_sli4_enable_msix(struct lpfc_hba *phba)
 {
 	int vectors, rc, index;
+	char *name;
 
 	/* Set up MSI-X multi-message vectors */
 	vectors = phba->io_channel_irqs;
@@ -9673,9 +9783,9 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
 
 	/* Assign MSI-X vectors to interrupt handlers */
 	for (index = 0; index < vectors; index++) {
-		memset(&phba->sli4_hba.handler_name[index], 0, 16);
-		snprintf((char *)&phba->sli4_hba.handler_name[index],
-			 LPFC_SLI4_HANDLER_NAME_SZ,
+		name = phba->sli4_hba.hba_eq_hdl[index].handler_name;
+		memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
+		snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
 			 LPFC_DRIVER_HANDLER_NAME"%d", index);
 
 		phba->sli4_hba.hba_eq_hdl[index].idx = index;
@@ -9684,12 +9794,12 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
 		if (phba->cfg_fof && (index == (vectors - 1)))
 			rc = request_irq(pci_irq_vector(phba->pcidev, index),
 				 &lpfc_sli4_fof_intr_handler, 0,
-				 (char *)&phba->sli4_hba.handler_name[index],
+				 name,
 				 &phba->sli4_hba.hba_eq_hdl[index]);
 		else
 			rc = request_irq(pci_irq_vector(phba->pcidev, index),
 				 &lpfc_sli4_hba_intr_handler, 0,
-				 (char *)&phba->sli4_hba.handler_name[index],
+				 name,
 				 &phba->sli4_hba.hba_eq_hdl[index]);
 		if (rc) {
 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -10241,6 +10351,9 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 	if (bf_get(cfg_xib, mbx_sli4_parameters) && phba->cfg_suppress_rsp)
 		phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
 
+	if (bf_get(cfg_eqdr, mbx_sli4_parameters))
+		phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;
+
 	/* Make sure that sge_supp_len can be handled by the driver */
 	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
 		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;
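The heartbeat hunk above sizes the EQ delay from the configured interrupt ceiling: fcp_imax interrupts per second are spread across io_channel_irqs EQs, and LPFC_SEC_TO_USEC divided by the per-EQ rate gives the microseconds to program between interrupts. A worked example of that arithmetic with assumed numbers:

	#include <stdio.h>

	int main(void)
	{
		unsigned int fcp_imax = 150000;   /* ints/sec for the port, assumed */
		unsigned int io_channel_irqs = 4; /* number of EQs, assumed */

		unsigned int per_eq = fcp_imax / io_channel_irqs; /* 37500 ints/sec */
		unsigned int delay_us = 1000000 / per_eq;         /* 26 us between ints */

		printf("per-EQ rate %u/s -> EQ delay %u us\n", per_eq, delay_us);
		return 0;
	}

When the outstanding I/O count is low (under LPFC_NODELAY_MAX_IO) or the EQ saw at most one CQE per interrupt, the code programs a delay of 0 instead, trading interrupt rate for latency.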
@ -186,13 +186,12 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
|
||||
|
||||
/* Remove this rport from the lport's list - memory is owned by the
|
||||
* transport. Remove the ndlp reference for the NVME transport before
|
||||
* calling state machine to remove the node, this is devloss = 0
|
||||
* semantics.
|
||||
* calling state machine to remove the node.
|
||||
*/
|
||||
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
|
||||
"6146 remoteport delete complete %p\n",
|
||||
remoteport);
|
||||
list_del(&rport->list);
|
||||
ndlp->nrport = NULL;
|
||||
lpfc_nlp_put(ndlp);
|
||||
|
||||
rport_err:
|
||||
@ -212,7 +211,7 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
|
||||
struct lpfc_dmabuf *buf_ptr;
|
||||
struct lpfc_nodelist *ndlp;
|
||||
|
||||
vport->phba->fc4NvmeLsCmpls++;
|
||||
atomic_inc(&vport->phba->fc4NvmeLsCmpls);
|
||||
|
||||
pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
|
||||
status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
|
||||
@ -479,7 +478,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
|
||||
pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
|
||||
&pnvme_lsreq->rspdma);
|
||||
|
||||
vport->phba->fc4NvmeLsRequests++;
|
||||
atomic_inc(&vport->phba->fc4NvmeLsRequests);
|
||||
|
||||
/* Hardcode the wait to 30 seconds. Connections are failing otherwise.
|
||||
* This code allows it all to work.
|
||||
@ -774,7 +773,7 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
|
||||
wcqe);
|
||||
return;
|
||||
}
|
||||
phba->fc4NvmeIoCmpls++;
|
||||
atomic_inc(&phba->fc4NvmeIoCmpls);
|
||||
|
||||
nCmd = lpfc_ncmd->nvmeCmd;
|
||||
rport = lpfc_ncmd->nrport;
|
||||
@ -999,7 +998,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
|
||||
bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
|
||||
NVME_WRITE_CMD);
|
||||
|
||||
phba->fc4NvmeOutputRequests++;
|
||||
atomic_inc(&phba->fc4NvmeOutputRequests);
|
||||
} else {
|
||||
/* Word 7 */
|
||||
bf_set(wqe_cmnd, &wqe->generic.wqe_com,
|
||||
@ -1020,7 +1019,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
|
||||
bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
|
||||
NVME_READ_CMD);
|
||||
|
||||
phba->fc4NvmeInputRequests++;
|
||||
atomic_inc(&phba->fc4NvmeInputRequests);
|
||||
}
|
||||
} else {
|
||||
/* Word 4 */
|
||||
@ -1041,7 +1040,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
|
||||
/* Word 11 */
|
||||
bf_set(wqe_cmd_type, &wqe->generic.wqe_com, NVME_READ_CMD);
|
||||
|
||||
phba->fc4NvmeControlRequests++;
|
||||
atomic_inc(&phba->fc4NvmeControlRequests);
|
||||
}
|
||||
/*
|
||||
* Finish initializing those WQE fields that are independent
|
||||
@ -1362,6 +1361,13 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
|
||||
return 0;
|
||||
|
||||
out_free_nvme_buf:
|
||||
if (lpfc_ncmd->nvmeCmd->sg_cnt) {
|
||||
if (lpfc_ncmd->nvmeCmd->io_dir == NVMEFC_FCP_WRITE)
|
||||
atomic_dec(&phba->fc4NvmeOutputRequests);
|
||||
else
|
||||
atomic_dec(&phba->fc4NvmeInputRequests);
|
||||
} else
|
||||
atomic_dec(&phba->fc4NvmeControlRequests);
|
||||
lpfc_release_nvme_buf(phba, lpfc_ncmd);
|
||||
out_fail:
|
||||
return ret;
|
||||
@ -1421,7 +1427,6 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
|
||||
struct lpfc_nvme_lport *lport;
|
||||
struct lpfc_vport *vport;
|
||||
struct lpfc_hba *phba;
|
||||
struct lpfc_nodelist *ndlp;
|
||||
struct lpfc_nvme_rport *rport;
|
||||
struct lpfc_nvme_buf *lpfc_nbuf;
|
||||
struct lpfc_iocbq *abts_buf;
|
||||
@ -1443,38 +1448,6 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
|
||||
pnvme_rport->port_id,
|
||||
pnvme_fcreq);
|
||||
|
||||
/*
|
||||
* Catch race where our node has transitioned, but the
|
||||
* transport is still transitioning.
|
||||
*/
|
||||
ndlp = rport->ndlp;
|
||||
if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
|
||||
lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_ABTS,
|
||||
"6054 rport %p, ndlp %p, DID x%06x ndlp "
|
||||
" not ready.\n",
|
||||
rport, ndlp, pnvme_rport->port_id);
|
||||
|
||||
ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
|
||||
if (!ndlp) {
|
||||
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
|
||||
"6055 Could not find node for "
|
||||
"DID %x\n",
|
||||
pnvme_rport->port_id);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
/* The remote node has to be ready to send an abort. */
|
||||
if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) &&
|
||||
!(ndlp->nlp_type & NLP_NVME_TARGET)) {
|
||||
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
|
||||
"6048 rport %p, DID x%06x not ready for "
|
||||
"IO. State x%x, Type x%x\n",
|
||||
rport, pnvme_rport->port_id,
|
||||
ndlp->nlp_state, ndlp->nlp_type);
|
||||
return;
|
||||
}
|
||||
|
||||
/* If the hba is getting reset, this flag is set. It is
|
||||
* cleared when the reset is complete and rings reestablished.
|
||||
*/
|
||||
@ -1535,7 +1508,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
|
||||
|
||||
lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
|
||||
nvmereq_wqe->sli4_xritag,
|
||||
nvmereq_wqe->hba_wqidx, ndlp->nlp_DID);
|
||||
nvmereq_wqe->hba_wqidx, pnvme_rport->port_id);
|
||||
|
||||
/* Outstanding abort is in progress */
|
||||
if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
|
||||
@ -2208,7 +2181,6 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
|
||||
lport = (struct lpfc_nvme_lport *)localport->private;
|
||||
vport->localport = localport;
|
||||
lport->vport = vport;
|
||||
INIT_LIST_HEAD(&lport->rport_list);
|
||||
vport->nvmei_support = 1;
|
||||
len = lpfc_new_nvme_buf(vport, phba->sli4_hba.nvme_xri_max);
|
||||
vport->phba->total_nvme_bufs += len;
|
||||
@ -2233,7 +2205,6 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
|
||||
#if (IS_ENABLED(CONFIG_NVME_FC))
|
||||
struct nvme_fc_local_port *localport;
|
||||
struct lpfc_nvme_lport *lport;
|
||||
struct lpfc_nvme_rport *rport = NULL, *rport_next = NULL;
|
||||
int ret;
|
||||
|
||||
if (vport->nvmei_support == 0)
|
||||
@ -2246,19 +2217,6 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
|
||||
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
|
||||
"6011 Destroying NVME localport %p\n",
|
||||
localport);
|
||||
list_for_each_entry_safe(rport, rport_next, &lport->rport_list, list) {
|
||||
/* The last node ref has to get released now before the rport
|
||||
* private memory area is released by the transport.
|
||||
*/
|
||||
list_del(&rport->list);
|
||||
|
||||
init_completion(&rport->rport_unreg_done);
|
||||
ret = nvme_fc_unregister_remoteport(rport->remoteport);
|
||||
if (ret)
|
||||
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
|
||||
"6008 rport fail destroy %x\n", ret);
|
||||
wait_for_completion_timeout(&rport->rport_unreg_done, 5);
}

/* lport's rport list is clear. Unregister
* lport and release resources.
@@ -2340,40 +2298,32 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
localport = vport->localport;
lport = (struct lpfc_nvme_lport *)localport->private;

if (ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_INITIATOR)) {

/* The driver isn't expecting the rport wwn to change
* but it might get a different DID on a different
* fabric.
/* NVME rports are not preserved across devloss.
* Just register this instance. Note, rpinfo->dev_loss_tmo
* is left 0 to indicate accept transport defaults. The
* driver communicates port role capabilities consistent
* with the PRLI response data.
*/
list_for_each_entry(rport, &lport->rport_list, list) {
if (rport->remoteport->port_name !=
wwn_to_u64(ndlp->nlp_portname.u.wwn))
continue;
lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
"6035 lport %p, found matching rport "
"at wwpn 0x%llx, Data: x%x x%x x%x "
"x%06x\n",
lport,
rport->remoteport->port_name,
rport->remoteport->port_id,
rport->remoteport->port_role,
ndlp->nlp_type,
ndlp->nlp_DID);
remote_port = rport->remoteport;
if ((remote_port->port_id == 0) &&
(remote_port->port_role ==
FC_PORT_ROLE_NVME_DISCOVERY)) {
remote_port->port_id = ndlp->nlp_DID;
remote_port->port_role &=
~FC_PORT_ROLE_NVME_DISCOVERY;
memset(&rpinfo, 0, sizeof(struct nvme_fc_port_info));
rpinfo.port_id = ndlp->nlp_DID;
if (ndlp->nlp_type & NLP_NVME_TARGET)
remote_port->port_role |=
FC_PORT_ROLE_NVME_TARGET;
rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
if (ndlp->nlp_type & NLP_NVME_INITIATOR)
remote_port->port_role |=
FC_PORT_ROLE_NVME_INITIATOR;
rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;

if (ndlp->nlp_type & NLP_NVME_DISCOVERY)
rpinfo.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;

rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port);
if (!ret) {
/* If the ndlp already has an nrport, this is just
* a resume of the existing rport. Else this is a
* new rport.
*/
rport = remote_port->private;
if (ndlp->nrport == rport) {
lpfc_printf_vlog(ndlp->vport, KERN_INFO,
LOG_NVME_DISC,
"6014 Rebinding lport to "
@@ -2384,41 +2334,24 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
remote_port->port_role,
ndlp->nlp_type,
ndlp->nlp_DID);
}
return 0;
}

/* NVME rports are not preserved across devloss.
* Just register this instance.
*/
rpinfo.port_id = ndlp->nlp_DID;
rpinfo.port_role = 0;
if (ndlp->nlp_type & NLP_NVME_TARGET)
rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
if (ndlp->nlp_type & NLP_NVME_INITIATOR)
rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;
rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
ret = nvme_fc_register_remoteport(localport, &rpinfo,
&remote_port);
if (!ret) {
rport = remote_port->private;
} else {
/* New rport. */
rport->remoteport = remote_port;
rport->lport = lport;
rport->ndlp = lpfc_nlp_get(ndlp);
if (!rport->ndlp)
return -1;
ndlp->nrport = rport;
INIT_LIST_HEAD(&rport->list);
list_add_tail(&rport->list, &lport->rport_list);
lpfc_printf_vlog(vport, KERN_INFO,
LOG_NVME_DISC | LOG_NODE,
"6022 Binding new rport to lport %p "
"Rport WWNN 0x%llx, Rport WWPN 0x%llx "
"DID x%06x Role x%x\n",
"6022 Binding new rport to "
"lport %p Rport WWNN 0x%llx, "
"Rport WWPN 0x%llx DID "
"x%06x Role x%x\n",
lport,
rpinfo.node_name, rpinfo.port_name,
rpinfo.port_id, rpinfo.port_role);
}
} else {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_NVME_DISC | LOG_NODE,
@@ -2426,13 +2359,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
"err: %d, DID x%06x\n",
ret, ndlp->nlp_DID);
}
} else {
ret = -EINVAL;
lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
"6027 Unknown nlp_type x%x on DID x%06x "
"ndlp %p. Not Registering nvme rport\n",
ndlp->nlp_type, ndlp->nlp_DID, ndlp);
}

return ret;
#else
return 0;
@@ -2460,7 +2387,6 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct lpfc_nvme_lport *lport;
struct lpfc_nvme_rport *rport;
struct nvme_fc_remote_port *remoteport;
unsigned long wait_tmo;

localport = vport->localport;

@@ -2491,6 +2417,10 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
*/
if (ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_INITIATOR)) {
init_completion(&rport->rport_unreg_done);

/* No concern about the role change on the nvme remoteport.
* The transport will update it.
*/
ret = nvme_fc_unregister_remoteport(remoteport);
if (ret != 0) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
@@ -2499,17 +2429,6 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
ret, remoteport->port_state);
}

/* Wait for the driver's delete completion routine to finish
* before proceeding. This guarantees the transport and driver
* have completed the unreg process.
*/
wait_tmo = msecs_to_jiffies(5000);
ret = wait_for_completion_timeout(&rport->rport_unreg_done,
wait_tmo);
if (ret == 0) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
"6169 Unreg nvme wait timeout\n");
}
}
return;

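The unregister path above shows a common kernel pattern: arm a completion before triggering an asynchronous teardown, then wait on it with a bounded timeout so that a lost callback cannot hang the caller forever. A minimal sketch of that pattern follows; it is illustrative kernel-style code with hypothetical names, not code from this commit.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Hypothetical port object; only the completion matters here. */
struct demo_port {
	struct completion unreg_done;
};

static int demo_unregister(struct demo_port *p)
{
	unsigned long tmo = msecs_to_jiffies(5000);

	/* Arm the completion before kicking off the async teardown. */
	init_completion(&p->unreg_done);
	/* ... start the asynchronous unregister here ... */

	/* wait_for_completion_timeout() returns 0 on timeout. */
	if (!wait_for_completion_timeout(&p->unreg_done, tmo))
		return -ETIMEDOUT;
	return 0;
}
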
|
@ -35,13 +35,11 @@ struct lpfc_nvme_qhandle {
|
||||
/* Declare nvme-based local and remote port definitions. */
|
||||
struct lpfc_nvme_lport {
|
||||
struct lpfc_vport *vport;
|
||||
struct list_head rport_list;
|
||||
struct completion lport_unreg_done;
|
||||
/* Add stats counters here */
};

struct lpfc_nvme_rport {
struct list_head list;
struct lpfc_nvme_lport *lport;
struct nvme_fc_remote_port *remoteport;
struct lpfc_nodelist *ndlp;

@@ -112,6 +112,15 @@ lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,

status = bf_get(lpfc_wcqe_c_status, wcqe);
result = wcqe->parameter;
ctxp = cmdwqe->context2;

if (ctxp->state != LPFC_NVMET_STE_LS_RSP || ctxp->entry_cnt != 2) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6410 NVMET LS cmpl state mismatch IO x%x: "
"%d %d\n",
ctxp->oxid, ctxp->state, ctxp->entry_cnt);
}

if (!phba->targetport)
goto out;

@@ -123,15 +132,14 @@ lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
atomic_inc(&tgtp->xmt_ls_rsp_cmpl);

out:
ctxp = cmdwqe->context2;
rsp = &ctxp->ctx.ls_req;

lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n",
ctxp->oxid, status, result);

lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
"6038 %s: Entrypoint: ctx %p status %x/%x\n", __func__,
ctxp, status, result);
"6038 NVMET LS rsp cmpl: %d %d oxid x%x\n",
status, result, ctxp->oxid);

lpfc_nlp_put(cmdwqe->context1);
cmdwqe->context2 = NULL;
@@ -162,7 +170,6 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
struct lpfc_nvmet_tgtport *tgtp;
struct fc_frame_header *fc_hdr;
struct rqb_dmabuf *nvmebuf;
struct lpfc_dmabuf *hbufp;
uint32_t *payload;
uint32_t size, oxid, sid, rc;
unsigned long iflag;
@@ -173,11 +180,16 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
ctxp->txrdy = NULL;
ctxp->txrdy_phys = 0;
}

if (ctxp->state == LPFC_NVMET_STE_FREE) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6411 NVMET free, already free IO x%x: %d %d\n",
ctxp->oxid, ctxp->state, ctxp->entry_cnt);
}
ctxp->state = LPFC_NVMET_STE_FREE;

spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
if (phba->sli4_hba.nvmet_io_wait_cnt) {
hbufp = &nvmebuf->hbuf;
list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
nvmebuf, struct rqb_dmabuf,
hbuf.list);
@@ -193,7 +205,6 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
sid = sli4_sid_from_fc_hdr(fc_hdr);

ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
memset(ctxp, 0, sizeof(ctxp->ctx));
ctxp->wqeq = NULL;
ctxp->txrdy = NULL;
ctxp->offset = 0;
@@ -256,11 +267,11 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
}
spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);

spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag);
spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_put_lock, iflag);
list_add_tail(&ctx_buf->list,
&phba->sli4_hba.lpfc_nvmet_ctx_list);
phba->sli4_hba.nvmet_ctx_cnt++;
spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag);
&phba->sli4_hba.lpfc_nvmet_ctx_put_list);
phba->sli4_hba.nvmet_ctx_put_cnt++;
spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_put_lock, iflag);
#endif
}

@@ -580,8 +591,17 @@ lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
int rc;

lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
"6023 %s: Entrypoint ctx %p %p\n", __func__,
ctxp, tgtport);
"6023 NVMET LS rsp oxid x%x\n", ctxp->oxid);

if ((ctxp->state != LPFC_NVMET_STE_LS_RCV) ||
(ctxp->entry_cnt != 1)) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6412 NVMET LS rsp state mismatch "
"oxid x%x: %d %d\n",
ctxp->oxid, ctxp->state, ctxp->entry_cnt);
}
ctxp->state = LPFC_NVMET_STE_LS_RSP;
ctxp->entry_cnt++;

nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
rsp->rsplen);
@@ -751,15 +771,14 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
unsigned long flags;

lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6103 Abort op: oxri x%x flg x%x cnt %d\n",
ctxp->oxid, ctxp->flag, ctxp->entry_cnt);
"6103 NVMET Abort op: oxri x%x flg x%x ste %d\n",
ctxp->oxid, ctxp->flag, ctxp->state);

lpfc_nvmeio_data(phba, "NVMET FCP ABRT: "
"xri x%x flg x%x cnt x%x\n",
ctxp->oxid, ctxp->flag, ctxp->entry_cnt);
lpfc_nvmeio_data(phba, "NVMET FCP ABRT: xri x%x flg x%x ste x%x\n",
ctxp->oxid, ctxp->flag, ctxp->state);

atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
ctxp->entry_cnt++;

spin_lock_irqsave(&ctxp->ctxlock, flags);

/* Since iaab/iaar are NOT set, we need to check
@@ -770,11 +789,16 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
return;
}
ctxp->flag |= LPFC_NVMET_ABORT_OP;
if (ctxp->flag & LPFC_NVMET_IO_INP)
lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,

/* A state of LPFC_NVMET_STE_RCV means we have just received
* the NVME command and have not started processing it.
* (by issuing any IO WQEs on this exchange yet)
*/
if (ctxp->state == LPFC_NVMET_STE_RCV)
lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
ctxp->oxid);
else
lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
ctxp->oxid);
spin_unlock_irqrestore(&ctxp->ctxlock, flags);
}
@@ -790,6 +814,13 @@ lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
unsigned long flags;
bool aborting = false;

if (ctxp->state != LPFC_NVMET_STE_DONE &&
ctxp->state != LPFC_NVMET_STE_ABORT) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6413 NVMET release bad state %d %d oxid x%x\n",
ctxp->state, ctxp->entry_cnt, ctxp->oxid);
}

spin_lock_irqsave(&ctxp->ctxlock, flags);
if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
(ctxp->flag & LPFC_NVMET_XBUSY)) {
@@ -828,37 +859,55 @@ static struct nvmet_fc_target_template lpfc_tgttemplate = {
.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
};

void
static void
lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
{
struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
unsigned long flags;

list_for_each_entry_safe(
ctx_buf, next_ctx_buf,
&phba->sli4_hba.lpfc_nvmet_ctx_list, list) {
spin_lock_irqsave(
&phba->sli4_hba.abts_nvme_buf_list_lock, flags);
spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, flags);
spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
list_for_each_entry_safe(ctx_buf, next_ctx_buf,
&phba->sli4_hba.lpfc_nvmet_ctx_get_list, list) {
spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
list_del_init(&ctx_buf->list);
spin_unlock_irqrestore(
&phba->sli4_hba.abts_nvme_buf_list_lock, flags);
spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
__lpfc_clear_active_sglq(phba,
ctx_buf->sglq->sli4_lxritag);
ctx_buf->sglq->state = SGL_FREED;
ctx_buf->sglq->ndlp = NULL;

spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags);
spin_lock(&phba->sli4_hba.sgl_list_lock);
list_add_tail(&ctx_buf->sglq->list,
&phba->sli4_hba.lpfc_nvmet_sgl_list);
spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock,
flags);
spin_unlock(&phba->sli4_hba.sgl_list_lock);

lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
kfree(ctx_buf->context);
}
list_for_each_entry_safe(ctx_buf, next_ctx_buf,
&phba->sli4_hba.lpfc_nvmet_ctx_put_list, list) {
spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
list_del_init(&ctx_buf->list);
spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
__lpfc_clear_active_sglq(phba,
ctx_buf->sglq->sli4_lxritag);
ctx_buf->sglq->state = SGL_FREED;
ctx_buf->sglq->ndlp = NULL;

spin_lock(&phba->sli4_hba.sgl_list_lock);
list_add_tail(&ctx_buf->sglq->list,
&phba->sli4_hba.lpfc_nvmet_sgl_list);
spin_unlock(&phba->sli4_hba.sgl_list_lock);

lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
kfree(ctx_buf->context);
}
spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, flags);
}

int
static int
lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
{
struct lpfc_nvmet_ctxbuf *ctx_buf;
@@ -891,6 +940,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
return -ENOMEM;
}
ctx_buf->context->ctxbuf = ctx_buf;
ctx_buf->context->state = LPFC_NVMET_STE_FREE;

ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
if (!ctx_buf->iocbq) {
@@ -926,12 +976,12 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
"6407 Ran out of NVMET XRIs\n");
return -ENOMEM;
}
spin_lock(&phba->sli4_hba.nvmet_io_lock);
spin_lock(&phba->sli4_hba.nvmet_ctx_get_lock);
list_add_tail(&ctx_buf->list,
&phba->sli4_hba.lpfc_nvmet_ctx_list);
spin_unlock(&phba->sli4_hba.nvmet_io_lock);
&phba->sli4_hba.lpfc_nvmet_ctx_get_list);
spin_unlock(&phba->sli4_hba.nvmet_ctx_get_lock);
}
phba->sli4_hba.nvmet_ctx_cnt = phba->sli4_hba.nvmet_xri_cnt;
phba->sli4_hba.nvmet_ctx_get_cnt = phba->sli4_hba.nvmet_xri_cnt;
return 0;
}

@@ -1103,7 +1153,7 @@ lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
}

lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
"6318 XB aborted %x flg x%x (%x)\n",
"6318 XB aborted oxid %x flg x%x (%x)\n",
ctxp->oxid, ctxp->flag, released);
if (released)
lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
@@ -1253,7 +1303,8 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
ctxp->oxid = oxid;
ctxp->sid = sid;
ctxp->wqeq = NULL;
ctxp->state = LPFC_NVMET_STE_RCV;
ctxp->state = LPFC_NVMET_STE_LS_RCV;
ctxp->entry_cnt = 1;
ctxp->rqb_buffer = (void *)nvmebuf;

lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n",
@@ -1268,8 +1319,8 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
payload, size);

lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
"6037 %s: ctx %p sz %d rc %d: %08x %08x %08x "
"%08x %08x %08x\n", __func__, ctxp, size, rc,
"6037 NVMET Unsol rcv: sz %d rc %d: %08x %08x %08x "
"%08x %08x %08x\n", size, rc,
*payload, *(payload+1), *(payload+2),
*(payload+3), *(payload+4), *(payload+5));

@@ -1337,13 +1388,31 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
goto dropit;
}

spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag);
if (phba->sli4_hba.nvmet_ctx_cnt) {
list_remove_head(&phba->sli4_hba.lpfc_nvmet_ctx_list,
spin_lock_irqsave(&phba->sli4_hba.nvmet_ctx_get_lock, iflag);
if (phba->sli4_hba.nvmet_ctx_get_cnt) {
list_remove_head(&phba->sli4_hba.lpfc_nvmet_ctx_get_list,
ctx_buf, struct lpfc_nvmet_ctxbuf, list);
phba->sli4_hba.nvmet_ctx_cnt--;
phba->sli4_hba.nvmet_ctx_get_cnt--;
} else {
spin_lock(&phba->sli4_hba.nvmet_ctx_put_lock);
if (phba->sli4_hba.nvmet_ctx_put_cnt) {
list_splice(&phba->sli4_hba.lpfc_nvmet_ctx_put_list,
&phba->sli4_hba.lpfc_nvmet_ctx_get_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_ctx_put_list);
phba->sli4_hba.nvmet_ctx_get_cnt =
phba->sli4_hba.nvmet_ctx_put_cnt;
phba->sli4_hba.nvmet_ctx_put_cnt = 0;
spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);

list_remove_head(
&phba->sli4_hba.lpfc_nvmet_ctx_get_list,
ctx_buf, struct lpfc_nvmet_ctxbuf, list);
phba->sli4_hba.nvmet_ctx_get_cnt--;
} else {
spin_unlock(&phba->sli4_hba.nvmet_ctx_put_lock);
}
spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag);
}
spin_unlock_irqrestore(&phba->sli4_hba.nvmet_ctx_get_lock, iflag);

fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
oxid = be16_to_cpu(fc_hdr->fh_ox_id);
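The hunk above replaces the single nvmet context list and its lock with separate get and put lists, each under its own spinlock: completions park freed contexts on the put list, the hot receive path normally touches only the get lock, and only when the get list runs dry is the put lock taken once to splice the whole put list across. A rough self-contained sketch of that refill step follows (hypothetical names; the lock nesting mirrors the driver's get-then-put order):

#include <linux/list.h>
#include <linux/spinlock.h>

struct ctx_buf { struct list_head list; };

static LIST_HEAD(get_list);
static LIST_HEAD(put_list);
static DEFINE_SPINLOCK(get_lock);
static DEFINE_SPINLOCK(put_lock);

static struct ctx_buf *ctx_get(void)
{
	struct ctx_buf *buf;
	unsigned long flags;

	spin_lock_irqsave(&get_lock, flags);
	if (list_empty(&get_list)) {
		/* Slow path: refill by bulk-moving the put list over. */
		spin_lock(&put_lock);		/* nested; IRQs already off */
		list_splice_init(&put_list, &get_list);
		spin_unlock(&put_lock);
	}
	buf = list_first_entry_or_null(&get_list, struct ctx_buf, list);
	if (buf)
		list_del_init(&buf->list);
	spin_unlock_irqrestore(&get_lock, flags);
	return buf;
}
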
@@ -1383,7 +1452,11 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
sid = sli4_sid_from_fc_hdr(fc_hdr);

ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
memset(ctxp, 0, sizeof(ctxp->ctx));
if (ctxp->state != LPFC_NVMET_STE_FREE) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6414 NVMET Context corrupt %d %d oxid x%x\n",
ctxp->state, ctxp->entry_cnt, ctxp->oxid);
}
ctxp->wqeq = NULL;
ctxp->txrdy = NULL;
ctxp->offset = 0;
@@ -1547,9 +1620,9 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,

if (!lpfc_is_link_up(phba)) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
"6104 lpfc_nvmet_prep_ls_wqe: link err: "
"NPORT x%x oxid:x%x\n",
ctxp->sid, ctxp->oxid);
"6104 NVMET prep LS wqe: link err: "
"NPORT x%x oxid:x%x ste %d\n",
ctxp->sid, ctxp->oxid, ctxp->state);
return NULL;
}

@@ -1557,9 +1630,9 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
nvmewqe = lpfc_sli_get_iocbq(phba);
if (nvmewqe == NULL) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
"6105 lpfc_nvmet_prep_ls_wqe: No WQE: "
"NPORT x%x oxid:x%x\n",
ctxp->sid, ctxp->oxid);
"6105 NVMET prep LS wqe: No WQE: "
"NPORT x%x oxid x%x ste %d\n",
ctxp->sid, ctxp->oxid, ctxp->state);
return NULL;
}

@@ -1568,9 +1641,9 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
"6106 lpfc_nvmet_prep_ls_wqe: No ndlp: "
"NPORT x%x oxid:x%x\n",
ctxp->sid, ctxp->oxid);
"6106 NVMET prep LS wqe: No ndlp: "
"NPORT x%x oxid x%x ste %d\n",
ctxp->sid, ctxp->oxid, ctxp->state);
goto nvme_wqe_free_wqeq_exit;
}
ctxp->wqeq = nvmewqe;
@@ -1642,9 +1715,9 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;

/* Xmit NVME response to remote NPORT <did> */
/* Xmit NVMET response to remote NPORT <did> */
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
"6039 Xmit NVME LS response to remote "
"6039 Xmit NVMET LS response to remote "
"NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
rspsize);
@@ -1676,9 +1749,9 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,

if (!lpfc_is_link_up(phba)) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6107 lpfc_nvmet_prep_fcp_wqe: link err:"
"NPORT x%x oxid:x%x\n", ctxp->sid,
ctxp->oxid);
"6107 NVMET prep FCP wqe: link err:"
"NPORT x%x oxid x%x ste %d\n",
ctxp->sid, ctxp->oxid, ctxp->state);
return NULL;
}

@@ -1687,17 +1760,18 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6108 lpfc_nvmet_prep_fcp_wqe: no ndlp: "
"NPORT x%x oxid:x%x\n",
ctxp->sid, ctxp->oxid);
"6108 NVMET prep FCP wqe: no ndlp: "
"NPORT x%x oxid x%x ste %d\n",
ctxp->sid, ctxp->oxid, ctxp->state);
return NULL;
}

if (rsp->sg_cnt > phba->cfg_nvme_seg_cnt) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6109 lpfc_nvmet_prep_fcp_wqe: seg cnt err: "
"NPORT x%x oxid:x%x cnt %d\n",
ctxp->sid, ctxp->oxid, phba->cfg_nvme_seg_cnt);
"6109 NVMET prep FCP wqe: seg cnt err: "
"NPORT x%x oxid x%x ste %d cnt %d\n",
ctxp->sid, ctxp->oxid, ctxp->state,
phba->cfg_nvme_seg_cnt);
return NULL;
}

@@ -1708,9 +1782,9 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
nvmewqe = ctxp->ctxbuf->iocbq;
if (nvmewqe == NULL) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6110 lpfc_nvmet_prep_fcp_wqe: No "
"WQE: NPORT x%x oxid:x%x\n",
ctxp->sid, ctxp->oxid);
"6110 NVMET prep FCP wqe: No "
"WQE: NPORT x%x oxid x%x ste %d\n",
ctxp->sid, ctxp->oxid, ctxp->state);
return NULL;
}
ctxp->wqeq = nvmewqe;
@@ -1722,13 +1796,12 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
/* Sanity check */
if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
(ctxp->entry_cnt == 1)) ||
((ctxp->state == LPFC_NVMET_STE_DATA) &&
(ctxp->entry_cnt > 1))) {
(ctxp->state == LPFC_NVMET_STE_DATA)) {
wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6111 Wrong state %s: %d cnt %d\n",
__func__, ctxp->state, ctxp->entry_cnt);
"6111 Wrong state NVMET FCP: %d cnt %d\n",
ctxp->state, ctxp->entry_cnt);
return NULL;
}

@@ -1832,7 +1905,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
}
ctxp->state = LPFC_NVMET_STE_DATA;
break;

case NVMET_FCOP_WRITEDATA:
@@ -1923,7 +1995,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
sgl->word2 = cpu_to_le32(sgl->word2);
sgl->sge_len = 0;
sgl++;
ctxp->state = LPFC_NVMET_STE_DATA;
atomic_inc(&tgtp->xmt_fcp_write);
break;

@@ -1980,7 +2051,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com,
FCP_COMMAND_TRSP);
bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
ctxp->state = LPFC_NVMET_STE_RSP;

if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
/* Good response - all zero's on wire */
@@ -2029,6 +2099,8 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
sgl++;
ctxp->offset += cnt;
}
ctxp->state = LPFC_NVMET_STE_DATA;
ctxp->entry_cnt++;
return nvmewqe;
}

@@ -2124,10 +2196,6 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
status = bf_get(lpfc_wcqe_c_status, wcqe);
result = wcqe->parameter;

tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
if (ctxp->flag & LPFC_NVMET_ABORT_OP)
atomic_inc(&tgtp->xmt_fcp_abort_cmpl);

if (!ctxp) {
/* if context is clear, related io already complete */
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
@@ -2137,6 +2205,10 @@ lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
return;
}

tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
if (ctxp->flag & LPFC_NVMET_ABORT_OP)
atomic_inc(&tgtp->xmt_fcp_abort_cmpl);

/* Sanity check */
if (ctxp->state != LPFC_NVMET_STE_ABORT) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
@@ -2210,13 +2282,28 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
ctxp, wcqe->word0, wcqe->total_data_placed,
result, wcqe->word3);

if (ctxp) {
if (!ctxp) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6415 NVMET LS Abort No ctx: WCQE: "
"%08x %08x %08x %08x\n",
wcqe->word0, wcqe->total_data_placed,
result, wcqe->word3);

lpfc_sli_release_iocbq(phba, cmdwqe);
return;
}

if (ctxp->state != LPFC_NVMET_STE_LS_ABORT) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6416 NVMET LS abort cmpl state mismatch: "
"oxid x%x: %d %d\n",
ctxp->oxid, ctxp->state, ctxp->entry_cnt);
}

cmdwqe->context2 = NULL;
cmdwqe->context3 = NULL;
lpfc_sli_release_iocbq(phba, cmdwqe);
kfree(ctxp);
} else
lpfc_sli_release_iocbq(phba, cmdwqe);
}

static int
@@ -2240,7 +2327,7 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6134 Drop ABTS - wrong NDLP state x%x.\n",
(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

@@ -2250,7 +2337,6 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,

abts_wqeq = ctxp->wqeq;
wqe_abts = &abts_wqeq->wqe;
ctxp->state = LPFC_NVMET_STE_ABORT;

/*
* Since we zero the whole WQE, we need to ensure we set the WQE fields
@@ -2338,7 +2424,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
(ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6160 Drop ABORT - wrong NDLP state x%x.\n",
(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

@@ -2351,7 +2437,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
if (!ctxp->abort_wqeq) {
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6161 ABORT failed: No wqeqs: "
"xri: x%x\n", ctxp->oxid);
/* No failure to an ABTS request. */
@@ -2437,6 +2523,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
abts_wqeq->iocb_cmpl = 0;
abts_wqeq->iocb_flag |= LPFC_IO_NVME;
abts_wqeq->context2 = ctxp;
abts_wqeq->vport = phba->pport;
rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
spin_unlock_irqrestore(&phba->hbalock, flags);
if (rc == WQE_SUCCESS) {
@@ -2471,6 +2558,15 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
ctxp->wqeq->hba_wqidx = 0;
}

if (ctxp->state == LPFC_NVMET_STE_FREE) {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6417 NVMET ABORT ctx freed %d %d oxid x%x\n",
ctxp->state, ctxp->entry_cnt, ctxp->oxid);
rc = WQE_BUSY;
goto aerr;
}
ctxp->state = LPFC_NVMET_STE_ABORT;
ctxp->entry_cnt++;
rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
if (rc == 0)
goto aerr;
@@ -2487,10 +2583,9 @@ lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
}

aerr:
atomic_inc(&tgtp->xmt_abort_rsp_error);
ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
atomic_inc(&tgtp->xmt_abort_rsp_error);
lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
ctxp->oxid, rc);
return 1;
@@ -2507,12 +2602,24 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
unsigned long flags;
int rc;

if ((ctxp->state == LPFC_NVMET_STE_LS_RCV && ctxp->entry_cnt == 1) ||
(ctxp->state == LPFC_NVMET_STE_LS_RSP && ctxp->entry_cnt == 2)) {
ctxp->state = LPFC_NVMET_STE_LS_ABORT;
ctxp->entry_cnt++;
} else {
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
"6418 NVMET LS abort state mismatch "
"IO x%x: %d %d\n",
ctxp->oxid, ctxp->state, ctxp->entry_cnt);
ctxp->state = LPFC_NVMET_STE_LS_ABORT;
}

tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
if (!ctxp->wqeq) {
/* Issue ABTS for this WQE based on iotag */
ctxp->wqeq = lpfc_sli_get_iocbq(phba);
if (!ctxp->wqeq) {
lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6068 Abort failed: No wqeqs: "
"xri: x%x\n", xri);
/* No failure to an ABTS request. */
@@ -2523,7 +2630,10 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
abts_wqeq = ctxp->wqeq;
wqe_abts = &abts_wqeq->wqe;

lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
if (lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri) == 0) {
rc = WQE_BUSY;
goto out;
}

spin_lock_irqsave(&phba->hbalock, flags);
abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
@@ -2535,13 +2645,13 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
atomic_inc(&tgtp->xmt_abort_unsol);
return 0;
}

out:
atomic_inc(&tgtp->xmt_abort_rsp_error);
abts_wqeq->context2 = NULL;
abts_wqeq->context3 = NULL;
lpfc_sli_release_iocbq(phba, abts_wqeq);
kfree(ctxp);
lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
"6056 Failed to Issue ABTS. Status x%x\n", rc);
return 0;
}

@@ -93,12 +93,14 @@ struct lpfc_nvmet_rcv_ctx {
uint16_t cpu;
uint16_t state;
/* States */
#define LPFC_NVMET_STE_FREE 0
#define LPFC_NVMET_STE_RCV 1
#define LPFC_NVMET_STE_DATA 2
#define LPFC_NVMET_STE_ABORT 3
#define LPFC_NVMET_STE_RSP 4
#define LPFC_NVMET_STE_DONE 5
#define LPFC_NVMET_STE_LS_RCV 1
#define LPFC_NVMET_STE_LS_ABORT 2
#define LPFC_NVMET_STE_LS_RSP 3
#define LPFC_NVMET_STE_RCV 4
#define LPFC_NVMET_STE_DATA 5
#define LPFC_NVMET_STE_ABORT 6
#define LPFC_NVMET_STE_DONE 7
#define LPFC_NVMET_STE_FREE 0xff
uint16_t flag;
#define LPFC_NVMET_IO_INP 0x1 /* IO is in progress on exchange */
#define LPFC_NVMET_ABORT_OP 0x2 /* Abort WQE issued on exchange */

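The reworked defines above give the LS and FCP paths their own linear state sequences and move STE_FREE to 0xff, so a zero-filled context can no longer masquerade as a legitimately freed one. Paired with entry_cnt, every transition in the hunks above is validated before it is taken. A hedged sketch of that validation idiom, with hypothetical names mirroring the ctxp->state / ctxp->entry_cnt checks:

#include <linux/printk.h>
#include <linux/types.h>

/* Hypothetical helper reproducing the state/entry_cnt check pattern. */
static bool ctx_transition_ok(u16 state, u16 entry_cnt,
			      u16 want_state, u16 want_cnt)
{
	return state == want_state && entry_cnt == want_cnt;
}

/* Caller side: log the mismatch but press on, as the driver does, e.g.
 *	if (!ctx_transition_ok(ctxp->state, ctxp->entry_cnt,
 *			       LPFC_NVMET_STE_LS_RCV, 1))
 *		pr_err("LS rsp state mismatch: %d %d\n",
 *		       ctxp->state, ctxp->entry_cnt);
 */
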
@@ -3931,7 +3931,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
struct Scsi_Host *shost;
uint32_t logit = LOG_FCP;

phba->fc4ScsiIoCmpls++;
atomic_inc(&phba->fc4ScsiIoCmpls);

/* Sanity check on return of outstanding command */
cmd = lpfc_cmd->pCmd;
@@ -4250,19 +4250,19 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
vport->cfg_first_burst_size;
}
fcp_cmnd->fcpCntl3 = WRITE_DATA;
phba->fc4ScsiOutputRequests++;
atomic_inc(&phba->fc4ScsiOutputRequests);
} else {
iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
iocb_cmd->ulpPU = PARM_READ_CHECK;
fcp_cmnd->fcpCntl3 = READ_DATA;
phba->fc4ScsiInputRequests++;
atomic_inc(&phba->fc4ScsiInputRequests);
}
} else {
iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
iocb_cmd->un.fcpi.fcpi_parm = 0;
iocb_cmd->ulpPU = 0;
fcp_cmnd->fcpCntl3 = 0;
phba->fc4ScsiControlRequests++;
atomic_inc(&phba->fc4ScsiControlRequests);
}
if (phba->sli_rev == 3 &&
!(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
@@ -4640,7 +4640,16 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
(uint32_t)
(cmnd->request->timeout / 1000));

switch (lpfc_cmd->fcp_cmnd->fcpCntl3) {
case WRITE_DATA:
atomic_dec(&phba->fc4ScsiOutputRequests);
break;
case READ_DATA:
atomic_dec(&phba->fc4ScsiInputRequests);
break;
default:
atomic_dec(&phba->fc4ScsiControlRequests);
}
goto out_host_busy_free_buf;
}
if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {

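The lpfc_scsi.c hunks above convert the FC4 statistics counters from plain ++/-- to atomic operations: the submit and completion paths run concurrently on different CPUs, so an unlocked read-modify-write can lose increments. A minimal illustration of the difference (hypothetical field; assumes it is declared atomic_t in the fixed version):

#include <linux/atomic.h>

struct demo_stats {
	atomic_t io_cmpls;	/* was a plain counter before the fix */
};

static void demo_complete(struct demo_stats *s)
{
	/* Racy form (two CPUs can load the same old value):
	 *	s->io_cmpls++;
	 * Safe form, one indivisible read-modify-write:
	 */
	atomic_inc(&s->io_cmpls);
}
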
@@ -968,6 +968,7 @@ __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
list_remove_head(lpfc_els_sgl_list, sglq,
struct lpfc_sglq, list);
if (sglq == start_sglq) {
list_add_tail(&sglq->list, lpfc_els_sgl_list);
sglq = NULL;
break;
} else
@@ -4302,7 +4303,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)

/* Perform FCoE PCI function reset before freeing queue memory */
rc = lpfc_pci_function_reset(phba);
lpfc_sli4_queue_destroy(phba);

/* Restore PCI cmd register */
pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
@@ -4427,6 +4427,7 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
pci_disable_pcie_error_reporting(phba->pcidev);

lpfc_hba_down_post(phba);
lpfc_sli4_queue_destroy(phba);

return rc;
}
@@ -6926,18 +6927,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
cnt = phba->cfg_iocb_cnt * 1024;
/* We need 1 iocbq for every SGL, for IO processing */
cnt += phba->sli4_hba.nvmet_xri_cnt;
/* Initialize and populate the iocb list per host */
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"2821 initialize iocb list %d total %d\n",
phba->cfg_iocb_cnt, cnt);
rc = lpfc_init_iocb_list(phba, cnt);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"1413 Failed to init iocb list.\n");
goto out_destroy_queue;
}

lpfc_nvmet_create_targetport(phba);
} else {
/* update host scsi xri-sgl sizes and mappings */
rc = lpfc_sli4_scsi_sgl_update(phba);
@@ -6958,18 +6947,24 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
}

cnt = phba->cfg_iocb_cnt * 1024;
}

if (!phba->sli.iocbq_lookup) {
/* Initialize and populate the iocb list per host */
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"2820 initialize iocb list %d total %d\n",
"2821 initialize iocb list %d total %d\n",
phba->cfg_iocb_cnt, cnt);
rc = lpfc_init_iocb_list(phba, cnt);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"6301 Failed to init iocb list.\n");
"1413 Failed to init iocb list.\n");
goto out_destroy_queue;
}
}

if (phba->nvmet_support)
lpfc_nvmet_create_targetport(phba);

if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
/* Post initial buffers to all RQs created */
for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
@@ -7512,7 +7507,8 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
"(%d):0308 Mbox cmd issue - BUSY Data: "
"x%x x%x x%x x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0xffffff,
mbx->mbxCommand, phba->pport->port_state,
mbx->mbxCommand,
phba->pport ? phba->pport->port_state : 0xff,
psli->sli_flag, flag);

psli->slistat.mbox_busy++;
@@ -7564,7 +7560,8 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
"x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0,
mbx->mbxCommand, phba->pport->port_state,
mbx->mbxCommand,
phba->pport ? phba->pport->port_state : 0xff,
psli->sli_flag, flag);

if (mbx->mbxCommand != MBX_HEARTBEAT) {
@@ -10950,6 +10947,7 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *iocbq;
struct lpfc_iocbq *abtsiocb;
struct lpfc_sli_ring *pring_s4;
IOCB_t *cmd = NULL;
int errcnt = 0, ret_val = 0;
int i;
@@ -11003,6 +11001,13 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,

/* Setup callback routine and issue the command. */
abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
if (phba->sli_rev == LPFC_SLI_REV4) {
pring_s4 = lpfc_sli4_calc_ring(phba, iocbq);
if (!pring_s4)
continue;
ret_val = lpfc_sli_issue_iocb(phba, pring_s4->ringno,
abtsiocb, 0);
} else
ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
abtsiocb, 0);
if (ret_val == IOCB_ERROR) {
@@ -13256,6 +13261,7 @@ lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"6126 Receive Frame Truncated!!\n");
/* Drop thru */
case FC_STATUS_RQ_SUCCESS:
lpfc_sli4_rq_release(hrq, drq);
spin_lock_irqsave(&phba->hbalock, iflags);
@@ -13466,6 +13472,7 @@ lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
/* Track the max number of CQEs processed in 1 EQ */
if (ecount > cq->CQ_max_cqe)
cq->CQ_max_cqe = ecount;
cq->assoc_qp->EQ_cqe_cnt += ecount;

/* Catch the no cq entry condition */
if (unlikely(ecount == 0))
@@ -13547,6 +13554,9 @@ lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
return;
}

/* Save EQ associated with this CQ */
cq->assoc_qp = phba->sli4_hba.fof_eq;

/* Process all the entries to the OAS CQ */
while ((cqe = lpfc_sli4_cq_get(cq))) {
workposted |= lpfc_sli4_fp_handle_cqe(phba, cq, cqe);
@@ -13557,6 +13567,7 @@ lpfc_sli4_fof_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
/* Track the max number of CQEs processed in 1 EQ */
if (ecount > cq->CQ_max_cqe)
cq->CQ_max_cqe = ecount;
cq->assoc_qp->EQ_cqe_cnt += ecount;

/* Catch the no cq entry condition */
if (unlikely(ecount == 0))
@@ -13617,7 +13628,6 @@ lpfc_sli4_fof_intr_handler(int irq, void *dev_id)

/* Check device state for handling interrupt */
if (unlikely(lpfc_intr_state_check(phba))) {
eq->EQ_badstate++;
/* Check again for link_state with lock held */
spin_lock_irqsave(&phba->hbalock, iflag);
if (phba->link_state < LPFC_LINK_DOWN)
@@ -13729,7 +13739,6 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)

/* Check device state for handling interrupt */
if (unlikely(lpfc_intr_state_check(phba))) {
fpeq->EQ_badstate++;
/* Check again for link_state with lock held */
spin_lock_irqsave(&phba->hbalock, iflag);
if (phba->link_state < LPFC_LINK_DOWN)
@@ -13988,14 +13997,15 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
* fails this function will return -ENXIO.
**/
int
lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq)
lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
uint32_t numq, uint32_t imax)
{
struct lpfc_mbx_modify_eq_delay *eq_delay;
LPFC_MBOXQ_t *mbox;
struct lpfc_queue *eq;
int cnt, rc, length, status = 0;
uint32_t shdr_status, shdr_add_status;
uint32_t result;
uint32_t result, val;
int qidx;
union lpfc_sli4_cfg_shdr *shdr;
uint16_t dmult;
@@ -14014,22 +14024,45 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq)
eq_delay = &mbox->u.mqe.un.eq_delay;

/* Calculate delay multiplier from maximum interrupts per second */
result = phba->cfg_fcp_imax / phba->io_channel_irqs;
result = imax / phba->io_channel_irqs;
if (result > LPFC_DMULT_CONST || result == 0)
dmult = 0;
else
dmult = LPFC_DMULT_CONST/result - 1;
if (dmult > LPFC_DMULT_MAX)
dmult = LPFC_DMULT_MAX;

cnt = 0;
for (qidx = startq; qidx < phba->io_channel_irqs; qidx++) {
eq = phba->sli4_hba.hba_eq[qidx];
if (!eq)
continue;
eq->q_mode = imax;
eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
eq_delay->u.request.eq[cnt].phase = 0;
eq_delay->u.request.eq[cnt].delay_multi = dmult;
cnt++;
if (cnt >= LPFC_MAX_EQ_DELAY_EQID_CNT)

/* q_mode is only used for auto_imax */
if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
/* Use EQ Delay Register method for q_mode */

/* Convert for EQ Delay register */
val = phba->cfg_fcp_imax;
if (val) {
/* First, interrupts per sec per EQ */
val = phba->cfg_fcp_imax /
phba->io_channel_irqs;

/* us delay between each interrupt */
val = LPFC_SEC_TO_USEC / val;
}
eq->q_mode = val;
} else {
eq->q_mode = imax;
}

if (cnt >= numq)
break;
}
eq_delay->u.request.num_eq = cnt;
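The arithmetic above turns a target interrupt rate into either a mailbox delay multiplier or, on EQ-Delay-Register hardware, a microsecond gap between interrupts: the configured interrupts/sec is first divided across the IRQ vectors, then inverted against one second. As a worked example, 150000 interrupts/sec over 6 vectors gives 25000 per EQ, i.e. 1000000 / 25000 = 40 us between interrupts. A self-contained sketch of just that conversion (constant name assumed):

#define DEMO_SEC_TO_USEC 1000000

/* Convert a global interrupt-rate target into a per-EQ delay in us. */
static unsigned int demo_eq_delay_us(unsigned int imax, unsigned int nr_irqs)
{
	unsigned int per_eq;

	if (!imax || !nr_irqs)
		return 0;			/* 0 = no coalescing delay */
	per_eq = imax / nr_irqs;		/* interrupts/sec per EQ */
	if (!per_eq)
		return 0;
	return DEMO_SEC_TO_USEC / per_eq;	/* us between interrupts */
}
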
@@ -16126,9 +16159,6 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba,
return rc;
}

static char *lpfc_rctl_names[] = FC_RCTL_NAMES_INIT;
static char *lpfc_type_names[] = FC_TYPE_NAMES_INIT;

/**
* lpfc_fc_frame_check - Check that this frame is a valid frame to handle
* @phba: pointer to lpfc_hba struct that the frame was received on
@@ -16203,22 +16233,18 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
}

lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
"2538 Received frame rctl:%s (x%x), type:%s (x%x), "
"2538 Received frame rctl:x%x, type:x%x, "
"frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
(fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS) ? "MDS Diags" :
lpfc_rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl,
(fc_hdr->fh_type == FC_TYPE_VENDOR_UNIQUE) ?
"Vendor Unique" : lpfc_type_names[fc_hdr->fh_type],
fc_hdr->fh_type, be32_to_cpu(header[0]),
be32_to_cpu(header[1]), be32_to_cpu(header[2]),
be32_to_cpu(header[3]), be32_to_cpu(header[4]),
be32_to_cpu(header[5]), be32_to_cpu(header[6]));
fc_hdr->fh_r_ctl, fc_hdr->fh_type,
be32_to_cpu(header[0]), be32_to_cpu(header[1]),
be32_to_cpu(header[2]), be32_to_cpu(header[3]),
be32_to_cpu(header[4]), be32_to_cpu(header[5]),
be32_to_cpu(header[6]));
return 0;
drop:
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
"2539 Dropped frame rctl:%s type:%s\n",
lpfc_rctl_names[fc_hdr->fh_r_ctl],
lpfc_type_names[fc_hdr->fh_type]);
"2539 Dropped frame rctl:x%x type:x%x\n",
fc_hdr->fh_r_ctl, fc_hdr->fh_type);
return 1;
}

|
@ -321,6 +321,7 @@ struct lpfc_sli {
|
||||
#define LPFC_MENLO_MAINT 0x1000 /* need for menl fw download */
|
||||
#define LPFC_SLI_ASYNC_MBX_BLK 0x2000 /* Async mailbox is blocked */
|
||||
#define LPFC_SLI_SUPPRESS_RSP 0x4000 /* Suppress RSP feature is supported */
|
||||
#define LPFC_SLI_USE_EQDR 0x8000 /* EQ Delay Register is supported */
|
||||
|
||||
struct lpfc_sli_ring *sli3_ring;
|
||||
|
||||
|
@@ -168,7 +168,7 @@ struct lpfc_queue {
struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */
struct lpfc_rqb *rqbp; /* ptr to RQ buffers */

uint16_t sgl_list_cnt;
uint32_t q_mode;
uint16_t db_format;
#define LPFC_DB_RING_FORMAT 0x01
#define LPFC_DB_LIST_FORMAT 0x02
@@ -181,7 +181,7 @@ struct lpfc_queue {
/* defines for EQ stats */
#define EQ_max_eqe q_cnt_1
#define EQ_no_entry q_cnt_2
#define EQ_badstate q_cnt_3
#define EQ_cqe_cnt q_cnt_3
#define EQ_processed q_cnt_4

/* defines for CQ stats */
@@ -407,8 +407,10 @@ struct lpfc_max_cfg_param {

struct lpfc_hba;
/* SLI4 HBA multi-fcp queue handler struct */
#define LPFC_SLI4_HANDLER_NAME_SZ 16
struct lpfc_hba_eq_hdl {
uint32_t idx;
char handler_name[LPFC_SLI4_HANDLER_NAME_SZ];
struct lpfc_hba *phba;
atomic_t hba_eq_in_use;
struct cpumask *cpumask;
@@ -480,7 +482,6 @@ struct lpfc_sli4_lnk_info {

#define LPFC_SLI4_HANDLER_CNT (LPFC_HBA_IO_CHAN_MAX+ \
LPFC_FOF_IO_CHAN_NUM)
#define LPFC_SLI4_HANDLER_NAME_SZ 16

/* Used for IRQ vector to CPU mapping */
struct lpfc_vector_map_info {
@@ -522,6 +523,7 @@ struct lpfc_sli4_hba {
#define SLIPORT_ERR2_REG_FAILURE_CQ 0x4
#define SLIPORT_ERR2_REG_FAILURE_BUS 0x5
#define SLIPORT_ERR2_REG_FAILURE_RQ 0x6
void __iomem *EQDregaddr;
} if_type2;
} u;

@@ -548,7 +550,6 @@ struct lpfc_sli4_hba {
uint32_t ue_to_rp;
struct lpfc_register sli_intf;
struct lpfc_pc_sli4_params pc_sli4_params;
uint8_t handler_name[LPFC_SLI4_HANDLER_CNT][LPFC_SLI4_HANDLER_NAME_SZ];
struct lpfc_hba_eq_hdl *hba_eq_hdl; /* HBA per-WQ handle */

/* Pointers to the constructed SLI4 queues */
@@ -620,7 +621,8 @@ struct lpfc_sli4_hba {
uint16_t scsi_xri_start;
uint16_t els_xri_cnt;
uint16_t nvmet_xri_cnt;
uint16_t nvmet_ctx_cnt;
uint16_t nvmet_ctx_get_cnt;
uint16_t nvmet_ctx_put_cnt;
uint16_t nvmet_io_wait_cnt;
uint16_t nvmet_io_wait_total;
struct list_head lpfc_els_sgl_list;
@@ -629,7 +631,8 @@ struct lpfc_sli4_hba {
struct list_head lpfc_abts_nvmet_ctx_list;
struct list_head lpfc_abts_scsi_buf_list;
struct list_head lpfc_abts_nvme_buf_list;
struct list_head lpfc_nvmet_ctx_list;
struct list_head lpfc_nvmet_ctx_get_list;
struct list_head lpfc_nvmet_ctx_put_list;
struct list_head lpfc_nvmet_io_wait_list;
struct lpfc_sglq **lpfc_sglq_active_list;
struct list_head lpfc_rpi_hdr_list;
@@ -661,7 +664,8 @@ struct lpfc_sli4_hba {
spinlock_t abts_nvme_buf_list_lock; /* list of aborted SCSI IOs */
spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
spinlock_t sgl_list_lock; /* list of aborted els IOs */
spinlock_t nvmet_io_lock;
spinlock_t nvmet_ctx_get_lock; /* list of avail XRI contexts */
spinlock_t nvmet_ctx_put_lock; /* list of avail XRI contexts */
spinlock_t nvmet_io_wait_lock; /* IOs waiting for ctx resources */
uint32_t physical_port;

@@ -755,7 +759,8 @@ struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
uint32_t);
void lpfc_sli4_queue_free(struct lpfc_queue *);
int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
int lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq);
int lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
uint32_t numq, uint32_t imax);
int lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
struct lpfc_queue *, uint32_t, uint32_t);
int lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,

@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/

#define LPFC_DRIVER_VERSION "11.2.0.14"
#define LPFC_DRIVER_VERSION "11.4.0.1"
#define LPFC_DRIVER_NAME "lpfc"

/* Used for SLI 2/3 */

@@ -574,7 +574,7 @@ mraid_mm_attach_buf(mraid_mmadp_t *adp, uioc_t *kioc, int xferlen)

kioc->pool_index = right_pool;
kioc->free_buf = 1;
kioc->buf_vaddr = pci_pool_alloc(pool->handle, GFP_KERNEL,
kioc->buf_vaddr = pci_pool_alloc(pool->handle, GFP_ATOMIC,
&kioc->buf_paddr);
spin_unlock_irqrestore(&pool->lock, flags);

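The megaraid hunk above swaps GFP_KERNEL for GFP_ATOMIC because the allocation happens while pool->lock is held with interrupts disabled; a GFP_KERNEL allocation may sleep, which is illegal in that context. A minimal sketch of the rule, using the same pci_pool_alloc call the driver uses (the wrapper struct is hypothetical):

#include <linux/pci.h>
#include <linux/spinlock.h>

/* Hypothetical pool wrapper illustrating the locking rule. */
struct demo_pool {
	spinlock_t lock;
	struct pci_pool *handle;
};

static void *demo_alloc(struct demo_pool *p, dma_addr_t *paddr)
{
	unsigned long flags;
	void *vaddr;

	spin_lock_irqsave(&p->lock, flags);
	/* A spinlock is held and IRQs are off: the allocation must
	 * not sleep, so GFP_ATOMIC is required here, not GFP_KERNEL. */
	vaddr = pci_pool_alloc(p->handle, GFP_ATOMIC, paddr);
	spin_unlock_irqrestore(&p->lock, flags);
	return vaddr;
}
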
@@ -2859,7 +2859,7 @@ _scsih_internal_device_block(struct scsi_device *sdev,
sas_device_priv_data->sas_target->handle);
sas_device_priv_data->block = 1;

r = scsi_internal_device_block(sdev, false);
r = scsi_internal_device_block_nowait(sdev);
if (r == -EINVAL)
sdev_printk(KERN_WARNING, sdev,
"device_block failed with return(%d) for handle(0x%04x)\n",
@@ -2883,7 +2883,7 @@ _scsih_internal_device_unblock(struct scsi_device *sdev,
sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
"handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
sas_device_priv_data->block = 0;
r = scsi_internal_device_unblock(sdev, SDEV_RUNNING);
r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
if (r == -EINVAL) {
/* The device has been set to SDEV_RUNNING by SD layer during
* device addition but the request queue is still stopped by
@@ -2895,14 +2895,14 @@ _scsih_internal_device_unblock(struct scsi_device *sdev,
"performing a block followed by an unblock\n",
r, sas_device_priv_data->sas_target->handle);
sas_device_priv_data->block = 1;
r = scsi_internal_device_block(sdev, false);
r = scsi_internal_device_block_nowait(sdev);
if (r)
sdev_printk(KERN_WARNING, sdev, "retried device_block "
"failed with return(%d) for handle(0x%04x)\n",
r, sas_device_priv_data->sas_target->handle);

sas_device_priv_data->block = 0;
r = scsi_internal_device_unblock(sdev, SDEV_RUNNING);
r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
if (r)
sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
" failed with return(%d) for handle(0x%04x)\n",

@ -1,5 +1,5 @@
|
||||
/* QLogic FCoE Offload Driver
|
||||
* Copyright (c) 2016 Cavium Inc.
|
||||
* Copyright (c) 2016-2017 Cavium Inc.
|
||||
*
|
||||
* This software is available under the terms of the GNU General Public License
|
||||
* (GPL) Version 2, available from the file COPYING in the main directory of
|
||||
|
@ -1,5 +1,5 @@
|
||||
/* QLogic FCoE Offload Driver
|
||||
* Copyright (c) 2016 Cavium Inc.
|
||||
* Copyright (c) 2016-2017 Cavium Inc.
|
||||
*
|
||||
* This software is available under the terms of the GNU General Public License
|
||||
* (GPL) Version 2, available from the file COPYING in the main directory of
|
||||
|
@ -1,5 +1,5 @@
|
||||
/* QLogic FCoE Offload Driver
|
||||
* Copyright (c) 2016 Cavium Inc.
|
||||
* Copyright (c) 2016-2017 Cavium Inc.
|
||||
*
|
||||
* This software is available under the terms of the GNU General Public License
|
||||
* (GPL) Version 2, available from the file COPYING in the main directory of
|
||||
|
@ -1,5 +1,5 @@
|
||||
/* QLogic FCoE Offload Driver
|
||||
* Copyright (c) 2016 Cavium Inc.
|
||||
* Copyright (c) 2016-2017 Cavium Inc.
|
||||
*
|
||||
* This software is available under the terms of the GNU General Public License
|
||||
* (GPL) Version 2, available from the file COPYING in the main directory of
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* QLogic FCoE Offload Driver
|
||||
* Copyright (c) 2016 Cavium Inc.
|
||||
* Copyright (c) 2016-2017 Cavium Inc.
|
||||
*
|
||||
* This software is available under the terms of the GNU General Public License
|
||||
* (GPL) Version 2, available from the file COPYING in the main directory of
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* QLogic FCoE Offload Driver
|
||||
* Copyright (c) 2016 Cavium Inc.
|
||||
* Copyright (c) 2016-2017 Cavium Inc.
|
||||
*
|
||||
* This software is available under the terms of the GNU General Public License
|
||||
* (GPL) Version 2, available from the file COPYING in the main directory of
|
||||
@ -8,6 +8,25 @@
|
||||
*/
|
||||
#include "qedf.h"
|
||||
|
||||
inline bool qedf_is_vport(struct qedf_ctx *qedf)
|
||||
{
|
||||
return qedf->lport->vport != NULL;
|
||||
}
|
||||
|
||||
/* Get base qedf for physical port from vport */
|
||||
static struct qedf_ctx *qedf_get_base_qedf(struct qedf_ctx *qedf)
|
||||
{
|
||||
struct fc_lport *lport;
|
||||
struct fc_lport *base_lport;
|
||||
|
||||
if (!(qedf_is_vport(qedf)))
|
||||
return NULL;
|
||||
|
||||
lport = qedf->lport;
|
||||
base_lport = shost_priv(vport_to_shost(lport->vport));
|
||||
return lport_priv(base_lport);
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
qedf_fcoe_mac_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
@ -26,34 +45,34 @@ qedf_fcoe_mac_show(struct device *dev,
|
||||
return scnprintf(buf, PAGE_SIZE, "%pM\n", fcoe_mac);
|
||||
}
|
||||
|
||||
static ssize_t
|
||||
qedf_fka_period_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
struct fc_lport *lport = shost_priv(class_to_shost(dev));
|
||||
struct qedf_ctx *qedf = lport_priv(lport);
|
||||
int fka_period = -1;
|
||||
|
||||
if (qedf_is_vport(qedf))
|
||||
qedf = qedf_get_base_qedf(qedf);
|
||||
|
||||
if (qedf->ctlr.sel_fcf)
|
||||
fka_period = qedf->ctlr.sel_fcf->fka_period;
|
||||
|
||||
return scnprintf(buf, PAGE_SIZE, "%d\n", fka_period);
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(fcoe_mac, S_IRUGO, qedf_fcoe_mac_show, NULL);
|
||||
static DEVICE_ATTR(fka_period, S_IRUGO, qedf_fka_period_show, NULL);
|
||||
|
||||
struct device_attribute *qedf_host_attrs[] = {
|
||||
&dev_attr_fcoe_mac,
|
||||
&dev_attr_fka_period,
|
||||
NULL,
|
||||
};
|
||||
|
||||
extern const struct qed_fcoe_ops *qed_ops;
|
||||
|
||||
inline bool qedf_is_vport(struct qedf_ctx *qedf)
|
||||
{
|
||||
return (!(qedf->lport->vport == NULL));
|
||||
}
|
||||
|
||||
/* Get base qedf for physical port from vport */
|
||||
static struct qedf_ctx *qedf_get_base_qedf(struct qedf_ctx *qedf)
|
||||
{
|
||||
struct fc_lport *lport;
|
||||
struct fc_lport *base_lport;
|
||||
|
||||
if (!(qedf_is_vport(qedf)))
|
||||
return NULL;
|
||||
|
||||
lport = qedf->lport;
|
||||
base_lport = shost_priv(vport_to_shost(lport->vport));
|
||||
return (struct qedf_ctx *)(lport_priv(base_lport));
|
||||
}
|
||||
|
||||
void qedf_capture_grc_dump(struct qedf_ctx *qedf)
|
||||
{
|
||||
struct qedf_ctx *base_qedf;
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* QLogic FCoE Offload Driver
|
||||
* Copyright (c) 2016 Cavium Inc.
|
||||
* Copyright (c) 2016-2017 Cavium Inc.
|
||||
*
|
||||
* This software is available under the terms of the GNU General Public License
|
||||
* (GPL) Version 2, available from the file COPYING in the main directory of
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* QLogic FCoE Offload Driver
|
||||
* Copyright (c) 2016 QLogic Corporation
|
||||
* Copyright (c) 2016-2017 QLogic Corporation
|
||||
*
|
||||
* This software is available under the terms of the GNU General Public License
|
||||
* (GPL) Version 2, available from the file COPYING in the main directory of
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* QLogic FCoE Offload Driver
|
||||
* Copyright (c) 2016 Cavium Inc.
|
||||
* Copyright (c) 2016-2017 Cavium Inc.
|
||||
*
|
||||
* This software is available under the terms of the GNU General Public License
|
||||
* (GPL) Version 2, available from the file COPYING in the main directory of
|
||||
@ -44,7 +44,7 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
|
||||
goto els_err;
|
||||
}
|
||||
|
||||
if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) {
|
||||
if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
|
||||
QEDF_ERR(&(qedf->dbg_ctx), "els 0x%x: fcport not ready\n", op);
|
||||
rc = -EINVAL;
|
||||
goto els_err;
|
||||
@ -225,7 +225,7 @@ int qedf_send_rrq(struct qedf_ioreq *aborted_io_req)
|
||||
fcport = aborted_io_req->fcport;
|
||||
|
||||
/* Check that fcport is still offloaded */
|
||||
if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) {
|
||||
if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
|
||||
QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -550,7 +550,7 @@ static int qedf_send_srr(struct qedf_ioreq *orig_io_req, u32 offset, u8 r_ctl)
|
||||
fcport = orig_io_req->fcport;
|
||||
|
||||
/* Check that fcport is still offloaded */
|
||||
if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) {
|
||||
if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
|
||||
QEDF_ERR(NULL, "fcport is no longer offloaded.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -1,6 +1,6 @@
|
||||
/*
|
||||
* QLogic FCoE Offload Driver
|
||||
* Copyright (c) 2016 Cavium Inc.
|
||||
* Copyright (c) 2016-2017 Cavium Inc.
|
||||
*
|
||||
* This software is available under the terms of the GNU General Public License
|
||||
* (GPL) Version 2, available from the file COPYING in the main directory of
|
||||
@ -155,10 +155,9 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
|
||||
struct fip_wwn_desc *wp;
|
||||
struct fip_vn_desc *vp;
|
||||
size_t rlen, dlen;
|
||||
uint32_t cvl_port_id;
|
||||
__u8 cvl_mac[ETH_ALEN];
|
||||
u16 op;
|
||||
u8 sub;
|
||||
bool do_reset = false;
|
||||
|
||||
eth_hdr = (struct ethhdr *)skb_mac_header(skb);
|
||||
fiph = (struct fip_header *) ((void *)skb->data + 2 * ETH_ALEN + 2);
|
||||
@@ -189,8 +188,6 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
 		return;
 	}
 
-	cvl_port_id = 0;
-	memset(cvl_mac, 0, ETH_ALEN);
 	/*
 	 * We need to loop through the CVL descriptors to determine
 	 * if we want to reset the fcoe link
@@ -204,7 +201,9 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
 			mp = (struct fip_mac_desc *)desc;
 			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
 			    "fd_mac=%pM\n", mp->fd_mac);
-			ether_addr_copy(cvl_mac, mp->fd_mac);
+			if (ether_addr_equal(mp->fd_mac,
+			    qedf->ctlr.sel_fcf->fcf_mac))
+				do_reset = true;
 			break;
 		case FIP_DT_NAME:
 			wp = (struct fip_wwn_desc *)desc;
@@ -216,7 +215,9 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
 			vp = (struct fip_vn_desc *)desc;
 			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
 			    "fd_fc_id=%x.\n", ntoh24(vp->fd_fc_id));
-			cvl_port_id = ntoh24(vp->fd_fc_id);
+			if (ntoh24(vp->fd_fc_id) ==
+			    qedf->lport->port_id)
+				do_reset = true;
 			break;
 		default:
 			/* Ignore anything else */
@@ -227,11 +228,8 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
 	}
 
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
-	    "cvl_port_id=%06x cvl_mac=%pM.\n", cvl_port_id,
-	    cvl_mac);
-	if (cvl_port_id == qedf->lport->port_id &&
-	    ether_addr_equal(cvl_mac,
-	    qedf->ctlr.sel_fcf->fcf_mac)) {
+	    "do_reset=%d.\n", do_reset);
+	if (do_reset) {
 		fcoe_ctlr_link_down(&qedf->ctlr);
 		qedf_wait_for_upload(qedf);
 		fcoe_ctlr_link_up(&qedf->ctlr);
@@ -1,6 +1,6 @@
 /*
  * QLogic FCoE Offload Driver
- * Copyright (c) 2016 Cavium Inc.
+ * Copyright (c) 2016-2017 Cavium Inc.
  *
  * This software is available under the terms of the GNU General Public License
  * (GPL) Version 2, available from the file COPYING in the main directory of
@@ -1,6 +1,6 @@
 /*
  * QLogic FCoE Offload Driver
- * Copyright (c) 2016 Cavium Inc.
+ * Copyright (c) 2016-2017 Cavium Inc.
  *
  * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
@@ -1041,11 +1041,14 @@ static void qedf_parse_fcp_rsp(struct qedf_ioreq *io_req,
 			fcp_sns_len = SCSI_SENSE_BUFFERSIZE;
 	}
 
-	memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
-	if (fcp_sns_len)
-		memcpy(sc_cmd->sense_buffer, sense_data,
-		    fcp_sns_len);
+	/* The sense buffer can be NULL for TMF commands */
+	if (sc_cmd->sense_buffer) {
+		memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
+		if (fcp_sns_len)
+			memcpy(sc_cmd->sense_buffer, sense_data,
+			    fcp_sns_len);
+	}
 }
 
 static void qedf_unmap_sg_list(struct qedf_ctx *qedf, struct qedf_ioreq *io_req)
 {
@@ -1476,8 +1479,8 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
 {
 	struct fc_lport *lport;
 	struct qedf_rport *fcport = io_req->fcport;
-	struct fc_rport_priv *rdata = fcport->rdata;
-	struct qedf_ctx *qedf = fcport->qedf;
+	struct fc_rport_priv *rdata;
+	struct qedf_ctx *qedf;
 	u16 xid;
 	u32 r_a_tov = 0;
 	int rc = 0;
@@ -1485,15 +1488,18 @@ int qedf_initiate_abts(struct qedf_ioreq *io_req, bool return_scsi_cmd_on_abts)
 	struct fcoe_wqe *sqe;
 	u16 sqe_idx;
 
-	r_a_tov = rdata->r_a_tov;
-	lport = qedf->lport;
-
+	/* Sanity check qedf_rport before dereferencing any pointers */
 	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
-		QEDF_ERR(&(qedf->dbg_ctx), "tgt not offloaded\n");
+		QEDF_ERR(NULL, "tgt not offloaded\n");
 		rc = 1;
 		goto abts_err;
 	}
 
+	rdata = fcport->rdata;
+	r_a_tov = rdata->r_a_tov;
+	qedf = fcport->qedf;
+	lport = qedf->lport;
+
 	if (lport->state != LPORT_ST_READY || !(lport->link_up)) {
 		QEDF_ERR(&(qedf->dbg_ctx), "link is not ready\n");
 		rc = 1;
@@ -1729,6 +1735,13 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
 		return SUCCESS;
 	}
 
+	/* Sanity check qedf_rport before dereferencing any pointers */
+	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
+		QEDF_ERR(NULL, "tgt not offloaded\n");
+		rc = 1;
+		return SUCCESS;
+	}
+
 	qedf = fcport->qedf;
 	if (!qedf) {
 		QEDF_ERR(NULL, "qedf is NULL.\n");
@@ -1837,7 +1850,7 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
 		return FAILED;
 	}
 
-	if (!(test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags))) {
+	if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
 		QEDF_ERR(&(qedf->dbg_ctx), "fcport not offloaded\n");
 		rc = FAILED;
 		return FAILED;