// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2015 Linaro Ltd.
 * Copyright (c) 2015 Hisilicon Limited.
 */

#include "hisi_sas.h"
#define DRV_NAME "hisi_sas"

#define DEV_IS_GONE(dev) \
	((!dev) || (dev->dev_type == SAS_PHY_UNUSED))

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
				u8 *lun, struct hisi_sas_tmf_task *tmf);
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
			     struct domain_device *device,
			     int abort_flag, int tag);
static int hisi_sas_softreset_ata_disk(struct domain_device *device);
static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata);
static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device);
static void hisi_sas_dev_gone(struct domain_device *device);

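/*
 * Translate the command in a host-to-device FIS into the SATA protocol class
 * (FPDMA, PIO, DMA or non-data) that the hw-specific STP prep code uses.
 */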
u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, int direction)
{
	switch (fis->command) {
	case ATA_CMD_FPDMA_WRITE:
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_RECV:
	case ATA_CMD_FPDMA_SEND:
	case ATA_CMD_NCQ_NON_DATA:
		return HISI_SAS_SATA_PROTOCOL_FPDMA;

	case ATA_CMD_DOWNLOAD_MICRO:
	case ATA_CMD_ID_ATA:
	case ATA_CMD_PMP_READ:
	case ATA_CMD_READ_LOG_EXT:
	case ATA_CMD_PIO_READ:
	case ATA_CMD_PIO_READ_EXT:
	case ATA_CMD_PMP_WRITE:
	case ATA_CMD_WRITE_LOG_EXT:
	case ATA_CMD_PIO_WRITE:
	case ATA_CMD_PIO_WRITE_EXT:
		return HISI_SAS_SATA_PROTOCOL_PIO;

	case ATA_CMD_DSM:
	case ATA_CMD_DOWNLOAD_MICRO_DMA:
	case ATA_CMD_PMP_READ_DMA:
	case ATA_CMD_PMP_WRITE_DMA:
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_READ_LOG_DMA_EXT:
	case ATA_CMD_READ_STREAM_DMA_EXT:
	case ATA_CMD_TRUSTED_RCV_DMA:
	case ATA_CMD_TRUSTED_SND_DMA:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
	case ATA_CMD_WRITE_QUEUED:
	case ATA_CMD_WRITE_LOG_DMA_EXT:
	case ATA_CMD_WRITE_STREAM_DMA_EXT:
	case ATA_CMD_ZAC_MGMT_IN:
		return HISI_SAS_SATA_PROTOCOL_DMA;

	case ATA_CMD_CHK_POWER:
	case ATA_CMD_DEV_RESET:
	case ATA_CMD_EDD:
	case ATA_CMD_FLUSH:
	case ATA_CMD_FLUSH_EXT:
	case ATA_CMD_VERIFY:
	case ATA_CMD_VERIFY_EXT:
	case ATA_CMD_SET_FEATURES:
	case ATA_CMD_STANDBY:
	case ATA_CMD_STANDBYNOW1:
	case ATA_CMD_ZAC_MGMT_OUT:
		return HISI_SAS_SATA_PROTOCOL_NONDATA;

	case ATA_CMD_SET_MAX:
		switch (fis->features) {
		case ATA_SET_MAX_PASSWD:
		case ATA_SET_MAX_LOCK:
			return HISI_SAS_SATA_PROTOCOL_PIO;

		case ATA_SET_MAX_PASSWD_DMA:
		case ATA_SET_MAX_UNLOCK_DMA:
			return HISI_SAS_SATA_PROTOCOL_DMA;

		default:
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		}

	default:
	{
		if (direction == DMA_NONE)
			return HISI_SAS_SATA_PROTOCOL_NONDATA;
		return HISI_SAS_SATA_PROTOCOL_PIO;
	}
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_get_ata_protocol);

void hisi_sas_sata_done(struct sas_task *task,
			    struct hisi_sas_slot *slot)
{
	struct task_status_struct *ts = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)ts->buf;
	struct hisi_sas_status_buffer *status_buf =
			hisi_sas_status_buf_addr_mem(slot);
	u8 *iu = &status_buf->iu[0];
	struct dev_to_host_fis *d2h = (struct dev_to_host_fis *)iu;

	resp->frame_len = sizeof(struct dev_to_host_fis);
	memcpy(&resp->ending_fis[0], d2h, sizeof(struct dev_to_host_fis));

	ts->buf_valid_size = sizeof(*resp);
}
EXPORT_SYMBOL_GPL(hisi_sas_sata_done);

/*
 * This function assumes linkrate mask fits in 8 bits, which it
 * does for all HW versions supported.
 */
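/* For example, a maximum of SAS_LINK_RATE_6_0_GBPS yields a mask of 0x15. */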
u8 hisi_sas_get_prog_phy_linkrate_mask(enum sas_linkrate max)
{
	u8 rate = 0;
	int i;

	max -= SAS_LINK_RATE_1_5_GBPS;
	for (i = 0; i <= max; i++)
		rate |= 1 << (i * 2);
	return rate;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_prog_phy_linkrate_mask);

static struct hisi_hba *dev_to_hisi_hba(struct domain_device *device)
{
	return device->port->ha->lldd_ha;
}

struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port)
{
	return container_of(sas_port, struct hisi_sas_port, sas_port);
}
EXPORT_SYMBOL_GPL(to_hisi_sas_port);

void hisi_sas_stop_phys(struct hisi_hba *hisi_hba)
{
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++)
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
}
EXPORT_SYMBOL_GPL(hisi_sas_stop_phys);

static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	clear_bit(slot_idx, bitmap);
}

static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
{
	if (hisi_hba->hw->slot_index_alloc ||
	    slot_idx >= HISI_SAS_UNRESERVED_IPTT) {
		spin_lock(&hisi_hba->lock);
		hisi_sas_slot_index_clear(hisi_hba, slot_idx);
		spin_unlock(&hisi_hba->lock);
	}
}

static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
	void *bitmap = hisi_hba->slot_index_tags;

	set_bit(slot_idx, bitmap);
}

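/*
 * Pick an IPTT for a command: commands with a scsi_cmnd reuse the block
 * layer request tag directly, while internal commands take a tag from the
 * driver-managed bitmap (the range at or above HISI_SAS_UNRESERVED_IPTT).
 */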
static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
				     struct scsi_cmnd *scsi_cmnd)
{
	int index;
	void *bitmap = hisi_hba->slot_index_tags;

	if (scsi_cmnd)
		return scsi_cmnd->request->tag;

	spin_lock(&hisi_hba->lock);
	index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count,
				   hisi_hba->last_slot_index + 1);
	if (index >= hisi_hba->slot_index_count) {
		index = find_next_zero_bit(bitmap,
				hisi_hba->slot_index_count,
				HISI_SAS_UNRESERVED_IPTT);
		if (index >= hisi_hba->slot_index_count) {
			spin_unlock(&hisi_hba->lock);
			return -SAS_QUEUE_FULL;
		}
	}
	hisi_sas_slot_index_set(hisi_hba, index);
	hisi_hba->last_slot_index = index;
	spin_unlock(&hisi_hba->lock);

	return index;
}

static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
{
	int i;

	for (i = 0; i < hisi_hba->slot_index_count; ++i)
		hisi_sas_slot_index_clear(hisi_hba, i);
}

void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
			     struct hisi_sas_slot *slot)
{
	int device_id = slot->device_id;
	struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id];

	if (task) {
		struct device *dev = hisi_hba->dev;

		if (!task->lldd_task)
			return;

		task->lldd_task = NULL;

		if (!sas_protocol_ata(task->task_proto)) {
			struct sas_ssp_task *ssp_task = &task->ssp_task;
			struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;

			if (slot->n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
			if (slot->n_elem_dif)
				dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
					     scsi_prot_sg_count(scsi_cmnd),
					     task->data_dir);
		}
	}

	spin_lock(&sas_dev->lock);
	list_del_init(&slot->entry);
	spin_unlock(&sas_dev->lock);

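	/*
	 * slot->buf is DMA memory pre-allocated for every IPTT at init time,
	 * so only clear the fields in front of it and keep the buffer.
	 */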
	memset(slot, 0, offsetof(struct hisi_sas_slot, buf));

	hisi_sas_slot_index_free(hisi_hba, slot->idx);
}
EXPORT_SYMBOL_GPL(hisi_sas_slot_task_free);

static void hisi_sas_task_prep_smp(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_smp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ssp(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_ssp(hisi_hba, slot);
}

static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
				   struct hisi_sas_slot *slot)
{
	hisi_hba->hw->prep_stp(hisi_hba, slot);
}

static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
				     struct hisi_sas_slot *slot,
				     int device_id, int abort_flag, int tag_to_abort)
{
	hisi_hba->hw->prep_abort(hisi_hba, slot,
				 device_id, abort_flag, tag_to_abort);
}

static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
			       struct sas_task *task, int n_elem,
			       int n_elem_req)
{
	struct device *dev = hisi_hba->dev;

	if (!sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter) {
			if (n_elem)
				dma_unmap_sg(dev, task->scatter,
					     task->num_scatter,
					     task->data_dir);
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			if (n_elem_req)
				dma_unmap_sg(dev, &task->smp_task.smp_req,
					     1, DMA_TO_DEVICE);
		}
	}
}

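/*
 * Map the task's data for DMA: SSP scatterlists and SMP requests are mapped
 * here (ATA tasks arrive already mapped, so only the element count is taken),
 * and a mapped SMP request length must be a multiple of 4 bytes.
 */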
static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
			    struct sas_task *task, int *n_elem,
			    int *n_elem_req)
{
	struct device *dev = hisi_hba->dev;
	int rc;

	if (sas_protocol_ata(task->task_proto)) {
		*n_elem = task->num_scatter;
	} else {
		unsigned int req_len;

		if (task->num_scatter) {
			*n_elem = dma_map_sg(dev, task->scatter,
					     task->num_scatter, task->data_dir);
			if (!*n_elem) {
				rc = -ENOMEM;
				goto prep_out;
			}
		} else if (task->task_proto & SAS_PROTOCOL_SMP) {
			*n_elem_req = dma_map_sg(dev, &task->smp_task.smp_req,
						 1, DMA_TO_DEVICE);
			if (!*n_elem_req) {
				rc = -ENOMEM;
				goto prep_out;
			}
			req_len = sg_dma_len(&task->smp_task.smp_req);
			if (req_len & 0x3) {
				rc = -EINVAL;
				goto err_out_dma_unmap;
			}
		}
	}

	if (*n_elem > HISI_SAS_SGE_PAGE_CNT) {
		dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
			*n_elem);
		rc = -EINVAL;
		goto err_out_dma_unmap;
	}
	return 0;

err_out_dma_unmap:
	/* It would be better to call dma_unmap_sg() here, but it's messy */
	hisi_sas_dma_unmap(hisi_hba, task, *n_elem,
			   *n_elem_req);
prep_out:
	return rc;
}

static void hisi_sas_dif_dma_unmap(struct hisi_hba *hisi_hba,
				   struct sas_task *task, int n_elem_dif)
{
	struct device *dev = hisi_hba->dev;

	if (n_elem_dif) {
		struct sas_ssp_task *ssp_task = &task->ssp_task;
		struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;

		dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
			     scsi_prot_sg_count(scsi_cmnd),
			     task->data_dir);
	}
}

static int hisi_sas_dif_dma_map(struct hisi_hba *hisi_hba,
				int *n_elem_dif, struct sas_task *task)
{
	struct device *dev = hisi_hba->dev;
	struct sas_ssp_task *ssp_task;
	struct scsi_cmnd *scsi_cmnd;
	int rc;

	if (task->num_scatter) {
		ssp_task = &task->ssp_task;
		scsi_cmnd = ssp_task->cmd;

		if (scsi_prot_sg_count(scsi_cmnd)) {
			*n_elem_dif = dma_map_sg(dev,
						 scsi_prot_sglist(scsi_cmnd),
						 scsi_prot_sg_count(scsi_cmnd),
						 task->data_dir);

			if (!*n_elem_dif)
				return -ENOMEM;

			if (*n_elem_dif > HISI_SAS_SGE_DIF_PAGE_CNT) {
				dev_err(dev, "task prep: n_elem_dif(%d) too large\n",
					*n_elem_dif);
				rc = -EINVAL;
				goto err_out_dif_dma_unmap;
			}
		}
	}

	return 0;

err_out_dif_dma_unmap:
	dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
		     scsi_prot_sg_count(scsi_cmnd), task->data_dir);
	return rc;
}

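/*
 * Prepare one task for delivery: map it for DMA, allocate an IPTT and slot,
 * place the slot on a delivery queue and build its command header; the
 * caller starts delivery once the slot is marked ready.
 */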
static int hisi_sas_task_prep(struct sas_task *task,
			      struct hisi_sas_dq **dq_pointer,
			      bool is_tmf, struct hisi_sas_tmf_task *tmf,
			      int *pass)
{
	struct domain_device *device = task->dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	struct asd_sas_port *sas_port = device->port;
	struct device *dev = hisi_hba->dev;
	int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
	int n_elem = 0, n_elem_dif = 0, n_elem_req = 0;
	struct hisi_sas_dq *dq;
	unsigned long flags;
	int wr_q_index;

	if (DEV_IS_GONE(sas_dev)) {
		if (sas_dev)
			dev_info(dev, "task prep: device %d not ready\n",
				 sas_dev->device_id);
		else
			dev_info(dev, "task prep: device %016llx not ready\n",
				 SAS_ADDR(device->sas_addr));

		return -ECOMM;
	}

	if (hisi_hba->reply_map) {
		int cpu = raw_smp_processor_id();
		unsigned int dq_index = hisi_hba->reply_map[cpu];

		*dq_pointer = dq = &hisi_hba->dq[dq_index];
	} else {
		*dq_pointer = dq = sas_dev->dq;
	}

	port = to_hisi_sas_port(sas_port);
	if (port && !port->port_attached) {
		dev_info(dev, "task prep: %s port%d not attach device\n",
			 (dev_is_sata(device)) ?
			 "SATA/STP" : "SAS",
			 device->port->id);

		return -ECOMM;
	}

	rc = hisi_sas_dma_map(hisi_hba, task, &n_elem,
			      &n_elem_req);
	if (rc < 0)
		goto prep_out;

	if (!sas_protocol_ata(task->task_proto)) {
		rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task);
		if (rc < 0)
			goto err_out_dma_unmap;
	}

	if (hisi_hba->hw->slot_index_alloc)
		rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
	else {
		struct scsi_cmnd *scsi_cmnd = NULL;

		if (task->uldd_task) {
			struct ata_queued_cmd *qc;

			if (dev_is_sata(device)) {
				qc = task->uldd_task;
				scsi_cmnd = qc->scsicmd;
			} else {
				scsi_cmnd = task->uldd_task;
			}
		}
		rc = hisi_sas_slot_index_alloc(hisi_hba, scsi_cmnd);
	}
	if (rc < 0)
		goto err_out_dif_dma_unmap;

	slot_idx = rc;
	slot = &hisi_hba->slot_info[slot_idx];

	spin_lock(&dq->lock);
	wr_q_index = dq->wr_point;
	dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock(&dq->lock);
	spin_lock(&sas_dev->lock);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock(&sas_dev->lock);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->device_id = sas_dev->device_id;
	slot->n_elem = n_elem;
	slot->n_elem_dif = n_elem_dif;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	slot->tmf = tmf;
	slot->is_internal = is_tmf;
	task->lldd_task = slot;

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0,
	       sizeof(struct hisi_sas_err_record));

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		hisi_sas_task_prep_smp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SSP:
		hisi_sas_task_prep_ssp(hisi_hba, slot);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		hisi_sas_task_prep_ata(hisi_hba, slot);
		break;
	default:
		dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
			task->task_proto);
		break;
	}

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	++(*pass);
	WRITE_ONCE(slot->ready, 1);

	return 0;

err_out_dif_dma_unmap:
	if (!sas_protocol_ata(task->task_proto))
		hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
err_out_dma_unmap:
	hisi_sas_dma_unmap(hisi_hba, task, n_elem,
			   n_elem_req);
prep_out:
	dev_err(dev, "task prep: failed[%d]!\n", rc);
	return rc;
}

static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
			      bool is_tmf, struct hisi_sas_tmf_task *tmf)
{
	u32 rc;
	u32 pass = 0;
	struct hisi_hba *hisi_hba;
	struct device *dev;
	struct domain_device *device = task->dev;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_dq *dq = NULL;

	if (!sas_port) {
		struct task_status_struct *ts = &task->task_status;

		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		/*
		 * libsas will use dev->port, should
		 * not call task_done for sata
		 */
		if (device->dev_type != SAS_SATA_DEV)
			task->task_done(task);
		return -ECOMM;
	}

	hisi_hba = dev_to_hisi_hba(device);
	dev = hisi_hba->dev;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) {
		/*
		 * For IOs from upper layer, it may already disable preempt
		 * in the IO path, if disable preempt again in down(),
		 * function schedule() will report schedule_bug(), so check
		 * preemptible() before goto down().
		 */
		if (!preemptible())
			return -EINVAL;

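		/*
		 * hisi_hba->sem is held for the duration of a controller
		 * reset, so this down()/up() pair simply waits for a reset
		 * in progress to finish before delivering new commands.
		 */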
		down(&hisi_hba->sem);
		up(&hisi_hba->sem);
	}

	/* protect task_prep and start_delivery sequence */
	rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass);
	if (rc)
		dev_err(dev, "task exec: failed[%d]!\n", rc);

	if (likely(pass)) {
		spin_lock(&dq->lock);
		hisi_hba->hw->start_delivery(dq);
		spin_unlock(&dq->lock);
	}

	return rc;
}

static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	struct sas_ha_struct *sas_ha;

	if (!phy->phy_attached)
		return;

	sas_ha = &hisi_hba->sha;
	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);

	if (sas_phy->phy) {
		struct sas_phy *sphy = sas_phy->phy;

		sphy->negotiated_linkrate = sas_phy->linkrate;
		sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
		sphy->maximum_linkrate_hw =
			hisi_hba->hw->phy_get_max_linkrate();
		if (sphy->minimum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->minimum_linkrate = phy->minimum_linkrate;

		if (sphy->maximum_linkrate == SAS_LINK_RATE_UNKNOWN)
			sphy->maximum_linkrate = phy->maximum_linkrate;
	}

	if (phy->phy_type & PORT_TYPE_SAS) {
		struct sas_identify_frame *id;

		id = (struct sas_identify_frame *)phy->frame_rcvd;
		id->dev_type = phy->identify.device_type;
		id->initiator_bits = SAS_PROTOCOL_ALL;
		id->target_bits = phy->identify.target_port_protocols;
	} else if (phy->phy_type & PORT_TYPE_SATA) {
		/* Nothing */
	}

	sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
	sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
}

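/*
 * Find a free device slot (ITCT entry), searching round-robin from just
 * after the last allocated id, and bind it to this domain device.
 */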
static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct hisi_sas_device *sas_dev = NULL;
	int last = hisi_hba->last_dev_id;
	int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES;
	int i;

	spin_lock(&hisi_hba->lock);
	for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) {
		if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) {
			int queue = i % hisi_hba->queue_count;
			struct hisi_sas_dq *dq = &hisi_hba->dq[queue];

			hisi_hba->devices[i].device_id = i;
			sas_dev = &hisi_hba->devices[i];
			sas_dev->dev_status = HISI_SAS_DEV_INIT;
			sas_dev->dev_type = device->dev_type;
			sas_dev->hisi_hba = hisi_hba;
			sas_dev->sas_device = device;
			sas_dev->dq = dq;
			spin_lock_init(&sas_dev->lock);
			INIT_LIST_HEAD(&hisi_hba->devices[i].list);
			break;
		}
		i++;
	}
	hisi_hba->last_dev_id = i;
	spin_unlock(&hisi_hba->lock);

	return sas_dev;
}

#define HISI_SAS_DISK_RECOVER_CNT 3
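/*
 * Bring a newly found device to a known state: clear the task set on SSP
 * end devices, and hard reset + software reset SATA devices to drop any
 * previous STP affiliation.
 */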
static int hisi_sas_init_device(struct domain_device *device)
{
	int rc = TMF_RESP_FUNC_COMPLETE;
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int retry = HISI_SAS_DISK_RECOVER_CNT;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	struct sas_phy *local_phy;

	switch (device->dev_type) {
	case SAS_END_DEVICE:
		int_to_scsilun(0, &lun);

		tmf_task.tmf = TMF_CLEAR_TASK_SET;
		while (retry-- > 0) {
			rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun,
							  &tmf_task);
			if (rc == TMF_RESP_FUNC_COMPLETE) {
				hisi_sas_release_task(hisi_hba, device);
				break;
			}
		}
		break;
	case SAS_SATA_DEV:
	case SAS_SATA_PM:
	case SAS_SATA_PM_PORT:
	case SAS_SATA_PENDING:
		/*
		 * send HARD RESET to clear previous affiliation of
		 * STP target port
		 */
		local_phy = sas_get_local_phy(device);
		if (!scsi_is_sas_phy_local(local_phy) &&
		    !test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
			unsigned long deadline = ata_deadline(jiffies, 20000);
			struct sata_device *sata_dev = &device->sata_dev;
			struct ata_host *ata_host = sata_dev->ata_host;
			struct ata_port_operations *ops = ata_host->ops;
			struct ata_port *ap = sata_dev->ap;
			struct ata_link *link;
			unsigned int classes;

			ata_for_each_link(link, ap, EDGE)
				rc = ops->hardreset(link, &classes,
						    deadline);
		}
		sas_put_local_phy(local_phy);
		if (rc) {
			dev_warn(dev, "SATA disk hardreset fail: %d\n", rc);
			return rc;
		}

		while (retry-- > 0) {
			rc = hisi_sas_softreset_ata_disk(device);
			if (!rc)
				break;
		}
		break;
	default:
		break;
	}

	return rc;
}

static int hisi_sas_dev_found(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct domain_device *parent_dev = device->parent;
	struct hisi_sas_device *sas_dev;
	struct device *dev = hisi_hba->dev;
	int rc;

	if (hisi_hba->hw->alloc_dev)
		sas_dev = hisi_hba->hw->alloc_dev(device);
	else
		sas_dev = hisi_sas_alloc_dev(device);
	if (!sas_dev) {
		dev_err(dev, "fail alloc dev: max support %d devices\n",
			HISI_SAS_MAX_DEVICES);
		return -EINVAL;
	}

	device->lldd_dev = sas_dev;
	hisi_hba->hw->setup_itct(hisi_hba, sas_dev);

	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
		int phy_no;
		u8 phy_num = parent_dev->ex_dev.num_phys;
		struct ex_phy *phy;

		for (phy_no = 0; phy_no < phy_num; phy_no++) {
			phy = &parent_dev->ex_dev.ex_phy[phy_no];
			if (SAS_ADDR(phy->attached_sas_addr) ==
				SAS_ADDR(device->sas_addr))
				break;
		}

		if (phy_no == phy_num) {
			dev_info(dev, "dev found: no attached "
				 "dev:%016llx at ex:%016llx\n",
				 SAS_ADDR(device->sas_addr),
				 SAS_ADDR(parent_dev->sas_addr));
			rc = -EINVAL;
			goto err_out;
		}
	}

	dev_info(dev, "dev[%d:%x] found\n",
		 sas_dev->device_id, sas_dev->dev_type);

	rc = hisi_sas_init_device(device);
	if (rc)
		goto err_out;
	sas_dev->dev_status = HISI_SAS_DEV_NORMAL;
	return 0;

err_out:
	hisi_sas_dev_gone(device);
	return rc;
}

int hisi_sas_slave_configure(struct scsi_device *sdev)
{
	struct domain_device *dev = sdev_to_domain_dev(sdev);
	int ret = sas_slave_configure(sdev);

	if (ret)
		return ret;
	if (!dev_is_sata(dev))
		sas_change_queue_depth(sdev, 64);

	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_slave_configure);

void hisi_sas_scan_start(struct Scsi_Host *shost)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);

	hisi_hba->hw->phys_init(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_start);

int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct hisi_hba *hisi_hba = shost_priv(shost);
	struct sas_ha_struct *sha = &hisi_hba->sha;

	/* Wait for PHY up interrupt to occur */
	if (time < HZ)
		return 0;

	sas_drain_work(sha);
	return 1;
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);

static void hisi_sas_phyup_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int phy_no = sas_phy->id;

	if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP)
		hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no);
	hisi_sas_bytes_dmaed(hisi_hba, phy_no);
}

static void hisi_sas_linkreset_work(struct work_struct *work)
{
	struct hisi_sas_phy *phy =
		container_of(work, typeof(*phy), works[HISI_PHYE_LINK_RESET]);
	struct asd_sas_phy *sas_phy = &phy->sas_phy;

	hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
}

static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
	[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
	[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
};

bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
				enum hisi_sas_phy_event event)
{
	struct hisi_hba *hisi_hba = phy->hisi_hba;

	if (WARN_ON(event >= HISI_PHYES_NUM))
		return false;

	return queue_work(hisi_hba->wq, &phy->works[event]);
}
EXPORT_SYMBOL_GPL(hisi_sas_notify_phy_event);

static void hisi_sas_wait_phyup_timedout(struct timer_list *t)
{
	struct hisi_sas_phy *phy = from_timer(phy, t, timer);
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	struct device *dev = hisi_hba->dev;
	int phy_no = phy->sas_phy.id;

	dev_warn(dev, "phy%d wait phyup timeout, issuing link reset\n", phy_no);
	hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
}

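/*
 * Called when OOB completes on a phy: if the phy has not already come up,
 * arm a timer that issues a link reset after HISI_SAS_WAIT_PHYUP_TIMEOUT
 * seconds without a phy-up.
 */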
void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct device *dev = hisi_hba->dev;

	dev_dbg(dev, "phy%d OOB ready\n", phy_no);
	if (phy->phy_attached)
		return;

	if (!timer_pending(&phy->timer)) {
		phy->timer.expires = jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT * HZ;
		add_timer(&phy->timer);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready);

static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	int i;

	phy->hisi_hba = hisi_hba;
	phy->port = NULL;
	phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
	phy->maximum_linkrate = hisi_hba->hw->phy_get_max_linkrate();
	sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 1 : 0;
	sas_phy->class = SAS;
	sas_phy->iproto = SAS_PROTOCOL_ALL;
	sas_phy->tproto = 0;
	sas_phy->type = PHY_TYPE_PHYSICAL;
	sas_phy->role = PHY_ROLE_INITIATOR;
	sas_phy->oob_mode = OOB_NOT_CONNECTED;
	sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
	sas_phy->id = phy_no;
	sas_phy->sas_addr = &hisi_hba->sas_addr[0];
	sas_phy->frame_rcvd = &phy->frame_rcvd[0];
	sas_phy->ha = (struct sas_ha_struct *)hisi_hba->shost->hostdata;
	sas_phy->lldd_phy = phy;

	for (i = 0; i < HISI_PHYES_NUM; i++)
		INIT_WORK(&phy->works[i], hisi_sas_phye_fns[i]);

	spin_lock_init(&phy->lock);

	timer_setup(&phy->timer, hisi_sas_wait_phyup_timedout, 0);
}

/* Wrapper to ensure we track hisi_sas_phy.enable properly */
void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no, int enable)
{
	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *aphy = &phy->sas_phy;
	struct sas_phy *sphy = aphy->phy;
	unsigned long flags;

	spin_lock_irqsave(&phy->lock, flags);

	if (enable) {
		/* We may have been enabled already; if so, don't touch */
		if (!phy->enable)
			sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
		hisi_hba->hw->phy_start(hisi_hba, phy_no);
	} else {
		sphy->negotiated_linkrate = SAS_PHY_DISABLED;
		hisi_hba->hw->phy_disable(hisi_hba, phy_no);
	}
	phy->enable = enable;
	spin_unlock_irqrestore(&phy->lock, flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_enable);

static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct hisi_sas_phy *phy = sas_phy->lldd_phy;
	struct asd_sas_port *sas_port = sas_phy->port;
	struct hisi_sas_port *port;
	unsigned long flags;

	if (!sas_port)
		return;

	port = to_hisi_sas_port(sas_port);
	spin_lock_irqsave(&hisi_hba->lock, flags);
	port->port_attached = 1;
	port->id = phy->port_id;
	phy->port = port;
	sas_port->lldd_port = port;
	spin_unlock_irqrestore(&hisi_hba->lock, flags);
}

static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task *task,
				     struct hisi_sas_slot *slot)
{
	if (task) {
		unsigned long flags;
		struct task_status_struct *ts;

		ts = &task->task_status;

		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags &=
			~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
		if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP)
			task->task_state_flags |= SAS_TASK_STATE_DONE;
		spin_unlock_irqrestore(&task->task_state_lock, flags);
	}

	hisi_sas_slot_task_free(hisi_hba, task, slot);
}

static void hisi_sas_release_task(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	struct hisi_sas_slot *slot, *slot2;
	struct hisi_sas_device *sas_dev = device->lldd_dev;

	list_for_each_entry_safe(slot, slot2, &sas_dev->list, entry)
		hisi_sas_do_release_task(hisi_hba, slot->task, slot);
}

void hisi_sas_release_tasks(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) ||
		    !device)
			continue;

		hisi_sas_release_task(hisi_hba, device);
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_release_tasks);

static void hisi_sas_dereg_device(struct hisi_hba *hisi_hba,
				  struct domain_device *device)
{
	if (hisi_hba->hw->dereg_device)
		hisi_hba->hw->dereg_device(hisi_hba, device);
}

static void hisi_sas_dev_gone(struct domain_device *device)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int ret = 0;

	dev_info(dev, "dev[%d:%x] is gone\n",
		 sas_dev->device_id, sas_dev->dev_type);

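	/*
	 * Hold hisi_hba->sem so that removing this device cannot race with a
	 * controller reset re-initialising it; the reset path takes the same
	 * semaphore.
	 */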
	down(&hisi_hba->sem);
	if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
		hisi_sas_internal_task_abort(hisi_hba, device,
					     HISI_SAS_INT_ABT_DEV, 0);

		hisi_sas_dereg_device(hisi_hba, device);

		ret = hisi_hba->hw->clear_itct(hisi_hba, sas_dev);
		device->lldd_dev = NULL;
	}

	if (hisi_hba->hw->free_device)
		hisi_hba->hw->free_device(sas_dev);

	/* Don't mark it as SAS_PHY_UNUSED if failed to clear ITCT */
	if (!ret)
		sas_dev->dev_type = SAS_PHY_UNUSED;
	sas_dev->sas_device = NULL;
	up(&hisi_hba->sem);
}

static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
	return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
}

static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
				     struct sas_phy_linkrates *r)
{
	struct sas_phy_linkrates _r;

	struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
	struct asd_sas_phy *sas_phy = &phy->sas_phy;
	enum sas_linkrate min, max;

	if (r->minimum_linkrate > SAS_LINK_RATE_1_5_GBPS)
		return -EINVAL;

	if (r->maximum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = sas_phy->phy->maximum_linkrate;
		min = r->minimum_linkrate;
	} else if (r->minimum_linkrate == SAS_LINK_RATE_UNKNOWN) {
		max = r->maximum_linkrate;
		min = sas_phy->phy->minimum_linkrate;
	} else
		return -EINVAL;

	_r.maximum_linkrate = max;
	_r.minimum_linkrate = min;

	sas_phy->phy->maximum_linkrate = max;
	sas_phy->phy->minimum_linkrate = min;

	hisi_sas_phy_enable(hisi_hba, phy_no, 0);
	msleep(100);
	hisi_hba->hw->phy_set_linkrate(hisi_hba, phy_no, &_r);
	hisi_sas_phy_enable(hisi_hba, phy_no, 1);

	return 0;
}

static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
				void *funcdata)
{
	struct sas_ha_struct *sas_ha = sas_phy->ha;
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	int phy_no = sas_phy->id;

	switch (func) {
	case PHY_FUNC_HARD_RESET:
		hisi_hba->hw->phy_hard_reset(hisi_hba, phy_no);
		break;

	case PHY_FUNC_LINK_RESET:
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
		msleep(100);
		hisi_sas_phy_enable(hisi_hba, phy_no, 1);
		break;

	case PHY_FUNC_DISABLE:
		hisi_sas_phy_enable(hisi_hba, phy_no, 0);
		break;

	case PHY_FUNC_SET_LINK_RATE:
		return hisi_sas_phy_set_linkrate(hisi_hba, phy_no, funcdata);
	case PHY_FUNC_GET_EVENTS:
		if (hisi_hba->hw->get_events) {
			hisi_hba->hw->get_events(hisi_hba, phy_no);
			break;
		}
		/* fallthru */
	case PHY_FUNC_RELEASE_SPINUP_HOLD:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static void hisi_sas_task_done(struct sas_task *task)
{
	del_timer(&task->slow_task->timer);
	complete(&task->slow_task->completion);
}

static void hisi_sas_tmf_timedout(struct timer_list *t)
{
	struct sas_task_slow *slow = from_timer(slow, t, timer);
	struct sas_task *task = slow->task;
	unsigned long flags;
	bool is_completed = true;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
		task->task_state_flags |= SAS_TASK_STATE_ABORTED;
		is_completed = false;
	}
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (!is_completed)
		complete(&task->slow_task->completion);
}

#define TASK_TIMEOUT 20
#define TASK_RETRY 3
#define INTERNAL_ABORT_TIMEOUT 6
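/*
 * Build and issue an internal TMF (an SSP TMF or a SATA device-control FIS)
 * and wait for it to complete, retrying up to TASK_RETRY times; a
 * TASK_TIMEOUT-second timer aborts a TMF that never completes.
 */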
static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
					   void *parameter, u32 para_len,
					   struct hisi_sas_tmf_task *tmf)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = sas_dev->hisi_hba;
	struct device *dev = hisi_hba->dev;
	struct sas_task *task;
	int res, retry;

	for (retry = 0; retry < TASK_RETRY; retry++) {
		task = sas_alloc_slow_task(GFP_KERNEL);
		if (!task)
			return -ENOMEM;

		task->dev = device;
		task->task_proto = device->tproto;

		if (dev_is_sata(device)) {
			task->ata_task.device_control_reg_update = 1;
			memcpy(&task->ata_task.fis, parameter, para_len);
		} else {
			memcpy(&task->ssp_task, parameter, para_len);
		}
		task->task_done = hisi_sas_task_done;

		task->slow_task->timer.function = hisi_sas_tmf_timedout;
		task->slow_task->timer.expires = jiffies + TASK_TIMEOUT * HZ;
		add_timer(&task->slow_task->timer);

		res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);

		if (res) {
			del_timer(&task->slow_task->timer);
			dev_err(dev, "abort tmf: executing internal task failed: %d\n",
				res);
			goto ex_err;
		}

		wait_for_completion(&task->slow_task->completion);
		res = TMF_RESP_FUNC_FAILED;
		/* Even if the TMF timed out, return directly. */
		if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
			if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
				struct hisi_sas_slot *slot = task->lldd_task;

				dev_err(dev, "abort tmf: TMF task timeout and not done\n");
				if (slot) {
					struct hisi_sas_cq *cq =
					       &hisi_hba->cq[slot->dlvry_queue];
					/*
					 * sync irq to avoid free'ing task
					 * before using task in IO completion
					 */
					synchronize_irq(cq->irq_no);
					slot->task = NULL;
				}

				goto ex_err;
			} else
				dev_err(dev, "abort tmf: TMF task timeout\n");
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
			res = TMF_RESP_FUNC_COMPLETE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == TMF_RESP_FUNC_SUCC) {
			res = TMF_RESP_FUNC_SUCC;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_UNDERRUN) {
			/* no error, but return the number of bytes of
			 * underrun
			 */
			dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x sts 0x%x underrun\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
			res = task->task_status.residual;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_DATA_OVERRUN) {
			dev_warn(dev, "abort tmf: blocked task error\n");
			res = -EMSGSIZE;
			break;
		}

		if (task->task_status.resp == SAS_TASK_COMPLETE &&
		    task->task_status.stat == SAS_OPEN_REJECT) {
			dev_warn(dev, "abort tmf: open reject failed\n");
			res = -EIO;
		} else {
			dev_warn(dev, "abort tmf: task to dev %016llx resp: 0x%x status 0x%x\n",
				 SAS_ADDR(device->sas_addr),
				 task->task_status.resp,
				 task->task_status.stat);
		}
		sas_free_task(task);
		task = NULL;
	}
ex_err:
	if (retry == TASK_RETRY)
		dev_warn(dev, "abort tmf: executing internal task failed!\n");
	sas_free_task(task);
	return res;
}

static void hisi_sas_fill_ata_reset_cmd(struct ata_device *dev,
		bool reset, int pmp, u8 *fis)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	if (reset)
		tf.ctl |= ATA_SRST;
	else
		tf.ctl &= ~ATA_SRST;
	tf.command = ATA_CMD_DEV_RESET;
	ata_tf_to_fis(&tf, pmp, 0, fis);
}

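/*
 * Note (summary of the helper below): a SATA disk is soft-reset by sending
 * a device-control FIS with ATA_SRST set on every link of its ata_port and
 * then a second FIS with ATA_SRST cleared; the driver's outstanding tasks
 * for the device are released only if both steps complete.
 */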
static int hisi_sas_softreset_ata_disk(struct domain_device *device)
{
	u8 fis[20] = {0};
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link;
	int rc = TMF_RESP_FUNC_FAILED;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);

	ata_for_each_link(link, ap, EDGE) {
		int pmp = sata_srst_pmp(link);

		hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
		rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			break;
	}

	if (rc == TMF_RESP_FUNC_COMPLETE) {
		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis,
							     s, NULL);
			if (rc != TMF_RESP_FUNC_COMPLETE)
				dev_err(dev, "ata disk de-reset failed\n");
		}
	} else {
		dev_err(dev, "ata disk reset failed\n");
	}

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
				u8 *lun, struct hisi_sas_tmf_task *tmf)
{
	struct sas_ssp_task ssp_task;

	if (!(device->tproto & SAS_PROTOCOL_SSP))
		return TMF_RESP_FUNC_ESUPP;

	memcpy(ssp_task.LUN, lun, 8);

	return hisi_sas_exec_internal_tmf_task(device, &ssp_task,
				sizeof(ssp_task), tmf);
}

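/*
 * Note (summary of the helper below): after a controller reset the hardware
 * port ids may change, so each registered device is matched against the
 * current phy-up state and its hisi_sas_port id, linkrate (for directly
 * attached devices) and ITCT entry are refreshed.
 */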
static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
{
	u32 state = hisi_hba->hw->get_phys_state(hisi_hba);
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;
		struct asd_sas_port *sas_port;
		struct hisi_sas_port *port;
		struct hisi_sas_phy *phy = NULL;
		struct asd_sas_phy *sas_phy;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED)
				|| !device || !device->port)
			continue;

		sas_port = device->port;
		port = to_hisi_sas_port(sas_port);

		list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
			if (state & BIT(sas_phy->id)) {
				phy = sas_phy->lldd_phy;
				break;
			}

		if (phy) {
			port->id = phy->port_id;

			/* Update linkrate of directly attached device. */
			if (!device->parent)
				device->linkrate = phy->sas_phy.linkrate;

			hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
		} else
			port->id = 0xff;
	}
}

static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
{
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	struct asd_sas_port *_sas_port = NULL;
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
		struct asd_sas_phy *sas_phy = &phy->sas_phy;
		struct asd_sas_port *sas_port = sas_phy->port;
		bool do_port_check = _sas_port != sas_port;

		if (!sas_phy->phy->enabled)
			continue;

		/* Report PHY state change to libsas */
		if (state & BIT(phy_no)) {
			if (do_port_check && sas_port && sas_port->port_dev) {
				struct domain_device *dev = sas_port->port_dev;

				_sas_port = sas_port;

				if (dev_is_expander(dev->dev_type))
					sas_ha->notify_port_event(sas_phy,
							PORTE_BROADCAST_RCVD);
			}
		} else {
			hisi_sas_phy_down(hisi_hba, phy_no, 0);
		}
	}
}

static void hisi_sas_reset_init_all_devices(struct hisi_hba *hisi_hba)
{
	struct hisi_sas_device *sas_dev;
	struct domain_device *device;
	int i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		sas_dev = &hisi_hba->devices[i];
		device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		hisi_sas_init_device(device);
	}
}

static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
					     struct asd_sas_port *sas_port,
					     struct domain_device *device)
{
	struct hisi_sas_tmf_task tmf_task = { .force_phy = 1 };
	struct ata_port *ap = device->sata_dev.ap;
	struct device *dev = hisi_hba->dev;
	int s = sizeof(struct host_to_dev_fis);
	int rc = TMF_RESP_FUNC_FAILED;
	struct asd_sas_phy *sas_phy;
	struct ata_link *link;
	u8 fis[20] = {0};
	u32 state;

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) {
		if (!(state & BIT(sas_phy->id)))
			continue;

		ata_for_each_link(link, ap, EDGE) {
			int pmp = sata_srst_pmp(link);

			tmf_task.phy_id = sas_phy->id;
			hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
			rc = hisi_sas_exec_internal_tmf_task(device, fis, s,
							     &tmf_task);
			if (rc != TMF_RESP_FUNC_COMPLETE) {
				dev_err(dev, "phy%d ata reset failed rc=%d\n",
					sas_phy->id, rc);
				break;
			}
		}
	}
}

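/*
 * Note (summary of the helper below): when the controller has been told to
 * reject STP links after reset, every registered device first gets an
 * internal abort, and each expander port that contains a SATA device then
 * gets a per-phy ATA reset via hisi_sas_send_ata_reset_each_phy().
 */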
static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int port_no, rc, i;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device)
			continue;

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_DEV, 0);
		if (rc < 0)
			dev_err(dev, "STP reject: abort dev failed %d\n", rc);
	}

	for (port_no = 0; port_no < hisi_hba->n_phy; port_no++) {
		struct hisi_sas_port *port = &hisi_hba->port[port_no];
		struct asd_sas_port *sas_port = &port->sas_port;
		struct domain_device *port_dev = sas_port->port_dev;
		struct domain_device *device;

		if (!port_dev || !dev_is_expander(port_dev->dev_type))
			continue;

		/* Try to find a SATA device */
		list_for_each_entry(device, &sas_port->dev_list,
				    dev_list_node) {
			if (dev_is_sata(device)) {
				hisi_sas_send_ata_reset_each_phy(hisi_hba,
								 sas_port,
								 device);
				break;
			}
		}
	}
}

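/*
 * Note: reset_prepare()/reset_done() are used as a pair around
 * hw->soft_reset(). The hisi_hba->sem semaphore taken in prepare and
 * released in done also serialises host reset against device removal in
 * hisi_sas_dev_gone(), so a half-torn-down domain_device is never
 * re-initialised by the reset path.
 */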
void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;

	down(&hisi_hba->sem);
	hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);

	scsi_block_requests(shost);
	hisi_hba->hw->wait_cmds_complete_timeout(hisi_hba, 100, 5000);

	if (timer_pending(&hisi_hba->timer))
		del_timer_sync(&hisi_hba->timer);

	set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare);

void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
{
	struct Scsi_Host *shost = hisi_hba->shost;
	u32 state;

	/* Init and wait for PHYs to come up and all libsas event finished. */
	hisi_hba->hw->phys_init(hisi_hba);
	msleep(1000);
	hisi_sas_refresh_port_id(hisi_hba);
	clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);

	if (hisi_hba->reject_stp_links_msk)
		hisi_sas_terminate_stp_reject(hisi_hba);
	hisi_sas_reset_init_all_devices(hisi_hba);
	up(&hisi_hba->sem);
	scsi_unblock_requests(shost);
	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);

	state = hisi_hba->hw->get_phys_state(hisi_hba);
	hisi_sas_rescan_topology(hisi_hba, state);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);

static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	struct Scsi_Host *shost = hisi_hba->shost;
	int rc;

	if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
		queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);

	if (!hisi_hba->hw->soft_reset)
		return -1;

	if (test_and_set_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
		return -1;

	dev_info(dev, "controller resetting...\n");
	hisi_sas_controller_reset_prepare(hisi_hba);

	rc = hisi_hba->hw->soft_reset(hisi_hba);
	if (rc) {
		dev_warn(dev, "controller reset failed (%d)\n", rc);
		clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
		up(&hisi_hba->sem);
		scsi_unblock_requests(shost);
		clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
		return rc;
	}

	hisi_sas_controller_reset_done(hisi_hba);
	dev_info(dev, "controller reset complete\n");

	return 0;
}

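/*
 * Note: in the error-handling paths below, a negative return from
 * hisi_sas_internal_task_abort() means the internal abort itself did not
 * complete (for example it timed out), which would normally indicate an
 * SoC failure; in that case the follow-up TMF/softreset steps are skipped
 * and failure is returned to the upper layer directly.
 */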
static int hisi_sas_abort_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba;
	struct device *dev;
	int rc = TMF_RESP_FUNC_FAILED;
	unsigned long flags;

	if (!sas_dev)
		return TMF_RESP_FUNC_FAILED;

	hisi_hba = dev_to_hisi_hba(task->dev);
	dev = hisi_hba->dev;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE) {
		struct hisi_sas_slot *slot = task->lldd_task;
		struct hisi_sas_cq *cq;

		if (slot) {
			/*
			 * sync irq to avoid free'ing task
			 * before using task in IO completion
			 */
			cq = &hisi_hba->cq[slot->dlvry_queue];
			synchronize_irq(cq->irq_no);
		}
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		rc = TMF_RESP_FUNC_COMPLETE;
		goto out;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct hisi_sas_slot *slot = task->lldd_task;
		u16 tag = slot->idx;
		int rc2;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_ABORT_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;

		rc = hisi_sas_debug_issue_ssp_tmf(task->dev, lun.scsi_lun,
						  &tmf_task);

		rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
						   HISI_SAS_INT_ABT_CMD, tag);
		if (rc2 < 0) {
			dev_err(dev, "abort task: internal abort (%d)\n", rc2);
			return TMF_RESP_FUNC_FAILED;
		}

		/*
		 * If the TMF finds that the IO is not in the device and also
		 * the internal abort does not succeed, then it is safe to
		 * free the slot.
		 * Note: if the internal abort succeeds then the slot
		 * will have already been completed
		 */
		if (rc == TMF_RESP_FUNC_COMPLETE && rc2 != TMF_RESP_FUNC_SUCC) {
			if (task->lldd_task)
				hisi_sas_do_release_task(hisi_hba, task, slot);
		}
	} else if (task->task_proto & SAS_PROTOCOL_SATA ||
		task->task_proto & SAS_PROTOCOL_STP) {
		if (task->dev->dev_type == SAS_SATA_DEV) {
			rc = hisi_sas_internal_task_abort(hisi_hba, device,
							  HISI_SAS_INT_ABT_DEV,
							  0);
			if (rc < 0) {
				dev_err(dev, "abort task: internal abort failed\n");
				goto out;
			}
			hisi_sas_dereg_device(hisi_hba, device);
			rc = hisi_sas_softreset_ata_disk(device);
		}
	} else if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SMP) {
		/* SMP */
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;
		struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];

		rc = hisi_sas_internal_task_abort(hisi_hba, device,
						  HISI_SAS_INT_ABT_CMD, tag);
		if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
					task->lldd_task) {
			/*
			 * sync irq to avoid free'ing task
			 * before using task in IO completion
			 */
			synchronize_irq(cq->irq_no);
			slot->task = NULL;
		}
	}

out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_notice(dev, "abort task: rc=%d\n", rc);
	return rc;
}

static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_tmf_task tmf_task;
	int rc;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	tmf_task.tmf = TMF_ABORT_TASK_SET;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	if (rc == TMF_RESP_FUNC_COMPLETE)
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_tmf_task tmf_task;
	int rc;

	tmf_task.tmf = TMF_CLEAR_ACA;
	rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);

	return rc;
}

static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
{
	struct sas_phy *local_phy = sas_get_local_phy(device);
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct sas_ha_struct *sas_ha = &hisi_hba->sha;
	DECLARE_COMPLETION_ONSTACK(phyreset);
	int rc, reset_type;

	if (!local_phy->enabled) {
		sas_put_local_phy(local_phy);
		return -ENODEV;
	}

	if (scsi_is_sas_phy_local(local_phy)) {
		struct asd_sas_phy *sas_phy =
			sas_ha->sas_phy[local_phy->number];
		struct hisi_sas_phy *phy =
			container_of(sas_phy, struct hisi_sas_phy, sas_phy);
		phy->in_reset = 1;
		phy->reset_completion = &phyreset;
	}

	reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT ||
		      !dev_is_sata(device)) ? true : false;

	rc = sas_phy_reset(local_phy, reset_type);
	sas_put_local_phy(local_phy);

	if (scsi_is_sas_phy_local(local_phy)) {
		struct asd_sas_phy *sas_phy =
			sas_ha->sas_phy[local_phy->number];
		struct hisi_sas_phy *phy =
			container_of(sas_phy, struct hisi_sas_phy, sas_phy);
		int ret = wait_for_completion_timeout(&phyreset, 2 * HZ);
		unsigned long flags;

		spin_lock_irqsave(&phy->lock, flags);
		phy->reset_completion = NULL;
		phy->in_reset = 0;
		spin_unlock_irqrestore(&phy->lock, flags);

		/* report PHY down if timed out */
		if (!ret)
			hisi_sas_phy_down(hisi_hba, sas_phy->id, 0);
	} else if (sas_dev->dev_status != HISI_SAS_DEV_INIT) {
		/*
		 * If in init state, we rely on caller to wait for link to be
		 * ready; otherwise, unless the phy reset failed, delay.
		 */
		if (!rc)
			msleep(2000);
	}

	return rc;
}

static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
{
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc;

	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
		return TMF_RESP_FUNC_FAILED;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	if (dev_is_sata(device)) {
		rc = hisi_sas_softreset_ata_disk(device);
		if (rc == TMF_RESP_FUNC_FAILED)
			return TMF_RESP_FUNC_FAILED;
	}

	rc = hisi_sas_debug_I_T_nexus_reset(device);

	if ((rc == TMF_RESP_FUNC_COMPLETE) || (rc == -ENODEV))
		hisi_sas_release_task(hisi_hba, device);

	return rc;
}

static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
{
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
	struct device *dev = hisi_hba->dev;
	int rc = TMF_RESP_FUNC_FAILED;

	/* Clear internal IO and then lu reset */
	rc = hisi_sas_internal_task_abort(hisi_hba, device,
					  HISI_SAS_INT_ABT_DEV, 0);
	if (rc < 0) {
		dev_err(dev, "lu_reset: internal abort failed\n");
		goto out;
	}
	hisi_sas_dereg_device(hisi_hba, device);

	if (dev_is_sata(device)) {
		struct sas_phy *phy;

		phy = sas_get_local_phy(device);

		rc = sas_phy_reset(phy, true);

		if (rc == 0)
			hisi_sas_release_task(hisi_hba, device);
		sas_put_local_phy(phy);
	} else {
		struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET };

		rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task);
		if (rc == TMF_RESP_FUNC_COMPLETE)
			hisi_sas_release_task(hisi_hba, device);
	}
out:
	if (rc != TMF_RESP_FUNC_COMPLETE)
		dev_err(dev, "lu_reset: for device[%d]:rc= %d\n",
			sas_dev->device_id, rc);
	return rc;
}

static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
	struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
	struct device *dev = hisi_hba->dev;
	HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
	int rc, i;

	queue_work(hisi_hba->wq, &r.work);
	wait_for_completion(r.completion);
	if (!r.done)
		return TMF_RESP_FUNC_FAILED;

	for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
		struct hisi_sas_device *sas_dev = &hisi_hba->devices[i];
		struct domain_device *device = sas_dev->sas_device;

		if ((sas_dev->dev_type == SAS_PHY_UNUSED) || !device ||
		    dev_is_expander(device->dev_type))
			continue;

		rc = hisi_sas_debug_I_T_nexus_reset(device);
		if (rc != TMF_RESP_FUNC_COMPLETE)
			dev_info(dev, "clear nexus ha: for device[%d] rc=%d\n",
				 sas_dev->device_id, rc);
	}

	hisi_sas_release_tasks(hisi_hba);

	return TMF_RESP_FUNC_COMPLETE;
}

static int hisi_sas_query_task(struct sas_task *task)
{
	struct scsi_lun lun;
	struct hisi_sas_tmf_task tmf_task;
	int rc = TMF_RESP_FUNC_FAILED;

	if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
		struct scsi_cmnd *cmnd = task->uldd_task;
		struct domain_device *device = task->dev;
		struct hisi_sas_slot *slot = task->lldd_task;
		u32 tag = slot->idx;

		int_to_scsilun(cmnd->device->lun, &lun);
		tmf_task.tmf = TMF_QUERY_TASK;
		tmf_task.tag_of_task_to_be_managed = tag;

		rc = hisi_sas_debug_issue_ssp_tmf(device,
						  lun.scsi_lun,
						  &tmf_task);
		switch (rc) {
		/* The task is still in Lun, release it then */
		case TMF_RESP_FUNC_SUCC:
		/* The task is not in Lun or failed, reset the phy */
		case TMF_RESP_FUNC_FAILED:
		case TMF_RESP_FUNC_COMPLETE:
			break;
		default:
			rc = TMF_RESP_FUNC_FAILED;
			break;
		}
	}
	return rc;
}

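/*
 * Note: because normal commands for a device may be delivered on any queue
 * (queues are mapped to CPUs), the abort path issues a separate internal
 * abort per relevant delivery queue, or forces the abort onto the same
 * queue as the command being aborted, so that at least one abort is
 * processed after the command(s) it targets.
 */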
static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
				  struct sas_task *task, int abort_flag,
				  int task_tag, struct hisi_sas_dq *dq)
{
	struct domain_device *device = task->dev;
	struct hisi_sas_device *sas_dev = device->lldd_dev;
	struct device *dev = hisi_hba->dev;
	struct hisi_sas_port *port;
	struct hisi_sas_slot *slot;
	struct asd_sas_port *sas_port = device->port;
	struct hisi_sas_cmd_hdr *cmd_hdr_base;
	int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
	unsigned long flags;
	int wr_q_index;

	if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
		return -EINVAL;

	if (!device->port)
		return -1;

	port = to_hisi_sas_port(sas_port);

	/* simply get a slot and send abort command */
	rc = hisi_sas_slot_index_alloc(hisi_hba, NULL);
	if (rc < 0)
		goto err_out;

	slot_idx = rc;
	slot = &hisi_hba->slot_info[slot_idx];

	spin_lock(&dq->lock);
	wr_q_index = dq->wr_point;
	dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
	list_add_tail(&slot->delivery, &dq->list);
	spin_unlock(&dq->lock);
	spin_lock(&sas_dev->lock);
	list_add_tail(&slot->entry, &sas_dev->list);
	spin_unlock(&sas_dev->lock);

	dlvry_queue = dq->id;
	dlvry_queue_slot = wr_q_index;

	slot->device_id = sas_dev->device_id;
	slot->n_elem = n_elem;
	slot->dlvry_queue = dlvry_queue;
	slot->dlvry_queue_slot = dlvry_queue_slot;
	cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
	slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
	slot->task = task;
	slot->port = port;
	slot->is_internal = true;
	task->lldd_task = slot;

	memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
	memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
	memset(hisi_sas_status_buf_addr_mem(slot), 0,
	       sizeof(struct hisi_sas_err_record));

	hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
				 abort_flag, task_tag);

	spin_lock_irqsave(&task->task_state_lock, flags);
	task->task_state_flags |= SAS_TASK_AT_INITIATOR;
	spin_unlock_irqrestore(&task->task_state_lock, flags);
	WRITE_ONCE(slot->ready, 1);
	/* send abort command to the chip */
	spin_lock(&dq->lock);
	hisi_hba->hw->start_delivery(dq);
	spin_unlock(&dq->lock);

	return 0;

err_out:
	dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);

	return rc;
}

/**
|
scsi: hisi_sas: Issue internal abort on all relevant queues
To support queue mapped to a CPU, it needs to be ensured that issuing an
internal abort is safe, in that it is guaranteed that an internal abort is
processed for a single IO or a device after all the relevant command(s)
which it is attempting to abort have been processed by the controller.
Currently we only deliver commands for any device on a single queue to
solve this problem, as we know that commands issued on the same queue will
be processed in order, and we will not have a scenario where the internal
abort is racing against a command(s) which it is trying to abort.
To enqueue commands on queue mapped to a CPU, choosing a queue for an
command is based on the associated queue for the current CPU, so this is
not safe for internal abort since it would definitely not be guaranteed
that commands for the command devices are issued on the same queue.
To solve this issue, we take a bludgeoning approach, and issue a separate
internal abort on any queue(s) relevant to the command or device, in that
we will be guaranteed that at least one of these internal aborts will be
received last in the controller.
So, for aborting a single command, we can just force the internal abort to
be issued on the same queue as the command which we are trying to abort.
For aborting all commands associated with a device, we issue a separate
internal abort on all relevant queues. Issuing multiple internal aborts in
this fashion would have not side affect.
Signed-off-by: John Garry <john.garry@huawei.com>
Signed-off-by: Xiang Chen <chenxiang66@hisilicon.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
2019-02-06 17:52:54 +07:00
|
|
|
 * _hisi_sas_internal_task_abort -- execute an internal
 * abort command for a single IO command or a device
 * @hisi_hba: host controller struct
 * @device: domain device
 * @abort_flag: mode of operation, device or single IO
 * @tag: tag of IO to be aborted (only relevant to single
 *       IO mode)
 * @dq: delivery queue for this internal abort command
 */
static int
_hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
                              struct domain_device *device, int abort_flag,
                              int tag, struct hisi_sas_dq *dq)
{
        struct sas_task *task;
        struct hisi_sas_device *sas_dev = device->lldd_dev;
        struct device *dev = hisi_hba->dev;
        int res;

        /*
         * If hw->prep_abort is not implemented, this HW doesn't support
         * internal abort, or doesn't need to do one. In that case return
         * TMF_RESP_FUNC_FAILED and let the other recovery steps go on,
         * which assumes that the internal abort has been executed and has
         * returned a CQ entry.
         */
        if (!hisi_hba->hw->prep_abort)
                return TMF_RESP_FUNC_FAILED;
        task = sas_alloc_slow_task(GFP_KERNEL);
        if (!task)
                return -ENOMEM;

        task->dev = device;
        task->task_proto = device->tproto;
        task->task_done = hisi_sas_task_done;
        task->slow_task->timer.function = hisi_sas_tmf_timedout;
        task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT * HZ;
        add_timer(&task->slow_task->timer);

        res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
                                                task, abort_flag, tag, dq);
        if (res) {
                del_timer(&task->slow_task->timer);
                dev_err(dev, "internal task abort: executing internal task failed: %d\n",
                        res);
                goto exit;
        }
        wait_for_completion(&task->slow_task->completion);
        res = TMF_RESP_FUNC_FAILED;

        /* Internal abort timed out */
        if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
                if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
                        queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);

                if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
                        struct hisi_sas_slot *slot = task->lldd_task;

                        if (slot) {
                                struct hisi_sas_cq *cq =
                                        &hisi_hba->cq[slot->dlvry_queue];
                                /*
                                 * sync irq to avoid free'ing task
                                 * before using task in IO completion
                                 */
                                synchronize_irq(cq->irq_no);
                                slot->task = NULL;
                        }
                        dev_err(dev, "internal task abort: timeout and not done.\n");
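                        /*
                         * An internal abort that times out points to an SoC
                         * failure; the HW will not process further commands
                         * until it completes, so report -EIO and let the
                         * caller skip any following TMF or softreset.
                         */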
                        res = -EIO;
                        goto exit;
                } else
                        dev_err(dev, "internal task abort: timeout.\n");
        }

        if (task->task_status.resp == SAS_TASK_COMPLETE &&
            task->task_status.stat == TMF_RESP_FUNC_COMPLETE) {
                res = TMF_RESP_FUNC_COMPLETE;
                goto exit;
        }

        if (task->task_status.resp == SAS_TASK_COMPLETE &&
            task->task_status.stat == TMF_RESP_FUNC_SUCC) {
                res = TMF_RESP_FUNC_SUCC;
                goto exit;
        }

exit:
        dev_dbg(dev, "internal task abort: task to dev %016llx task=%pK resp: 0x%x sts 0x%x\n",
                SAS_ADDR(device->sas_addr), task,
                task->task_status.resp, /* 0 is complete, -1 is undelivered */
                task->task_status.stat);
        sas_free_task(task);

        return res;
}

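/*
 * Issue the internal abort on every delivery queue that may still hold
 * commands for the target: for a single command the abort is forced onto
 * the same queue as that command, and for a device a separate abort is
 * issued on each relevant queue, so that at least one abort is guaranteed
 * to reach the controller after the command(s) it is trying to abort.
 */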
static int
hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
                             struct domain_device *device,
                             int abort_flag, int tag)
{
        struct hisi_sas_slot *slot;
        struct device *dev = hisi_hba->dev;
        struct hisi_sas_dq *dq;
        int i, rc;

        switch (abort_flag) {
        case HISI_SAS_INT_ABT_CMD:
                slot = &hisi_hba->slot_info[tag];
                dq = &hisi_hba->dq[slot->dlvry_queue];
                return _hisi_sas_internal_task_abort(hisi_hba, device,
                                                     abort_flag, tag, dq);
        case HISI_SAS_INT_ABT_DEV:
                for (i = 0; i < hisi_hba->cq_nvecs; i++) {
                        struct hisi_sas_cq *cq = &hisi_hba->cq[i];
                        const struct cpumask *mask = cq->irq_mask;

                        if (mask && !cpumask_intersects(cpu_online_mask, mask))
                                continue;
                        dq = &hisi_hba->dq[i];
                        rc = _hisi_sas_internal_task_abort(hisi_hba, device,
                                                           abort_flag, tag,
                                                           dq);
                        if (rc)
                                return rc;
                }
                break;
        default:
                dev_err(dev, "Unrecognised internal abort flag (%d)\n",
                        abort_flag);
                return -EINVAL;
        }

        return 0;
}

static void hisi_sas_port_formed(struct asd_sas_phy *sas_phy)
{
        hisi_sas_port_notify_formed(sas_phy);
}

static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
                               u8 reg_index, u8 reg_count, u8 *write_data)
{
        struct hisi_hba *hisi_hba = sha->lldd_ha;

        if (!hisi_hba->hw->write_gpio)
                return -EOPNOTSUPP;

        return hisi_hba->hw->write_gpio(hisi_hba, reg_type,
                                        reg_index, reg_count, write_data);
}

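/*
 * Record the PHY as detached. phy->enable (sampled under phy->lock) tells
 * us whether the PHY went down because of a disable request rather than a
 * loss of link, so the negotiated linkrate can be reported as disabled
 * instead of unknown in that case.
 */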
static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
{
        struct asd_sas_phy *sas_phy = &phy->sas_phy;
        struct sas_phy *sphy = sas_phy->phy;
        unsigned long flags;

        phy->phy_attached = 0;
        phy->phy_type = 0;
        phy->port = NULL;

        spin_lock_irqsave(&phy->lock, flags);
        if (phy->enable)
                sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
        else
                sphy->negotiated_linkrate = SAS_PHY_DISABLED;
        spin_unlock_irqrestore(&phy->lock, flags);
}

void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
{
        struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
        struct asd_sas_phy *sas_phy = &phy->sas_phy;
        struct sas_ha_struct *sas_ha = &hisi_hba->sha;
        struct device *dev = hisi_hba->dev;

        if (rdy) {
                /* Phy down but ready */
                hisi_sas_bytes_dmaed(hisi_hba, phy_no);
                hisi_sas_port_notify_formed(sas_phy);
        } else {
                struct hisi_sas_port *port = phy->port;

                if (test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags) ||
                    phy->in_reset) {
                        dev_info(dev, "ignore flutter phy%d down\n", phy_no);
                        return;
                }
                /* Phy down and not ready */
                sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
                sas_phy_disconnected(sas_phy);

                if (port) {
                        if (phy->phy_type & PORT_TYPE_SAS) {
                                int port_id = port->id;

                                if (!hisi_hba->hw->get_wideport_bitmap(hisi_hba,
                                                                       port_id))
                                        port->port_attached = 0;
                        } else if (phy->phy_type & PORT_TYPE_SATA)
                                port->port_attached = 0;
                }
                hisi_sas_phy_disconnected(phy);
        }
}
EXPORT_SYMBOL_GPL(hisi_sas_phy_down);

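/*
 * Wait for all in-flight completion-queue interrupt handlers to finish,
 * so that slots and tasks they reference are not freed or reused
 * underneath them.
 */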
void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba)
{
        int i;

        for (i = 0; i < hisi_hba->cq_nvecs; i++) {
                struct hisi_sas_cq *cq = &hisi_hba->cq[i];

                synchronize_irq(cq->irq_no);
        }
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_irqs);

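/*
 * SCSI host reset handler: only an adapter-level reset is supported, and
 * the actual controller reset is deferred to the driver's reset work item.
 */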
int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type)
{
        struct hisi_hba *hisi_hba = shost_priv(shost);

        if (reset_type != SCSI_ADAPTER_RESET)
                return -EOPNOTSUPP;

        queue_work(hisi_hba->wq, &hisi_hba->rst_work);

        return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_host_reset);

struct scsi_transport_template *hisi_sas_stt;
EXPORT_SYMBOL_GPL(hisi_sas_stt);

static struct sas_domain_function_template hisi_sas_transport_ops = {
        .lldd_dev_found = hisi_sas_dev_found,
        .lldd_dev_gone = hisi_sas_dev_gone,
        .lldd_execute_task = hisi_sas_queue_command,
        .lldd_control_phy = hisi_sas_control_phy,
        .lldd_abort_task = hisi_sas_abort_task,
        .lldd_abort_task_set = hisi_sas_abort_task_set,
        .lldd_clear_aca = hisi_sas_clear_aca,
        .lldd_I_T_nexus_reset = hisi_sas_I_T_nexus_reset,
        .lldd_lu_reset = hisi_sas_lu_reset,
        .lldd_query_task = hisi_sas_query_task,
        .lldd_clear_nexus_ha = hisi_sas_clear_nexus_ha,
        .lldd_port_formed = hisi_sas_port_formed,
        .lldd_write_gpio = hisi_sas_write_gpio,
};

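/*
 * Re-zero the delivery/completion queue memory and the IOST, breakpoint
 * and initial FIS tables, and reset the queue read/write pointers.
 */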
void hisi_sas_init_mem(struct hisi_hba *hisi_hba)
{
        int i, s, j, max_command_entries = HISI_SAS_MAX_COMMANDS;
        struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint;

        for (i = 0; i < hisi_hba->queue_count; i++) {
                struct hisi_sas_cq *cq = &hisi_hba->cq[i];
                struct hisi_sas_dq *dq = &hisi_hba->dq[i];
                struct hisi_sas_cmd_hdr *cmd_hdr = hisi_hba->cmd_hdr[i];

                s = sizeof(struct hisi_sas_cmd_hdr);
                for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
                        memset(&cmd_hdr[j], 0, s);

                dq->wr_point = 0;

                s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
                memset(hisi_hba->complete_hdr[i], 0, s);
                cq->rd_point = 0;
        }

        s = sizeof(struct hisi_sas_initial_fis) * hisi_hba->n_phy;
        memset(hisi_hba->initial_fis, 0, s);

        s = max_command_entries * sizeof(struct hisi_sas_iost);
        memset(hisi_hba->iost, 0, s);

        s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
        memset(hisi_hba->breakpoint, 0, s);

        s = sizeof(struct hisi_sas_sata_breakpoint);
        for (j = 0; j < HISI_SAS_MAX_ITCT_ENTRIES; j++)
                memset(&sata_breakpoint[j], 0, s);
}
EXPORT_SYMBOL_GPL(hisi_sas_init_mem);

int hisi_sas_alloc(struct hisi_hba *hisi_hba)
{
        struct device *dev = hisi_hba->dev;
        int i, j, s, max_command_entries = HISI_SAS_MAX_COMMANDS;
        int max_command_entries_ru, sz_slot_buf_ru;
        int blk_cnt, slots_per_blk;

        sema_init(&hisi_hba->sem, 1);
        spin_lock_init(&hisi_hba->lock);
        for (i = 0; i < hisi_hba->n_phy; i++) {
                hisi_sas_phy_init(hisi_hba, i);
                hisi_hba->port[i].port_attached = 0;
                hisi_hba->port[i].id = -1;
        }

        for (i = 0; i < HISI_SAS_MAX_DEVICES; i++) {
                hisi_hba->devices[i].dev_type = SAS_PHY_UNUSED;
                hisi_hba->devices[i].device_id = i;
                hisi_hba->devices[i].dev_status = HISI_SAS_DEV_INIT;
        }

        for (i = 0; i < hisi_hba->queue_count; i++) {
                struct hisi_sas_cq *cq = &hisi_hba->cq[i];
                struct hisi_sas_dq *dq = &hisi_hba->dq[i];

                /* Completion queue structure */
                cq->id = i;
                cq->hisi_hba = hisi_hba;

                /* Delivery queue structure */
                spin_lock_init(&dq->lock);
                INIT_LIST_HEAD(&dq->list);
                dq->id = i;
                dq->hisi_hba = hisi_hba;

                /* Delivery queue */
                s = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
                hisi_hba->cmd_hdr[i] = dmam_alloc_coherent(dev, s,
                                                &hisi_hba->cmd_hdr_dma[i],
                                                GFP_KERNEL);
                if (!hisi_hba->cmd_hdr[i])
                        goto err_out;

                /* Completion queue */
                s = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
                hisi_hba->complete_hdr[i] = dmam_alloc_coherent(dev, s,
                                                &hisi_hba->complete_hdr_dma[i],
                                                GFP_KERNEL);
                if (!hisi_hba->complete_hdr[i])
                        goto err_out;
        }

        s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);
        hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma,
                                             GFP_KERNEL);
        if (!hisi_hba->itct)
                goto err_out;

        hisi_hba->slot_info = devm_kcalloc(dev, max_command_entries,
                                           sizeof(struct hisi_sas_slot),
                                           GFP_KERNEL);
        if (!hisi_hba->slot_info)
                goto err_out;

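        /*
         * Slot DMA buffers are pre-allocated for all IPTT in larger blocks.
         * The block size is the LCM of the rounded-up slot count and the
         * per-slot buffer size (but at least PAGE_SIZE), which avoids one
         * huge contiguous allocation while wasting little memory per block.
         */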
        /* roundup to avoid overly large block size */
        max_command_entries_ru = roundup(max_command_entries, 64);
        if (hisi_hba->prot_mask & HISI_SAS_DIX_PROT_MASK)
                sz_slot_buf_ru = sizeof(struct hisi_sas_slot_dif_buf_table);
        else
                sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table);
        sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64);
        s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE);
        blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s;
        slots_per_blk = s / sz_slot_buf_ru;

        for (i = 0; i < blk_cnt; i++) {
                int slot_index = i * slots_per_blk;
                dma_addr_t buf_dma;
                void *buf;

                buf = dmam_alloc_coherent(dev, s, &buf_dma,
                                          GFP_KERNEL);
                if (!buf)
                        goto err_out;

                for (j = 0; j < slots_per_blk; j++, slot_index++) {
                        struct hisi_sas_slot *slot;

                        slot = &hisi_hba->slot_info[slot_index];
                        slot->buf = buf;
                        slot->buf_dma = buf_dma;
                        slot->idx = slot_index;

                        buf += sz_slot_buf_ru;
                        buf_dma += sz_slot_buf_ru;
                }
        }

        s = max_command_entries * sizeof(struct hisi_sas_iost);
        hisi_hba->iost = dmam_alloc_coherent(dev, s, &hisi_hba->iost_dma,
                                             GFP_KERNEL);
        if (!hisi_hba->iost)
                goto err_out;

        s = max_command_entries * sizeof(struct hisi_sas_breakpoint);
        hisi_hba->breakpoint = dmam_alloc_coherent(dev, s,
                                                   &hisi_hba->breakpoint_dma,
                                                   GFP_KERNEL);
        if (!hisi_hba->breakpoint)
                goto err_out;

        hisi_hba->slot_index_count = max_command_entries;
        s = hisi_hba->slot_index_count / BITS_PER_BYTE;
        hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
        if (!hisi_hba->slot_index_tags)
                goto err_out;

        s = sizeof(struct hisi_sas_initial_fis) * HISI_SAS_MAX_PHYS;
        hisi_hba->initial_fis = dmam_alloc_coherent(dev, s,
                                                    &hisi_hba->initial_fis_dma,
                                                    GFP_KERNEL);
        if (!hisi_hba->initial_fis)
                goto err_out;

        s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_sata_breakpoint);
        hisi_hba->sata_breakpoint = dmam_alloc_coherent(dev, s,
                                                &hisi_hba->sata_breakpoint_dma,
                                                GFP_KERNEL);
        if (!hisi_hba->sata_breakpoint)
                goto err_out;

        hisi_sas_slot_index_init(hisi_hba);
        hisi_hba->last_slot_index = HISI_SAS_UNRESERVED_IPTT;

        hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
        if (!hisi_hba->wq) {
                dev_err(dev, "sas_alloc: failed to create workqueue\n");
                goto err_out;
        }

        return 0;
err_out:
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(hisi_sas_alloc);

void hisi_sas_free(struct hisi_hba *hisi_hba)
{
        int i;

        for (i = 0; i < hisi_hba->n_phy; i++) {
                struct hisi_sas_phy *phy = &hisi_hba->phy[i];

                del_timer_sync(&phy->timer);
        }

        if (hisi_hba->wq)
                destroy_workqueue(hisi_hba->wq);
}
EXPORT_SYMBOL_GPL(hisi_sas_free);

void hisi_sas_rst_work_handler(struct work_struct *work)
{
        struct hisi_hba *hisi_hba =
                container_of(work, struct hisi_hba, rst_work);

        hisi_sas_controller_reset(hisi_hba);
}
EXPORT_SYMBOL_GPL(hisi_sas_rst_work_handler);

void hisi_sas_sync_rst_work_handler(struct work_struct *work)
{
        struct hisi_sas_rst *rst =
                container_of(work, struct hisi_sas_rst, work);

        if (!hisi_sas_controller_reset(rst->hisi_hba))
                rst->done = true;
        complete(rst->completion);
}
EXPORT_SYMBOL_GPL(hisi_sas_sync_rst_work_handler);

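/*
 * Read the controller description (SAS address, PHY count, queue count,
 * reset/clock control registers, reference clock) from the DT or ACPI
 * firmware properties into the hisi_hba structure.
 */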
int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba)
{
        struct device *dev = hisi_hba->dev;
        struct platform_device *pdev = hisi_hba->platform_dev;
        struct device_node *np = pdev ? pdev->dev.of_node : NULL;
        struct clk *refclk;

        if (device_property_read_u8_array(dev, "sas-addr", hisi_hba->sas_addr,
                                          SAS_ADDR_SIZE)) {
                dev_err(dev, "could not get property sas-addr\n");
                return -ENOENT;
        }

        if (np) {
                /*
                 * These properties are only required for platform device-based
                 * controller with DT firmware.
                 */
                hisi_hba->ctrl = syscon_regmap_lookup_by_phandle(np,
                                        "hisilicon,sas-syscon");
                if (IS_ERR(hisi_hba->ctrl)) {
                        dev_err(dev, "could not get syscon\n");
                        return -ENOENT;
                }

                if (device_property_read_u32(dev, "ctrl-reset-reg",
                                             &hisi_hba->ctrl_reset_reg)) {
                        dev_err(dev, "could not get property ctrl-reset-reg\n");
                        return -ENOENT;
                }

                if (device_property_read_u32(dev, "ctrl-reset-sts-reg",
                                             &hisi_hba->ctrl_reset_sts_reg)) {
                        dev_err(dev, "could not get property ctrl-reset-sts-reg\n");
                        return -ENOENT;
                }

                if (device_property_read_u32(dev, "ctrl-clock-ena-reg",
                                             &hisi_hba->ctrl_clock_ena_reg)) {
                        dev_err(dev, "could not get property ctrl-clock-ena-reg\n");
                        return -ENOENT;
                }
        }

        refclk = devm_clk_get(dev, NULL);
        if (IS_ERR(refclk))
                dev_dbg(dev, "no ref clk property\n");
        else
                hisi_hba->refclk_frequency_mhz = clk_get_rate(refclk) / 1000000;

        if (device_property_read_u32(dev, "phy-count", &hisi_hba->n_phy)) {
                dev_err(dev, "could not get property phy-count\n");
                return -ENOENT;
        }

        if (device_property_read_u32(dev, "queue-count",
                                     &hisi_hba->queue_count)) {
                dev_err(dev, "could not get property queue-count\n");
                return -ENOENT;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_get_fw_info);

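/*
 * Allocate and minimally initialise the Scsi_Host and hisi_hba for a
 * platform device: read the firmware properties, set the DMA mask, map
 * the register (and optional SGPIO) resources and allocate the driver
 * memories. On any failure the host is put and NULL is returned.
 */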
static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
                                              const struct hisi_sas_hw *hw)
{
        struct resource *res;
        struct Scsi_Host *shost;
        struct hisi_hba *hisi_hba;
        struct device *dev = &pdev->dev;
        int error;

        shost = scsi_host_alloc(hw->sht, sizeof(*hisi_hba));
        if (!shost) {
                dev_err(dev, "scsi host alloc failed\n");
                return NULL;
        }
        hisi_hba = shost_priv(shost);

        INIT_WORK(&hisi_hba->rst_work, hisi_sas_rst_work_handler);
        hisi_hba->hw = hw;
        hisi_hba->dev = dev;
        hisi_hba->platform_dev = pdev;
        hisi_hba->shost = shost;
        SHOST_TO_SAS_HA(shost) = &hisi_hba->sha;

        timer_setup(&hisi_hba->timer, NULL, 0);

        if (hisi_sas_get_fw_info(hisi_hba) < 0)
                goto err_out;

        error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
        if (error)
                error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

        if (error) {
                dev_err(dev, "No usable DMA addressing method\n");
                goto err_out;
        }

        hisi_hba->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(hisi_hba->regs))
                goto err_out;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        if (res) {
                hisi_hba->sgpio_regs = devm_ioremap_resource(dev, res);
                if (IS_ERR(hisi_hba->sgpio_regs))
                        goto err_out;
        }

        if (hisi_sas_alloc(hisi_hba)) {
                hisi_sas_free(hisi_hba);
                goto err_out;
        }

        return shost;
err_out:
        scsi_host_put(shost);
        dev_err(dev, "shost alloc failed\n");
        return NULL;
}

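/*
 * Common probe path for platform-device based controllers: allocate the
 * host, wire up the libsas ha structure and transport template, register
 * with the SCSI midlayer and libsas, then run the hw-specific init and
 * kick off device scanning.
 */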
int hisi_sas_probe(struct platform_device *pdev,
		   const struct hisi_sas_hw *hw)
{
	struct Scsi_Host *shost;
	struct hisi_hba *hisi_hba;
	struct device *dev = &pdev->dev;
	struct asd_sas_phy **arr_phy;
	struct asd_sas_port **arr_port;
	struct sas_ha_struct *sha;
	int rc, phy_nr, port_nr, i;

	shost = hisi_sas_shost_alloc(pdev, hw);
	if (!shost)
		return -ENOMEM;

	sha = SHOST_TO_SAS_HA(shost);
	hisi_hba = shost_priv(shost);
	platform_set_drvdata(pdev, sha);

	phy_nr = port_nr = hisi_hba->n_phy;

	arr_phy = devm_kcalloc(dev, phy_nr, sizeof(void *), GFP_KERNEL);
	arr_port = devm_kcalloc(dev, port_nr, sizeof(void *), GFP_KERNEL);
	if (!arr_phy || !arr_port) {
		rc = -ENOMEM;
		goto err_out_ha;
	}

	sha->sas_phy = arr_phy;
	sha->sas_port = arr_port;
	sha->lldd_ha = hisi_hba;

	shost->transportt = hisi_sas_stt;
	shost->max_id = HISI_SAS_MAX_DEVICES;
	shost->max_lun = ~0;
	shost->max_channel = 1;
	shost->max_cmd_len = 16;
	if (hisi_hba->hw->slot_index_alloc) {
		shost->can_queue = HISI_SAS_MAX_COMMANDS;
		shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS;
	} else {
		shost->can_queue = HISI_SAS_UNRESERVED_IPTT;
		shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;
	}

	sha->sas_ha_name = DRV_NAME;
	sha->dev = hisi_hba->dev;
	sha->lldd_module = THIS_MODULE;
	sha->sas_addr = &hisi_hba->sas_addr[0];
	sha->num_phys = hisi_hba->n_phy;
	sha->core.shost = hisi_hba->shost;

	for (i = 0; i < hisi_hba->n_phy; i++) {
		sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy;
		sha->sas_port[i] = &hisi_hba->port[i].sas_port;
	}

	rc = scsi_add_host(shost, &pdev->dev);
	if (rc)
		goto err_out_ha;

	rc = sas_register_ha(sha);
	if (rc)
		goto err_out_register_ha;

	rc = hisi_hba->hw->hw_init(hisi_hba);
	if (rc)
		goto err_out_register_ha;

	scsi_scan_host(shost);

	return 0;

err_out_register_ha:
	scsi_remove_host(shost);
err_out_ha:
	hisi_sas_debugfs_exit(hisi_hba);
	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return rc;
}
EXPORT_SYMBOL_GPL(hisi_sas_probe);

struct dentry *hisi_sas_debugfs_dir;

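/*
 * The hisi_sas_debugfs_snapshot_*() helpers below copy live controller state
 * (completion/delivery queue entries, register banks, and the ITCT/IOST
 * tables plus their caches) into the pre-allocated dump buffers selected by
 * hisi_hba->debugfs_dump_index.
 */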
static void hisi_sas_debugfs_snapshot_cq_reg(struct hisi_hba *hisi_hba)
{
	int queue_entry_size = hisi_hba->hw->complete_hdr_size;
	int dump_index = hisi_hba->debugfs_dump_index;
	int i;

	for (i = 0; i < hisi_hba->queue_count; i++)
		memcpy(hisi_hba->debugfs_cq[dump_index][i].complete_hdr,
		       hisi_hba->complete_hdr[i],
		       HISI_SAS_QUEUE_SLOTS * queue_entry_size);
}

static void hisi_sas_debugfs_snapshot_dq_reg(struct hisi_hba *hisi_hba)
{
	int queue_entry_size = sizeof(struct hisi_sas_cmd_hdr);
	int dump_index = hisi_hba->debugfs_dump_index;
	int i;

	for (i = 0; i < hisi_hba->queue_count; i++) {
		struct hisi_sas_cmd_hdr *debugfs_cmd_hdr, *cmd_hdr;
		int j;

		debugfs_cmd_hdr = hisi_hba->debugfs_dq[dump_index][i].hdr;
		cmd_hdr = hisi_hba->cmd_hdr[i];

		for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++)
			memcpy(&debugfs_cmd_hdr[j], &cmd_hdr[j],
			       queue_entry_size);
	}
}

static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba)
{
	int dump_index = hisi_hba->debugfs_dump_index;
	const struct hisi_sas_debugfs_reg *port =
		hisi_hba->hw->debugfs_reg_port;
	int i, phy_cnt;
	u32 offset;
	u32 *databuf;

	for (phy_cnt = 0; phy_cnt < hisi_hba->n_phy; phy_cnt++) {
		databuf = hisi_hba->debugfs_port_reg[dump_index][phy_cnt].data;
		for (i = 0; i < port->count; i++, databuf++) {
			offset = port->base_off + 4 * i;
			*databuf = port->read_port_reg(hisi_hba, phy_cnt,
						       offset);
		}
	}
}

static void hisi_sas_debugfs_snapshot_global_reg(struct hisi_hba *hisi_hba)
{
	int dump_index = hisi_hba->debugfs_dump_index;
	u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL].data;
	const struct hisi_sas_hw *hw = hisi_hba->hw;
	const struct hisi_sas_debugfs_reg *global =
			hw->debugfs_reg_array[DEBUGFS_GLOBAL];
	int i;

	for (i = 0; i < global->count; i++, databuf++)
		*databuf = global->read_global_reg(hisi_hba, 4 * i);
}

static void hisi_sas_debugfs_snapshot_axi_reg(struct hisi_hba *hisi_hba)
{
	int dump_index = hisi_hba->debugfs_dump_index;
	u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI].data;
	const struct hisi_sas_hw *hw = hisi_hba->hw;
	const struct hisi_sas_debugfs_reg *axi =
			hw->debugfs_reg_array[DEBUGFS_AXI];
	int i;

	for (i = 0; i < axi->count; i++, databuf++)
		*databuf = axi->read_global_reg(hisi_hba,
						4 * i + axi->base_off);
}

static void hisi_sas_debugfs_snapshot_ras_reg(struct hisi_hba *hisi_hba)
{
	int dump_index = hisi_hba->debugfs_dump_index;
	u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS].data;
	const struct hisi_sas_hw *hw = hisi_hba->hw;
	const struct hisi_sas_debugfs_reg *ras =
			hw->debugfs_reg_array[DEBUGFS_RAS];
	int i;

	for (i = 0; i < ras->count; i++, databuf++)
		*databuf = ras->read_global_reg(hisi_hba,
						4 * i + ras->base_off);
}

static void hisi_sas_debugfs_snapshot_itct_reg(struct hisi_hba *hisi_hba)
{
	int dump_index = hisi_hba->debugfs_dump_index;
	void *cachebuf = hisi_hba->debugfs_itct_cache[dump_index].cache;
	void *databuf = hisi_hba->debugfs_itct[dump_index].itct;
	struct hisi_sas_itct *itct;
	int i;

	hisi_hba->hw->read_iost_itct_cache(hisi_hba, HISI_SAS_ITCT_CACHE,
					   cachebuf);

	itct = hisi_hba->itct;

	for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) {
		memcpy(databuf, itct, sizeof(struct hisi_sas_itct));
		databuf += sizeof(struct hisi_sas_itct);
	}
}

static void hisi_sas_debugfs_snapshot_iost_reg(struct hisi_hba *hisi_hba)
{
	int dump_index = hisi_hba->debugfs_dump_index;
	int max_command_entries = HISI_SAS_MAX_COMMANDS;
	void *cachebuf = hisi_hba->debugfs_iost_cache[dump_index].cache;
	void *databuf = hisi_hba->debugfs_iost[dump_index].iost;
	struct hisi_sas_iost *iost;
	int i;

	hisi_hba->hw->read_iost_itct_cache(hisi_hba, HISI_SAS_IOST_CACHE,
					   cachebuf);

	iost = hisi_hba->iost;

	for (i = 0; i < max_command_entries; i++, iost++) {
		memcpy(databuf, iost, sizeof(struct hisi_sas_iost));
		databuf += sizeof(struct hisi_sas_iost);
	}
}

static const char *
hisi_sas_debugfs_to_reg_name(int off, int base_off,
			     const struct hisi_sas_debugfs_reg_lu *lu)
{
	for (; lu->name; lu++) {
		if (off == lu->off - base_off)
			return lu->name;
	}

	return NULL;
}

static void hisi_sas_debugfs_print_reg(u32 *regs_val, const void *ptr,
				       struct seq_file *s)
{
	const struct hisi_sas_debugfs_reg *reg = ptr;
	int i;

	for (i = 0; i < reg->count; i++) {
		int off = i * 4;
		const char *name;

		name = hisi_sas_debugfs_to_reg_name(off, reg->base_off,
						    reg->lu);

		if (name)
			seq_printf(s, "0x%08x 0x%08x %s\n", off,
				   regs_val[i], name);
		else
			seq_printf(s, "0x%08x 0x%08x\n", off,
				   regs_val[i]);
	}
}

static int hisi_sas_debugfs_global_show(struct seq_file *s, void *p)
{
	struct hisi_sas_debugfs_regs *global = s->private;
	struct hisi_hba *hisi_hba = global->hisi_hba;
	const struct hisi_sas_hw *hw = hisi_hba->hw;
	const void *reg_global = hw->debugfs_reg_array[DEBUGFS_GLOBAL];

	hisi_sas_debugfs_print_reg(global->data,
				   reg_global, s);

	return 0;
}

static int hisi_sas_debugfs_global_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_global_show,
			   inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_global_fops = {
	.open = hisi_sas_debugfs_global_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

static int hisi_sas_debugfs_axi_show(struct seq_file *s, void *p)
{
	struct hisi_sas_debugfs_regs *axi = s->private;
	struct hisi_hba *hisi_hba = axi->hisi_hba;
	const struct hisi_sas_hw *hw = hisi_hba->hw;
	const void *reg_axi = hw->debugfs_reg_array[DEBUGFS_AXI];

	hisi_sas_debugfs_print_reg(axi->data,
				   reg_axi, s);

	return 0;
}

static int hisi_sas_debugfs_axi_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_axi_show,
			   inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_axi_fops = {
	.open = hisi_sas_debugfs_axi_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

static int hisi_sas_debugfs_ras_show(struct seq_file *s, void *p)
{
	struct hisi_sas_debugfs_regs *ras = s->private;
	struct hisi_hba *hisi_hba = ras->hisi_hba;
	const struct hisi_sas_hw *hw = hisi_hba->hw;
	const void *reg_ras = hw->debugfs_reg_array[DEBUGFS_RAS];

	hisi_sas_debugfs_print_reg(ras->data,
				   reg_ras, s);

	return 0;
}

static int hisi_sas_debugfs_ras_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_ras_show,
			   inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_ras_fops = {
	.open = hisi_sas_debugfs_ras_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

static int hisi_sas_debugfs_port_show(struct seq_file *s, void *p)
{
	struct hisi_sas_debugfs_port *port = s->private;
	struct hisi_sas_phy *phy = port->phy;
	struct hisi_hba *hisi_hba = phy->hisi_hba;
	const struct hisi_sas_hw *hw = hisi_hba->hw;
	const struct hisi_sas_debugfs_reg *reg_port = hw->debugfs_reg_port;

	hisi_sas_debugfs_print_reg(port->data, reg_port, s);

	return 0;
}

static int hisi_sas_debugfs_port_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_port_show, inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_port_fops = {
	.open = hisi_sas_debugfs_port_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

static void hisi_sas_show_row_64(struct seq_file *s, int index,
				 int sz, __le64 *ptr)
{
	int i;

	/* completion header size not fixed per HW version */
	seq_printf(s, "index %04d:\n\t", index);
	for (i = 1; i <= sz / 8; i++, ptr++) {
		seq_printf(s, " 0x%016llx", le64_to_cpu(*ptr));
		if (!(i % 2))
			seq_puts(s, "\n\t");
	}

	seq_puts(s, "\n");
}

static void hisi_sas_show_row_32(struct seq_file *s, int index,
				 int sz, __le32 *ptr)
{
	int i;

	/* completion header size not fixed per HW version */
	seq_printf(s, "index %04d:\n\t", index);
	for (i = 1; i <= sz / 4; i++, ptr++) {
		seq_printf(s, " 0x%08x", le32_to_cpu(*ptr));
		if (!(i % 4))
			seq_puts(s, "\n\t");
	}
	seq_puts(s, "\n");
}

static void hisi_sas_cq_show_slot(struct seq_file *s, int slot,
				  struct hisi_sas_debugfs_cq *debugfs_cq)
{
	struct hisi_sas_cq *cq = debugfs_cq->cq;
	struct hisi_hba *hisi_hba = cq->hisi_hba;
	__le32 *complete_hdr = debugfs_cq->complete_hdr +
			       (hisi_hba->hw->complete_hdr_size * slot);

	hisi_sas_show_row_32(s, slot,
			     hisi_hba->hw->complete_hdr_size,
			     complete_hdr);
}

static int hisi_sas_debugfs_cq_show(struct seq_file *s, void *p)
{
	struct hisi_sas_debugfs_cq *debugfs_cq = s->private;
	int slot;

	for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) {
		hisi_sas_cq_show_slot(s, slot, debugfs_cq);
	}
	return 0;
}

static int hisi_sas_debugfs_cq_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_cq_show, inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_cq_fops = {
	.open = hisi_sas_debugfs_cq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

static void hisi_sas_dq_show_slot(struct seq_file *s, int slot, void *dq_ptr)
{
	struct hisi_sas_debugfs_dq *debugfs_dq = dq_ptr;
	void *cmd_queue = debugfs_dq->hdr;
	__le32 *cmd_hdr = cmd_queue +
			  sizeof(struct hisi_sas_cmd_hdr) * slot;

	hisi_sas_show_row_32(s, slot, sizeof(struct hisi_sas_cmd_hdr), cmd_hdr);
}

static int hisi_sas_debugfs_dq_show(struct seq_file *s, void *p)
{
	int slot;

	for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) {
		hisi_sas_dq_show_slot(s, slot, s->private);
	}
	return 0;
}

static int hisi_sas_debugfs_dq_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_dq_show, inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_dq_fops = {
	.open = hisi_sas_debugfs_dq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

static int hisi_sas_debugfs_iost_show(struct seq_file *s, void *p)
{
	struct hisi_sas_debugfs_iost *debugfs_iost = s->private;
	struct hisi_sas_iost *iost = debugfs_iost->iost;
	int i, max_command_entries = HISI_SAS_MAX_COMMANDS;

	for (i = 0; i < max_command_entries; i++, iost++) {
		__le64 *data = &iost->qw0;

		hisi_sas_show_row_64(s, i, sizeof(*iost), data);
	}

	return 0;
}

static int hisi_sas_debugfs_iost_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_iost_show, inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_iost_fops = {
	.open = hisi_sas_debugfs_iost_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

static int hisi_sas_debugfs_iost_cache_show(struct seq_file *s, void *p)
{
	struct hisi_sas_debugfs_iost_cache *debugfs_iost_cache = s->private;
	struct hisi_sas_iost_itct_cache *iost_cache = debugfs_iost_cache->cache;
	u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
	int i, tab_idx;
	__le64 *iost;

	for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, iost_cache++) {
		/*
		 * Data struct of IOST cache:
		 * Data[1]: BIT0~15: Table index
		 *          Bit16:  Valid mask
		 * Data[2]~[9]: IOST table
		 */
		tab_idx = (iost_cache->data[1] & 0xffff);
		iost = (__le64 *)iost_cache;

		hisi_sas_show_row_64(s, tab_idx, cache_size, iost);
	}

	return 0;
}

static int hisi_sas_debugfs_iost_cache_open(struct inode *inode,
					    struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_iost_cache_show,
			   inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_iost_cache_fops = {
	.open = hisi_sas_debugfs_iost_cache_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

static int hisi_sas_debugfs_itct_show(struct seq_file *s, void *p)
{
	int i;
	struct hisi_sas_debugfs_itct *debugfs_itct = s->private;
	struct hisi_sas_itct *itct = debugfs_itct->itct;

	for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) {
		__le64 *data = &itct->qw0;

		hisi_sas_show_row_64(s, i, sizeof(*itct), data);
	}

	return 0;
}

static int hisi_sas_debugfs_itct_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_itct_show, inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_itct_fops = {
	.open = hisi_sas_debugfs_itct_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

static int hisi_sas_debugfs_itct_cache_show(struct seq_file *s, void *p)
{
	struct hisi_sas_debugfs_itct_cache *debugfs_itct_cache = s->private;
	struct hisi_sas_iost_itct_cache *itct_cache = debugfs_itct_cache->cache;
	u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4;
	int i, tab_idx;
	__le64 *itct;

	for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, itct_cache++) {
		/*
		 * Data struct of ITCT cache:
		 * Data[1]: BIT0~15: Table index
		 *          Bit16:  Valid mask
		 * Data[2]~[9]: ITCT table
		 */
		tab_idx = itct_cache->data[1] & 0xffff;
		itct = (__le64 *)itct_cache;

		hisi_sas_show_row_64(s, tab_idx, cache_size, itct);
	}

	return 0;
}

static int hisi_sas_debugfs_itct_cache_open(struct inode *inode,
					    struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_itct_cache_show,
			   inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_itct_cache_fops = {
	.open = hisi_sas_debugfs_itct_cache_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba)
{
	u64 *debugfs_timestamp;
	int dump_index = hisi_hba->debugfs_dump_index;
	struct dentry *dump_dentry;
	struct dentry *dentry;
	char name[256];
	int p;
	int c;
	int d;

	snprintf(name, 256, "%d", dump_index);

	dump_dentry = debugfs_create_dir(name, hisi_hba->debugfs_dump_dentry);

	debugfs_timestamp = &hisi_hba->debugfs_timestamp[dump_index];

	debugfs_create_u64("timestamp", 0400, dump_dentry,
			   debugfs_timestamp);

	debugfs_create_file("global", 0400, dump_dentry,
			    &hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL],
			    &hisi_sas_debugfs_global_fops);

	/* Create port dir and files */
	dentry = debugfs_create_dir("port", dump_dentry);
	for (p = 0; p < hisi_hba->n_phy; p++) {
		snprintf(name, 256, "%d", p);

		debugfs_create_file(name, 0400, dentry,
				    &hisi_hba->debugfs_port_reg[dump_index][p],
				    &hisi_sas_debugfs_port_fops);
	}

	/* Create CQ dir and files */
	dentry = debugfs_create_dir("cq", dump_dentry);
	for (c = 0; c < hisi_hba->queue_count; c++) {
		snprintf(name, 256, "%d", c);

		debugfs_create_file(name, 0400, dentry,
				    &hisi_hba->debugfs_cq[dump_index][c],
				    &hisi_sas_debugfs_cq_fops);
	}

	/* Create DQ dir and files */
	dentry = debugfs_create_dir("dq", dump_dentry);
	for (d = 0; d < hisi_hba->queue_count; d++) {
		snprintf(name, 256, "%d", d);

		debugfs_create_file(name, 0400, dentry,
				    &hisi_hba->debugfs_dq[dump_index][d],
				    &hisi_sas_debugfs_dq_fops);
	}

	debugfs_create_file("iost", 0400, dump_dentry,
			    &hisi_hba->debugfs_iost[dump_index],
			    &hisi_sas_debugfs_iost_fops);

	debugfs_create_file("iost_cache", 0400, dump_dentry,
			    &hisi_hba->debugfs_iost_cache[dump_index],
			    &hisi_sas_debugfs_iost_cache_fops);

	debugfs_create_file("itct", 0400, dump_dentry,
			    &hisi_hba->debugfs_itct[dump_index],
			    &hisi_sas_debugfs_itct_fops);

	debugfs_create_file("itct_cache", 0400, dump_dentry,
			    &hisi_hba->debugfs_itct_cache[dump_index],
			    &hisi_sas_debugfs_itct_cache_fops);

	debugfs_create_file("axi", 0400, dump_dentry,
			    &hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI],
			    &hisi_sas_debugfs_axi_fops);

	debugfs_create_file("ras", 0400, dump_dentry,
			    &hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS],
			    &hisi_sas_debugfs_ras_fops);

	return;
}

static void hisi_sas_debugfs_snapshot_regs(struct hisi_hba *hisi_hba)
{
	hisi_hba->hw->snapshot_prepare(hisi_hba);

	hisi_sas_debugfs_snapshot_global_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_port_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_axi_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_ras_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_cq_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_dq_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_itct_reg(hisi_hba);
	hisi_sas_debugfs_snapshot_iost_reg(hisi_hba);

	hisi_sas_debugfs_create_files(hisi_hba);

	hisi_hba->hw->snapshot_restore(hisi_hba);
}

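/*
 * Writing '1' to the "trigger_dump" debugfs file queues debugfs_work; its
 * handler snapshots the controller state into the next free dump slot.
 */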
static ssize_t hisi_sas_debugfs_trigger_dump_write(struct file *file,
						   const char __user *user_buf,
						   size_t count, loff_t *ppos)
{
	struct hisi_hba *hisi_hba = file->f_inode->i_private;
	char buf[8];

	if (hisi_hba->debugfs_dump_index >= hisi_sas_debugfs_dump_count)
		return -EFAULT;

	if (count > 8)
		return -EFAULT;

	if (copy_from_user(buf, user_buf, count))
		return -EFAULT;

	if (buf[0] != '1')
		return -EFAULT;

	queue_work(hisi_hba->wq, &hisi_hba->debugfs_work);

	return count;
}

static const struct file_operations hisi_sas_debugfs_trigger_dump_fops = {
	.write = &hisi_sas_debugfs_trigger_dump_write,
	.owner = THIS_MODULE,
};

enum {
	HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL = 0,
	HISI_SAS_BIST_LOOPBACK_MODE_SERDES,
	HISI_SAS_BIST_LOOPBACK_MODE_REMOTE,
};

enum {
	HISI_SAS_BIST_CODE_MODE_PRBS7 = 0,
	HISI_SAS_BIST_CODE_MODE_PRBS23,
	HISI_SAS_BIST_CODE_MODE_PRBS31,
	HISI_SAS_BIST_CODE_MODE_JTPAT,
	HISI_SAS_BIST_CODE_MODE_CJTPAT,
	HISI_SAS_BIST_CODE_MODE_SCRAMBED_0,
	HISI_SAS_BIST_CODE_MODE_TRAIN,
	HISI_SAS_BIST_CODE_MODE_TRAIN_DONE,
	HISI_SAS_BIST_CODE_MODE_HFTP,
	HISI_SAS_BIST_CODE_MODE_MFTP,
	HISI_SAS_BIST_CODE_MODE_LFTP,
	HISI_SAS_BIST_CODE_MODE_FIXED_DATA,
};

static const struct {
	int value;
	char *name;
} hisi_sas_debugfs_loop_linkrate[] = {
	{ SAS_LINK_RATE_1_5_GBPS, "1.5 Gbit" },
	{ SAS_LINK_RATE_3_0_GBPS, "3.0 Gbit" },
	{ SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit" },
	{ SAS_LINK_RATE_12_0_GBPS, "12.0 Gbit" },
};

static int hisi_sas_debugfs_bist_linkrate_show(struct seq_file *s, void *p)
{
	struct hisi_hba *hisi_hba = s->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_linkrate); i++) {
		int match = (hisi_hba->debugfs_bist_linkrate ==
			     hisi_sas_debugfs_loop_linkrate[i].value);

		seq_printf(s, "%s%s%s ", match ? "[" : "",
			   hisi_sas_debugfs_loop_linkrate[i].name,
			   match ? "]" : "");
	}
	seq_puts(s, "\n");

	return 0;
}

static ssize_t hisi_sas_debugfs_bist_linkrate_write(struct file *filp,
						    const char __user *buf,
						    size_t count, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct hisi_hba *hisi_hba = m->private;
	char kbuf[16] = {}, *pkbuf;
	bool found = false;
	int i;

	if (hisi_hba->debugfs_bist_enable)
		return -EPERM;

	if (count >= sizeof(kbuf))
		return -EOVERFLOW;

	if (copy_from_user(kbuf, buf, count))
		return -EINVAL;

	pkbuf = strstrip(kbuf);

	for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_linkrate); i++) {
		if (!strncmp(hisi_sas_debugfs_loop_linkrate[i].name,
			     pkbuf, 16)) {
			hisi_hba->debugfs_bist_linkrate =
				hisi_sas_debugfs_loop_linkrate[i].value;
			found = true;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	return count;
}

static int hisi_sas_debugfs_bist_linkrate_open(struct inode *inode,
					       struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_bist_linkrate_show,
			   inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_bist_linkrate_ops = {
	.open = hisi_sas_debugfs_bist_linkrate_open,
	.read = seq_read,
	.write = hisi_sas_debugfs_bist_linkrate_write,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

static const struct {
	int value;
	char *name;
} hisi_sas_debugfs_loop_code_mode[] = {
	{ HISI_SAS_BIST_CODE_MODE_PRBS7, "PRBS7" },
	{ HISI_SAS_BIST_CODE_MODE_PRBS23, "PRBS23" },
	{ HISI_SAS_BIST_CODE_MODE_PRBS31, "PRBS31" },
	{ HISI_SAS_BIST_CODE_MODE_JTPAT, "JTPAT" },
	{ HISI_SAS_BIST_CODE_MODE_CJTPAT, "CJTPAT" },
	{ HISI_SAS_BIST_CODE_MODE_SCRAMBED_0, "SCRAMBED_0" },
	{ HISI_SAS_BIST_CODE_MODE_TRAIN, "TRAIN" },
	{ HISI_SAS_BIST_CODE_MODE_TRAIN_DONE, "TRAIN_DONE" },
	{ HISI_SAS_BIST_CODE_MODE_HFTP, "HFTP" },
	{ HISI_SAS_BIST_CODE_MODE_MFTP, "MFTP" },
	{ HISI_SAS_BIST_CODE_MODE_LFTP, "LFTP" },
	{ HISI_SAS_BIST_CODE_MODE_FIXED_DATA, "FIXED_DATA" },
};

static int hisi_sas_debugfs_bist_code_mode_show(struct seq_file *s, void *p)
{
	struct hisi_hba *hisi_hba = s->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_code_mode); i++) {
		int match = (hisi_hba->debugfs_bist_code_mode ==
			     hisi_sas_debugfs_loop_code_mode[i].value);

		seq_printf(s, "%s%s%s ", match ? "[" : "",
			   hisi_sas_debugfs_loop_code_mode[i].name,
			   match ? "]" : "");
	}
	seq_puts(s, "\n");

	return 0;
}

static ssize_t hisi_sas_debugfs_bist_code_mode_write(struct file *filp,
						     const char __user *buf,
						     size_t count,
						     loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct hisi_hba *hisi_hba = m->private;
	char kbuf[16] = {}, *pkbuf;
	bool found = false;
	int i;

	if (hisi_hba->debugfs_bist_enable)
		return -EPERM;

	if (count >= sizeof(kbuf))
		return -EINVAL;

	if (copy_from_user(kbuf, buf, count))
		return -EOVERFLOW;

	pkbuf = strstrip(kbuf);

	for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_code_mode); i++) {
		if (!strncmp(hisi_sas_debugfs_loop_code_mode[i].name,
			     pkbuf, 16)) {
			hisi_hba->debugfs_bist_code_mode =
				hisi_sas_debugfs_loop_code_mode[i].value;
			found = true;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	return count;
}

static int hisi_sas_debugfs_bist_code_mode_open(struct inode *inode,
						struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_bist_code_mode_show,
			   inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_bist_code_mode_ops = {
	.open = hisi_sas_debugfs_bist_code_mode_open,
	.read = seq_read,
	.write = hisi_sas_debugfs_bist_code_mode_write,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

static ssize_t hisi_sas_debugfs_bist_phy_write(struct file *filp,
					       const char __user *buf,
					       size_t count, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct hisi_hba *hisi_hba = m->private;
	unsigned int phy_no;
	int val;

	if (hisi_hba->debugfs_bist_enable)
		return -EPERM;

	val = kstrtouint_from_user(buf, count, 0, &phy_no);
	if (val)
		return val;

	if (phy_no >= hisi_hba->n_phy)
		return -EINVAL;

	hisi_hba->debugfs_bist_phy_no = phy_no;

	return count;
}

static int hisi_sas_debugfs_bist_phy_show(struct seq_file *s, void *p)
{
	struct hisi_hba *hisi_hba = s->private;

	seq_printf(s, "%d\n", hisi_hba->debugfs_bist_phy_no);

	return 0;
}

static int hisi_sas_debugfs_bist_phy_open(struct inode *inode,
					  struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_bist_phy_show,
			   inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_bist_phy_ops = {
	.open = hisi_sas_debugfs_bist_phy_open,
	.read = seq_read,
	.write = hisi_sas_debugfs_bist_phy_write,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

static const struct {
	int value;
	char *name;
} hisi_sas_debugfs_loop_modes[] = {
	{ HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL, "digital" },
	{ HISI_SAS_BIST_LOOPBACK_MODE_SERDES, "serdes" },
	{ HISI_SAS_BIST_LOOPBACK_MODE_REMOTE, "remote" },
};

static int hisi_sas_debugfs_bist_mode_show(struct seq_file *s, void *p)
{
	struct hisi_hba *hisi_hba = s->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_modes); i++) {
		int match = (hisi_hba->debugfs_bist_mode ==
			     hisi_sas_debugfs_loop_modes[i].value);

		seq_printf(s, "%s%s%s ", match ? "[" : "",
			   hisi_sas_debugfs_loop_modes[i].name,
			   match ? "]" : "");
	}
	seq_puts(s, "\n");

	return 0;
}

static ssize_t hisi_sas_debugfs_bist_mode_write(struct file *filp,
						const char __user *buf,
						size_t count, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct hisi_hba *hisi_hba = m->private;
	char kbuf[16] = {}, *pkbuf;
	bool found = false;
	int i;

	if (hisi_hba->debugfs_bist_enable)
		return -EPERM;

	if (count >= sizeof(kbuf))
		return -EINVAL;

	if (copy_from_user(kbuf, buf, count))
		return -EOVERFLOW;

	pkbuf = strstrip(kbuf);

	for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_modes); i++) {
		if (!strncmp(hisi_sas_debugfs_loop_modes[i].name, pkbuf, 16)) {
			hisi_hba->debugfs_bist_mode =
				hisi_sas_debugfs_loop_modes[i].value;
			found = true;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	return count;
}

static int hisi_sas_debugfs_bist_mode_open(struct inode *inode,
					   struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_bist_mode_show,
			   inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_bist_mode_ops = {
	.open = hisi_sas_debugfs_bist_mode_open,
	.read = seq_read,
	.write = hisi_sas_debugfs_bist_mode_write,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

static ssize_t hisi_sas_debugfs_bist_enable_write(struct file *filp,
						  const char __user *buf,
						  size_t count, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct hisi_hba *hisi_hba = m->private;
	unsigned int enable;
	int val;

	val = kstrtouint_from_user(buf, count, 0, &enable);
	if (val)
		return val;

	if (enable > 1)
		return -EINVAL;

	if (enable == hisi_hba->debugfs_bist_enable)
		return count;

	if (!hisi_hba->hw->set_bist)
		return -EPERM;

	val = hisi_hba->hw->set_bist(hisi_hba, enable);
	if (val < 0)
		return val;

	hisi_hba->debugfs_bist_enable = enable;

	return count;
}

static int hisi_sas_debugfs_bist_enable_show(struct seq_file *s, void *p)
{
	struct hisi_hba *hisi_hba = s->private;

	seq_printf(s, "%d\n", hisi_hba->debugfs_bist_enable);

	return 0;
}

static int hisi_sas_debugfs_bist_enable_open(struct inode *inode,
					     struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_bist_enable_show,
			   inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_bist_enable_ops = {
	.open = hisi_sas_debugfs_bist_enable_open,
	.read = seq_read,
	.write = hisi_sas_debugfs_bist_enable_write,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

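/*
 * Each per-phy "phy_down_cnt" file reports how many times the phy has gone
 * down; writing 0 clears the counter, any other value is rejected.
 */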
static ssize_t hisi_sas_debugfs_phy_down_cnt_write(struct file *filp,
						   const char __user *buf,
						   size_t count, loff_t *ppos)
{
	struct seq_file *s = filp->private_data;
	struct hisi_sas_phy *phy = s->private;
	unsigned int set_val;
	int res;

	res = kstrtouint_from_user(buf, count, 0, &set_val);
	if (res)
		return res;

	if (set_val > 0)
		return -EINVAL;

	atomic_set(&phy->down_cnt, 0);

	return count;
}

static int hisi_sas_debugfs_phy_down_cnt_show(struct seq_file *s, void *p)
{
	struct hisi_sas_phy *phy = s->private;

	seq_printf(s, "%d\n", atomic_read(&phy->down_cnt));

	return 0;
}

static int hisi_sas_debugfs_phy_down_cnt_open(struct inode *inode,
					      struct file *filp)
{
	return single_open(filp, hisi_sas_debugfs_phy_down_cnt_show,
			   inode->i_private);
}

static const struct file_operations hisi_sas_debugfs_phy_down_cnt_ops = {
	.open = hisi_sas_debugfs_phy_down_cnt_open,
	.read = seq_read,
	.write = hisi_sas_debugfs_phy_down_cnt_write,
	.llseek = seq_lseek,
	.release = single_release,
	.owner = THIS_MODULE,
};

void hisi_sas_debugfs_work_handler(struct work_struct *work)
{
	struct hisi_hba *hisi_hba =
		container_of(work, struct hisi_hba, debugfs_work);
	int debugfs_dump_index = hisi_hba->debugfs_dump_index;
	struct device *dev = hisi_hba->dev;
	u64 timestamp = local_clock();

	if (debugfs_dump_index >= hisi_sas_debugfs_dump_count) {
		dev_warn(dev, "dump count exceeded!\n");
		return;
	}

	do_div(timestamp, NSEC_PER_MSEC);
	hisi_hba->debugfs_timestamp[debugfs_dump_index] = timestamp;

	hisi_sas_debugfs_snapshot_regs(hisi_hba);
	hisi_hba->debugfs_dump_index++;
}
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_work_handler);

static void hisi_sas_debugfs_release(struct hisi_hba *hisi_hba, int dump_index)
{
	struct device *dev = hisi_hba->dev;
	int i;

	devm_kfree(dev, hisi_hba->debugfs_iost_cache[dump_index].cache);
	devm_kfree(dev, hisi_hba->debugfs_itct_cache[dump_index].cache);
	devm_kfree(dev, hisi_hba->debugfs_iost[dump_index].iost);
	devm_kfree(dev, hisi_hba->debugfs_itct[dump_index].itct);

	for (i = 0; i < hisi_hba->queue_count; i++)
		devm_kfree(dev, hisi_hba->debugfs_dq[dump_index][i].hdr);

	for (i = 0; i < hisi_hba->queue_count; i++)
		devm_kfree(dev,
			   hisi_hba->debugfs_cq[dump_index][i].complete_hdr);

	for (i = 0; i < DEBUGFS_REGS_NUM; i++)
		devm_kfree(dev, hisi_hba->debugfs_regs[dump_index][i].data);

	for (i = 0; i < hisi_hba->n_phy; i++)
		devm_kfree(dev, hisi_hba->debugfs_port_reg[dump_index][i].data);
}

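/*
 * Allocate the buffers backing one dump slot. On any allocation failure the
 * buffers of all dump slots are released and -ENOMEM is returned.
 */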
static int hisi_sas_debugfs_alloc(struct hisi_hba *hisi_hba, int dump_index)
{
	const struct hisi_sas_hw *hw = hisi_hba->hw;
	struct device *dev = hisi_hba->dev;
	int p, c, d, r, i;
	size_t sz;

	for (r = 0; r < DEBUGFS_REGS_NUM; r++) {
		struct hisi_sas_debugfs_regs *regs =
				&hisi_hba->debugfs_regs[dump_index][r];

		sz = hw->debugfs_reg_array[r]->count * 4;
		regs->data = devm_kmalloc(dev, sz, GFP_KERNEL);
		if (!regs->data)
			goto fail;
		regs->hisi_hba = hisi_hba;
	}

	sz = hw->debugfs_reg_port->count * 4;
	for (p = 0; p < hisi_hba->n_phy; p++) {
		struct hisi_sas_debugfs_port *port =
				&hisi_hba->debugfs_port_reg[dump_index][p];

		port->data = devm_kmalloc(dev, sz, GFP_KERNEL);
		if (!port->data)
			goto fail;
		port->phy = &hisi_hba->phy[p];
	}

	sz = hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS;
	for (c = 0; c < hisi_hba->queue_count; c++) {
		struct hisi_sas_debugfs_cq *cq =
				&hisi_hba->debugfs_cq[dump_index][c];

		cq->complete_hdr = devm_kmalloc(dev, sz, GFP_KERNEL);
		if (!cq->complete_hdr)
			goto fail;
		cq->cq = &hisi_hba->cq[c];
	}

	sz = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS;
	for (d = 0; d < hisi_hba->queue_count; d++) {
		struct hisi_sas_debugfs_dq *dq =
				&hisi_hba->debugfs_dq[dump_index][d];

		dq->hdr = devm_kmalloc(dev, sz, GFP_KERNEL);
		if (!dq->hdr)
			goto fail;
		dq->dq = &hisi_hba->dq[d];
	}

	sz = HISI_SAS_MAX_COMMANDS * sizeof(struct hisi_sas_iost);

	hisi_hba->debugfs_iost[dump_index].iost =
		devm_kmalloc(dev, sz, GFP_KERNEL);
	if (!hisi_hba->debugfs_iost[dump_index].iost)
		goto fail;

	sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
	     sizeof(struct hisi_sas_iost_itct_cache);

	hisi_hba->debugfs_iost_cache[dump_index].cache =
		devm_kmalloc(dev, sz, GFP_KERNEL);
	if (!hisi_hba->debugfs_iost_cache[dump_index].cache)
		goto fail;

	sz = HISI_SAS_IOST_ITCT_CACHE_NUM *
	     sizeof(struct hisi_sas_iost_itct_cache);

	hisi_hba->debugfs_itct_cache[dump_index].cache =
		devm_kmalloc(dev, sz, GFP_KERNEL);
	if (!hisi_hba->debugfs_itct_cache[dump_index].cache)
		goto fail;

	/* New memory allocations must be located before itct */
	sz = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct);

	hisi_hba->debugfs_itct[dump_index].itct =
		devm_kmalloc(dev, sz, GFP_KERNEL);
	if (!hisi_hba->debugfs_itct[dump_index].itct)
		goto fail;

	return 0;
fail:
	for (i = 0; i < hisi_sas_debugfs_dump_count; i++)
		hisi_sas_debugfs_release(hisi_hba, i);
	return -ENOMEM;
}

static void hisi_sas_debugfs_phy_down_cnt_init(struct hisi_hba *hisi_hba)
{
	struct dentry *dir = debugfs_create_dir("phy_down_cnt",
						hisi_hba->debugfs_dir);
	char name[16];
	int phy_no;

	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
		snprintf(name, 16, "%d", phy_no);
		debugfs_create_file(name, 0600, dir,
				    &hisi_hba->phy[phy_no],
				    &hisi_sas_debugfs_phy_down_cnt_ops);
	}
}

static void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba)
{
	hisi_hba->debugfs_bist_dentry =
			debugfs_create_dir("bist", hisi_hba->debugfs_dir);
	debugfs_create_file("link_rate", 0600,
			    hisi_hba->debugfs_bist_dentry, hisi_hba,
			    &hisi_sas_debugfs_bist_linkrate_ops);

	debugfs_create_file("code_mode", 0600,
			    hisi_hba->debugfs_bist_dentry, hisi_hba,
			    &hisi_sas_debugfs_bist_code_mode_ops);

	debugfs_create_file("phy_id", 0600, hisi_hba->debugfs_bist_dentry,
			    hisi_hba, &hisi_sas_debugfs_bist_phy_ops);

	debugfs_create_u32("cnt", 0600, hisi_hba->debugfs_bist_dentry,
			   &hisi_hba->debugfs_bist_cnt);

	debugfs_create_file("loopback_mode", 0600,
			    hisi_hba->debugfs_bist_dentry,
			    hisi_hba, &hisi_sas_debugfs_bist_mode_ops);

	debugfs_create_file("enable", 0600, hisi_hba->debugfs_bist_dentry,
			    hisi_hba, &hisi_sas_debugfs_bist_enable_ops);

	hisi_hba->debugfs_bist_linkrate = SAS_LINK_RATE_1_5_GBPS;
}

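/*
 * Typical BIST usage from user space (a sketch inferred from the file ops
 * above, not a sequence mandated by this file): while "enable" is still 0,
 * select phy_id, loopback_mode, code_mode and link_rate, then write 1 to
 * "enable" to start the test. The parameter files return -EPERM while the
 * test is enabled.
 */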
void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba)
{
	struct device *dev = hisi_hba->dev;
	int i;

	hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev),
						   hisi_sas_debugfs_dir);
	debugfs_create_file("trigger_dump", 0200,
			    hisi_hba->debugfs_dir,
			    hisi_hba,
			    &hisi_sas_debugfs_trigger_dump_fops);

	/* create bist structures */
	hisi_sas_debugfs_bist_init(hisi_hba);

	hisi_hba->debugfs_dump_dentry =
		debugfs_create_dir("dump", hisi_hba->debugfs_dir);

	hisi_sas_debugfs_phy_down_cnt_init(hisi_hba);

	for (i = 0; i < hisi_sas_debugfs_dump_count; i++) {
		if (hisi_sas_debugfs_alloc(hisi_hba, i)) {
			debugfs_remove_recursive(hisi_hba->debugfs_dir);
			dev_dbg(dev, "failed to init debugfs!\n");
			break;
		}
	}
}
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_init);

void hisi_sas_debugfs_exit(struct hisi_hba *hisi_hba)
{
	debugfs_remove_recursive(hisi_hba->debugfs_dir);
}
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_exit);

int hisi_sas_remove(struct platform_device *pdev)
{
	struct sas_ha_struct *sha = platform_get_drvdata(pdev);
	struct hisi_hba *hisi_hba = sha->lldd_ha;
	struct Scsi_Host *shost = sha->core.shost;

	if (timer_pending(&hisi_hba->timer))
		del_timer(&hisi_hba->timer);

	sas_unregister_ha(sha);
	sas_remove_host(sha->core.shost);

	hisi_sas_free(hisi_hba);
	scsi_host_put(shost);
	return 0;
}
EXPORT_SYMBOL_GPL(hisi_sas_remove);

bool hisi_sas_debugfs_enable;
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable);
module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444);
MODULE_PARM_DESC(hisi_sas_debugfs_enable, "Enable driver debugfs (default disabled)");

u32 hisi_sas_debugfs_dump_count = 1;
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dump_count);
module_param_named(debugfs_dump_count, hisi_sas_debugfs_dump_count, uint, 0444);
MODULE_PARM_DESC(hisi_sas_debugfs_dump_count, "Number of debugfs dumps to allow");

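/*
 * Example module load with debugfs enabled (the module name shown is an
 * assumption and depends on how the driver is built; the parameter names
 * come from the module_param_named() declarations above):
 *   modprobe hisi_sas_main debugfs_enable=1 debugfs_dump_count=2
 */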
static __init int hisi_sas_init(void)
{
	hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops);
	if (!hisi_sas_stt)
		return -ENOMEM;

	if (hisi_sas_debugfs_enable) {
		hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL);
		if (hisi_sas_debugfs_dump_count > HISI_SAS_MAX_DEBUGFS_DUMP) {
			pr_info("hisi_sas: Limiting debugfs dump count\n");
			hisi_sas_debugfs_dump_count = HISI_SAS_MAX_DEBUGFS_DUMP;
		}
	}

	return 0;
}

static __exit void hisi_sas_exit(void)
{
	sas_release_transport(hisi_sas_stt);

	debugfs_remove(hisi_sas_debugfs_dir);
}

module_init(hisi_sas_init);
module_exit(hisi_sas_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Garry <john.garry@huawei.com>");
MODULE_DESCRIPTION("HISILICON SAS controller driver");
MODULE_ALIAS("platform:" DRV_NAME);