Merge branch '3.1-rc-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
* '3.1-rc-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (21 commits)
  target: Convert acl_node_lock to be IRQ-disabling
  target: Make locking in transport_deregister_session() IRQ safe
  tcm_fc: init/exit functions should not be protected by "#ifdef MODULE"
  target: Print subpage too for unhandled MODE SENSE pages
  iscsi-target: Fix iscsit_allocate_se_cmd_for_tmr failure path bugs
  iscsi-target: Implement iSCSI target IPv6 address printing.
  target: Fix task SGL chaining breakage with transport_allocate_data_tasks
  target: Fix task count > 1 handling breakage and use max_sector page alignment
  target: Add missing DATA_SG_IO transport_cmd_get_valid_sectors check
  target: Fix SYNCHRONIZE_CACHE zero LBA + range breakage
  target: Remove duplicate task completions in transport_emulate_control_cdb
  target: Fix WRITE_SAME usage with transport_get_size
  target: Add WRITE_SAME (10) parsing and refactor passthrough checks
  target: Fix write payload exception handling with ->new_cmd_map
  iscsi-target: forever loop bug in iscsit_attach_ooo_cmdsn()
  iscsi-target: remove duplicate return
  target: Convert target_core_rd.c to use use BUG_ON
  iscsi-target: Fix leak on failure in iscsi_copy_param_list()
  target: Use ERR_CAST inlined function
  target: Make standard INQUIRY return 'not connected' for tpg_virt_lun0
  ...
commit f385b6974b
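Several of the merged commits replace the `return ERR_PTR(PTR_ERR(ptr));` idiom with `ERR_CAST(ptr)`. A minimal userspace sketch of why the two are equivalent follows; the helpers are simplified copies of the include/linux/err.h interface, and `struct tiqn`/`struct wwn` plus the two functions are hypothetical stand-ins, not the kernel code:

```c
/*
 * Sketch of the ERR_PTR(PTR_ERR(x)) -> ERR_CAST(x) cleanup from
 * "target: Use ERR_CAST inlined function".  Simplified err.h helpers;
 * the tiqn/wwn types and functions below are illustrative only.
 */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Encode a small negative errno as an invalid pointer value. */
static inline void *ERR_PTR(long error) { return (void *)error; }
/* Decode it back to a negative errno. */
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}
/* Retype an error pointer directly, without the round-trip through long. */
static inline void *ERR_CAST(const void *ptr) { return (void *)ptr; }

struct tiqn { int unused; };
struct wwn { int unused; };

static struct tiqn a_tiqn;

static struct tiqn *add_tiqn(int fail)
{
	return fail ? ERR_PTR(-ENOMEM) : &a_tiqn;
}

static struct wwn *call_coreaddtiqn(int fail)
{
	struct tiqn *tiqn = add_tiqn(fail);

	if (IS_ERR(tiqn))
		return ERR_CAST(tiqn);	/* was: ERR_PTR(PTR_ERR(tiqn)) */
	return (struct wwn *)tiqn;	/* placeholder for the real setup */
}

int main(void)
{
	printf("error path returns %ld\n", PTR_ERR(call_coreaddtiqn(1)));
	return 0;
}
```

The error value is already encoded in the pointer bits, so decoding it with PTR_ERR() only to re-encode it with ERR_PTR() is redundant; ERR_CAST() just changes the pointer type.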
@@ -2243,7 +2243,6 @@ static int iscsit_handle_snack(
 	case 0:
 		return iscsit_handle_recovery_datain_or_r2t(conn, buf,
 			hdr->itt, hdr->ttt, hdr->begrun, hdr->runlength);
-		return 0;
 	case ISCSI_FLAG_SNACK_TYPE_STATUS:
 		return iscsit_handle_status_snack(conn, hdr->itt, hdr->ttt,
 			hdr->begrun, hdr->runlength);
@@ -268,7 +268,7 @@ struct se_tpg_np *lio_target_call_addnptotpg(
 				ISCSI_TCP);
 	if (IS_ERR(tpg_np)) {
 		iscsit_put_tpg(tpg);
-		return ERR_PTR(PTR_ERR(tpg_np));
+		return ERR_CAST(tpg_np);
 	}
 	pr_debug("LIO_Target_ConfigFS: addnptotpg done!\n");
@@ -1285,7 +1285,7 @@ struct se_wwn *lio_target_call_coreaddtiqn(

 	tiqn = iscsit_add_tiqn((unsigned char *)name);
 	if (IS_ERR(tiqn))
-		return ERR_PTR(PTR_ERR(tiqn));
+		return ERR_CAST(tiqn);
 	/*
 	 * Setup struct iscsi_wwn_stat_grps for se_wwn->fabric_stat_group.
 	 */
@@ -834,7 +834,7 @@ static int iscsit_attach_ooo_cmdsn(
 	 */
 	list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list,
 			ooo_list) {
-		while (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn)
+		if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn)
 			continue;

 		list_add(&ooo_cmdsn->ooo_list,
@@ -1013,19 +1013,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
 					ISCSI_LOGIN_STATUS_TARGET_ERROR);
 			goto new_sess_out;
 		}
-#if 0
-		if (!iscsi_ntop6((const unsigned char *)
-				&sock_in6.sin6_addr.in6_u,
-				(char *)&conn->ipv6_login_ip[0],
-				IPV6_ADDRESS_SPACE)) {
-			pr_err("iscsi_ntop6() failed\n");
-			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
-					ISCSI_LOGIN_STATUS_TARGET_ERROR);
-			goto new_sess_out;
-		}
-#else
-		pr_debug("Skipping iscsi_ntop6()\n");
-#endif
+		snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
+				&sock_in6.sin6_addr.in6_u);
 		conn->login_port = ntohs(sock_in6.sin6_port);
 	} else {
 		memset(&sock_in, 0, sizeof(struct sockaddr_in));
@@ -545,13 +545,13 @@ int iscsi_copy_param_list(
 	struct iscsi_param_list *src_param_list,
 	int leading)
 {
-	struct iscsi_param *new_param = NULL, *param = NULL;
+	struct iscsi_param *param = NULL;
+	struct iscsi_param *new_param = NULL;
 	struct iscsi_param_list *param_list = NULL;

 	param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);
 	if (!param_list) {
-		pr_err("Unable to allocate memory for"
-				" struct iscsi_param_list.\n");
+		pr_err("Unable to allocate memory for struct iscsi_param_list.\n");
 		goto err_out;
 	}
 	INIT_LIST_HEAD(&param_list->param_list);
@@ -567,8 +567,17 @@ int iscsi_copy_param_list(

 		new_param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL);
 		if (!new_param) {
-			pr_err("Unable to allocate memory for"
-				" struct iscsi_param.\n");
+			pr_err("Unable to allocate memory for struct iscsi_param.\n");
 			goto err_out;
 		}

+		new_param->name = kstrdup(param->name, GFP_KERNEL);
+		new_param->value = kstrdup(param->value, GFP_KERNEL);
+		if (!new_param->value || !new_param->name) {
+			kfree(new_param->value);
+			kfree(new_param->name);
+			kfree(new_param);
+			pr_err("Unable to allocate memory for parameter name/value.\n");
+			goto err_out;
+		}
+
@@ -580,32 +589,12 @@ int iscsi_copy_param_list(
 		new_param->use = param->use;
 		new_param->type_range = param->type_range;

-		new_param->name = kzalloc(strlen(param->name) + 1, GFP_KERNEL);
-		if (!new_param->name) {
-			pr_err("Unable to allocate memory for"
-				" parameter name.\n");
-			goto err_out;
-		}
-
-		new_param->value = kzalloc(strlen(param->value) + 1,
-				GFP_KERNEL);
-		if (!new_param->value) {
-			pr_err("Unable to allocate memory for"
-				" parameter value.\n");
-			goto err_out;
-		}
-
-		memcpy(new_param->name, param->name, strlen(param->name));
-		new_param->name[strlen(param->name)] = '\0';
-		memcpy(new_param->value, param->value, strlen(param->value));
-		new_param->value[strlen(param->value)] = '\0';
-
 		list_add_tail(&new_param->p_list, &param_list->param_list);
 	}

-	if (!list_empty(&param_list->param_list))
+	if (!list_empty(&param_list->param_list)) {
 		*dst_param_list = param_list;
-	else {
+	} else {
 		pr_err("No parameters allocated.\n");
 		goto err_out;
 	}
@@ -243,7 +243,7 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
 	if (!cmd->tmr_req) {
 		pr_err("Unable to allocate memory for"
			" Task Management command!\n");
-		return NULL;
+		goto out;
 	}
 	/*
 	 * TASK_REASSIGN for ERL=2 / connection stays inside of
@@ -298,8 +298,6 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
 	return cmd;
 out:
 	iscsit_release_cmd(cmd);
-	if (se_cmd)
-		transport_free_se_cmd(se_cmd);
 	return NULL;
 }
@@ -67,6 +67,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
 {
 	struct se_lun *lun = cmd->se_lun;
 	struct se_device *dev = cmd->se_dev;
+	struct se_portal_group *tpg = lun->lun_sep->sep_tpg;
 	unsigned char *buf;

 	/*
@@ -81,9 +82,13 @@ target_emulate_inquiry_std(struct se_cmd *cmd)

 	buf = transport_kmap_first_data_page(cmd);

-	buf[0] = dev->transport->get_device_type(dev);
-	if (buf[0] == TYPE_TAPE)
-		buf[1] = 0x80;
+	if (dev == tpg->tpg_virt_lun0.lun_se_dev) {
+		buf[0] = 0x3f; /* Not connected */
+	} else {
+		buf[0] = dev->transport->get_device_type(dev);
+		if (buf[0] == TYPE_TAPE)
+			buf[1] = 0x80;
+	}
 	buf[2] = dev->transport->get_device_rev(dev);

 	/*
@@ -915,8 +920,8 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)
 		length += target_modesense_control(dev, &buf[offset+length]);
 		break;
 	default:
-		pr_err("Got Unknown Mode Page: 0x%02x\n",
-				cdb[2] & 0x3f);
+		pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
+		       cdb[2] & 0x3f, cdb[3]);
 		return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
 	}
 	offset += length;
@@ -1072,8 +1077,6 @@ target_emulate_unmap(struct se_task *task)
 		size -= 16;
 	}

-	task->task_scsi_status = GOOD;
-	transport_complete_task(task, 1);
 err:
 	transport_kunmap_first_data_page(cmd);

@@ -1085,24 +1088,17 @@ target_emulate_unmap(struct se_task *task)
 * Note this is not used for TCM/pSCSI passthrough
 */
static int
-target_emulate_write_same(struct se_task *task, int write_same32)
+target_emulate_write_same(struct se_task *task, u32 num_blocks)
{
	struct se_cmd *cmd = task->task_se_cmd;
	struct se_device *dev = cmd->se_dev;
	sector_t range;
	sector_t lba = cmd->t_task_lba;
-	unsigned int num_blocks;
	int ret;
	/*
-	 * Extract num_blocks from the WRITE_SAME_* CDB. Then use the explict
-	 * range when non zero is supplied, otherwise calculate the remaining
-	 * range based on ->get_blocks() - starting LBA.
+	 * Use the explicit range when non zero is supplied, otherwise calculate
+	 * the remaining range based on ->get_blocks() - starting LBA.
	 */
-	if (write_same32)
-		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]);
-	else
-		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]);
-
	if (num_blocks != 0)
		range = num_blocks;
	else
@@ -1117,8 +1113,6 @@ target_emulate_write_same(struct se_task *task, int write_same32)
 		return ret;
 	}

-	task->task_scsi_status = GOOD;
-	transport_complete_task(task, 1);
 	return 0;
 }

@@ -1165,13 +1159,23 @@ transport_emulate_control_cdb(struct se_task *task)
 		}
 		ret = target_emulate_unmap(task);
 		break;
+	case WRITE_SAME:
+		if (!dev->transport->do_discard) {
+			pr_err("WRITE_SAME emulation not supported"
+				" for: %s\n", dev->transport->name);
+			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+		}
+		ret = target_emulate_write_same(task,
+				get_unaligned_be16(&cmd->t_task_cdb[7]));
+		break;
 	case WRITE_SAME_16:
 		if (!dev->transport->do_discard) {
 			pr_err("WRITE_SAME_16 emulation not supported"
 				" for: %s\n", dev->transport->name);
 			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
 		}
-		ret = target_emulate_write_same(task, 0);
+		ret = target_emulate_write_same(task,
+				get_unaligned_be32(&cmd->t_task_cdb[10]));
 		break;
 	case VARIABLE_LENGTH_CMD:
 		service_action =
@@ -1184,7 +1188,8 @@ transport_emulate_control_cdb(struct se_task *task)
 				dev->transport->name);
 			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
 		}
-		ret = target_emulate_write_same(task, 1);
+		ret = target_emulate_write_same(task,
+				get_unaligned_be32(&cmd->t_task_cdb[28]));
 		break;
 	default:
 		pr_err("Unsupported VARIABLE_LENGTH_CMD SA:"
@@ -1219,8 +1224,14 @@ transport_emulate_control_cdb(struct se_task *task)

 	if (ret < 0)
 		return ret;
-	task->task_scsi_status = GOOD;
-	transport_complete_task(task, 1);
+	/*
+	 * Handle the successful completion here unless a caller
+	 * has explictly requested an asychronous completion.
+	 */
+	if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) {
+		task->task_scsi_status = GOOD;
+		transport_complete_task(task, 1);
+	}

 	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
 }
@@ -472,9 +472,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
 	struct se_dev_entry *deve;
 	u32 i;

-	spin_lock_bh(&tpg->acl_node_lock);
+	spin_lock_irq(&tpg->acl_node_lock);
 	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
-		spin_unlock_bh(&tpg->acl_node_lock);
+		spin_unlock_irq(&tpg->acl_node_lock);

 		spin_lock_irq(&nacl->device_list_lock);
 		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
@@ -491,9 +491,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
 		}
 		spin_unlock_irq(&nacl->device_list_lock);

-		spin_lock_bh(&tpg->acl_node_lock);
+		spin_lock_irq(&tpg->acl_node_lock);
 	}
-	spin_unlock_bh(&tpg->acl_node_lock);
+	spin_unlock_irq(&tpg->acl_node_lock);
 }

 static struct se_port *core_alloc_port(struct se_device *dev)
@@ -839,6 +839,24 @@ int se_dev_check_shutdown(struct se_device *dev)
 	return ret;
 }

+u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
+{
+	u32 tmp, aligned_max_sectors;
+	/*
+	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
+	 * transport_allocate_data_tasks() operation.
+	 */
+	tmp = rounddown((max_sectors * block_size), PAGE_SIZE);
+	aligned_max_sectors = (tmp / block_size);
+	if (max_sectors != aligned_max_sectors) {
+		printk(KERN_INFO "Rounding down aligned max_sectors from %u"
+			" to %u\n", max_sectors, aligned_max_sectors);
+		return aligned_max_sectors;
+	}
+
+	return max_sectors;
+}
+
 void se_dev_set_default_attribs(
 	struct se_device *dev,
 	struct se_dev_limits *dev_limits)
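The se_dev_align_max_sectors() helper added above picks the largest sector count whose byte length is a multiple of PAGE_SIZE. A worked userspace sketch of the arithmetic, under the assumption PAGE_SIZE is 4096 and with rounddown() reimplemented locally (values are illustrative, not from a real device):

```c
/*
 * Sketch of the rounding from se_dev_align_max_sectors() in
 * "target: Fix task count > 1 handling breakage and use max_sector
 * page alignment".  Not the kernel code.
 */
#include <stdio.h>

#define PAGE_SIZE 4096u

/* kernel's rounddown() for the general (non-power-of-2) case */
static unsigned rounddown(unsigned x, unsigned y)
{
	return x - (x % y);
}

static unsigned align_max_sectors(unsigned max_sectors, unsigned block_size)
{
	/* largest byte length <= max_sectors * block_size that is page aligned */
	unsigned tmp = rounddown(max_sectors * block_size, PAGE_SIZE);

	return tmp / block_size;
}

int main(void)
{
	/* 1023 x 512-byte sectors = 523776 bytes -> rounded down to 1016 */
	printf("%u\n", align_max_sectors(1023, 512));
	/* 1024 x 512 = 512 KiB is already page aligned and stays 1024 */
	printf("%u\n", align_max_sectors(1024, 512));
	return 0;
}
```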
@@ -878,6 +896,11 @@ void se_dev_set_default_attribs(
 	 * max_sectors is based on subsystem plugin dependent requirements.
 	 */
 	dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
+	/*
+	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
+	 */
+	limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors,
+						limits->logical_block_size);
 	dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
 	/*
 	 * Set optimal_sectors from max_sectors, which can be lowered via
@@ -1242,6 +1265,11 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
 			return -EINVAL;
 		}
 	}
+	/*
+	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
+	 */
+	max_sectors = se_dev_align_max_sectors(max_sectors,
+				dev->se_sub_dev->se_dev_attrib.block_size);

 	dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
 	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
@@ -1344,15 +1372,17 @@ struct se_lun *core_dev_add_lun(
 	 */
 	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
 		struct se_node_acl *acl;
-		spin_lock_bh(&tpg->acl_node_lock);
+		spin_lock_irq(&tpg->acl_node_lock);
 		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
-			if (acl->dynamic_node_acl) {
-				spin_unlock_bh(&tpg->acl_node_lock);
+			if (acl->dynamic_node_acl &&
+			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
+			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
+				spin_unlock_irq(&tpg->acl_node_lock);
 				core_tpg_add_node_to_devs(acl, tpg);
-				spin_lock_bh(&tpg->acl_node_lock);
+				spin_lock_irq(&tpg->acl_node_lock);
 			}
 		}
-		spin_unlock_bh(&tpg->acl_node_lock);
+		spin_unlock_irq(&tpg->acl_node_lock);
 	}

 	return lun_p;
@@ -481,7 +481,7 @@ static struct config_group *target_fabric_make_nodeacl(

 	se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name);
 	if (IS_ERR(se_nacl))
-		return ERR_PTR(PTR_ERR(se_nacl));
+		return ERR_CAST(se_nacl);

 	nacl_cg = &se_nacl->acl_group;
 	nacl_cg->default_groups = se_nacl->acl_default_groups;
@@ -1598,14 +1598,14 @@ static int core_scsi3_decode_spec_i_port(
 		 * from the decoded fabric module specific TransportID
 		 * at *i_str.
 		 */
-		spin_lock_bh(&tmp_tpg->acl_node_lock);
+		spin_lock_irq(&tmp_tpg->acl_node_lock);
 		dest_node_acl = __core_tpg_get_initiator_node_acl(
 					tmp_tpg, i_str);
 		if (dest_node_acl) {
 			atomic_inc(&dest_node_acl->acl_pr_ref_count);
 			smp_mb__after_atomic_inc();
 		}
-		spin_unlock_bh(&tmp_tpg->acl_node_lock);
+		spin_unlock_irq(&tmp_tpg->acl_node_lock);

 		if (!dest_node_acl) {
 			core_scsi3_tpg_undepend_item(tmp_tpg);
@@ -3496,14 +3496,14 @@ static int core_scsi3_emulate_pro_register_and_move(
 	/*
 	 * Locate the destination struct se_node_acl from the received Transport ID
 	 */
-	spin_lock_bh(&dest_se_tpg->acl_node_lock);
+	spin_lock_irq(&dest_se_tpg->acl_node_lock);
 	dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg,
 				initiator_str);
 	if (dest_node_acl) {
 		atomic_inc(&dest_node_acl->acl_pr_ref_count);
 		smp_mb__after_atomic_inc();
 	}
-	spin_unlock_bh(&dest_se_tpg->acl_node_lock);
+	spin_unlock_irq(&dest_se_tpg->acl_node_lock);

 	if (!dest_node_acl) {
 		pr_err("Unable to locate %s dest_node_acl for"
@@ -390,12 +390,10 @@ static int rd_MEMCPY_read(struct rd_request *req)
 			length = req->rd_size;

 		dst = sg_virt(&sg_d[i++]) + dst_offset;
-		if (!dst)
-			BUG();
+		BUG_ON(!dst);

 		src = sg_virt(&sg_s[j]) + src_offset;
-		if (!src)
-			BUG();
+		BUG_ON(!src);

 		dst_offset = 0;
 		src_offset = length;
@@ -415,8 +413,7 @@ static int rd_MEMCPY_read(struct rd_request *req)
 			length = req->rd_size;

 		dst = sg_virt(&sg_d[i]) + dst_offset;
-		if (!dst)
-			BUG();
+		BUG_ON(!dst);

 		if (sg_d[i].length == length) {
 			i++;
@@ -425,8 +422,7 @@ static int rd_MEMCPY_read(struct rd_request *req)
 			dst_offset = length;

 		src = sg_virt(&sg_s[j++]) + src_offset;
-		if (!src)
-			BUG();
+		BUG_ON(!src);

 		src_offset = 0;
 		page_end = 1;
@@ -510,12 +506,10 @@ static int rd_MEMCPY_write(struct rd_request *req)
 			length = req->rd_size;

 		src = sg_virt(&sg_s[i++]) + src_offset;
-		if (!src)
-			BUG();
+		BUG_ON(!src);

 		dst = sg_virt(&sg_d[j]) + dst_offset;
-		if (!dst)
-			BUG();
+		BUG_ON(!dst);

 		src_offset = 0;
 		dst_offset = length;
@@ -535,8 +529,7 @@ static int rd_MEMCPY_write(struct rd_request *req)
 			length = req->rd_size;

 		src = sg_virt(&sg_s[i]) + src_offset;
-		if (!src)
-			BUG();
+		BUG_ON(!src);

 		if (sg_s[i].length == length) {
 			i++;
@@ -545,8 +538,7 @@ static int rd_MEMCPY_write(struct rd_request *req)
 			src_offset = length;

 		dst = sg_virt(&sg_d[j++]) + dst_offset;
-		if (!dst)
-			BUG();
+		BUG_ON(!dst);

 		dst_offset = 0;
 		page_end = 1;
@@ -137,15 +137,15 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
 {
 	struct se_node_acl *acl;

-	spin_lock_bh(&tpg->acl_node_lock);
+	spin_lock_irq(&tpg->acl_node_lock);
 	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
 		if (!strcmp(acl->initiatorname, initiatorname) &&
 		    !acl->dynamic_node_acl) {
-			spin_unlock_bh(&tpg->acl_node_lock);
+			spin_unlock_irq(&tpg->acl_node_lock);
 			return acl;
 		}
 	}
-	spin_unlock_bh(&tpg->acl_node_lock);
+	spin_unlock_irq(&tpg->acl_node_lock);

 	return NULL;
 }
@@ -298,13 +298,21 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
 		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
 		return NULL;
 	}
+	/*
+	 * Here we only create demo-mode MappedLUNs from the active
+	 * TPG LUNs if the fabric is not explictly asking for
+	 * tpg_check_demo_mode_login_only() == 1.
+	 */
+	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only != NULL) &&
+	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) == 1))
+		do { ; } while (0);
+	else
+		core_tpg_add_node_to_devs(acl, tpg);

-	core_tpg_add_node_to_devs(acl, tpg);
-
-	spin_lock_bh(&tpg->acl_node_lock);
+	spin_lock_irq(&tpg->acl_node_lock);
 	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
 	tpg->num_node_acls++;
-	spin_unlock_bh(&tpg->acl_node_lock);
+	spin_unlock_irq(&tpg->acl_node_lock);

 	pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
 		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
@@ -354,7 +362,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
 {
 	struct se_node_acl *acl = NULL;

-	spin_lock_bh(&tpg->acl_node_lock);
+	spin_lock_irq(&tpg->acl_node_lock);
 	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
 	if (acl) {
 		if (acl->dynamic_node_acl) {
@@ -362,7 +370,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
 			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
 				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
 				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
-			spin_unlock_bh(&tpg->acl_node_lock);
+			spin_unlock_irq(&tpg->acl_node_lock);
 			/*
 			 * Release the locally allocated struct se_node_acl
 			 * because * core_tpg_add_initiator_node_acl() returned
@@ -378,10 +386,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
 			" Node %s already exists for TPG %u, ignoring"
 			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
 			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
-		spin_unlock_bh(&tpg->acl_node_lock);
+		spin_unlock_irq(&tpg->acl_node_lock);
 		return ERR_PTR(-EEXIST);
 	}
-	spin_unlock_bh(&tpg->acl_node_lock);
+	spin_unlock_irq(&tpg->acl_node_lock);

 	if (!se_nacl) {
 		pr_err("struct se_node_acl pointer is NULL\n");
@@ -418,10 +426,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
 		return ERR_PTR(-EINVAL);
 	}

-	spin_lock_bh(&tpg->acl_node_lock);
+	spin_lock_irq(&tpg->acl_node_lock);
 	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
 	tpg->num_node_acls++;
-	spin_unlock_bh(&tpg->acl_node_lock);
+	spin_unlock_irq(&tpg->acl_node_lock);

 done:
 	pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
@@ -445,14 +453,14 @@ int core_tpg_del_initiator_node_acl(
 	struct se_session *sess, *sess_tmp;
 	int dynamic_acl = 0;

-	spin_lock_bh(&tpg->acl_node_lock);
+	spin_lock_irq(&tpg->acl_node_lock);
 	if (acl->dynamic_node_acl) {
 		acl->dynamic_node_acl = 0;
 		dynamic_acl = 1;
 	}
 	list_del(&acl->acl_list);
 	tpg->num_node_acls--;
-	spin_unlock_bh(&tpg->acl_node_lock);
+	spin_unlock_irq(&tpg->acl_node_lock);

 	spin_lock_bh(&tpg->session_lock);
 	list_for_each_entry_safe(sess, sess_tmp,
@@ -503,21 +511,21 @@ int core_tpg_set_initiator_node_queue_depth(
 	struct se_node_acl *acl;
 	int dynamic_acl = 0;

-	spin_lock_bh(&tpg->acl_node_lock);
+	spin_lock_irq(&tpg->acl_node_lock);
 	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
 	if (!acl) {
 		pr_err("Access Control List entry for %s Initiator"
 			" Node %s does not exists for TPG %hu, ignoring"
 			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
 			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
-		spin_unlock_bh(&tpg->acl_node_lock);
+		spin_unlock_irq(&tpg->acl_node_lock);
 		return -ENODEV;
 	}
 	if (acl->dynamic_node_acl) {
 		acl->dynamic_node_acl = 0;
 		dynamic_acl = 1;
 	}
-	spin_unlock_bh(&tpg->acl_node_lock);
+	spin_unlock_irq(&tpg->acl_node_lock);

 	spin_lock_bh(&tpg->session_lock);
 	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
@@ -533,10 +541,10 @@ int core_tpg_set_initiator_node_queue_depth(
 				tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
 			spin_unlock_bh(&tpg->session_lock);

-			spin_lock_bh(&tpg->acl_node_lock);
+			spin_lock_irq(&tpg->acl_node_lock);
 			if (dynamic_acl)
 				acl->dynamic_node_acl = 1;
-			spin_unlock_bh(&tpg->acl_node_lock);
+			spin_unlock_irq(&tpg->acl_node_lock);
 			return -EEXIST;
 		}
 		/*
@@ -571,10 +579,10 @@ int core_tpg_set_initiator_node_queue_depth(
 		if (init_sess)
 			tpg->se_tpg_tfo->close_session(init_sess);

-		spin_lock_bh(&tpg->acl_node_lock);
+		spin_lock_irq(&tpg->acl_node_lock);
 		if (dynamic_acl)
 			acl->dynamic_node_acl = 1;
-		spin_unlock_bh(&tpg->acl_node_lock);
+		spin_unlock_irq(&tpg->acl_node_lock);
 		return -EINVAL;
 	}
 	spin_unlock_bh(&tpg->session_lock);
@@ -590,10 +598,10 @@ int core_tpg_set_initiator_node_queue_depth(
 		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
 		tpg->se_tpg_tfo->tpg_get_tag(tpg));

-	spin_lock_bh(&tpg->acl_node_lock);
+	spin_lock_irq(&tpg->acl_node_lock);
 	if (dynamic_acl)
 		acl->dynamic_node_acl = 1;
-	spin_unlock_bh(&tpg->acl_node_lock);
+	spin_unlock_irq(&tpg->acl_node_lock);

 	return 0;
 }
@@ -717,20 +725,20 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
 	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
 	 * in transport_deregister_session().
 	 */
-	spin_lock_bh(&se_tpg->acl_node_lock);
+	spin_lock_irq(&se_tpg->acl_node_lock);
 	list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
 			acl_list) {
 		list_del(&nacl->acl_list);
 		se_tpg->num_node_acls--;
-		spin_unlock_bh(&se_tpg->acl_node_lock);
+		spin_unlock_irq(&se_tpg->acl_node_lock);

 		core_tpg_wait_for_nacl_pr_ref(nacl);
 		core_free_device_list_for_node(nacl, se_tpg);
 		se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);

-		spin_lock_bh(&se_tpg->acl_node_lock);
+		spin_lock_irq(&se_tpg->acl_node_lock);
 	}
-	spin_unlock_bh(&se_tpg->acl_node_lock);
+	spin_unlock_irq(&se_tpg->acl_node_lock);

 	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
 		core_tpg_release_virtual_lun0(se_tpg);
@@ -389,17 +389,18 @@ void transport_deregister_session(struct se_session *se_sess)
 {
 	struct se_portal_group *se_tpg = se_sess->se_tpg;
 	struct se_node_acl *se_nacl;
+	unsigned long flags;

 	if (!se_tpg) {
 		transport_free_session(se_sess);
 		return;
 	}

-	spin_lock_bh(&se_tpg->session_lock);
+	spin_lock_irqsave(&se_tpg->session_lock, flags);
 	list_del(&se_sess->sess_list);
 	se_sess->se_tpg = NULL;
 	se_sess->fabric_sess_ptr = NULL;
-	spin_unlock_bh(&se_tpg->session_lock);
+	spin_unlock_irqrestore(&se_tpg->session_lock, flags);

 	/*
 	 * Determine if we need to do extra work for this initiator node's
@@ -407,22 +408,22 @@ void transport_deregister_session(struct se_session *se_sess)
 	 */
 	se_nacl = se_sess->se_node_acl;
 	if (se_nacl) {
-		spin_lock_bh(&se_tpg->acl_node_lock);
+		spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
 		if (se_nacl->dynamic_node_acl) {
 			if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
 					se_tpg)) {
 				list_del(&se_nacl->acl_list);
 				se_tpg->num_node_acls--;
-				spin_unlock_bh(&se_tpg->acl_node_lock);
+				spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);

 				core_tpg_wait_for_nacl_pr_ref(se_nacl);
 				core_free_device_list_for_node(se_nacl, se_tpg);
 				se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg,
 						se_nacl);
-				spin_lock_bh(&se_tpg->acl_node_lock);
+				spin_lock_irqsave(&se_tpg->acl_node_lock, flags);
 			}
 		}
-		spin_unlock_bh(&se_tpg->acl_node_lock);
+		spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);
 	}

 	transport_free_session(se_sess);
@@ -2053,8 +2054,14 @@ static void transport_generic_request_failure(
 		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
 		break;
 	}

-	if (!sc)
+	/*
+	 * If a fabric does not define a cmd->se_tfo->new_cmd_map caller,
+	 * make the call to transport_send_check_condition_and_sense()
+	 * directly. Otherwise expect the fabric to make the call to
+	 * transport_send_check_condition_and_sense() after handling
+	 * possible unsoliticied write data payloads.
+	 */
+	if (!sc && !cmd->se_tfo->new_cmd_map)
 		transport_new_cmd_failure(cmd);
 	else {
 		ret = transport_send_check_condition_and_sense(cmd,
@@ -2847,12 +2854,42 @@ static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)
 			" transport_dev_end_lba(): %llu\n",
 			cmd->t_task_lba, sectors,
 			transport_dev_end_lba(dev));
-		pr_err("  We should return CHECK_CONDITION"
-		       " but we don't yet\n");
-		return 0;
+		return -EINVAL;
 	}

-	return sectors;
+	return 0;
+}
+
+static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev)
+{
+	/*
+	 * Determine if the received WRITE_SAME is used to for direct
+	 * passthrough into Linux/SCSI with struct request via TCM/pSCSI
+	 * or we are signaling the use of internal WRITE_SAME + UNMAP=1
+	 * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK code.
+	 */
+	int passthrough = (dev->transport->transport_type ==
+				TRANSPORT_PLUGIN_PHBA_PDEV);
+
+	if (!passthrough) {
+		if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
+			pr_err("WRITE_SAME PBDATA and LBDATA"
+				" bits not supported for Block Discard"
+				" Emulation\n");
+			return -ENOSYS;
+		}
+		/*
+		 * Currently for the emulated case we only accept
+		 * tpws with the UNMAP=1 bit set.
+		 */
+		if (!(flags[0] & 0x08)) {
+			pr_err("WRITE_SAME w/o UNMAP bit not"
+				" supported for Block Discard Emulation\n");
+			return -ENOSYS;
+		}
+	}
+
+	return 0;
 }

 /* transport_generic_cmd_sequencer():
@@ -3065,7 +3102,7 @@ static int transport_generic_cmd_sequencer(
 			goto out_unsupported_cdb;

 		if (sectors)
-			size = transport_get_size(sectors, cdb, cmd);
+			size = transport_get_size(1, cdb, cmd);
 		else {
 			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
 				" supported\n");
@@ -3075,27 +3112,9 @@ static int transport_generic_cmd_sequencer(
 			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
 			cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;

-			/*
-			 * Skip the remaining assignments for TCM/PSCSI passthrough
-			 */
-			if (passthrough)
-				break;
-
-			if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) {
-				pr_err("WRITE_SAME PBDATA and LBDATA"
-					" bits not supported for Block Discard"
-					" Emulation\n");
-				goto out_invalid_cdb_field;
-			}
-			/*
-			 * Currently for the emulated case we only accept
-			 * tpws with the UNMAP=1 bit set.
-			 */
-			if (!(cdb[10] & 0x08)) {
-				pr_err("WRITE_SAME w/o UNMAP bit not"
-					" supported for Block Discard Emulation\n");
-				goto out_invalid_cdb_field;
-			}
+			if (target_check_write_same_discard(&cdb[10], dev) < 0)
+				goto out_invalid_cdb_field;

 			break;
 		default:
 			pr_err("VARIABLE_LENGTH_CMD service action"
@@ -3330,10 +3349,12 @@ static int transport_generic_cmd_sequencer(
 		cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC;
 		/*
 		 * Check to ensure that LBA + Range does not exceed past end of
-		 * device.
+		 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls
 		 */
-		if (!transport_cmd_get_valid_sectors(cmd))
-			goto out_invalid_cdb_field;
+		if ((cmd->t_task_lba != 0) || (sectors != 0)) {
+			if (transport_cmd_get_valid_sectors(cmd) < 0)
+				goto out_invalid_cdb_field;
+		}
 		break;
 	case UNMAP:
 		size = get_unaligned_be16(&cdb[7]);
@@ -3345,40 +3366,38 @@ static int transport_generic_cmd_sequencer(
 			goto out_unsupported_cdb;

 		if (sectors)
-			size = transport_get_size(sectors, cdb, cmd);
+			size = transport_get_size(1, cdb, cmd);
 		else {
 			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
 			goto out_invalid_cdb_field;
 		}

 		cmd->t_task_lba = get_unaligned_be64(&cdb[2]);
-		passthrough = (dev->transport->transport_type ==
-				TRANSPORT_PLUGIN_PHBA_PDEV);
-		/*
-		 * Determine if the received WRITE_SAME_16 is used to for direct
-		 * passthrough into Linux/SCSI with struct request via TCM/pSCSI
-		 * or we are signaling the use of internal WRITE_SAME + UNMAP=1
-		 * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK and
-		 * TCM/FILEIO subsystem plugin backstores.
-		 */
-		if (!passthrough) {
-			if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) {
-				pr_err("WRITE_SAME PBDATA and LBDATA"
-					" bits not supported for Block Discard"
-					" Emulation\n");
-				goto out_invalid_cdb_field;
-			}
-			/*
-			 * Currently for the emulated case we only accept
-			 * tpws with the UNMAP=1 bit set.
-			 */
-			if (!(cdb[1] & 0x08)) {
-				pr_err("WRITE_SAME w/o UNMAP bit not "
-					" supported for Block Discard Emulation\n");
-				goto out_invalid_cdb_field;
-			}
-		}
 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;

+		if (target_check_write_same_discard(&cdb[1], dev) < 0)
+			goto out_invalid_cdb_field;
 		break;
+	case WRITE_SAME:
+		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
+		if (sector_ret)
+			goto out_unsupported_cdb;
+
+		if (sectors)
+			size = transport_get_size(1, cdb, cmd);
+		else {
+			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
+			goto out_invalid_cdb_field;
+		}
+
+		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);
+		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
+		/*
+		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence
+		 * of byte 1 bit 3 UNMAP instead of original reserved field
+		 */
+		if (target_check_write_same_discard(&cdb[1], dev) < 0)
+			goto out_invalid_cdb_field;
+		break;
 	case ALLOW_MEDIUM_REMOVAL:
 	case GPCMD_CLOSE_TRACK:
@@ -3873,9 +3892,7 @@ EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
 static int transport_new_cmd_obj(struct se_cmd *cmd)
 {
 	struct se_device *dev = cmd->se_dev;
-	u32 task_cdbs;
-	u32 rc;
-	int set_counts = 1;
+	int set_counts = 1, rc, task_cdbs;

 	/*
 	 * Setup any BIDI READ tasks and memory from
@@ -3893,7 +3910,7 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
 			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 			cmd->scsi_sense_reason =
 				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-			return PYX_TRANSPORT_LU_COMM_FAILURE;
+			return -EINVAL;
 		}
 		atomic_inc(&cmd->t_fe_count);
 		atomic_inc(&cmd->t_se_count);
@@ -3912,7 +3929,7 @@ static int transport_new_cmd_obj(struct se_cmd *cmd)
 		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 		cmd->scsi_sense_reason =
 			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-		return PYX_TRANSPORT_LU_COMM_FAILURE;
+		return -EINVAL;
 	}

 	if (set_counts) {
@@ -4028,8 +4045,6 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
 		if (!task->task_sg)
 			continue;

-		BUG_ON(!task->task_padded_sg);
-
 		if (!sg_first) {
 			sg_first = task->task_sg;
 			chained_nents = task->task_sg_nents;
@@ -4037,9 +4052,19 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)
 			sg_chain(sg_prev, sg_prev_nents, task->task_sg);
 			chained_nents += task->task_sg_nents;
 		}
+		/*
+		 * For the padded tasks, use the extra SGL vector allocated
+		 * in transport_allocate_data_tasks() for the sg_prev_nents
+		 * offset into sg_chain() above.. The last task of a
+		 * multi-task list, or a single task will not have
+		 * task->task_sg_padded set..
+		 */
+		if (task->task_padded_sg)
+			sg_prev_nents = (task->task_sg_nents + 1);
+		else
+			sg_prev_nents = task->task_sg_nents;

 		sg_prev = task->task_sg;
-		sg_prev_nents = task->task_sg_nents;
 	}
 	/*
 	 * Setup the starting pointer and total t_tasks_sg_linked_no including
@@ -4091,7 +4116,7 @@ static int transport_allocate_data_tasks(

 	cmd_sg = sgl;
 	for (i = 0; i < task_count; i++) {
-		unsigned int task_size;
+		unsigned int task_size, task_sg_nents_padded;
 		int count;

 		task = transport_generic_get_task(cmd, data_direction);
@@ -4110,30 +4135,33 @@ static int transport_allocate_data_tasks(

 		/* Update new cdb with updated lba/sectors */
 		cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb);
+		/*
+		 * This now assumes that passed sg_ents are in PAGE_SIZE chunks
+		 * in order to calculate the number per task SGL entries
+		 */
+		task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE);
 		/*
 		 * Check if the fabric module driver is requesting that all
 		 * struct se_task->task_sg[] be chained together.. If so,
 		 * then allocate an extra padding SG entry for linking and
-		 * marking the end of the chained SGL.
-		 * Possibly over-allocate task sgl size by using cmd sgl size.
-		 * It's so much easier and only a waste when task_count > 1.
-		 * That is extremely rare.
+		 * marking the end of the chained SGL for every task except
+		 * the last one for (task_count > 1) operation, or skipping
+		 * the extra padding for the (task_count == 1) case.
 		 */
-		task->task_sg_nents = sgl_nents;
-		if (cmd->se_tfo->task_sg_chaining) {
-			task->task_sg_nents++;
+		if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) {
+			task_sg_nents_padded = (task->task_sg_nents + 1);
 			task->task_padded_sg = 1;
-		}
+		} else
+			task_sg_nents_padded = task->task_sg_nents;

 		task->task_sg = kmalloc(sizeof(struct scatterlist) *
-				task->task_sg_nents, GFP_KERNEL);
+				task_sg_nents_padded, GFP_KERNEL);
 		if (!task->task_sg) {
 			cmd->se_dev->transport->free_task(task);
 			return -ENOMEM;
 		}

-		sg_init_table(task->task_sg, task->task_sg_nents);
+		sg_init_table(task->task_sg, task_sg_nents_padded);

 		task_size = task->task_size;
@@ -4230,10 +4258,13 @@ static u32 transport_allocate_tasks(
 	struct scatterlist *sgl,
 	unsigned int sgl_nents)
 {
-	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)
+	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
+		if (transport_cmd_get_valid_sectors(cmd) < 0)
+			return -EINVAL;
+
 		return transport_allocate_data_tasks(cmd, lba, data_direction,
 				sgl, sgl_nents);
-	else
+	} else
 		return transport_allocate_control_task(cmd);

 }
@@ -4726,6 +4757,13 @@ int transport_send_check_condition_and_sense(
 	 */
 	switch (reason) {
+	case TCM_NON_EXISTENT_LUN:
+		/* CURRENT ERROR */
+		buffer[offset] = 0x70;
+		/* ILLEGAL REQUEST */
+		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
+		/* LOGICAL UNIT NOT SUPPORTED */
+		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25;
+		break;
 	case TCM_UNSUPPORTED_SCSI_OPCODE:
 	case TCM_SECTOR_COUNT_TOO_MANY:
 		/* CURRENT ERROR */
@@ -256,7 +256,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
 	struct se_portal_group *se_tpg = &tpg->se_tpg;
 	struct se_node_acl *se_acl;

-	spin_lock_bh(&se_tpg->acl_node_lock);
+	spin_lock_irq(&se_tpg->acl_node_lock);
 	list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) {
 		acl = container_of(se_acl, struct ft_node_acl, se_node_acl);
 		pr_debug("acl %p port_name %llx\n",
@@ -270,7 +270,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)
 			break;
 		}
 	}
-	spin_unlock_bh(&se_tpg->acl_node_lock);
+	spin_unlock_irq(&se_tpg->acl_node_lock);
 	return found;
 }

@@ -655,9 +655,7 @@ static void __exit ft_exit(void)
 	synchronize_rcu();
 }

-#ifdef MODULE
 MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION);
 MODULE_LICENSE("GPL");
 module_init(ft_init);
 module_exit(ft_exit);
-#endif /* MODULE */
@@ -27,6 +27,12 @@ struct target_core_fabric_ops {
 	int (*tpg_check_demo_mode_cache)(struct se_portal_group *);
 	int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *);
 	int (*tpg_check_prod_mode_write_protect)(struct se_portal_group *);
+	/*
+	 * Optionally used by fabrics to allow demo-mode login, but not
+	 * expose any TPG LUNs, and return 'not connected' in standard
+	 * inquiry response
+	 */
+	int (*tpg_check_demo_mode_login_only)(struct se_portal_group *);
 	struct se_node_acl *(*tpg_alloc_fabric_acl)(
 		struct se_portal_group *);
 	void (*tpg_release_fabric_acl)(struct se_portal_group *,