mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-11-25 01:10:53 +07:00
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending
Pull SCSI target fixes from Nicholas Bellinger: "The highlights this merge window include: - Allow target fabric drivers to function as built-in. (Roland) - Fix tcm_loop multi-TPG endpoint nexus bug. (Hannes) - Move per device config_item_type into se_subsystem_api, allowing configfs attributes to be defined at module_init time. (Jerome + nab) - Convert existing IBLOCK/FILEIO/RAMDISK/PSCSI/TCMU drivers to use external configfs attributes. (nab) - A number of iser-target fixes related to active session + network portal shutdown stability during extended stress testing. (Sagi + Slava) - Dynamic allocation of T10-PI contexts for iser-target, fixing a potentially bogus iscsi_np->tpg_np pointer reference in >= v3.14 code. (Sagi) - iser-target performance + scalability improvements. (Sagi) - Fixes for SPC-4 Persistent Reservation AllRegistrants spec compliance. (Ilias + James + nab) - Avoid potential short kern_sendmsg() in iscsi-target for now until Al's conversion to use msghdr iteration is merged post -rc1. (Viro) Also, Sagi has requested a number of iser-target patches (9) that address stability issues he's encountered during extended stress testing be considered for v3.10.y + v3.14.y code. Given the amount of LOC involved, it will certainly require extra backporting effort. Apologies in advance to Greg-KH & Co on this. Sagi and I will be working post-merge to ensure they each get applied correctly" * 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (53 commits) target: Allow AllRegistrants to re-RESERVE existing reservation uapi/linux/target_core_user.h: fix headers_install.sh badness iscsi-target: Fail connection on short sendmsg writes iscsi-target: nullify session in failed login sequence target: Avoid dropping AllRegistrants reservation during unregister target: Fix R_HOLDER bit usage for AllRegistrants iscsi-target: Drop left-over bogus iscsi_np->tpg_np iser-target: Fix wc->wr_id cast warning iser-target: Remove code duplication iser-target: Adjust log levels and prettify some prints iser-target: Use debug_level parameter to control logging level iser-target: Fix logout sequence iser-target: Don't wait for session commands from completion context iser-target: Reduce CQ lock contention by batch polling iser-target: Introduce isert_poll_budget iser-target: Remove an atomic operation from the IO path iser-target: Remove redundant call to isert_conn_terminate iser-target: Use single CQ for TX and RX iser-target: Centralize completion elements to a context iser-target: Cast wr_id with uintptr_t instead of unsinged long ...
This commit is contained in:
commit
ed55635e2e
File diff suppressed because it is too large
Load Diff
@ -4,9 +4,37 @@
|
||||
#include <rdma/ib_verbs.h>
|
||||
#include <rdma/rdma_cm.h>
|
||||
|
||||
#define DRV_NAME "isert"
|
||||
#define PFX DRV_NAME ": "
|
||||
|
||||
#define isert_dbg(fmt, arg...) \
|
||||
do { \
|
||||
if (unlikely(isert_debug_level > 2)) \
|
||||
printk(KERN_DEBUG PFX "%s: " fmt,\
|
||||
__func__ , ## arg); \
|
||||
} while (0)
|
||||
|
||||
#define isert_warn(fmt, arg...) \
|
||||
do { \
|
||||
if (unlikely(isert_debug_level > 0)) \
|
||||
pr_warn(PFX "%s: " fmt, \
|
||||
__func__ , ## arg); \
|
||||
} while (0)
|
||||
|
||||
#define isert_info(fmt, arg...) \
|
||||
do { \
|
||||
if (unlikely(isert_debug_level > 1)) \
|
||||
pr_info(PFX "%s: " fmt, \
|
||||
__func__ , ## arg); \
|
||||
} while (0)
|
||||
|
||||
#define isert_err(fmt, arg...) \
|
||||
pr_err(PFX "%s: " fmt, __func__ , ## arg)
|
||||
|
||||
#define ISERT_RDMA_LISTEN_BACKLOG 10
|
||||
#define ISCSI_ISER_SG_TABLESIZE 256
|
||||
#define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL
|
||||
#define ISER_BEACON_WRID 0xfffffffffffffffeULL
|
||||
|
||||
enum isert_desc_type {
|
||||
ISCSI_TX_CONTROL,
|
||||
@ -23,6 +51,7 @@ enum iser_ib_op_code {
|
||||
enum iser_conn_state {
|
||||
ISER_CONN_INIT,
|
||||
ISER_CONN_UP,
|
||||
ISER_CONN_FULL_FEATURE,
|
||||
ISER_CONN_TERMINATING,
|
||||
ISER_CONN_DOWN,
|
||||
};
|
||||
@ -44,9 +73,6 @@ struct iser_tx_desc {
|
||||
struct ib_sge tx_sg[2];
|
||||
int num_sge;
|
||||
struct isert_cmd *isert_cmd;
|
||||
struct llist_node *comp_llnode_batch;
|
||||
struct llist_node comp_llnode;
|
||||
bool llnode_active;
|
||||
struct ib_send_wr send_wr;
|
||||
} __packed;
|
||||
|
||||
@ -81,6 +107,12 @@ struct isert_data_buf {
|
||||
enum dma_data_direction dma_dir;
|
||||
};
|
||||
|
||||
enum {
|
||||
DATA = 0,
|
||||
PROT = 1,
|
||||
SIG = 2,
|
||||
};
|
||||
|
||||
struct isert_rdma_wr {
|
||||
struct list_head wr_list;
|
||||
struct isert_cmd *isert_cmd;
|
||||
@ -90,6 +122,7 @@ struct isert_rdma_wr {
|
||||
int send_wr_num;
|
||||
struct ib_send_wr *send_wr;
|
||||
struct ib_send_wr s_send_wr;
|
||||
struct ib_sge ib_sg[3];
|
||||
struct isert_data_buf data;
|
||||
struct isert_data_buf prot;
|
||||
struct fast_reg_descriptor *fr_desc;
|
||||
@ -117,14 +150,15 @@ struct isert_device;
|
||||
struct isert_conn {
|
||||
enum iser_conn_state state;
|
||||
int post_recv_buf_count;
|
||||
atomic_t post_send_buf_count;
|
||||
u32 responder_resources;
|
||||
u32 initiator_depth;
|
||||
bool pi_support;
|
||||
u32 max_sge;
|
||||
char *login_buf;
|
||||
char *login_req_buf;
|
||||
char *login_rsp_buf;
|
||||
u64 login_req_dma;
|
||||
int login_req_len;
|
||||
u64 login_rsp_dma;
|
||||
unsigned int conn_rx_desc_head;
|
||||
struct iser_rx_desc *conn_rx_descs;
|
||||
@ -132,13 +166,13 @@ struct isert_conn {
|
||||
struct iscsi_conn *conn;
|
||||
struct list_head conn_accept_node;
|
||||
struct completion conn_login_comp;
|
||||
struct completion login_req_comp;
|
||||
struct iser_tx_desc conn_login_tx_desc;
|
||||
struct rdma_cm_id *conn_cm_id;
|
||||
struct ib_pd *conn_pd;
|
||||
struct ib_mr *conn_mr;
|
||||
struct ib_qp *conn_qp;
|
||||
struct isert_device *conn_device;
|
||||
struct work_struct conn_logout_work;
|
||||
struct mutex conn_mutex;
|
||||
struct completion conn_wait;
|
||||
struct completion conn_wait_comp_err;
|
||||
@ -147,31 +181,38 @@ struct isert_conn {
|
||||
int conn_fr_pool_size;
|
||||
/* lock to protect fastreg pool */
|
||||
spinlock_t conn_lock;
|
||||
#define ISERT_COMP_BATCH_COUNT 8
|
||||
int conn_comp_batch;
|
||||
struct llist_head conn_comp_llist;
|
||||
bool disconnect;
|
||||
struct work_struct release_work;
|
||||
struct ib_recv_wr beacon;
|
||||
bool logout_posted;
|
||||
};
|
||||
|
||||
#define ISERT_MAX_CQ 64
|
||||
|
||||
struct isert_cq_desc {
|
||||
struct isert_device *device;
|
||||
int cq_index;
|
||||
struct work_struct cq_rx_work;
|
||||
struct work_struct cq_tx_work;
|
||||
/**
|
||||
* struct isert_comp - iSER completion context
|
||||
*
|
||||
* @device: pointer to device handle
|
||||
* @cq: completion queue
|
||||
* @wcs: work completion array
|
||||
* @active_qps: Number of active QPs attached
|
||||
* to completion context
|
||||
* @work: completion work handle
|
||||
*/
|
||||
struct isert_comp {
|
||||
struct isert_device *device;
|
||||
struct ib_cq *cq;
|
||||
struct ib_wc wcs[16];
|
||||
int active_qps;
|
||||
struct work_struct work;
|
||||
};
|
||||
|
||||
struct isert_device {
|
||||
int use_fastreg;
|
||||
bool pi_capable;
|
||||
int cqs_used;
|
||||
int refcount;
|
||||
int cq_active_qps[ISERT_MAX_CQ];
|
||||
struct ib_device *ib_device;
|
||||
struct ib_cq *dev_rx_cq[ISERT_MAX_CQ];
|
||||
struct ib_cq *dev_tx_cq[ISERT_MAX_CQ];
|
||||
struct isert_cq_desc *cq_desc;
|
||||
struct isert_comp *comps;
|
||||
int comps_used;
|
||||
struct list_head dev_node;
|
||||
struct ib_device_attr dev_attr;
|
||||
int (*reg_rdma_mem)(struct iscsi_conn *conn,
|
||||
@ -182,6 +223,7 @@ struct isert_device {
|
||||
};
|
||||
|
||||
struct isert_np {
|
||||
struct iscsi_np *np;
|
||||
struct semaphore np_sem;
|
||||
struct rdma_cm_id *np_cm_id;
|
||||
struct mutex np_accept_mutex;
|
||||
|
@ -609,6 +609,7 @@ static int __init iscsi_target_init_module(void)
|
||||
|
||||
return ret;
|
||||
r2t_out:
|
||||
iscsit_unregister_transport(&iscsi_target_transport);
|
||||
kmem_cache_destroy(lio_r2t_cache);
|
||||
ooo_out:
|
||||
kmem_cache_destroy(lio_ooo_cache);
|
||||
|
@ -790,7 +790,6 @@ struct iscsi_np {
|
||||
void *np_context;
|
||||
struct iscsit_transport *np_transport;
|
||||
struct list_head np_list;
|
||||
struct iscsi_tpg_np *tpg_np;
|
||||
} ____cacheline_aligned;
|
||||
|
||||
struct iscsi_tpg_np {
|
||||
|
@ -281,7 +281,6 @@ static int iscsi_login_zero_tsih_s1(
|
||||
{
|
||||
struct iscsi_session *sess = NULL;
|
||||
struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
|
||||
enum target_prot_op sup_pro_ops;
|
||||
int ret;
|
||||
|
||||
sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL);
|
||||
@ -343,9 +342,8 @@ static int iscsi_login_zero_tsih_s1(
|
||||
kfree(sess);
|
||||
return -ENOMEM;
|
||||
}
|
||||
sup_pro_ops = conn->conn_transport->iscsit_get_sup_prot_ops(conn);
|
||||
|
||||
sess->se_sess = transport_init_session(sup_pro_ops);
|
||||
sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
|
||||
if (IS_ERR(sess->se_sess)) {
|
||||
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
|
||||
ISCSI_LOGIN_STATUS_NO_RESOURCES);
|
||||
@ -1161,6 +1159,7 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn,
|
||||
}
|
||||
kfree(conn->sess->sess_ops);
|
||||
kfree(conn->sess);
|
||||
conn->sess = NULL;
|
||||
|
||||
old_sess_out:
|
||||
iscsi_stop_login_thread_timer(np);
|
||||
@ -1204,6 +1203,9 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn,
|
||||
conn->sock = NULL;
|
||||
}
|
||||
|
||||
if (conn->conn_transport->iscsit_wait_conn)
|
||||
conn->conn_transport->iscsit_wait_conn(conn);
|
||||
|
||||
if (conn->conn_transport->iscsit_free_conn)
|
||||
conn->conn_transport->iscsit_free_conn(conn);
|
||||
|
||||
@ -1364,6 +1366,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
|
||||
}
|
||||
login->zero_tsih = zero_tsih;
|
||||
|
||||
conn->sess->se_sess->sup_prot_ops =
|
||||
conn->conn_transport->iscsit_get_sup_prot_ops(conn);
|
||||
|
||||
tpg = conn->tpg;
|
||||
if (!tpg) {
|
||||
pr_err("Unable to locate struct iscsi_conn->tpg\n");
|
||||
|
@ -501,7 +501,6 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
|
||||
init_completion(&tpg_np->tpg_np_comp);
|
||||
kref_init(&tpg_np->tpg_np_kref);
|
||||
tpg_np->tpg_np = np;
|
||||
np->tpg_np = tpg_np;
|
||||
tpg_np->tpg = tpg;
|
||||
|
||||
spin_lock(&tpg->tpg_np_lock);
|
||||
|
@ -26,8 +26,7 @@ struct iscsit_transport *iscsit_get_transport(int type)
|
||||
|
||||
void iscsit_put_transport(struct iscsit_transport *t)
|
||||
{
|
||||
if (t->owner)
|
||||
module_put(t->owner);
|
||||
module_put(t->owner);
|
||||
}
|
||||
|
||||
int iscsit_register_transport(struct iscsit_transport *t)
|
||||
|
@ -1356,15 +1356,15 @@ static int iscsit_do_tx_data(
|
||||
struct iscsi_conn *conn,
|
||||
struct iscsi_data_count *count)
|
||||
{
|
||||
int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
|
||||
int ret, iov_len;
|
||||
struct kvec *iov_p;
|
||||
struct msghdr msg;
|
||||
|
||||
if (!conn || !conn->sock || !conn->conn_ops)
|
||||
return -1;
|
||||
|
||||
if (data <= 0) {
|
||||
pr_err("Data length is: %d\n", data);
|
||||
if (count->data_length <= 0) {
|
||||
pr_err("Data length is: %d\n", count->data_length);
|
||||
return -1;
|
||||
}
|
||||
|
||||
@ -1373,20 +1373,16 @@ static int iscsit_do_tx_data(
|
||||
iov_p = count->iov;
|
||||
iov_len = count->iov_count;
|
||||
|
||||
while (total_tx < data) {
|
||||
tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
|
||||
(data - total_tx));
|
||||
if (tx_loop <= 0) {
|
||||
pr_debug("tx_loop: %d total_tx %d\n",
|
||||
tx_loop, total_tx);
|
||||
return tx_loop;
|
||||
}
|
||||
total_tx += tx_loop;
|
||||
pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
|
||||
tx_loop, total_tx, data);
|
||||
ret = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
|
||||
count->data_length);
|
||||
if (ret != count->data_length) {
|
||||
pr_err("Unexpected ret: %d send data %d\n",
|
||||
ret, count->data_length);
|
||||
return -EPIPE;
|
||||
}
|
||||
pr_debug("ret: %d, sent data: %d\n", ret, count->data_length);
|
||||
|
||||
return total_tx;
|
||||
return ret;
|
||||
}
|
||||
|
||||
int rx_data(
|
||||
|
@ -138,7 +138,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
|
||||
set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
|
||||
goto out_done;
|
||||
}
|
||||
tl_nexus = tl_hba->tl_nexus;
|
||||
tl_nexus = tl_tpg->tl_nexus;
|
||||
if (!tl_nexus) {
|
||||
scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
|
||||
" does not exist\n");
|
||||
@ -218,16 +218,26 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
|
||||
* to struct scsi_device
|
||||
*/
|
||||
static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
|
||||
struct tcm_loop_nexus *tl_nexus,
|
||||
int lun, int task, enum tcm_tmreq_table tmr)
|
||||
{
|
||||
struct se_cmd *se_cmd = NULL;
|
||||
struct se_session *se_sess;
|
||||
struct se_portal_group *se_tpg;
|
||||
struct tcm_loop_nexus *tl_nexus;
|
||||
struct tcm_loop_cmd *tl_cmd = NULL;
|
||||
struct tcm_loop_tmr *tl_tmr = NULL;
|
||||
int ret = TMR_FUNCTION_FAILED, rc;
|
||||
|
||||
/*
|
||||
* Locate the tl_nexus and se_sess pointers
|
||||
*/
|
||||
tl_nexus = tl_tpg->tl_nexus;
|
||||
if (!tl_nexus) {
|
||||
pr_err("Unable to perform device reset without"
|
||||
" active I_T Nexus\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
|
||||
if (!tl_cmd) {
|
||||
pr_err("Unable to allocate memory for tl_cmd\n");
|
||||
@ -243,7 +253,7 @@ static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
|
||||
|
||||
se_cmd = &tl_cmd->tl_se_cmd;
|
||||
se_tpg = &tl_tpg->tl_se_tpg;
|
||||
se_sess = tl_nexus->se_sess;
|
||||
se_sess = tl_tpg->tl_nexus->se_sess;
|
||||
/*
|
||||
* Initialize struct se_cmd descriptor from target_core_mod infrastructure
|
||||
*/
|
||||
@ -288,7 +298,6 @@ static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
|
||||
static int tcm_loop_abort_task(struct scsi_cmnd *sc)
|
||||
{
|
||||
struct tcm_loop_hba *tl_hba;
|
||||
struct tcm_loop_nexus *tl_nexus;
|
||||
struct tcm_loop_tpg *tl_tpg;
|
||||
int ret = FAILED;
|
||||
|
||||
@ -296,21 +305,8 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
|
||||
* Locate the tcm_loop_hba_t pointer
|
||||
*/
|
||||
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
|
||||
/*
|
||||
* Locate the tl_nexus and se_sess pointers
|
||||
*/
|
||||
tl_nexus = tl_hba->tl_nexus;
|
||||
if (!tl_nexus) {
|
||||
pr_err("Unable to perform device reset without"
|
||||
" active I_T Nexus\n");
|
||||
return FAILED;
|
||||
}
|
||||
|
||||
/*
|
||||
* Locate the tl_tpg pointer from TargetID in sc->device->id
|
||||
*/
|
||||
tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
|
||||
ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
|
||||
ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
|
||||
sc->request->tag, TMR_ABORT_TASK);
|
||||
return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
|
||||
}
|
||||
@ -322,7 +318,6 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
|
||||
static int tcm_loop_device_reset(struct scsi_cmnd *sc)
|
||||
{
|
||||
struct tcm_loop_hba *tl_hba;
|
||||
struct tcm_loop_nexus *tl_nexus;
|
||||
struct tcm_loop_tpg *tl_tpg;
|
||||
int ret = FAILED;
|
||||
|
||||
@ -330,20 +325,9 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
|
||||
* Locate the tcm_loop_hba_t pointer
|
||||
*/
|
||||
tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
|
||||
/*
|
||||
* Locate the tl_nexus and se_sess pointers
|
||||
*/
|
||||
tl_nexus = tl_hba->tl_nexus;
|
||||
if (!tl_nexus) {
|
||||
pr_err("Unable to perform device reset without"
|
||||
" active I_T Nexus\n");
|
||||
return FAILED;
|
||||
}
|
||||
/*
|
||||
* Locate the tl_tpg pointer from TargetID in sc->device->id
|
||||
*/
|
||||
tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
|
||||
ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
|
||||
|
||||
ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
|
||||
0, TMR_LUN_RESET);
|
||||
return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
|
||||
}
|
||||
@ -940,8 +924,8 @@ static int tcm_loop_make_nexus(
|
||||
struct tcm_loop_nexus *tl_nexus;
|
||||
int ret = -ENOMEM;
|
||||
|
||||
if (tl_tpg->tl_hba->tl_nexus) {
|
||||
pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
|
||||
if (tl_tpg->tl_nexus) {
|
||||
pr_debug("tl_tpg->tl_nexus already exists\n");
|
||||
return -EEXIST;
|
||||
}
|
||||
se_tpg = &tl_tpg->tl_se_tpg;
|
||||
@ -976,7 +960,7 @@ static int tcm_loop_make_nexus(
|
||||
*/
|
||||
__transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
|
||||
tl_nexus->se_sess, tl_nexus);
|
||||
tl_tpg->tl_hba->tl_nexus = tl_nexus;
|
||||
tl_tpg->tl_nexus = tl_nexus;
|
||||
pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
|
||||
" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
|
||||
name);
|
||||
@ -992,12 +976,8 @@ static int tcm_loop_drop_nexus(
|
||||
{
|
||||
struct se_session *se_sess;
|
||||
struct tcm_loop_nexus *tl_nexus;
|
||||
struct tcm_loop_hba *tl_hba = tpg->tl_hba;
|
||||
|
||||
if (!tl_hba)
|
||||
return -ENODEV;
|
||||
|
||||
tl_nexus = tl_hba->tl_nexus;
|
||||
tl_nexus = tpg->tl_nexus;
|
||||
if (!tl_nexus)
|
||||
return -ENODEV;
|
||||
|
||||
@ -1013,13 +993,13 @@ static int tcm_loop_drop_nexus(
|
||||
}
|
||||
|
||||
pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
|
||||
" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
|
||||
" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
|
||||
tl_nexus->se_sess->se_node_acl->initiatorname);
|
||||
/*
|
||||
* Release the SCSI I_T Nexus to the emulated SAS Target Port
|
||||
*/
|
||||
transport_deregister_session(tl_nexus->se_sess);
|
||||
tpg->tl_hba->tl_nexus = NULL;
|
||||
tpg->tl_nexus = NULL;
|
||||
kfree(tl_nexus);
|
||||
return 0;
|
||||
}
|
||||
@ -1035,7 +1015,7 @@ static ssize_t tcm_loop_tpg_show_nexus(
|
||||
struct tcm_loop_nexus *tl_nexus;
|
||||
ssize_t ret;
|
||||
|
||||
tl_nexus = tl_tpg->tl_hba->tl_nexus;
|
||||
tl_nexus = tl_tpg->tl_nexus;
|
||||
if (!tl_nexus)
|
||||
return -ENODEV;
|
||||
|
||||
|
@ -27,11 +27,6 @@ struct tcm_loop_tmr {
|
||||
};
|
||||
|
||||
struct tcm_loop_nexus {
|
||||
int it_nexus_active;
|
||||
/*
|
||||
* Pointer to Linux/SCSI HBA from linux/include/scsi_host.h
|
||||
*/
|
||||
struct scsi_host *sh;
|
||||
/*
|
||||
* Pointer to TCM session for I_T Nexus
|
||||
*/
|
||||
@ -51,6 +46,7 @@ struct tcm_loop_tpg {
|
||||
atomic_t tl_tpg_port_count;
|
||||
struct se_portal_group tl_se_tpg;
|
||||
struct tcm_loop_hba *tl_hba;
|
||||
struct tcm_loop_nexus *tl_nexus;
|
||||
};
|
||||
|
||||
struct tcm_loop_hba {
|
||||
@ -59,7 +55,6 @@ struct tcm_loop_hba {
|
||||
struct se_hba_s *se_hba;
|
||||
struct se_lun *tl_hba_lun;
|
||||
struct se_port *tl_hba_lun_sep;
|
||||
struct tcm_loop_nexus *tl_nexus;
|
||||
struct device dev;
|
||||
struct Scsi_Host *sh;
|
||||
struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA];
|
||||
|
@ -50,6 +50,19 @@
|
||||
#include "target_core_rd.h"
|
||||
#include "target_core_xcopy.h"
|
||||
|
||||
#define TB_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \
|
||||
static void target_core_setup_##_name##_cit(struct se_subsystem_api *sa) \
|
||||
{ \
|
||||
struct target_backend_cits *tbc = &sa->tb_cits; \
|
||||
struct config_item_type *cit = &tbc->tb_##_name##_cit; \
|
||||
\
|
||||
cit->ct_item_ops = _item_ops; \
|
||||
cit->ct_group_ops = _group_ops; \
|
||||
cit->ct_attrs = _attrs; \
|
||||
cit->ct_owner = sa->owner; \
|
||||
pr_debug("Setup generic %s\n", __stringify(_name)); \
|
||||
}
|
||||
|
||||
extern struct t10_alua_lu_gp *default_lu_gp;
|
||||
|
||||
static LIST_HEAD(g_tf_list);
|
||||
@ -126,48 +139,57 @@ static struct config_group *target_core_register_fabric(
|
||||
|
||||
pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
|
||||
" %s\n", group, name);
|
||||
/*
|
||||
* Below are some hardcoded request_module() calls to automatically
|
||||
* local fabric modules when the following is called:
|
||||
*
|
||||
* mkdir -p /sys/kernel/config/target/$MODULE_NAME
|
||||
*
|
||||
* Note that this does not limit which TCM fabric module can be
|
||||
* registered, but simply provids auto loading logic for modules with
|
||||
* mkdir(2) system calls with known TCM fabric modules.
|
||||
*/
|
||||
if (!strncmp(name, "iscsi", 5)) {
|
||||
/*
|
||||
* Automatically load the LIO Target fabric module when the
|
||||
* following is called:
|
||||
*
|
||||
* mkdir -p $CONFIGFS/target/iscsi
|
||||
*/
|
||||
ret = request_module("iscsi_target_mod");
|
||||
if (ret < 0) {
|
||||
pr_err("request_module() failed for"
|
||||
" iscsi_target_mod.ko: %d\n", ret);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
} else if (!strncmp(name, "loopback", 8)) {
|
||||
/*
|
||||
* Automatically load the tcm_loop fabric module when the
|
||||
* following is called:
|
||||
*
|
||||
* mkdir -p $CONFIGFS/target/loopback
|
||||
*/
|
||||
ret = request_module("tcm_loop");
|
||||
if (ret < 0) {
|
||||
pr_err("request_module() failed for"
|
||||
" tcm_loop.ko: %d\n", ret);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
}
|
||||
|
||||
tf = target_core_get_fabric(name);
|
||||
if (!tf) {
|
||||
pr_err("target_core_get_fabric() failed for %s\n",
|
||||
pr_err("target_core_register_fabric() trying autoload for %s\n",
|
||||
name);
|
||||
|
||||
/*
|
||||
* Below are some hardcoded request_module() calls to automatically
|
||||
* local fabric modules when the following is called:
|
||||
*
|
||||
* mkdir -p /sys/kernel/config/target/$MODULE_NAME
|
||||
*
|
||||
* Note that this does not limit which TCM fabric module can be
|
||||
* registered, but simply provids auto loading logic for modules with
|
||||
* mkdir(2) system calls with known TCM fabric modules.
|
||||
*/
|
||||
|
||||
if (!strncmp(name, "iscsi", 5)) {
|
||||
/*
|
||||
* Automatically load the LIO Target fabric module when the
|
||||
* following is called:
|
||||
*
|
||||
* mkdir -p $CONFIGFS/target/iscsi
|
||||
*/
|
||||
ret = request_module("iscsi_target_mod");
|
||||
if (ret < 0) {
|
||||
pr_err("request_module() failed for"
|
||||
" iscsi_target_mod.ko: %d\n", ret);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
} else if (!strncmp(name, "loopback", 8)) {
|
||||
/*
|
||||
* Automatically load the tcm_loop fabric module when the
|
||||
* following is called:
|
||||
*
|
||||
* mkdir -p $CONFIGFS/target/loopback
|
||||
*/
|
||||
ret = request_module("tcm_loop");
|
||||
if (ret < 0) {
|
||||
pr_err("request_module() failed for"
|
||||
" tcm_loop.ko: %d\n", ret);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
}
|
||||
|
||||
tf = target_core_get_fabric(name);
|
||||
}
|
||||
|
||||
if (!tf) {
|
||||
pr_err("target_core_get_fabric() failed for %s\n",
|
||||
name);
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
|
||||
@ -562,198 +584,21 @@ EXPORT_SYMBOL(target_fabric_configfs_deregister);
|
||||
// Stop functions called by external Target Fabrics Modules
|
||||
//############################################################################*/
|
||||
|
||||
/* Start functions for struct config_item_type target_core_dev_attrib_cit */
|
||||
|
||||
#define DEF_DEV_ATTRIB_SHOW(_name) \
|
||||
static ssize_t target_core_dev_show_attr_##_name( \
|
||||
struct se_dev_attrib *da, \
|
||||
char *page) \
|
||||
{ \
|
||||
return snprintf(page, PAGE_SIZE, "%u\n", \
|
||||
(u32)da->da_dev->dev_attrib._name); \
|
||||
}
|
||||
|
||||
#define DEF_DEV_ATTRIB_STORE(_name) \
|
||||
static ssize_t target_core_dev_store_attr_##_name( \
|
||||
struct se_dev_attrib *da, \
|
||||
const char *page, \
|
||||
size_t count) \
|
||||
{ \
|
||||
unsigned long val; \
|
||||
int ret; \
|
||||
\
|
||||
ret = kstrtoul(page, 0, &val); \
|
||||
if (ret < 0) { \
|
||||
pr_err("kstrtoul() failed with" \
|
||||
" ret: %d\n", ret); \
|
||||
return -EINVAL; \
|
||||
} \
|
||||
ret = se_dev_set_##_name(da->da_dev, (u32)val); \
|
||||
\
|
||||
return (!ret) ? count : -EINVAL; \
|
||||
}
|
||||
|
||||
#define DEF_DEV_ATTRIB(_name) \
|
||||
DEF_DEV_ATTRIB_SHOW(_name); \
|
||||
DEF_DEV_ATTRIB_STORE(_name);
|
||||
|
||||
#define DEF_DEV_ATTRIB_RO(_name) \
|
||||
DEF_DEV_ATTRIB_SHOW(_name);
|
||||
/* Start functions for struct config_item_type tb_dev_attrib_cit */
|
||||
|
||||
CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib);
|
||||
#define SE_DEV_ATTR(_name, _mode) \
|
||||
static struct target_core_dev_attrib_attribute \
|
||||
target_core_dev_attrib_##_name = \
|
||||
__CONFIGFS_EATTR(_name, _mode, \
|
||||
target_core_dev_show_attr_##_name, \
|
||||
target_core_dev_store_attr_##_name);
|
||||
|
||||
#define SE_DEV_ATTR_RO(_name); \
|
||||
static struct target_core_dev_attrib_attribute \
|
||||
target_core_dev_attrib_##_name = \
|
||||
__CONFIGFS_EATTR_RO(_name, \
|
||||
target_core_dev_show_attr_##_name);
|
||||
|
||||
DEF_DEV_ATTRIB(emulate_model_alias);
|
||||
SE_DEV_ATTR(emulate_model_alias, S_IRUGO | S_IWUSR);
|
||||
|
||||
DEF_DEV_ATTRIB(emulate_dpo);
|
||||
SE_DEV_ATTR(emulate_dpo, S_IRUGO | S_IWUSR);
|
||||
|
||||
DEF_DEV_ATTRIB(emulate_fua_write);
|
||||
SE_DEV_ATTR(emulate_fua_write, S_IRUGO | S_IWUSR);
|
||||
|
||||
DEF_DEV_ATTRIB(emulate_fua_read);
|
||||
SE_DEV_ATTR(emulate_fua_read, S_IRUGO | S_IWUSR);
|
||||
|
||||
DEF_DEV_ATTRIB(emulate_write_cache);
|
||||
SE_DEV_ATTR(emulate_write_cache, S_IRUGO | S_IWUSR);
|
||||
|
||||
DEF_DEV_ATTRIB(emulate_ua_intlck_ctrl);
|
||||
SE_DEV_ATTR(emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR);
|
||||
|
||||
DEF_DEV_ATTRIB(emulate_tas);
|
||||
SE_DEV_ATTR(emulate_tas, S_IRUGO | S_IWUSR);
|
||||
|
||||
DEF_DEV_ATTRIB(emulate_tpu);
|
||||
SE_DEV_ATTR(emulate_tpu, S_IRUGO | S_IWUSR);
|
||||
|
||||
DEF_DEV_ATTRIB(emulate_tpws);
|
||||
SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR);
|
||||
|
||||
DEF_DEV_ATTRIB(emulate_caw);
|
||||
SE_DEV_ATTR(emulate_caw, S_IRUGO | S_IWUSR);
|
||||
|
||||
DEF_DEV_ATTRIB(emulate_3pc);
|
||||
SE_DEV_ATTR(emulate_3pc, S_IRUGO | S_IWUSR);
|
||||
|
||||
DEF_DEV_ATTRIB(pi_prot_type);
|
||||
SE_DEV_ATTR(pi_prot_type, S_IRUGO | S_IWUSR);
|
||||
|
||||
DEF_DEV_ATTRIB_RO(hw_pi_prot_type);
|
||||
SE_DEV_ATTR_RO(hw_pi_prot_type);
|
||||
|
||||
DEF_DEV_ATTRIB(pi_prot_format);
|
||||
SE_DEV_ATTR(pi_prot_format, S_IRUGO | S_IWUSR);
|
||||
|
||||
DEF_DEV_ATTRIB(enforce_pr_isids);
|
||||
SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR);
|
||||
|
||||
DEF_DEV_ATTRIB(is_nonrot);
|
||||
SE_DEV_ATTR(is_nonrot, S_IRUGO | S_IWUSR);
|
||||
|
||||
DEF_DEV_ATTRIB(emulate_rest_reord);
|
||||
SE_DEV_ATTR(emulate_rest_reord, S_IRUGO | S_IWUSR);
|
||||
|
||||
DEF_DEV_ATTRIB(force_pr_aptpl);
|
||||
SE_DEV_ATTR(force_pr_aptpl, S_IRUGO | S_IWUSR);
|
||||
|
||||
DEF_DEV_ATTRIB_RO(hw_block_size);
|
||||
SE_DEV_ATTR_RO(hw_block_size);
|
||||
|
||||
DEF_DEV_ATTRIB(block_size);
|
||||
SE_DEV_ATTR(block_size, S_IRUGO | S_IWUSR);
|
||||
|
||||
DEF_DEV_ATTRIB_RO(hw_max_sectors);
|
||||
SE_DEV_ATTR_RO(hw_max_sectors);
|
||||
|
||||
DEF_DEV_ATTRIB(fabric_max_sectors);
|
||||
SE_DEV_ATTR(fabric_max_sectors, S_IRUGO | S_IWUSR);
|
||||
|
||||
DEF_DEV_ATTRIB(optimal_sectors);
|
||||
SE_DEV_ATTR(optimal_sectors, S_IRUGO | S_IWUSR);
|
||||
|
||||
DEF_DEV_ATTRIB_RO(hw_queue_depth);
|
||||
SE_DEV_ATTR_RO(hw_queue_depth);
|
||||
|
||||
DEF_DEV_ATTRIB(queue_depth);
|
||||
SE_DEV_ATTR(queue_depth, S_IRUGO | S_IWUSR);
|
||||
|
||||
DEF_DEV_ATTRIB(max_unmap_lba_count);
|
||||
SE_DEV_ATTR(max_unmap_lba_count, S_IRUGO | S_IWUSR);
|
||||
|
||||
DEF_DEV_ATTRIB(max_unmap_block_desc_count);
|
||||
SE_DEV_ATTR(max_unmap_block_desc_count, S_IRUGO | S_IWUSR);
|
||||
|
||||
DEF_DEV_ATTRIB(unmap_granularity);
|
||||
SE_DEV_ATTR(unmap_granularity, S_IRUGO | S_IWUSR);
|
||||
|
||||
DEF_DEV_ATTRIB(unmap_granularity_alignment);
|
||||
SE_DEV_ATTR(unmap_granularity_alignment, S_IRUGO | S_IWUSR);
|
||||
|
||||
DEF_DEV_ATTRIB(max_write_same_len);
|
||||
SE_DEV_ATTR(max_write_same_len, S_IRUGO | S_IWUSR);
|
||||
|
||||
CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group);
|
||||
|
||||
static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
|
||||
&target_core_dev_attrib_emulate_model_alias.attr,
|
||||
&target_core_dev_attrib_emulate_dpo.attr,
|
||||
&target_core_dev_attrib_emulate_fua_write.attr,
|
||||
&target_core_dev_attrib_emulate_fua_read.attr,
|
||||
&target_core_dev_attrib_emulate_write_cache.attr,
|
||||
&target_core_dev_attrib_emulate_ua_intlck_ctrl.attr,
|
||||
&target_core_dev_attrib_emulate_tas.attr,
|
||||
&target_core_dev_attrib_emulate_tpu.attr,
|
||||
&target_core_dev_attrib_emulate_tpws.attr,
|
||||
&target_core_dev_attrib_emulate_caw.attr,
|
||||
&target_core_dev_attrib_emulate_3pc.attr,
|
||||
&target_core_dev_attrib_pi_prot_type.attr,
|
||||
&target_core_dev_attrib_hw_pi_prot_type.attr,
|
||||
&target_core_dev_attrib_pi_prot_format.attr,
|
||||
&target_core_dev_attrib_enforce_pr_isids.attr,
|
||||
&target_core_dev_attrib_force_pr_aptpl.attr,
|
||||
&target_core_dev_attrib_is_nonrot.attr,
|
||||
&target_core_dev_attrib_emulate_rest_reord.attr,
|
||||
&target_core_dev_attrib_hw_block_size.attr,
|
||||
&target_core_dev_attrib_block_size.attr,
|
||||
&target_core_dev_attrib_hw_max_sectors.attr,
|
||||
&target_core_dev_attrib_fabric_max_sectors.attr,
|
||||
&target_core_dev_attrib_optimal_sectors.attr,
|
||||
&target_core_dev_attrib_hw_queue_depth.attr,
|
||||
&target_core_dev_attrib_queue_depth.attr,
|
||||
&target_core_dev_attrib_max_unmap_lba_count.attr,
|
||||
&target_core_dev_attrib_max_unmap_block_desc_count.attr,
|
||||
&target_core_dev_attrib_unmap_granularity.attr,
|
||||
&target_core_dev_attrib_unmap_granularity_alignment.attr,
|
||||
&target_core_dev_attrib_max_write_same_len.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct configfs_item_operations target_core_dev_attrib_ops = {
|
||||
.show_attribute = target_core_dev_attrib_attr_show,
|
||||
.store_attribute = target_core_dev_attrib_attr_store,
|
||||
};
|
||||
|
||||
static struct config_item_type target_core_dev_attrib_cit = {
|
||||
.ct_item_ops = &target_core_dev_attrib_ops,
|
||||
.ct_attrs = target_core_dev_attrib_attrs,
|
||||
.ct_owner = THIS_MODULE,
|
||||
};
|
||||
TB_CIT_SETUP(dev_attrib, &target_core_dev_attrib_ops, NULL, NULL);
|
||||
|
||||
/* End functions for struct config_item_type target_core_dev_attrib_cit */
|
||||
/* End functions for struct config_item_type tb_dev_attrib_cit */
|
||||
|
||||
/* Start functions for struct config_item_type target_core_dev_wwn_cit */
|
||||
/* Start functions for struct config_item_type tb_dev_wwn_cit */
|
||||
|
||||
CONFIGFS_EATTR_STRUCT(target_core_dev_wwn, t10_wwn);
|
||||
#define SE_DEV_WWN_ATTR(_name, _mode) \
|
||||
@ -984,15 +829,11 @@ static struct configfs_item_operations target_core_dev_wwn_ops = {
|
||||
.store_attribute = target_core_dev_wwn_attr_store,
|
||||
};
|
||||
|
||||
static struct config_item_type target_core_dev_wwn_cit = {
|
||||
.ct_item_ops = &target_core_dev_wwn_ops,
|
||||
.ct_attrs = target_core_dev_wwn_attrs,
|
||||
.ct_owner = THIS_MODULE,
|
||||
};
|
||||
TB_CIT_SETUP(dev_wwn, &target_core_dev_wwn_ops, NULL, target_core_dev_wwn_attrs);
|
||||
|
||||
/* End functions for struct config_item_type target_core_dev_wwn_cit */
|
||||
/* End functions for struct config_item_type tb_dev_wwn_cit */
|
||||
|
||||
/* Start functions for struct config_item_type target_core_dev_pr_cit */
|
||||
/* Start functions for struct config_item_type tb_dev_pr_cit */
|
||||
|
||||
CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_device);
|
||||
#define SE_DEV_PR_ATTR(_name, _mode) \
|
||||
@ -1453,15 +1294,11 @@ static struct configfs_item_operations target_core_dev_pr_ops = {
|
||||
.store_attribute = target_core_dev_pr_attr_store,
|
||||
};
|
||||
|
||||
static struct config_item_type target_core_dev_pr_cit = {
|
||||
.ct_item_ops = &target_core_dev_pr_ops,
|
||||
.ct_attrs = target_core_dev_pr_attrs,
|
||||
.ct_owner = THIS_MODULE,
|
||||
};
|
||||
TB_CIT_SETUP(dev_pr, &target_core_dev_pr_ops, NULL, target_core_dev_pr_attrs);
|
||||
|
||||
/* End functions for struct config_item_type target_core_dev_pr_cit */
|
||||
/* End functions for struct config_item_type tb_dev_pr_cit */
|
||||
|
||||
/* Start functions for struct config_item_type target_core_dev_cit */
|
||||
/* Start functions for struct config_item_type tb_dev_cit */
|
||||
|
||||
static ssize_t target_core_show_dev_info(void *p, char *page)
|
||||
{
|
||||
@ -1925,7 +1762,7 @@ static struct target_core_configfs_attribute target_core_attr_dev_lba_map = {
|
||||
.store = target_core_store_dev_lba_map,
|
||||
};
|
||||
|
||||
static struct configfs_attribute *lio_core_dev_attrs[] = {
|
||||
static struct configfs_attribute *target_core_dev_attrs[] = {
|
||||
&target_core_attr_dev_info.attr,
|
||||
&target_core_attr_dev_control.attr,
|
||||
&target_core_attr_dev_alias.attr,
|
||||
@ -1984,13 +1821,9 @@ static struct configfs_item_operations target_core_dev_item_ops = {
|
||||
.store_attribute = target_core_dev_store,
|
||||
};
|
||||
|
||||
static struct config_item_type target_core_dev_cit = {
|
||||
.ct_item_ops = &target_core_dev_item_ops,
|
||||
.ct_attrs = lio_core_dev_attrs,
|
||||
.ct_owner = THIS_MODULE,
|
||||
};
|
||||
TB_CIT_SETUP(dev, &target_core_dev_item_ops, NULL, target_core_dev_attrs);
|
||||
|
||||
/* End functions for struct config_item_type target_core_dev_cit */
|
||||
/* End functions for struct config_item_type tb_dev_cit */
|
||||
|
||||
/* Start functions for struct config_item_type target_core_alua_lu_gp_cit */
|
||||
|
||||
@ -2670,7 +2503,7 @@ static struct config_item_type target_core_alua_tg_pt_gp_cit = {
|
||||
|
||||
/* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
|
||||
|
||||
/* Start functions for struct config_item_type target_core_alua_tg_pt_gps_cit */
|
||||
/* Start functions for struct config_item_type tb_alua_tg_pt_gps_cit */
|
||||
|
||||
static struct config_group *target_core_alua_create_tg_pt_gp(
|
||||
struct config_group *group,
|
||||
@ -2721,12 +2554,9 @@ static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
|
||||
.drop_item = &target_core_alua_drop_tg_pt_gp,
|
||||
};
|
||||
|
||||
static struct config_item_type target_core_alua_tg_pt_gps_cit = {
|
||||
.ct_group_ops = &target_core_alua_tg_pt_gps_group_ops,
|
||||
.ct_owner = THIS_MODULE,
|
||||
};
|
||||
TB_CIT_SETUP(dev_alua_tg_pt_gps, NULL, &target_core_alua_tg_pt_gps_group_ops, NULL);
|
||||
|
||||
/* End functions for struct config_item_type target_core_alua_tg_pt_gps_cit */
|
||||
/* End functions for struct config_item_type tb_alua_tg_pt_gps_cit */
|
||||
|
||||
/* Start functions for struct config_item_type target_core_alua_cit */
|
||||
|
||||
@ -2744,7 +2574,7 @@ static struct config_item_type target_core_alua_cit = {
|
||||
|
||||
/* End functions for struct config_item_type target_core_alua_cit */
|
||||
|
||||
/* Start functions for struct config_item_type target_core_stat_cit */
|
||||
/* Start functions for struct config_item_type tb_dev_stat_cit */
|
||||
|
||||
static struct config_group *target_core_stat_mkdir(
|
||||
struct config_group *group,
|
||||
@ -2765,12 +2595,9 @@ static struct configfs_group_operations target_core_stat_group_ops = {
|
||||
.drop_item = &target_core_stat_rmdir,
|
||||
};
|
||||
|
||||
static struct config_item_type target_core_stat_cit = {
|
||||
.ct_group_ops = &target_core_stat_group_ops,
|
||||
.ct_owner = THIS_MODULE,
|
||||
};
|
||||
TB_CIT_SETUP(dev_stat, NULL, &target_core_stat_group_ops, NULL);
|
||||
|
||||
/* End functions for struct config_item_type target_core_stat_cit */
|
||||
/* End functions for struct config_item_type tb_dev_stat_cit */
|
||||
|
||||
/* Start functions for struct config_item_type target_core_hba_cit */
|
||||
|
||||
@ -2806,17 +2633,17 @@ static struct config_group *target_core_make_subdev(
|
||||
if (!dev_cg->default_groups)
|
||||
goto out_free_device;
|
||||
|
||||
config_group_init_type_name(dev_cg, name, &target_core_dev_cit);
|
||||
config_group_init_type_name(dev_cg, name, &t->tb_cits.tb_dev_cit);
|
||||
config_group_init_type_name(&dev->dev_attrib.da_group, "attrib",
|
||||
&target_core_dev_attrib_cit);
|
||||
&t->tb_cits.tb_dev_attrib_cit);
|
||||
config_group_init_type_name(&dev->dev_pr_group, "pr",
|
||||
&target_core_dev_pr_cit);
|
||||
&t->tb_cits.tb_dev_pr_cit);
|
||||
config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn",
|
||||
&target_core_dev_wwn_cit);
|
||||
&t->tb_cits.tb_dev_wwn_cit);
|
||||
config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group,
|
||||
"alua", &target_core_alua_tg_pt_gps_cit);
|
||||
"alua", &t->tb_cits.tb_dev_alua_tg_pt_gps_cit);
|
||||
config_group_init_type_name(&dev->dev_stat_grps.stat_group,
|
||||
"statistics", &target_core_stat_cit);
|
||||
"statistics", &t->tb_cits.tb_dev_stat_cit);
|
||||
|
||||
dev_cg->default_groups[0] = &dev->dev_attrib.da_group;
|
||||
dev_cg->default_groups[1] = &dev->dev_pr_group;
|
||||
@ -3110,6 +2937,17 @@ static struct config_item_type target_core_cit = {
|
||||
|
||||
/* Stop functions for struct config_item_type target_core_hba_cit */
|
||||
|
||||
void target_core_setup_sub_cits(struct se_subsystem_api *sa)
|
||||
{
|
||||
target_core_setup_dev_cit(sa);
|
||||
target_core_setup_dev_attrib_cit(sa);
|
||||
target_core_setup_dev_pr_cit(sa);
|
||||
target_core_setup_dev_wwn_cit(sa);
|
||||
target_core_setup_dev_alua_tg_pt_gps_cit(sa);
|
||||
target_core_setup_dev_stat_cit(sa);
|
||||
}
|
||||
EXPORT_SYMBOL(target_core_setup_sub_cits);
|
||||
|
||||
static int __init target_core_init_configfs(void)
|
||||
{
|
||||
struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
|
||||
|
@ -659,6 +659,7 @@ int se_dev_set_max_unmap_lba_count(
|
||||
dev, dev->dev_attrib.max_unmap_lba_count);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(se_dev_set_max_unmap_lba_count);
|
||||
|
||||
int se_dev_set_max_unmap_block_desc_count(
|
||||
struct se_device *dev,
|
||||
@ -670,6 +671,7 @@ int se_dev_set_max_unmap_block_desc_count(
|
||||
dev, dev->dev_attrib.max_unmap_block_desc_count);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(se_dev_set_max_unmap_block_desc_count);
|
||||
|
||||
int se_dev_set_unmap_granularity(
|
||||
struct se_device *dev,
|
||||
@ -680,6 +682,7 @@ int se_dev_set_unmap_granularity(
|
||||
dev, dev->dev_attrib.unmap_granularity);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(se_dev_set_unmap_granularity);
|
||||
|
||||
int se_dev_set_unmap_granularity_alignment(
|
||||
struct se_device *dev,
|
||||
@ -690,6 +693,7 @@ int se_dev_set_unmap_granularity_alignment(
|
||||
dev, dev->dev_attrib.unmap_granularity_alignment);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(se_dev_set_unmap_granularity_alignment);
|
||||
|
||||
int se_dev_set_max_write_same_len(
|
||||
struct se_device *dev,
|
||||
@ -700,6 +704,7 @@ int se_dev_set_max_write_same_len(
|
||||
dev, dev->dev_attrib.max_write_same_len);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(se_dev_set_max_write_same_len);
|
||||
|
||||
static void dev_set_t10_wwn_model_alias(struct se_device *dev)
|
||||
{
|
||||
@ -738,6 +743,7 @@ int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(se_dev_set_emulate_model_alias);
|
||||
|
||||
int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
|
||||
{
|
||||
@ -753,6 +759,7 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(se_dev_set_emulate_dpo);
|
||||
|
||||
int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
|
||||
{
|
||||
@ -760,17 +767,12 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
|
||||
pr_err("Illegal value %d\n", flag);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (flag &&
|
||||
dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
|
||||
pr_err("emulate_fua_write not supported for pSCSI\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
dev->dev_attrib.emulate_fua_write = flag;
|
||||
pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
|
||||
dev, dev->dev_attrib.emulate_fua_write);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(se_dev_set_emulate_fua_write);
|
||||
|
||||
int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
|
||||
{
|
||||
@ -786,6 +788,7 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(se_dev_set_emulate_fua_read);
|
||||
|
||||
int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
|
||||
{
|
||||
@ -793,11 +796,6 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
|
||||
pr_err("Illegal value %d\n", flag);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (flag &&
|
||||
dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
|
||||
pr_err("emulate_write_cache not supported for pSCSI\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (flag &&
|
||||
dev->transport->get_write_cache) {
|
||||
pr_err("emulate_write_cache not supported for this device\n");
|
||||
@ -809,6 +807,7 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
|
||||
dev, dev->dev_attrib.emulate_write_cache);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(se_dev_set_emulate_write_cache);
|
||||
|
||||
int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
|
||||
{
|
||||
@ -829,6 +828,7 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(se_dev_set_emulate_ua_intlck_ctrl);
|
||||
|
||||
int se_dev_set_emulate_tas(struct se_device *dev, int flag)
|
||||
{
|
||||
@ -849,6 +849,7 @@ int se_dev_set_emulate_tas(struct se_device *dev, int flag)
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(se_dev_set_emulate_tas);
|
||||
|
||||
int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
|
||||
{
|
||||
@ -870,6 +871,7 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
|
||||
dev, flag);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(se_dev_set_emulate_tpu);
|
||||
|
||||
int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
|
||||
{
|
||||
@ -891,6 +893,7 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
|
||||
dev, flag);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(se_dev_set_emulate_tpws);
|
||||
|
||||
int se_dev_set_emulate_caw(struct se_device *dev, int flag)
|
||||
{
|
||||
@ -904,6 +907,7 @@ int se_dev_set_emulate_caw(struct se_device *dev, int flag)
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(se_dev_set_emulate_caw);
|
||||
|
||||
int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
|
||||
{
|
||||
@ -917,6 +921,7 @@ int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(se_dev_set_emulate_3pc);
|
||||
|
||||
int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
|
||||
{
|
||||
@ -970,6 +975,7 @@ int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(se_dev_set_pi_prot_type);
|
||||
|
||||
int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
|
||||
{
|
||||
@ -1005,6 +1011,7 @@ int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(se_dev_set_pi_prot_format);
|
||||
|
||||
int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
|
||||
{
|
||||
@ -1017,6 +1024,7 @@ int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
|
||||
(dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(se_dev_set_enforce_pr_isids);
|
||||
|
||||
int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag)
|
||||
{
|
||||
@ -1034,6 +1042,7 @@ int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag)
|
||||
pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", dev, flag);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(se_dev_set_force_pr_aptpl);
|
||||
|
||||
int se_dev_set_is_nonrot(struct se_device *dev, int flag)
|
||||
{
|
||||
@ -1046,6 +1055,7 @@ int se_dev_set_is_nonrot(struct se_device *dev, int flag)
|
||||
dev, flag);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(se_dev_set_is_nonrot);
|
||||
|
||||
int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
|
||||
{
|
||||
@ -1058,6 +1068,7 @@ int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
|
||||
pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(se_dev_set_emulate_rest_reord);
|
||||
|
||||
/*
|
||||
* Note, this can only be called on unexported SE Device Object.
|
||||
@ -1076,31 +1087,21 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
|
||||
if (queue_depth > dev->dev_attrib.queue_depth) {
|
||||
if (queue_depth > dev->dev_attrib.hw_queue_depth) {
|
||||
pr_err("dev[%p]: Passed queue_depth: %u"
|
||||
" exceeds TCM/SE_Device TCQ: %u\n",
|
||||
dev, queue_depth,
|
||||
pr_err("dev[%p]: Passed queue_depth:"
|
||||
" %u exceeds TCM/SE_Device MAX"
|
||||
" TCQ: %u\n", dev, queue_depth,
|
||||
dev->dev_attrib.hw_queue_depth);
|
||||
return -EINVAL;
|
||||
}
|
||||
} else {
|
||||
if (queue_depth > dev->dev_attrib.queue_depth) {
|
||||
if (queue_depth > dev->dev_attrib.hw_queue_depth) {
|
||||
pr_err("dev[%p]: Passed queue_depth:"
|
||||
" %u exceeds TCM/SE_Device MAX"
|
||||
" TCQ: %u\n", dev, queue_depth,
|
||||
dev->dev_attrib.hw_queue_depth);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
|
||||
pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
|
||||
dev, queue_depth);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(se_dev_set_queue_depth);
|
||||
|
||||
int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
|
||||
{
|
||||
@ -1123,22 +1124,12 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
|
||||
DA_STATUS_MAX_SECTORS_MIN);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
|
||||
if (fabric_max_sectors > dev->dev_attrib.hw_max_sectors) {
|
||||
pr_err("dev[%p]: Passed fabric_max_sectors: %u"
|
||||
" greater than TCM/SE_Device max_sectors:"
|
||||
" %u\n", dev, fabric_max_sectors,
|
||||
dev->dev_attrib.hw_max_sectors);
|
||||
return -EINVAL;
|
||||
}
|
||||
} else {
|
||||
if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
|
||||
pr_err("dev[%p]: Passed fabric_max_sectors: %u"
|
||||
" greater than DA_STATUS_MAX_SECTORS_MAX:"
|
||||
" %u\n", dev, fabric_max_sectors,
|
||||
DA_STATUS_MAX_SECTORS_MAX);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
|
||||
pr_err("dev[%p]: Passed fabric_max_sectors: %u"
|
||||
" greater than DA_STATUS_MAX_SECTORS_MAX:"
|
||||
" %u\n", dev, fabric_max_sectors,
|
||||
DA_STATUS_MAX_SECTORS_MAX);
|
||||
return -EINVAL;
|
||||
}
|
||||
/*
|
||||
* Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
|
||||
@ -1155,6 +1146,7 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
|
||||
dev, fabric_max_sectors);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(se_dev_set_fabric_max_sectors);
|
||||
|
||||
int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
|
||||
{
|
||||
@ -1164,11 +1156,6 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
|
||||
dev, dev->export_count);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
|
||||
pr_err("dev[%p]: Passed optimal_sectors cannot be"
|
||||
" changed for TCM/pSCSI\n", dev);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
|
||||
pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
|
||||
" greater than fabric_max_sectors: %u\n", dev,
|
||||
@ -1181,6 +1168,7 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
|
||||
dev, optimal_sectors);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(se_dev_set_optimal_sectors);
|
||||
|
||||
int se_dev_set_block_size(struct se_device *dev, u32 block_size)
|
||||
{
|
||||
@ -1201,13 +1189,6 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
|
||||
pr_err("dev[%p]: Not allowed to change block_size for"
|
||||
" Physical Device, use for Linux/SCSI to change"
|
||||
" block_size for underlying hardware\n", dev);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
dev->dev_attrib.block_size = block_size;
|
||||
pr_debug("dev[%p]: SE Device block_size changed to %u\n",
|
||||
dev, block_size);
|
||||
@ -1218,6 +1199,7 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(se_dev_set_block_size);
|
||||
|
||||
struct se_lun *core_dev_add_lun(
|
||||
struct se_portal_group *tpg,
|
||||
|
@ -37,6 +37,7 @@
|
||||
|
||||
#include <target/target_core_base.h>
|
||||
#include <target/target_core_backend.h>
|
||||
#include <target/target_core_backend_configfs.h>
|
||||
|
||||
#include "target_core_file.h"
|
||||
|
||||
@ -934,6 +935,42 @@ fd_parse_cdb(struct se_cmd *cmd)
|
||||
return sbc_parse_cdb(cmd, &fd_sbc_ops);
|
||||
}
|
||||
|
||||
DEF_TB_DEFAULT_ATTRIBS(fileio);
|
||||
|
||||
static struct configfs_attribute *fileio_backend_dev_attrs[] = {
|
||||
&fileio_dev_attrib_emulate_model_alias.attr,
|
||||
&fileio_dev_attrib_emulate_dpo.attr,
|
||||
&fileio_dev_attrib_emulate_fua_write.attr,
|
||||
&fileio_dev_attrib_emulate_fua_read.attr,
|
||||
&fileio_dev_attrib_emulate_write_cache.attr,
|
||||
&fileio_dev_attrib_emulate_ua_intlck_ctrl.attr,
|
||||
&fileio_dev_attrib_emulate_tas.attr,
|
||||
&fileio_dev_attrib_emulate_tpu.attr,
|
||||
&fileio_dev_attrib_emulate_tpws.attr,
|
||||
&fileio_dev_attrib_emulate_caw.attr,
|
||||
&fileio_dev_attrib_emulate_3pc.attr,
|
||||
&fileio_dev_attrib_pi_prot_type.attr,
|
||||
&fileio_dev_attrib_hw_pi_prot_type.attr,
|
||||
&fileio_dev_attrib_pi_prot_format.attr,
|
||||
&fileio_dev_attrib_enforce_pr_isids.attr,
|
||||
&fileio_dev_attrib_is_nonrot.attr,
|
||||
&fileio_dev_attrib_emulate_rest_reord.attr,
|
||||
&fileio_dev_attrib_force_pr_aptpl.attr,
|
||||
&fileio_dev_attrib_hw_block_size.attr,
|
||||
&fileio_dev_attrib_block_size.attr,
|
||||
&fileio_dev_attrib_hw_max_sectors.attr,
|
||||
&fileio_dev_attrib_fabric_max_sectors.attr,
|
||||
&fileio_dev_attrib_optimal_sectors.attr,
|
||||
&fileio_dev_attrib_hw_queue_depth.attr,
|
||||
&fileio_dev_attrib_queue_depth.attr,
|
||||
&fileio_dev_attrib_max_unmap_lba_count.attr,
|
||||
&fileio_dev_attrib_max_unmap_block_desc_count.attr,
|
||||
&fileio_dev_attrib_unmap_granularity.attr,
|
||||
&fileio_dev_attrib_unmap_granularity_alignment.attr,
|
||||
&fileio_dev_attrib_max_write_same_len.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct se_subsystem_api fileio_template = {
|
||||
.name = "fileio",
|
||||
.inquiry_prod = "FILEIO",
|
||||
@ -957,6 +994,11 @@ static struct se_subsystem_api fileio_template = {
|
||||
|
||||
static int __init fileio_module_init(void)
|
||||
{
|
||||
struct target_backend_cits *tbc = &fileio_template.tb_cits;
|
||||
|
||||
target_core_setup_sub_cits(&fileio_template);
|
||||
tbc->tb_dev_attrib_cit.ct_attrs = fileio_backend_dev_attrs;
|
||||
|
||||
return transport_subsystem_register(&fileio_template);
|
||||
}
|
||||
|
||||
|
@ -36,6 +36,7 @@
|
||||
#include <target/target_core_base.h>
|
||||
#include <target/target_core_backend.h>
|
||||
#include <target/target_core_fabric.h>
|
||||
#include <target/target_core_configfs.h>
|
||||
|
||||
#include "target_core_internal.h"
|
||||
|
||||
@ -137,8 +138,7 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
|
||||
return hba;
|
||||
|
||||
out_module_put:
|
||||
if (hba->transport->owner)
|
||||
module_put(hba->transport->owner);
|
||||
module_put(hba->transport->owner);
|
||||
hba->transport = NULL;
|
||||
out_free_hba:
|
||||
kfree(hba);
|
||||
@ -159,8 +159,7 @@ core_delete_hba(struct se_hba *hba)
|
||||
pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target"
|
||||
" Core\n", hba->hba_id);
|
||||
|
||||
if (hba->transport->owner)
|
||||
module_put(hba->transport->owner);
|
||||
module_put(hba->transport->owner);
|
||||
|
||||
hba->transport = NULL;
|
||||
kfree(hba);
|
||||
|
@ -41,6 +41,7 @@
|
||||
|
||||
#include <target/target_core_base.h>
|
||||
#include <target/target_core_backend.h>
|
||||
#include <target/target_core_backend_configfs.h>
|
||||
|
||||
#include "target_core_iblock.h"
|
||||
|
||||
@ -858,6 +859,42 @@ static bool iblock_get_write_cache(struct se_device *dev)
|
||||
return q->flush_flags & REQ_FLUSH;
|
||||
}
|
||||
|
||||
DEF_TB_DEFAULT_ATTRIBS(iblock);
|
||||
|
||||
static struct configfs_attribute *iblock_backend_dev_attrs[] = {
|
||||
&iblock_dev_attrib_emulate_model_alias.attr,
|
||||
&iblock_dev_attrib_emulate_dpo.attr,
|
||||
&iblock_dev_attrib_emulate_fua_write.attr,
|
||||
&iblock_dev_attrib_emulate_fua_read.attr,
|
||||
&iblock_dev_attrib_emulate_write_cache.attr,
|
||||
&iblock_dev_attrib_emulate_ua_intlck_ctrl.attr,
|
||||
&iblock_dev_attrib_emulate_tas.attr,
|
||||
&iblock_dev_attrib_emulate_tpu.attr,
|
||||
&iblock_dev_attrib_emulate_tpws.attr,
|
||||
&iblock_dev_attrib_emulate_caw.attr,
|
||||
&iblock_dev_attrib_emulate_3pc.attr,
|
||||
&iblock_dev_attrib_pi_prot_type.attr,
|
||||
&iblock_dev_attrib_hw_pi_prot_type.attr,
|
||||
&iblock_dev_attrib_pi_prot_format.attr,
|
||||
&iblock_dev_attrib_enforce_pr_isids.attr,
|
||||
&iblock_dev_attrib_is_nonrot.attr,
|
||||
&iblock_dev_attrib_emulate_rest_reord.attr,
|
||||
&iblock_dev_attrib_force_pr_aptpl.attr,
|
||||
&iblock_dev_attrib_hw_block_size.attr,
|
||||
&iblock_dev_attrib_block_size.attr,
|
||||
&iblock_dev_attrib_hw_max_sectors.attr,
|
||||
&iblock_dev_attrib_fabric_max_sectors.attr,
|
||||
&iblock_dev_attrib_optimal_sectors.attr,
|
||||
&iblock_dev_attrib_hw_queue_depth.attr,
|
||||
&iblock_dev_attrib_queue_depth.attr,
|
||||
&iblock_dev_attrib_max_unmap_lba_count.attr,
|
||||
&iblock_dev_attrib_max_unmap_block_desc_count.attr,
|
||||
&iblock_dev_attrib_unmap_granularity.attr,
|
||||
&iblock_dev_attrib_unmap_granularity_alignment.attr,
|
||||
&iblock_dev_attrib_max_write_same_len.attr,
|
||||
NULL,
|
||||
};
|
||||
|
||||
static struct se_subsystem_api iblock_template = {
|
||||
.name = "iblock",
|
||||
.inquiry_prod = "IBLOCK",
|
||||
@ -883,6 +920,11 @@ static struct se_subsystem_api iblock_template = {
|
||||
|
||||
static int __init iblock_module_init(void)
|
||||
{
|
||||
struct target_backend_cits *tbc = &iblock_template.tb_cits;
|
||||
|
||||
target_core_setup_sub_cits(&iblock_template);
|
||||
tbc->tb_dev_attrib_cit.ct_attrs = iblock_backend_dev_attrs;
|
||||
|
||||
return transport_subsystem_register(&iblock_template);
|
||||
}
|
||||
|
||||
|
@@ -18,34 +18,6 @@ int core_dev_export(struct se_device *, struct se_portal_group *,
struct se_lun *);
void core_dev_unexport(struct se_device *, struct se_portal_group *,
struct se_lun *);
int se_dev_set_task_timeout(struct se_device *, u32);
int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
int se_dev_set_unmap_granularity(struct se_device *, u32);
int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
int se_dev_set_max_write_same_len(struct se_device *, u32);
int se_dev_set_emulate_model_alias(struct se_device *, int);
int se_dev_set_emulate_dpo(struct se_device *, int);
int se_dev_set_emulate_fua_write(struct se_device *, int);
int se_dev_set_emulate_fua_read(struct se_device *, int);
int se_dev_set_emulate_write_cache(struct se_device *, int);
int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
int se_dev_set_emulate_tas(struct se_device *, int);
int se_dev_set_emulate_tpu(struct se_device *, int);
int se_dev_set_emulate_tpws(struct se_device *, int);
int se_dev_set_emulate_caw(struct se_device *, int);
int se_dev_set_emulate_3pc(struct se_device *, int);
int se_dev_set_pi_prot_type(struct se_device *, int);
int se_dev_set_pi_prot_format(struct se_device *, int);
int se_dev_set_enforce_pr_isids(struct se_device *, int);
int se_dev_set_force_pr_aptpl(struct se_device *, int);
int se_dev_set_is_nonrot(struct se_device *, int);
int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
int se_dev_set_queue_depth(struct se_device *, u32);
int se_dev_set_max_sectors(struct se_device *, u32);
int se_dev_set_fabric_max_sectors(struct se_device *, u32);
int se_dev_set_optimal_sectors(struct se_device *, u32);
int se_dev_set_block_size(struct se_device *, u32);
struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u32);
void core_dev_del_lun(struct se_portal_group *, struct se_lun *);
struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
@@ -76,7 +76,7 @@ enum preempt_type {
};

static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *,
struct t10_pr_registration *, int);
struct t10_pr_registration *, int, int);

static sense_reason_t
target_scsi2_reservation_check(struct se_cmd *cmd)
@@ -1177,7 +1177,7 @@ static int core_scsi3_check_implicit_release(
* service action with the SERVICE ACTION RESERVATION KEY
* field set to zero (see 5.7.11.3).
*/
__core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0);
__core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0, 1);
ret = 1;
/*
* For 'All Registrants' reservation types, all existing
@@ -1219,7 +1219,8 @@ static void __core_scsi3_free_registration(

pr_reg->pr_reg_deve->def_pr_registered = 0;
pr_reg->pr_reg_deve->pr_res_key = 0;
list_del(&pr_reg->pr_reg_list);
if (!list_empty(&pr_reg->pr_reg_list))
list_del(&pr_reg->pr_reg_list);
/*
* Caller accessing *pr_reg using core_scsi3_locate_pr_reg(),
* so call core_scsi3_put_pr_reg() to decrement our reference.
@@ -1271,6 +1272,7 @@ void core_scsi3_free_pr_reg_from_nacl(
{
struct t10_reservation *pr_tmpl = &dev->t10_pr;
struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
bool free_reg = false;
/*
* If the passed se_node_acl matches the reservation holder,
* release the reservation.
@@ -1278,13 +1280,18 @@ void core_scsi3_free_pr_reg_from_nacl(
spin_lock(&dev->dev_reservation_lock);
pr_res_holder = dev->dev_pr_res_holder;
if ((pr_res_holder != NULL) &&
(pr_res_holder->pr_reg_nacl == nacl))
__core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0);
(pr_res_holder->pr_reg_nacl == nacl)) {
__core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0, 1);
free_reg = true;
}
spin_unlock(&dev->dev_reservation_lock);
/*
* Release any registration associated with the struct se_node_acl.
*/
spin_lock(&pr_tmpl->registration_lock);
if (pr_res_holder && free_reg)
__core_scsi3_free_registration(dev, pr_res_holder, NULL, 0);

list_for_each_entry_safe(pr_reg, pr_reg_tmp,
&pr_tmpl->registration_list, pr_reg_list) {

@@ -1307,7 +1314,7 @@ void core_scsi3_free_all_registrations(
if (pr_res_holder != NULL) {
struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
__core_scsi3_complete_pro_release(dev, pr_res_nacl,
pr_res_holder, 0);
pr_res_holder, 0, 0);
}
spin_unlock(&dev->dev_reservation_lock);

@@ -1429,14 +1436,12 @@ core_scsi3_decode_spec_i_port(
struct target_core_fabric_ops *tmp_tf_ops;
unsigned char *buf;
unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident;
char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
sense_reason_t ret;
u32 tpdl, tid_len = 0;
int dest_local_nexus;
u32 dest_rtpi = 0;

memset(dest_iport, 0, 64);

local_se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
/*
* Allocate a struct pr_transport_id_holder and setup the
@@ -2105,13 +2110,13 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
/*
* sa_res_key=0 Unregister Reservation Key for registered I_T Nexus.
*/
pr_holder = core_scsi3_check_implicit_release(
cmd->se_dev, pr_reg);
type = pr_reg->pr_res_type;
pr_holder = core_scsi3_check_implicit_release(cmd->se_dev,
pr_reg);
if (pr_holder < 0) {
ret = TCM_RESERVATION_CONFLICT;
goto out;
}
type = pr_reg->pr_res_type;

spin_lock(&pr_tmpl->registration_lock);
/*
@@ -2269,6 +2274,7 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
spin_lock(&dev->dev_reservation_lock);
pr_res_holder = dev->dev_pr_res_holder;
if (pr_res_holder) {
int pr_res_type = pr_res_holder->pr_res_type;
/*
* From spc4r17 Section 5.7.9: Reserving:
*
@@ -2279,7 +2285,9 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
* the logical unit, then the command shall be completed with
* RESERVATION CONFLICT status.
*/
if (pr_res_holder != pr_reg) {
if ((pr_res_holder != pr_reg) &&
(pr_res_type != PR_TYPE_WRITE_EXCLUSIVE_ALLREG) &&
(pr_res_type != PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
pr_err("SPC-3 PR: Attempted RESERVE from"
" [%s]: %s while reservation already held by"
@@ -2385,23 +2393,59 @@ static void __core_scsi3_complete_pro_release(
struct se_device *dev,
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
int explicit)
int explicit,
int unreg)
{
struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
char i_buf[PR_REG_ISID_ID_LEN];
int pr_res_type = 0, pr_res_scope = 0;

memset(i_buf, 0, PR_REG_ISID_ID_LEN);
core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
/*
* Go ahead and release the current PR reservation holder.
* If an All Registrants reservation is currently active and
* a unregister operation is requested, replace the current
* dev_pr_res_holder with another active registration.
*/
dev->dev_pr_res_holder = NULL;
if (dev->dev_pr_res_holder) {
pr_res_type = dev->dev_pr_res_holder->pr_res_type;
pr_res_scope = dev->dev_pr_res_holder->pr_res_scope;
dev->dev_pr_res_holder->pr_res_type = 0;
dev->dev_pr_res_holder->pr_res_scope = 0;
dev->dev_pr_res_holder->pr_res_holder = 0;
dev->dev_pr_res_holder = NULL;
}
if (!unreg)
goto out;

pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
" reservation holder TYPE: %s ALL_TG_PT: %d\n",
tfo->get_fabric_name(), (explicit) ? "explicit" : "implicit",
core_scsi3_pr_dump_type(pr_reg->pr_res_type),
(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
spin_lock(&dev->t10_pr.registration_lock);
list_del_init(&pr_reg->pr_reg_list);
/*
* If the I_T nexus is a reservation holder, the persistent reservation
* is of an all registrants type, and the I_T nexus is the last remaining
* registered I_T nexus, then the device server shall also release the
* persistent reservation.
*/
if (!list_empty(&dev->t10_pr.registration_list) &&
((pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
(pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))) {
dev->dev_pr_res_holder =
list_entry(dev->t10_pr.registration_list.next,
struct t10_pr_registration, pr_reg_list);
dev->dev_pr_res_holder->pr_res_type = pr_res_type;
dev->dev_pr_res_holder->pr_res_scope = pr_res_scope;
dev->dev_pr_res_holder->pr_res_holder = 1;
}
spin_unlock(&dev->t10_pr.registration_lock);
out:
if (!dev->dev_pr_res_holder) {
pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
" reservation holder TYPE: %s ALL_TG_PT: %d\n",
tfo->get_fabric_name(), (explicit) ? "explicit" :
"implicit", core_scsi3_pr_dump_type(pr_res_type),
(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
}
pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n",
tfo->get_fabric_name(), se_nacl->initiatorname,
i_buf);
@@ -2532,7 +2576,7 @@ core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope,
* server shall not establish a unit attention condition.
*/
__core_scsi3_complete_pro_release(dev, se_sess->se_node_acl,
pr_reg, 1);
pr_reg, 1, 0);

spin_unlock(&dev->dev_reservation_lock);

@@ -2620,7 +2664,7 @@ core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key)
if (pr_res_holder) {
struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
__core_scsi3_complete_pro_release(dev, pr_res_nacl,
pr_res_holder, 0);
pr_res_holder, 0, 0);
}
spin_unlock(&dev->dev_reservation_lock);
/*
@@ -2679,7 +2723,7 @@ static void __core_scsi3_complete_pro_preempt(
*/
if (dev->dev_pr_res_holder)
__core_scsi3_complete_pro_release(dev, nacl,
dev->dev_pr_res_holder, 0);
dev->dev_pr_res_holder, 0, 0);

dev->dev_pr_res_holder = pr_reg;
pr_reg->pr_res_holder = 1;
@@ -2924,8 +2968,8 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
*/
if (pr_reg_n != pr_res_holder)
__core_scsi3_complete_pro_release(dev,
pr_res_holder->pr_reg_nacl,
dev->dev_pr_res_holder, 0);
pr_res_holder->pr_reg_nacl,
dev->dev_pr_res_holder, 0, 0);
/*
* b) Remove the registrations for all I_T nexuses identified
* by the SERVICE ACTION RESERVATION KEY field, except the
@@ -3059,7 +3103,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char *buf;
unsigned char *initiator_str;
char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
u32 tid_len, tmp_tid_len;
int new_reg = 0, type, scope, matching_iname;
sense_reason_t ret;
@@ -3071,7 +3115,6 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

memset(dest_iport, 0, 64);
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
se_tpg = se_sess->se_tpg;
tf_ops = se_tpg->se_tpg_tfo;
@@ -3389,7 +3432,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
* holder (i.e., the I_T nexus on which the
*/
__core_scsi3_complete_pro_release(dev, pr_res_nacl,
dev->dev_pr_res_holder, 0);
dev->dev_pr_res_holder, 0, 0);
/*
* g) Move the persistent reservation to the specified I_T nexus using
* the same scope and type as the persistent reservation released in
@@ -3837,7 +3880,8 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
unsigned char *buf;
u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
u32 off = 8; /* off into first Full Status descriptor */
int format_code = 0;
int format_code = 0, pr_res_type = 0, pr_res_scope = 0;
bool all_reg = false;

if (cmd->data_length < 8) {
pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
@@ -3854,6 +3898,19 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
buf[3] = (dev->t10_pr.pr_generation & 0xff);

spin_lock(&dev->dev_reservation_lock);
if (dev->dev_pr_res_holder) {
struct t10_pr_registration *pr_holder = dev->dev_pr_res_holder;

if (pr_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG ||
pr_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG) {
all_reg = true;
pr_res_type = pr_holder->pr_res_type;
pr_res_scope = pr_holder->pr_res_scope;
}
}
spin_unlock(&dev->dev_reservation_lock);

spin_lock(&pr_tmpl->registration_lock);
list_for_each_entry_safe(pr_reg, pr_reg_tmp,
&pr_tmpl->registration_list, pr_reg_list) {
@@ -3901,14 +3958,20 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
* reservation holder for PR_HOLDER bit.
*
* Also, if this registration is the reservation
* holder, fill in SCOPE and TYPE in the next byte.
* holder or there is an All Registrants reservation
* active, fill in SCOPE and TYPE in the next byte.
*/
if (pr_reg->pr_res_holder) {
buf[off++] |= 0x01;
buf[off++] = (pr_reg->pr_res_scope & 0xf0) |
(pr_reg->pr_res_type & 0x0f);
} else
} else if (all_reg) {
buf[off++] |= 0x01;
buf[off++] = (pr_res_scope & 0xf0) |
(pr_res_type & 0x0f);
} else {
off += 2;
}

off += 4; /* Skip over reserved area */
/*
@@ -44,6 +44,7 @@

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_backend_configfs.h>

#include "target_core_alua.h"
#include "target_core_pscsi.h"
@@ -1165,6 +1166,26 @@ static void pscsi_req_done(struct request *req, int uptodate)
kfree(pt);
}

DEF_TB_DEV_ATTRIB_RO(pscsi, hw_pi_prot_type);
TB_DEV_ATTR_RO(pscsi, hw_pi_prot_type);

DEF_TB_DEV_ATTRIB_RO(pscsi, hw_block_size);
TB_DEV_ATTR_RO(pscsi, hw_block_size);

DEF_TB_DEV_ATTRIB_RO(pscsi, hw_max_sectors);
TB_DEV_ATTR_RO(pscsi, hw_max_sectors);

DEF_TB_DEV_ATTRIB_RO(pscsi, hw_queue_depth);
TB_DEV_ATTR_RO(pscsi, hw_queue_depth);

static struct configfs_attribute *pscsi_backend_dev_attrs[] = {
&pscsi_dev_attrib_hw_pi_prot_type.attr,
&pscsi_dev_attrib_hw_block_size.attr,
&pscsi_dev_attrib_hw_max_sectors.attr,
&pscsi_dev_attrib_hw_queue_depth.attr,
NULL,
};

static struct se_subsystem_api pscsi_template = {
.name = "pscsi",
.owner = THIS_MODULE,
@@ -1185,6 +1206,11 @@ static struct se_subsystem_api pscsi_template = {

static int __init pscsi_module_init(void)
{
struct target_backend_cits *tbc = &pscsi_template.tb_cits;

target_core_setup_sub_cits(&pscsi_template);
tbc->tb_dev_attrib_cit.ct_attrs = pscsi_backend_dev_attrs;

return transport_subsystem_register(&pscsi_template);
}
@@ -34,6 +34,7 @@

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_backend_configfs.h>

#include "target_core_rd.h"

@@ -632,6 +633,42 @@ rd_parse_cdb(struct se_cmd *cmd)
return sbc_parse_cdb(cmd, &rd_sbc_ops);
}

DEF_TB_DEFAULT_ATTRIBS(rd_mcp);

static struct configfs_attribute *rd_mcp_backend_dev_attrs[] = {
&rd_mcp_dev_attrib_emulate_model_alias.attr,
&rd_mcp_dev_attrib_emulate_dpo.attr,
&rd_mcp_dev_attrib_emulate_fua_write.attr,
&rd_mcp_dev_attrib_emulate_fua_read.attr,
&rd_mcp_dev_attrib_emulate_write_cache.attr,
&rd_mcp_dev_attrib_emulate_ua_intlck_ctrl.attr,
&rd_mcp_dev_attrib_emulate_tas.attr,
&rd_mcp_dev_attrib_emulate_tpu.attr,
&rd_mcp_dev_attrib_emulate_tpws.attr,
&rd_mcp_dev_attrib_emulate_caw.attr,
&rd_mcp_dev_attrib_emulate_3pc.attr,
&rd_mcp_dev_attrib_pi_prot_type.attr,
&rd_mcp_dev_attrib_hw_pi_prot_type.attr,
&rd_mcp_dev_attrib_pi_prot_format.attr,
&rd_mcp_dev_attrib_enforce_pr_isids.attr,
&rd_mcp_dev_attrib_is_nonrot.attr,
&rd_mcp_dev_attrib_emulate_rest_reord.attr,
&rd_mcp_dev_attrib_force_pr_aptpl.attr,
&rd_mcp_dev_attrib_hw_block_size.attr,
&rd_mcp_dev_attrib_block_size.attr,
&rd_mcp_dev_attrib_hw_max_sectors.attr,
&rd_mcp_dev_attrib_fabric_max_sectors.attr,
&rd_mcp_dev_attrib_optimal_sectors.attr,
&rd_mcp_dev_attrib_hw_queue_depth.attr,
&rd_mcp_dev_attrib_queue_depth.attr,
&rd_mcp_dev_attrib_max_unmap_lba_count.attr,
&rd_mcp_dev_attrib_max_unmap_block_desc_count.attr,
&rd_mcp_dev_attrib_unmap_granularity.attr,
&rd_mcp_dev_attrib_unmap_granularity_alignment.attr,
&rd_mcp_dev_attrib_max_write_same_len.attr,
NULL,
};

static struct se_subsystem_api rd_mcp_template = {
.name = "rd_mcp",
.inquiry_prod = "RAMDISK-MCP",
@@ -653,8 +690,12 @@ static struct se_subsystem_api rd_mcp_template = {

int __init rd_module_init(void)
{
struct target_backend_cits *tbc = &rd_mcp_template.tb_cits;
int ret;

target_core_setup_sub_cits(&rd_mcp_template);
tbc->tb_dev_attrib_cit.ct_attrs = rd_mcp_backend_dev_attrs;

ret = transport_subsystem_register(&rd_mcp_template);
if (ret < 0) {
return ret;
@@ -28,6 +28,8 @@
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_backend.h>
#include <target/target_core_backend_configfs.h>

#include <linux/target_core_user.h>

/*
@@ -1092,6 +1094,42 @@ tcmu_parse_cdb(struct se_cmd *cmd)
return ret;
}

DEF_TB_DEFAULT_ATTRIBS(tcmu);

static struct configfs_attribute *tcmu_backend_dev_attrs[] = {
&tcmu_dev_attrib_emulate_model_alias.attr,
&tcmu_dev_attrib_emulate_dpo.attr,
&tcmu_dev_attrib_emulate_fua_write.attr,
&tcmu_dev_attrib_emulate_fua_read.attr,
&tcmu_dev_attrib_emulate_write_cache.attr,
&tcmu_dev_attrib_emulate_ua_intlck_ctrl.attr,
&tcmu_dev_attrib_emulate_tas.attr,
&tcmu_dev_attrib_emulate_tpu.attr,
&tcmu_dev_attrib_emulate_tpws.attr,
&tcmu_dev_attrib_emulate_caw.attr,
&tcmu_dev_attrib_emulate_3pc.attr,
&tcmu_dev_attrib_pi_prot_type.attr,
&tcmu_dev_attrib_hw_pi_prot_type.attr,
&tcmu_dev_attrib_pi_prot_format.attr,
&tcmu_dev_attrib_enforce_pr_isids.attr,
&tcmu_dev_attrib_is_nonrot.attr,
&tcmu_dev_attrib_emulate_rest_reord.attr,
&tcmu_dev_attrib_force_pr_aptpl.attr,
&tcmu_dev_attrib_hw_block_size.attr,
&tcmu_dev_attrib_block_size.attr,
&tcmu_dev_attrib_hw_max_sectors.attr,
&tcmu_dev_attrib_fabric_max_sectors.attr,
&tcmu_dev_attrib_optimal_sectors.attr,
&tcmu_dev_attrib_hw_queue_depth.attr,
&tcmu_dev_attrib_queue_depth.attr,
&tcmu_dev_attrib_max_unmap_lba_count.attr,
&tcmu_dev_attrib_max_unmap_block_desc_count.attr,
&tcmu_dev_attrib_unmap_granularity.attr,
&tcmu_dev_attrib_unmap_granularity_alignment.attr,
&tcmu_dev_attrib_max_write_same_len.attr,
NULL,
};

static struct se_subsystem_api tcmu_template = {
.name = "user",
.inquiry_prod = "USER",
@@ -1112,6 +1150,7 @@ static struct se_subsystem_api tcmu_template = {

static int __init tcmu_module_init(void)
{
struct target_backend_cits *tbc = &tcmu_template.tb_cits;
int ret;

BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
@@ -1134,6 +1173,9 @@ static int __init tcmu_module_init(void)
goto out_unreg_device;
}

target_core_setup_sub_cits(&tcmu_template);
tbc->tb_dev_attrib_cit.ct_attrs = tcmu_backend_dev_attrs;

ret = transport_subsystem_register(&tcmu_template);
if (ret)
goto out_unreg_genl;
@@ -5,6 +5,15 @@
#define TRANSPORT_PLUGIN_VHBA_PDEV 2
#define TRANSPORT_PLUGIN_VHBA_VDEV 3

struct target_backend_cits {
struct config_item_type tb_dev_cit;
struct config_item_type tb_dev_attrib_cit;
struct config_item_type tb_dev_pr_cit;
struct config_item_type tb_dev_wwn_cit;
struct config_item_type tb_dev_alua_tg_pt_gps_cit;
struct config_item_type tb_dev_stat_cit;
};

struct se_subsystem_api {
struct list_head sub_api_list;

@@ -44,6 +53,8 @@ struct se_subsystem_api {
int (*init_prot)(struct se_device *);
int (*format_prot)(struct se_device *);
void (*free_prot)(struct se_device *);

struct target_backend_cits tb_cits;
};

struct sbc_ops {
@@ -96,4 +107,36 @@ sense_reason_t transport_generic_map_mem_to_cmd(struct se_cmd *,

void array_free(void *array, int n);

/* From target_core_configfs.c to setup default backend config_item_types */
void target_core_setup_sub_cits(struct se_subsystem_api *);

/* attribute helpers from target_core_device.c for backend drivers */
int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
int se_dev_set_unmap_granularity(struct se_device *, u32);
int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
int se_dev_set_max_write_same_len(struct se_device *, u32);
int se_dev_set_emulate_model_alias(struct se_device *, int);
int se_dev_set_emulate_dpo(struct se_device *, int);
int se_dev_set_emulate_fua_write(struct se_device *, int);
int se_dev_set_emulate_fua_read(struct se_device *, int);
int se_dev_set_emulate_write_cache(struct se_device *, int);
int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
int se_dev_set_emulate_tas(struct se_device *, int);
int se_dev_set_emulate_tpu(struct se_device *, int);
int se_dev_set_emulate_tpws(struct se_device *, int);
int se_dev_set_emulate_caw(struct se_device *, int);
int se_dev_set_emulate_3pc(struct se_device *, int);
int se_dev_set_pi_prot_type(struct se_device *, int);
int se_dev_set_pi_prot_format(struct se_device *, int);
int se_dev_set_enforce_pr_isids(struct se_device *, int);
int se_dev_set_force_pr_aptpl(struct se_device *, int);
int se_dev_set_is_nonrot(struct se_device *, int);
int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
int se_dev_set_queue_depth(struct se_device *, u32);
int se_dev_set_max_sectors(struct se_device *, u32);
int se_dev_set_fabric_max_sectors(struct se_device *, u32);
int se_dev_set_optimal_sectors(struct se_device *, u32);
int se_dev_set_block_size(struct se_device *, u32);

#endif /* TARGET_CORE_BACKEND_H */
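With these se_dev_set_*() helpers now visible to backend drivers, a backend's configfs store routine can parse and apply a value directly. A hypothetical handler for the queue_depth attribute of a made-up "foo" backend (essentially what the DEF_TB_DEV_ATTRIB_STORE macro in the new header below generates) might look roughly like this:

/* Hypothetical store handler; "foo" is not part of this merge. */
static ssize_t foo_dev_store_attr_queue_depth(struct se_dev_attrib *da,
					      const char *page, size_t count)
{
	unsigned long val;
	int ret;

	/* Parse the configfs write, then hand the value to the core helper,
	 * which validates it against the device before applying it. */
	ret = kstrtoul(page, 0, &val);
	if (ret < 0)
		return -EINVAL;

	ret = se_dev_set_queue_depth(da->da_dev, (u32)val);
	return ret ? -EINVAL : count;
}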
include/target/target_core_backend_configfs.h (new file, 120 lines)
@@ -0,0 +1,120 @@
#ifndef TARGET_CORE_BACKEND_CONFIGFS_H
#define TARGET_CORE_BACKEND_CONFIGFS_H

#include <target/configfs_macros.h>

#define DEF_TB_DEV_ATTRIB_SHOW(_backend, _name) \
static ssize_t _backend##_dev_show_attr_##_name( \
struct se_dev_attrib *da, \
char *page) \
{ \
return snprintf(page, PAGE_SIZE, "%u\n", \
(u32)da->da_dev->dev_attrib._name); \
}

#define DEF_TB_DEV_ATTRIB_STORE(_backend, _name) \
static ssize_t _backend##_dev_store_attr_##_name( \
struct se_dev_attrib *da, \
const char *page, \
size_t count) \
{ \
unsigned long val; \
int ret; \
\
ret = kstrtoul(page, 0, &val); \
if (ret < 0) { \
pr_err("kstrtoul() failed with ret: %d\n", ret); \
return -EINVAL; \
} \
ret = se_dev_set_##_name(da->da_dev, (u32)val); \
\
return (!ret) ? count : -EINVAL; \
}

#define DEF_TB_DEV_ATTRIB(_backend, _name) \
DEF_TB_DEV_ATTRIB_SHOW(_backend, _name); \
DEF_TB_DEV_ATTRIB_STORE(_backend, _name);

#define DEF_TB_DEV_ATTRIB_RO(_backend, name) \
DEF_TB_DEV_ATTRIB_SHOW(_backend, name);

CONFIGFS_EATTR_STRUCT(target_backend_dev_attrib, se_dev_attrib);
#define TB_DEV_ATTR(_backend, _name, _mode) \
static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \
__CONFIGFS_EATTR(_name, _mode, \
_backend##_dev_show_attr_##_name, \
_backend##_dev_store_attr_##_name);

#define TB_DEV_ATTR_RO(_backend, _name) \
static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \
__CONFIGFS_EATTR_RO(_name, \
_backend##_dev_show_attr_##_name);

/*
* Default list of target backend device attributes as defined by
* struct se_dev_attrib
*/

#define DEF_TB_DEFAULT_ATTRIBS(_backend) \
DEF_TB_DEV_ATTRIB(_backend, emulate_model_alias); \
TB_DEV_ATTR(_backend, emulate_model_alias, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, emulate_dpo); \
TB_DEV_ATTR(_backend, emulate_dpo, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, emulate_fua_write); \
TB_DEV_ATTR(_backend, emulate_fua_write, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, emulate_fua_read); \
TB_DEV_ATTR(_backend, emulate_fua_read, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, emulate_write_cache); \
TB_DEV_ATTR(_backend, emulate_write_cache, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, emulate_ua_intlck_ctrl); \
TB_DEV_ATTR(_backend, emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, emulate_tas); \
TB_DEV_ATTR(_backend, emulate_tas, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, emulate_tpu); \
TB_DEV_ATTR(_backend, emulate_tpu, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, emulate_tpws); \
TB_DEV_ATTR(_backend, emulate_tpws, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, emulate_caw); \
TB_DEV_ATTR(_backend, emulate_caw, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, emulate_3pc); \
TB_DEV_ATTR(_backend, emulate_3pc, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, pi_prot_type); \
TB_DEV_ATTR(_backend, pi_prot_type, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB_RO(_backend, hw_pi_prot_type); \
TB_DEV_ATTR_RO(_backend, hw_pi_prot_type); \
DEF_TB_DEV_ATTRIB(_backend, pi_prot_format); \
TB_DEV_ATTR(_backend, pi_prot_format, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, enforce_pr_isids); \
TB_DEV_ATTR(_backend, enforce_pr_isids, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, is_nonrot); \
TB_DEV_ATTR(_backend, is_nonrot, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, emulate_rest_reord); \
TB_DEV_ATTR(_backend, emulate_rest_reord, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, force_pr_aptpl); \
TB_DEV_ATTR(_backend, force_pr_aptpl, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB_RO(_backend, hw_block_size); \
TB_DEV_ATTR_RO(_backend, hw_block_size); \
DEF_TB_DEV_ATTRIB(_backend, block_size); \
TB_DEV_ATTR(_backend, block_size, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB_RO(_backend, hw_max_sectors); \
TB_DEV_ATTR_RO(_backend, hw_max_sectors); \
DEF_TB_DEV_ATTRIB(_backend, fabric_max_sectors); \
TB_DEV_ATTR(_backend, fabric_max_sectors, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, optimal_sectors); \
TB_DEV_ATTR(_backend, optimal_sectors, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB_RO(_backend, hw_queue_depth); \
TB_DEV_ATTR_RO(_backend, hw_queue_depth); \
DEF_TB_DEV_ATTRIB(_backend, queue_depth); \
TB_DEV_ATTR(_backend, queue_depth, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, max_unmap_lba_count); \
TB_DEV_ATTR(_backend, max_unmap_lba_count, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, max_unmap_block_desc_count); \
TB_DEV_ATTR(_backend, max_unmap_block_desc_count, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, unmap_granularity); \
TB_DEV_ATTR(_backend, unmap_granularity, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, unmap_granularity_alignment); \
TB_DEV_ATTR(_backend, unmap_granularity_alignment, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, max_write_same_len); \
TB_DEV_ATTR(_backend, max_write_same_len, S_IRUGO | S_IWUSR);

#endif /* TARGET_CORE_BACKEND_CONFIGFS_H */
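For reference, DEF_TB_DEV_ATTRIB_RO(foo, hw_block_size) followed by TB_DEV_ATTR_RO(foo, hw_block_size) expands, for a hypothetical "foo" backend, to roughly the following (token pasting resolved and line continuations tidied for readability; "foo" is not a backend in this merge):

/* Show handler generated by DEF_TB_DEV_ATTRIB_SHOW(foo, hw_block_size):
 * reads the value straight out of the device's se_dev_attrib. */
static ssize_t foo_dev_show_attr_hw_block_size(struct se_dev_attrib *da,
					       char *page)
{
	return snprintf(page, PAGE_SIZE, "%u\n",
			(u32)da->da_dev->dev_attrib.hw_block_size);
}

/* Read-only configfs attribute generated by TB_DEV_ATTR_RO(), using the
 * target_backend_dev_attrib_attribute type from CONFIGFS_EATTR_STRUCT(). */
static struct target_backend_dev_attrib_attribute foo_dev_attrib_hw_block_size =
	__CONFIGFS_EATTR_RO(hw_block_size,
			    foo_dev_show_attr_hw_block_size);

The read-write variant (DEF_TB_DEV_ATTRIB plus TB_DEV_ATTR) additionally generates a store handler of the shape shown after the target_core_backend.h hunk above, calling the matching se_dev_set_*() helper.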
@@ -6,10 +6,6 @@
#include <linux/types.h>
#include <linux/uio.h>

#ifndef __packed
#define __packed __attribute__((packed))
#endif

#define TCMU_VERSION "1.0"

/*