Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-11-24 19:00:53 +07:00)
Merge branch 'nvme-4.21' of git://git.infradead.org/nvme into for-4.21/block
Pull NVMe updates from Christoph:

"Here is the second large chunk of nvme updates for 4.21:

 - host and target support for NVMe over TCP (Sagi Grimberg, Roy Shterman,
   Solganik Alexander)
 - error log page support in target (Chaitanya Kulkarni)

 plus small fixes and improvements from Jens Axboe and Chengguang Xu."

* 'nvme-4.21' of git://git.infradead.org/nvme: (33 commits)
  nvme-rdma: support separate queue maps for read and write
  nvme-tcp: support separate queue maps for read and write
  nvme-fabrics: allow user to set nr_write_queues for separate queue maps
  nvme-fabrics: add missing nvmf_ctrl_options documentation
  blk-mq-rdma: pass in queue map to blk_mq_rdma_map_queues
  nvmet: update smart log with num err log entries
  nvmet: add error log page cmd handler
  nvmet: add error log support for file backend
  nvmet: add error log support for bdev backend
  nvmet: add error log support for admin-cmd
  nvmet: add error log support for rdma backend
  nvmet: add error log support for fabrics-cmd
  nvmet: add error log support in the core
  nvmet: add interface to update error-log page
  nvmet: add error-log definitions
  nvme: add error log page slot definition
  nvme: remove nvme_common command cdw10 array
  nvmet: remove unused variable
  nvme: provide fallback for discard alloc failure
  nvme: add __exit annotation
  ...
commit 2d9a058e3f
@@ -29,24 +29,24 @@
  * @set->nr_hw_queues, or @dev does not provide an affinity mask for a
  * vector, we fallback to the naive mapping.
  */
-int blk_mq_rdma_map_queues(struct blk_mq_tag_set *set,
+int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
 		struct ib_device *dev, int first_vec)
 {
 	const struct cpumask *mask;
 	unsigned int queue, cpu;
 
-	for (queue = 0; queue < set->nr_hw_queues; queue++) {
+	for (queue = 0; queue < map->nr_queues; queue++) {
 		mask = ib_get_vector_affinity(dev, first_vec + queue);
 		if (!mask)
 			goto fallback;
 
 		for_each_cpu(cpu, mask)
-			set->map[0].mq_map[cpu] = queue;
+			map->mq_map[cpu] = map->queue_offset + queue;
 	}
 
 	return 0;
 
 fallback:
-	return blk_mq_map_queues(&set->map[0]);
+	return blk_mq_map_queues(map);
 }
 EXPORT_SYMBOL_GPL(blk_mq_rdma_map_queues);
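For orientation, here is a minimal, hypothetical caller showing how the reworked helper is now invoked once per queue map instead of once per tag set. The driver type and callback name (foo_*) are illustrative only; the real in-tree usage is the nvme-rdma map_queues change further down in this series.

```c
#include <linux/blk-mq.h>
#include <linux/blk-mq-rdma.h>

/* Hypothetical fabrics driver with a single default map. */
static int foo_rdma_map_queues(struct blk_mq_tag_set *set)
{
	struct foo_ctrl *ctrl = set->driver_data;	/* assumed driver type */

	/*
	 * Old API: blk_mq_rdma_map_queues(set, ctrl->ibdev, 0);
	 * New API: the caller picks the map and sets its range itself.
	 */
	set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;
	set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
	return blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT],
				      ctrl->ibdev, 0);
}
```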
@ -1322,7 +1322,7 @@ static int ath6kl_cfg80211_set_default_key(struct wiphy *wiphy,
|
||||
struct ath6kl_vif *vif = netdev_priv(ndev);
|
||||
struct ath6kl_key *key = NULL;
|
||||
u8 key_usage;
|
||||
enum crypto_type key_type = NONE_CRYPT;
|
||||
enum ath6kl_crypto_type key_type = NONE_CRYPT;
|
||||
|
||||
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
|
||||
|
||||
|
@ -67,7 +67,7 @@ struct ath6kl_llc_snap_hdr {
|
||||
__be16 eth_type;
|
||||
} __packed;
|
||||
|
||||
enum crypto_type {
|
||||
enum ath6kl_crypto_type {
|
||||
NONE_CRYPT = 0x01,
|
||||
WEP_CRYPT = 0x02,
|
||||
TKIP_CRYPT = 0x04,
|
||||
|
@ -1849,9 +1849,9 @@ int ath6kl_wmi_connect_cmd(struct wmi *wmi, u8 if_idx,
|
||||
enum network_type nw_type,
|
||||
enum dot11_auth_mode dot11_auth_mode,
|
||||
enum auth_mode auth_mode,
|
||||
enum crypto_type pairwise_crypto,
|
||||
enum ath6kl_crypto_type pairwise_crypto,
|
||||
u8 pairwise_crypto_len,
|
||||
enum crypto_type group_crypto,
|
||||
enum ath6kl_crypto_type group_crypto,
|
||||
u8 group_crypto_len, int ssid_len, u8 *ssid,
|
||||
u8 *bssid, u16 channel, u32 ctrl_flags,
|
||||
u8 nw_subtype)
|
||||
@ -2301,7 +2301,7 @@ int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 if_idx, u8 timeout)
|
||||
}
|
||||
|
||||
int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index,
|
||||
enum crypto_type key_type,
|
||||
enum ath6kl_crypto_type key_type,
|
||||
u8 key_usage, u8 key_len,
|
||||
u8 *key_rsc, unsigned int key_rsc_len,
|
||||
u8 *key_material,
|
||||
|
@ -2556,9 +2556,9 @@ int ath6kl_wmi_connect_cmd(struct wmi *wmi, u8 if_idx,
|
||||
enum network_type nw_type,
|
||||
enum dot11_auth_mode dot11_auth_mode,
|
||||
enum auth_mode auth_mode,
|
||||
enum crypto_type pairwise_crypto,
|
||||
enum ath6kl_crypto_type pairwise_crypto,
|
||||
u8 pairwise_crypto_len,
|
||||
enum crypto_type group_crypto,
|
||||
enum ath6kl_crypto_type group_crypto,
|
||||
u8 group_crypto_len, int ssid_len, u8 *ssid,
|
||||
u8 *bssid, u16 channel, u32 ctrl_flags,
|
||||
u8 nw_subtype);
|
||||
@ -2610,7 +2610,7 @@ int ath6kl_wmi_config_debug_module_cmd(struct wmi *wmi, u32 valid, u32 config);
|
||||
|
||||
int ath6kl_wmi_get_stats_cmd(struct wmi *wmi, u8 if_idx);
|
||||
int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index,
|
||||
enum crypto_type key_type,
|
||||
enum ath6kl_crypto_type key_type,
|
||||
u8 key_usage, u8 key_len,
|
||||
u8 *key_rsc, unsigned int key_rsc_len,
|
||||
u8 *key_material,
|
||||
|
@ -57,3 +57,18 @@ config NVME_FC
|
||||
from https://github.com/linux-nvme/nvme-cli.
|
||||
|
||||
If unsure, say N.
|
||||
|
||||
config NVME_TCP
|
||||
tristate "NVM Express over Fabrics TCP host driver"
|
||||
depends on INET
|
||||
depends on BLK_DEV_NVME
|
||||
select NVME_FABRICS
|
||||
help
|
||||
This provides support for the NVMe over Fabrics protocol using
|
||||
the TCP transport. This allows you to use remote block devices
|
||||
exported using the NVMe protocol set.
|
||||
|
||||
To configure a NVMe over Fabrics controller use the nvme-cli tool
|
||||
from https://github.com/linux-nvme/nvme-cli.
|
||||
|
||||
If unsure, say N.
|
||||
|
@ -7,6 +7,7 @@ obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
|
||||
obj-$(CONFIG_NVME_FABRICS) += nvme-fabrics.o
|
||||
obj-$(CONFIG_NVME_RDMA) += nvme-rdma.o
|
||||
obj-$(CONFIG_NVME_FC) += nvme-fc.o
|
||||
obj-$(CONFIG_NVME_TCP) += nvme-tcp.o
|
||||
|
||||
nvme-core-y := core.o
|
||||
nvme-core-$(CONFIG_TRACING) += trace.o
|
||||
@ -21,3 +22,5 @@ nvme-fabrics-y += fabrics.o
|
||||
nvme-rdma-y += rdma.o
|
||||
|
||||
nvme-fc-y += fc.o
|
||||
|
||||
nvme-tcp-y += tcp.o
|
||||
|
@@ -564,10 +564,20 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
 	struct nvme_dsm_range *range;
 	struct bio *bio;
 
-	range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
-	if (!range)
-		return BLK_STS_RESOURCE;
+	range = kmalloc_array(segments, sizeof(*range),
+			GFP_ATOMIC | __GFP_NOWARN);
+	if (!range) {
+		/*
+		 * If we fail allocation our range, fallback to the controller
+		 * discard page. If that's also busy, it's safe to return
+		 * busy, as we know we can make progress once that's freed.
+		 */
+		if (test_and_set_bit_lock(0, &ns->ctrl->discard_page_busy))
+			return BLK_STS_RESOURCE;
+
+		range = page_address(ns->ctrl->discard_page);
+	}
 
 	__rq_for_each_bio(bio, req) {
 		u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
 		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
@@ -581,6 +591,9 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
 	}
 
 	if (WARN_ON_ONCE(n != segments)) {
-		kfree(range);
+		if (virt_to_page(range) == ns->ctrl->discard_page)
+			clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
+		else
+			kfree(range);
 		return BLK_STS_IOERR;
 	}
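The change above ("nvme: provide fallback for discard alloc failure") reserves one pre-allocated page per controller and guards it with a single bit lock, so a discard can still make forward progress when the atomic allocation fails. A stripped-down sketch of the same pattern with hypothetical names, assuming the buffer always fits in one page (the NVMe code enforces this with a BUILD_BUG_ON):

```c
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/slab.h>

/* Hypothetical per-device state mirroring ctrl->discard_page(_busy). */
struct foo_dev {
	struct page *fallback_page;	/* alloc_page(GFP_KERNEL) at init */
	unsigned long fallback_busy;	/* bit 0: fallback page in use */
};

static void *foo_get_buf(struct foo_dev *dev, size_t len)
{
	void *buf = kmalloc(len, GFP_ATOMIC | __GFP_NOWARN);

	if (buf)
		return buf;
	/*
	 * Fall back to the reserved page; if it is busy the caller must
	 * retry later (the NVMe code returns BLK_STS_RESOURCE here).
	 */
	if (test_and_set_bit_lock(0, &dev->fallback_busy))
		return NULL;
	return page_address(dev->fallback_page);
}

static void foo_put_buf(struct foo_dev *dev, void *buf)
{
	if (buf == page_address(dev->fallback_page))
		clear_bit_unlock(0, &dev->fallback_busy);
	else
		kfree(buf);
}
```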
@ -664,8 +677,13 @@ void nvme_cleanup_cmd(struct request *req)
|
||||
blk_rq_bytes(req) >> ns->lba_shift);
|
||||
}
|
||||
if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
|
||||
kfree(page_address(req->special_vec.bv_page) +
|
||||
req->special_vec.bv_offset);
|
||||
struct nvme_ns *ns = req->rq_disk->private_data;
|
||||
struct page *page = req->special_vec.bv_page;
|
||||
|
||||
if (page == ns->ctrl->discard_page)
|
||||
clear_bit_unlock(0, &ns->ctrl->discard_page_busy);
|
||||
else
|
||||
kfree(page_address(page) + req->special_vec.bv_offset);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
|
||||
@ -1265,12 +1283,12 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
|
||||
c.common.nsid = cpu_to_le32(cmd.nsid);
|
||||
c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
|
||||
c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
|
||||
c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
|
||||
c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
|
||||
c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
|
||||
c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
|
||||
c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
|
||||
c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);
|
||||
c.common.cdw10 = cpu_to_le32(cmd.cdw10);
|
||||
c.common.cdw11 = cpu_to_le32(cmd.cdw11);
|
||||
c.common.cdw12 = cpu_to_le32(cmd.cdw12);
|
||||
c.common.cdw13 = cpu_to_le32(cmd.cdw13);
|
||||
c.common.cdw14 = cpu_to_le32(cmd.cdw14);
|
||||
c.common.cdw15 = cpu_to_le32(cmd.cdw15);
|
||||
|
||||
if (cmd.timeout_ms)
|
||||
timeout = msecs_to_jiffies(cmd.timeout_ms);
|
||||
@ -1631,7 +1649,7 @@ static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
|
||||
memset(&c, 0, sizeof(c));
|
||||
c.common.opcode = op;
|
||||
c.common.nsid = cpu_to_le32(ns->head->ns_id);
|
||||
c.common.cdw10[0] = cpu_to_le32(cdw10);
|
||||
c.common.cdw10 = cpu_to_le32(cdw10);
|
||||
|
||||
ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
|
||||
nvme_put_ns_from_disk(head, srcu_idx);
|
||||
@ -1705,8 +1723,8 @@ int nvme_sec_submit(void *data, u16 spsp, u8 secp, void *buffer, size_t len,
|
||||
else
|
||||
cmd.common.opcode = nvme_admin_security_recv;
|
||||
cmd.common.nsid = 0;
|
||||
cmd.common.cdw10[0] = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
|
||||
cmd.common.cdw10[1] = cpu_to_le32(len);
|
||||
cmd.common.cdw10 = cpu_to_le32(((u32)secp) << 24 | ((u32)spsp) << 8);
|
||||
cmd.common.cdw11 = cpu_to_le32(len);
|
||||
|
||||
return __nvme_submit_sync_cmd(ctrl->admin_q, &cmd, NULL, buffer, len,
|
||||
ADMIN_TIMEOUT, NVME_QID_ANY, 1, 0);
|
||||
@ -3578,6 +3596,7 @@ static void nvme_free_ctrl(struct device *dev)
|
||||
ida_simple_remove(&nvme_instance_ida, ctrl->instance);
|
||||
kfree(ctrl->effects);
|
||||
nvme_mpath_uninit(ctrl);
|
||||
kfree(ctrl->discard_page);
|
||||
|
||||
if (subsys) {
|
||||
mutex_lock(&subsys->lock);
|
||||
@ -3618,6 +3637,14 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
|
||||
memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
|
||||
ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
|
||||
|
||||
BUILD_BUG_ON(NVME_DSM_MAX_RANGES * sizeof(struct nvme_dsm_range) >
|
||||
PAGE_SIZE);
|
||||
ctrl->discard_page = alloc_page(GFP_KERNEL);
|
||||
if (!ctrl->discard_page) {
|
||||
ret = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = ida_simple_get(&nvme_instance_ida, 0, 0, GFP_KERNEL);
|
||||
if (ret < 0)
|
||||
goto out;
|
||||
@ -3655,6 +3682,8 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
|
||||
out_release_instance:
|
||||
ida_simple_remove(&nvme_instance_ida, ctrl->instance);
|
||||
out:
|
||||
if (ctrl->discard_page)
|
||||
__free_page(ctrl->discard_page);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nvme_init_ctrl);
|
||||
@ -3802,7 +3831,7 @@ int __init nvme_core_init(void)
|
||||
return result;
|
||||
}
|
||||
|
||||
void nvme_core_exit(void)
|
||||
void __exit nvme_core_exit(void)
|
||||
{
|
||||
ida_destroy(&nvme_subsystems_ida);
|
||||
class_destroy(nvme_subsys_class);
|
||||
|
@ -614,6 +614,9 @@ static const match_table_t opt_tokens = {
|
||||
{ NVMF_OPT_HOST_ID, "hostid=%s" },
|
||||
{ NVMF_OPT_DUP_CONNECT, "duplicate_connect" },
|
||||
{ NVMF_OPT_DISABLE_SQFLOW, "disable_sqflow" },
|
||||
{ NVMF_OPT_HDR_DIGEST, "hdr_digest" },
|
||||
{ NVMF_OPT_DATA_DIGEST, "data_digest" },
|
||||
{ NVMF_OPT_NR_WRITE_QUEUES, "nr_write_queues=%d" },
|
||||
{ NVMF_OPT_ERR, NULL }
|
||||
};
|
||||
|
||||
@ -633,6 +636,8 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
|
||||
opts->reconnect_delay = NVMF_DEF_RECONNECT_DELAY;
|
||||
opts->kato = NVME_DEFAULT_KATO;
|
||||
opts->duplicate_connect = false;
|
||||
opts->hdr_digest = false;
|
||||
opts->data_digest = false;
|
||||
|
||||
options = o = kstrdup(buf, GFP_KERNEL);
|
||||
if (!options)
|
||||
@ -827,6 +832,24 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
|
||||
case NVMF_OPT_DISABLE_SQFLOW:
|
||||
opts->disable_sqflow = true;
|
||||
break;
|
||||
case NVMF_OPT_HDR_DIGEST:
|
||||
opts->hdr_digest = true;
|
||||
break;
|
||||
case NVMF_OPT_DATA_DIGEST:
|
||||
opts->data_digest = true;
|
||||
break;
|
||||
case NVMF_OPT_NR_WRITE_QUEUES:
|
||||
if (match_int(args, &token)) {
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
if (token <= 0) {
|
||||
pr_err("Invalid nr_write_queues %d\n", token);
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
opts->nr_write_queues = token;
|
||||
break;
|
||||
default:
|
||||
pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
|
||||
p);
|
||||
|
@ -59,6 +59,9 @@ enum {
|
||||
NVMF_OPT_HOST_ID = 1 << 12,
|
||||
NVMF_OPT_DUP_CONNECT = 1 << 13,
|
||||
NVMF_OPT_DISABLE_SQFLOW = 1 << 14,
|
||||
NVMF_OPT_HDR_DIGEST = 1 << 15,
|
||||
NVMF_OPT_DATA_DIGEST = 1 << 16,
|
||||
NVMF_OPT_NR_WRITE_QUEUES = 1 << 17,
|
||||
};
|
||||
|
||||
/**
|
||||
@ -86,6 +89,10 @@ enum {
|
||||
* @max_reconnects: maximum number of allowed reconnect attempts before removing
|
||||
* the controller, (-1) means reconnect forever, zero means remove
|
||||
* immediately;
|
||||
* @disable_sqflow: disable controller sq flow control
|
||||
* @hdr_digest: generate/verify header digest (TCP)
|
||||
* @data_digest: generate/verify data digest (TCP)
|
||||
* @nr_write_queues: number of queues for write I/O
|
||||
*/
|
||||
struct nvmf_ctrl_options {
|
||||
unsigned mask;
|
||||
@ -103,6 +110,9 @@ struct nvmf_ctrl_options {
|
||||
struct nvmf_host *host;
|
||||
int max_reconnects;
|
||||
bool disable_sqflow;
|
||||
bool hdr_digest;
|
||||
bool data_digest;
|
||||
unsigned int nr_write_queues;
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -937,9 +937,9 @@ static int nvme_nvm_user_vcmd(struct nvme_ns *ns, int admin,
|
||||
/* cdw11-12 */
|
||||
c.ph_rw.length = cpu_to_le16(vcmd.nppas);
|
||||
c.ph_rw.control = cpu_to_le16(vcmd.control);
|
||||
c.common.cdw10[3] = cpu_to_le32(vcmd.cdw13);
|
||||
c.common.cdw10[4] = cpu_to_le32(vcmd.cdw14);
|
||||
c.common.cdw10[5] = cpu_to_le32(vcmd.cdw15);
|
||||
c.common.cdw13 = cpu_to_le32(vcmd.cdw13);
|
||||
c.common.cdw14 = cpu_to_le32(vcmd.cdw14);
|
||||
c.common.cdw15 = cpu_to_le32(vcmd.cdw15);
|
||||
|
||||
if (vcmd.timeout_ms)
|
||||
timeout = msecs_to_jiffies(vcmd.timeout_ms);
|
||||
|
@ -241,6 +241,9 @@ struct nvme_ctrl {
|
||||
u16 maxcmd;
|
||||
int nr_reconnects;
|
||||
struct nvmf_ctrl_options *opts;
|
||||
|
||||
struct page *discard_page;
|
||||
unsigned long discard_page_busy;
|
||||
};
|
||||
|
||||
struct nvme_subsystem {
|
||||
@ -565,6 +568,6 @@ static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev)
|
||||
}
|
||||
|
||||
int __init nvme_core_init(void);
|
||||
void nvme_core_exit(void);
|
||||
void __exit nvme_core_exit(void);
|
||||
|
||||
#endif /* _NVME_H */
|
||||
|
@ -645,6 +645,8 @@ static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl)
|
||||
nr_io_queues = min_t(unsigned int, nr_io_queues,
|
||||
ibdev->num_comp_vectors);
|
||||
|
||||
nr_io_queues += min(opts->nr_write_queues, num_online_cpus());
|
||||
|
||||
ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -714,6 +716,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
|
||||
set->driver_data = ctrl;
|
||||
set->nr_hw_queues = nctrl->queue_count - 1;
|
||||
set->timeout = NVME_IO_TIMEOUT;
|
||||
set->nr_maps = 2 /* default + read */;
|
||||
}
|
||||
|
||||
ret = blk_mq_alloc_tag_set(set);
|
||||
@@ -1751,7 +1754,25 @@ static int nvme_rdma_map_queues(struct blk_mq_tag_set *set)
 {
 	struct nvme_rdma_ctrl *ctrl = set->driver_data;
 
-	return blk_mq_rdma_map_queues(set, ctrl->device->dev, 0);
+	set->map[HCTX_TYPE_DEFAULT].queue_offset = 0;
+	set->map[HCTX_TYPE_READ].nr_queues = ctrl->ctrl.opts->nr_io_queues;
+	if (ctrl->ctrl.opts->nr_write_queues) {
+		/* separate read/write queues */
+		set->map[HCTX_TYPE_DEFAULT].nr_queues =
+			ctrl->ctrl.opts->nr_write_queues;
+		set->map[HCTX_TYPE_READ].queue_offset =
+			ctrl->ctrl.opts->nr_write_queues;
+	} else {
+		/* mixed read/write queues */
+		set->map[HCTX_TYPE_DEFAULT].nr_queues =
+			ctrl->ctrl.opts->nr_io_queues;
+		set->map[HCTX_TYPE_READ].queue_offset = 0;
+	}
+	blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_DEFAULT],
+			ctrl->device->dev, 0);
+	blk_mq_rdma_map_queues(&set->map[HCTX_TYPE_READ],
+			ctrl->device->dev, 0);
+	return 0;
 }
 
 static const struct blk_mq_ops nvme_rdma_mq_ops = {
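To make the offsets above concrete, here is a comment-only walk-through with illustrative numbers (not taken from the patch), using the queue_count arithmetic from the nvme_rdma_create_ctrl and alloc_io_queues changes in this series:

```c
/*
 * Illustrative values only:
 *   opts->nr_write_queues = 2, opts->nr_io_queues = 6
 *   ctrl->ctrl.queue_count = 6 + 2 + 1 = 9, so nr_hw_queues = 8 (+1 admin)
 *
 * HCTX_TYPE_DEFAULT (writes): nr_queues = 2, queue_offset = 0 -> hwq 0..1
 * HCTX_TYPE_READ    (reads):  nr_queues = 6, queue_offset = 2 -> hwq 2..7
 *
 * blk_mq_rdma_map_queues() then runs once per map; inside it,
 * mq_map[cpu] = map->queue_offset + queue keeps the two ranges disjoint,
 * so read and write I/O end up on separate hardware queues.
 */
```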
@ -1906,7 +1927,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
|
||||
INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
|
||||
INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work);
|
||||
|
||||
ctrl->ctrl.queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */
|
||||
ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues + 1;
|
||||
ctrl->ctrl.sqsize = opts->queue_size - 1;
|
||||
ctrl->ctrl.kato = opts->kato;
|
||||
|
||||
@ -1957,7 +1978,8 @@ static struct nvmf_transport_ops nvme_rdma_transport = {
|
||||
.module = THIS_MODULE,
|
||||
.required_opts = NVMF_OPT_TRADDR,
|
||||
.allowed_opts = NVMF_OPT_TRSVCID | NVMF_OPT_RECONNECT_DELAY |
|
||||
NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO,
|
||||
NVMF_OPT_HOST_TRADDR | NVMF_OPT_CTRL_LOSS_TMO |
|
||||
NVMF_OPT_NR_WRITE_QUEUES,
|
||||
.create_ctrl = nvme_rdma_create_ctrl,
|
||||
};
|
||||
|
||||
|
drivers/nvme/host/tcp.c (new file, 2277 lines; diff suppressed because it is too large)
@ -115,8 +115,8 @@ TRACE_EVENT(nvme_setup_cmd,
|
||||
__entry->nsid = le32_to_cpu(cmd->common.nsid);
|
||||
__entry->metadata = le64_to_cpu(cmd->common.metadata);
|
||||
__assign_disk_name(__entry->disk, req->rq_disk);
|
||||
memcpy(__entry->cdw10, cmd->common.cdw10,
|
||||
sizeof(__entry->cdw10));
|
||||
memcpy(__entry->cdw10, &cmd->common.cdw10,
|
||||
6 * sizeof(__entry->cdw10));
|
||||
),
|
||||
TP_printk("nvme%d: %sqid=%d, cmdid=%u, nsid=%u, flags=0x%x, meta=0x%llx, cmd=(%s %s)",
|
||||
__entry->ctrl_id, __print_disk_name(__entry->disk),
|
||||
|
@ -60,3 +60,13 @@ config NVME_TARGET_FCLOOP
|
||||
to test NVMe-FC transport interfaces.
|
||||
|
||||
If unsure, say N.
|
||||
|
||||
config NVME_TARGET_TCP
|
||||
tristate "NVMe over Fabrics TCP target support"
|
||||
depends on INET
|
||||
depends on NVME_TARGET
|
||||
help
|
||||
This enables the NVMe TCP target support, which allows exporting NVMe
|
||||
devices over TCP.
|
||||
|
||||
If unsure, say N.
|
||||
|
@ -5,6 +5,7 @@ obj-$(CONFIG_NVME_TARGET_LOOP) += nvme-loop.o
|
||||
obj-$(CONFIG_NVME_TARGET_RDMA) += nvmet-rdma.o
|
||||
obj-$(CONFIG_NVME_TARGET_FC) += nvmet-fc.o
|
||||
obj-$(CONFIG_NVME_TARGET_FCLOOP) += nvme-fcloop.o
|
||||
obj-$(CONFIG_NVME_TARGET_TCP) += nvmet-tcp.o
|
||||
|
||||
nvmet-y += core.o configfs.o admin-cmd.o fabrics-cmd.o \
|
||||
discovery.o io-cmd-file.o io-cmd-bdev.o
|
||||
@ -12,3 +13,4 @@ nvme-loop-y += loop.o
|
||||
nvmet-rdma-y += rdma.o
|
||||
nvmet-fc-y += fc.o
|
||||
nvme-fcloop-y += fcloop.o
|
||||
nvmet-tcp-y += tcp.o
|
||||
|
@ -37,6 +37,34 @@ static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
|
||||
nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
|
||||
}
|
||||
|
||||
static void nvmet_execute_get_log_page_error(struct nvmet_req *req)
|
||||
{
|
||||
struct nvmet_ctrl *ctrl = req->sq->ctrl;
|
||||
u16 status = NVME_SC_SUCCESS;
|
||||
unsigned long flags;
|
||||
off_t offset = 0;
|
||||
u64 slot;
|
||||
u64 i;
|
||||
|
||||
spin_lock_irqsave(&ctrl->error_lock, flags);
|
||||
slot = ctrl->err_counter % NVMET_ERROR_LOG_SLOTS;
|
||||
|
||||
for (i = 0; i < NVMET_ERROR_LOG_SLOTS; i++) {
|
||||
status = nvmet_copy_to_sgl(req, offset, &ctrl->slots[slot],
|
||||
sizeof(struct nvme_error_slot));
|
||||
if (status)
|
||||
break;
|
||||
|
||||
if (slot == 0)
|
||||
slot = NVMET_ERROR_LOG_SLOTS - 1;
|
||||
else
|
||||
slot--;
|
||||
offset += sizeof(struct nvme_error_slot);
|
||||
}
|
||||
spin_unlock_irqrestore(&ctrl->error_lock, flags);
|
||||
nvmet_req_complete(req, status);
|
||||
}
|
||||
|
||||
static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
|
||||
struct nvme_smart_log *slog)
|
||||
{
|
||||
@ -47,6 +75,7 @@ static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
|
||||
if (!ns) {
|
||||
pr_err("Could not find namespace id : %d\n",
|
||||
le32_to_cpu(req->cmd->get_log_page.nsid));
|
||||
req->error_loc = offsetof(struct nvme_rw_command, nsid);
|
||||
return NVME_SC_INVALID_NS;
|
||||
}
|
||||
|
||||
@ -106,6 +135,7 @@ static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
|
||||
{
|
||||
struct nvme_smart_log *log;
|
||||
u16 status = NVME_SC_INTERNAL;
|
||||
unsigned long flags;
|
||||
|
||||
if (req->data_len != sizeof(*log))
|
||||
goto out;
|
||||
@ -121,6 +151,11 @@ static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
|
||||
if (status)
|
||||
goto out_free_log;
|
||||
|
||||
spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
|
||||
put_unaligned_le64(req->sq->ctrl->err_counter,
|
||||
&log->num_err_log_entries);
|
||||
spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);
|
||||
|
||||
status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));
|
||||
out_free_log:
|
||||
kfree(log);
|
||||
@ -380,6 +415,7 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
|
||||
u16 status = 0;
|
||||
|
||||
if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) {
|
||||
req->error_loc = offsetof(struct nvme_identify, nsid);
|
||||
status = NVME_SC_INVALID_NS | NVME_SC_DNR;
|
||||
goto out;
|
||||
}
|
||||
@ -500,6 +536,7 @@ static void nvmet_execute_identify_desclist(struct nvmet_req *req)
|
||||
|
||||
ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
|
||||
if (!ns) {
|
||||
req->error_loc = offsetof(struct nvme_identify, nsid);
|
||||
status = NVME_SC_INVALID_NS | NVME_SC_DNR;
|
||||
goto out;
|
||||
}
|
||||
@ -557,13 +594,15 @@ static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req)
|
||||
|
||||
static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
|
||||
{
|
||||
u32 write_protect = le32_to_cpu(req->cmd->common.cdw10[1]);
|
||||
u32 write_protect = le32_to_cpu(req->cmd->common.cdw11);
|
||||
struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
|
||||
u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;
|
||||
|
||||
req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
|
||||
if (unlikely(!req->ns))
|
||||
if (unlikely(!req->ns)) {
|
||||
req->error_loc = offsetof(struct nvme_common_command, nsid);
|
||||
return status;
|
||||
}
|
||||
|
||||
mutex_lock(&subsys->lock);
|
||||
switch (write_protect) {
|
||||
@ -589,7 +628,7 @@ static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
|
||||
|
||||
u16 nvmet_set_feat_kato(struct nvmet_req *req)
|
||||
{
|
||||
u32 val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
|
||||
u32 val32 = le32_to_cpu(req->cmd->common.cdw11);
|
||||
|
||||
req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
|
||||
|
||||
@ -600,10 +639,12 @@ u16 nvmet_set_feat_kato(struct nvmet_req *req)
|
||||
|
||||
u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
|
||||
{
|
||||
u32 val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
|
||||
u32 val32 = le32_to_cpu(req->cmd->common.cdw11);
|
||||
|
||||
if (val32 & ~mask)
|
||||
if (val32 & ~mask) {
|
||||
req->error_loc = offsetof(struct nvme_common_command, cdw11);
|
||||
return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
|
||||
}
|
||||
|
||||
WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
|
||||
nvmet_set_result(req, val32);
|
||||
@ -614,7 +655,7 @@ u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask)
|
||||
static void nvmet_execute_set_features(struct nvmet_req *req)
|
||||
{
|
||||
struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
|
||||
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
|
||||
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
|
||||
u16 status = 0;
|
||||
|
||||
switch (cdw10 & 0xff) {
|
||||
@ -635,6 +676,7 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
|
||||
status = nvmet_set_feat_write_protect(req);
|
||||
break;
|
||||
default:
|
||||
req->error_loc = offsetof(struct nvme_common_command, cdw10);
|
||||
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
|
||||
break;
|
||||
}
|
||||
@ -648,9 +690,10 @@ static u16 nvmet_get_feat_write_protect(struct nvmet_req *req)
|
||||
u32 result;
|
||||
|
||||
req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
|
||||
if (!req->ns)
|
||||
if (!req->ns) {
|
||||
req->error_loc = offsetof(struct nvme_common_command, nsid);
|
||||
return NVME_SC_INVALID_NS | NVME_SC_DNR;
|
||||
|
||||
}
|
||||
mutex_lock(&subsys->lock);
|
||||
if (req->ns->readonly == true)
|
||||
result = NVME_NS_WRITE_PROTECT;
|
||||
@ -675,7 +718,7 @@ void nvmet_get_feat_async_event(struct nvmet_req *req)
|
||||
static void nvmet_execute_get_features(struct nvmet_req *req)
|
||||
{
|
||||
struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
|
||||
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
|
||||
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
|
||||
u16 status = 0;
|
||||
|
||||
switch (cdw10 & 0xff) {
|
||||
@ -715,7 +758,9 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
|
||||
break;
|
||||
case NVME_FEAT_HOST_ID:
|
||||
/* need 128-bit host identifier flag */
|
||||
if (!(req->cmd->common.cdw10[1] & cpu_to_le32(1 << 0))) {
|
||||
if (!(req->cmd->common.cdw11 & cpu_to_le32(1 << 0))) {
|
||||
req->error_loc =
|
||||
offsetof(struct nvme_common_command, cdw11);
|
||||
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
|
||||
break;
|
||||
}
|
||||
@ -727,6 +772,8 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
|
||||
status = nvmet_get_feat_write_protect(req);
|
||||
break;
|
||||
default:
|
||||
req->error_loc =
|
||||
offsetof(struct nvme_common_command, cdw10);
|
||||
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
|
||||
break;
|
||||
}
|
||||
@ -776,13 +823,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
|
||||
|
||||
switch (cmd->get_log_page.lid) {
|
||||
case NVME_LOG_ERROR:
|
||||
/*
|
||||
* We currently never set the More bit in the status
|
||||
* field, so all error log entries are invalid and can
|
||||
* be zeroed out. This is called a minum viable
|
||||
* implementation (TM) of this mandatory log page.
|
||||
*/
|
||||
req->execute = nvmet_execute_get_log_page_noop;
|
||||
req->execute = nvmet_execute_get_log_page_error;
|
||||
return 0;
|
||||
case NVME_LOG_SMART:
|
||||
req->execute = nvmet_execute_get_log_page_smart;
|
||||
@ -848,5 +889,6 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
|
||||
|
||||
pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
|
||||
req->sq->qid);
|
||||
req->error_loc = offsetof(struct nvme_common_command, opcode);
|
||||
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
|
||||
}
|
||||
|
@ -34,6 +34,7 @@ static const struct nvmet_transport_name {
|
||||
} nvmet_transport_names[] = {
|
||||
{ NVMF_TRTYPE_RDMA, "rdma" },
|
||||
{ NVMF_TRTYPE_FC, "fc" },
|
||||
{ NVMF_TRTYPE_TCP, "tcp" },
|
||||
{ NVMF_TRTYPE_LOOP, "loop" },
|
||||
};
|
||||
|
||||
|
@ -45,28 +45,72 @@ u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
|
||||
u64 nvmet_ana_chgcnt;
|
||||
DECLARE_RWSEM(nvmet_ana_sem);
|
||||
|
||||
inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
|
||||
{
|
||||
u16 status;
|
||||
|
||||
switch (errno) {
|
||||
case -ENOSPC:
|
||||
req->error_loc = offsetof(struct nvme_rw_command, length);
|
||||
status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
|
||||
break;
|
||||
case -EREMOTEIO:
|
||||
req->error_loc = offsetof(struct nvme_rw_command, slba);
|
||||
status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
|
||||
break;
|
||||
case -EOPNOTSUPP:
|
||||
req->error_loc = offsetof(struct nvme_common_command, opcode);
|
||||
switch (req->cmd->common.opcode) {
|
||||
case nvme_cmd_dsm:
|
||||
case nvme_cmd_write_zeroes:
|
||||
status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
|
||||
break;
|
||||
default:
|
||||
status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
|
||||
}
|
||||
break;
|
||||
case -ENODATA:
|
||||
req->error_loc = offsetof(struct nvme_rw_command, nsid);
|
||||
status = NVME_SC_ACCESS_DENIED;
|
||||
break;
|
||||
case -EIO:
|
||||
/* FALLTHRU */
|
||||
default:
|
||||
req->error_loc = offsetof(struct nvme_common_command, opcode);
|
||||
status = NVME_SC_INTERNAL | NVME_SC_DNR;
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
|
||||
const char *subsysnqn);
|
||||
|
||||
u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
|
||||
size_t len)
|
||||
{
|
||||
if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
|
||||
if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
|
||||
req->error_loc = offsetof(struct nvme_common_command, dptr);
|
||||
return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
|
||||
{
|
||||
if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len)
|
||||
if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
|
||||
req->error_loc = offsetof(struct nvme_common_command, dptr);
|
||||
return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
|
||||
{
|
||||
if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len)
|
||||
if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
|
||||
req->error_loc = offsetof(struct nvme_common_command, dptr);
|
||||
return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -611,14 +655,44 @@ static void nvmet_update_sq_head(struct nvmet_req *req)
|
||||
req->rsp->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
|
||||
}
|
||||
|
||||
static void nvmet_set_error(struct nvmet_req *req, u16 status)
|
||||
{
|
||||
struct nvmet_ctrl *ctrl = req->sq->ctrl;
|
||||
struct nvme_error_slot *new_error_slot;
|
||||
unsigned long flags;
|
||||
|
||||
req->rsp->status = cpu_to_le16(status << 1);
|
||||
|
||||
if (!ctrl || req->error_loc == -1)
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&ctrl->error_lock, flags);
|
||||
ctrl->err_counter++;
|
||||
new_error_slot =
|
||||
&ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];
|
||||
|
||||
new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
|
||||
new_error_slot->sqid = cpu_to_le16(req->sq->qid);
|
||||
new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
|
||||
new_error_slot->status_field = cpu_to_le16(status << 1);
|
||||
new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
|
||||
new_error_slot->lba = cpu_to_le64(req->error_slba);
|
||||
new_error_slot->nsid = req->cmd->common.nsid;
|
||||
spin_unlock_irqrestore(&ctrl->error_lock, flags);
|
||||
|
||||
/* set the more bit for this request */
|
||||
req->rsp->status |= cpu_to_le16(1 << 14);
|
||||
}
|
||||
|
||||
static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
|
||||
{
|
||||
if (!req->sq->sqhd_disabled)
|
||||
nvmet_update_sq_head(req);
|
||||
req->rsp->sq_id = cpu_to_le16(req->sq->qid);
|
||||
req->rsp->command_id = req->cmd->common.command_id;
|
||||
|
||||
if (unlikely(status))
|
||||
nvmet_set_status(req, status);
|
||||
nvmet_set_error(req, status);
|
||||
if (req->ns)
|
||||
nvmet_put_namespace(req->ns);
|
||||
req->ops->queue_response(req);
|
||||
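The core of the new error-log support is a small per-controller ring of NVMET_ERROR_LOG_SLOTS entries indexed by a monotonically increasing err_counter under error_lock; the Error Information log handler then walks the ring newest-first. A self-contained sketch of that indexing, with hypothetical foo_* names standing in for the nvmet types:

```c
#include <linux/spinlock.h>
#include <linux/types.h>

#define FOO_ERROR_LOG_SLOTS	128	/* NVMET_ERROR_LOG_SLOTS in the patch */

struct foo_err_entry {			/* stand-in for struct nvme_error_slot */
	u64 error_count;
	u16 status_field;
};

struct foo_ctrl {
	spinlock_t error_lock;
	u64 err_counter;		/* total errors ever recorded */
	struct foo_err_entry slots[FOO_ERROR_LOG_SLOTS];
};

/* Record one error; the newest entry lives at err_counter % SLOTS. */
static void foo_record_error(struct foo_ctrl *ctrl, u16 status)
{
	struct foo_err_entry *e;
	unsigned long flags;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	ctrl->err_counter++;
	e = &ctrl->slots[ctrl->err_counter % FOO_ERROR_LOG_SLOTS];
	e->error_count = ctrl->err_counter;
	e->status_field = status << 1;	/* stored shifted, as in the patch */
	spin_unlock_irqrestore(&ctrl->error_lock, flags);
}

/* Copy out newest-first, the way the get-log-page handler iterates. */
static void foo_read_log(struct foo_ctrl *ctrl, struct foo_err_entry *out, int n)
{
	unsigned long flags;
	u64 slot;
	int i;

	spin_lock_irqsave(&ctrl->error_lock, flags);
	slot = ctrl->err_counter % FOO_ERROR_LOG_SLOTS;
	for (i = 0; i < n; i++) {
		out[i] = ctrl->slots[slot];
		slot = slot ? slot - 1 : FOO_ERROR_LOG_SLOTS - 1;
	}
	spin_unlock_irqrestore(&ctrl->error_lock, flags);
}
```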
@ -739,14 +813,20 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
|
||||
return ret;
|
||||
|
||||
req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
|
||||
if (unlikely(!req->ns))
|
||||
if (unlikely(!req->ns)) {
|
||||
req->error_loc = offsetof(struct nvme_common_command, nsid);
|
||||
return NVME_SC_INVALID_NS | NVME_SC_DNR;
|
||||
}
|
||||
ret = nvmet_check_ana_state(req->port, req->ns);
|
||||
if (unlikely(ret))
|
||||
if (unlikely(ret)) {
|
||||
req->error_loc = offsetof(struct nvme_common_command, nsid);
|
||||
return ret;
|
||||
}
|
||||
ret = nvmet_io_cmd_check_access(req);
|
||||
if (unlikely(ret))
|
||||
if (unlikely(ret)) {
|
||||
req->error_loc = offsetof(struct nvme_common_command, nsid);
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (req->ns->file)
|
||||
return nvmet_file_parse_io_cmd(req);
|
||||
@ -769,9 +849,12 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
|
||||
req->rsp->status = 0;
|
||||
req->rsp->sq_head = 0;
|
||||
req->ns = NULL;
|
||||
req->error_loc = -1;
|
||||
req->error_slba = 0;
|
||||
|
||||
/* no support for fused commands yet */
|
||||
if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
|
||||
req->error_loc = offsetof(struct nvme_common_command, flags);
|
||||
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
|
||||
goto fail;
|
||||
}
|
||||
@ -782,6 +865,7 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
|
||||
* byte aligned.
|
||||
*/
|
||||
if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
|
||||
req->error_loc = offsetof(struct nvme_common_command, flags);
|
||||
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
|
||||
goto fail;
|
||||
}
|
||||
@ -827,9 +911,10 @@ EXPORT_SYMBOL_GPL(nvmet_req_uninit);
|
||||
|
||||
void nvmet_req_execute(struct nvmet_req *req)
|
||||
{
|
||||
if (unlikely(req->data_len != req->transfer_len))
|
||||
if (unlikely(req->data_len != req->transfer_len)) {
|
||||
req->error_loc = offsetof(struct nvme_common_command, dptr);
|
||||
nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
|
||||
else
|
||||
} else
|
||||
req->execute(req);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(nvmet_req_execute);
|
||||
@ -1174,6 +1259,9 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
|
||||
/* keep-alive timeout in seconds */
|
||||
ctrl->kato = DIV_ROUND_UP(kato, 1000);
|
||||
|
||||
ctrl->err_counter = 0;
|
||||
spin_lock_init(&ctrl->error_lock);
|
||||
|
||||
nvmet_start_keep_alive_timer(ctrl);
|
||||
|
||||
mutex_lock(&subsys->lock);
|
||||
|
@ -247,7 +247,7 @@ static void nvmet_execute_identify_disc_ctrl(struct nvmet_req *req)
|
||||
|
||||
static void nvmet_execute_disc_set_features(struct nvmet_req *req)
|
||||
{
|
||||
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
|
||||
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
|
||||
u16 stat;
|
||||
|
||||
switch (cdw10 & 0xff) {
|
||||
@ -259,6 +259,8 @@ static void nvmet_execute_disc_set_features(struct nvmet_req *req)
|
||||
NVMET_DISC_AEN_CFG_OPTIONAL);
|
||||
break;
|
||||
default:
|
||||
req->error_loc =
|
||||
offsetof(struct nvme_common_command, cdw10);
|
||||
stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
|
||||
break;
|
||||
}
|
||||
@ -268,7 +270,7 @@ static void nvmet_execute_disc_set_features(struct nvmet_req *req)
|
||||
|
||||
static void nvmet_execute_disc_get_features(struct nvmet_req *req)
|
||||
{
|
||||
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
|
||||
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
|
||||
u16 stat = 0;
|
||||
|
||||
switch (cdw10 & 0xff) {
|
||||
@ -279,6 +281,8 @@ static void nvmet_execute_disc_get_features(struct nvmet_req *req)
|
||||
nvmet_get_feat_async_event(req);
|
||||
break;
|
||||
default:
|
||||
req->error_loc =
|
||||
offsetof(struct nvme_common_command, cdw10);
|
||||
stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
|
||||
break;
|
||||
}
|
||||
@ -293,6 +297,8 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
|
||||
if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
|
||||
pr_err("got cmd %d while not ready\n",
|
||||
cmd->common.opcode);
|
||||
req->error_loc =
|
||||
offsetof(struct nvme_common_command, opcode);
|
||||
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
|
||||
}
|
||||
|
||||
@ -323,6 +329,8 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
|
||||
default:
|
||||
pr_err("unsupported get_log_page lid %d\n",
|
||||
cmd->get_log_page.lid);
|
||||
req->error_loc =
|
||||
offsetof(struct nvme_get_log_page_command, lid);
|
||||
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
|
||||
}
|
||||
case nvme_admin_identify:
|
||||
@ -335,10 +343,12 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
|
||||
default:
|
||||
pr_err("unsupported identify cns %d\n",
|
||||
cmd->identify.cns);
|
||||
req->error_loc = offsetof(struct nvme_identify, cns);
|
||||
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
|
||||
}
|
||||
default:
|
||||
pr_err("unhandled cmd %d\n", cmd->common.opcode);
|
||||
req->error_loc = offsetof(struct nvme_common_command, opcode);
|
||||
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
|
||||
}
|
||||
|
||||
|
@ -17,23 +17,26 @@
|
||||
|
||||
static void nvmet_execute_prop_set(struct nvmet_req *req)
|
||||
{
|
||||
u64 val = le64_to_cpu(req->cmd->prop_set.value);
|
||||
u16 status = 0;
|
||||
|
||||
if (!(req->cmd->prop_set.attrib & 1)) {
|
||||
u64 val = le64_to_cpu(req->cmd->prop_set.value);
|
||||
if (req->cmd->prop_set.attrib & 1) {
|
||||
req->error_loc =
|
||||
offsetof(struct nvmf_property_set_command, attrib);
|
||||
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
|
||||
goto out;
|
||||
}
|
||||
|
||||
switch (le32_to_cpu(req->cmd->prop_set.offset)) {
|
||||
case NVME_REG_CC:
|
||||
nvmet_update_cc(req->sq->ctrl, val);
|
||||
break;
|
||||
default:
|
||||
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
req->error_loc =
|
||||
offsetof(struct nvmf_property_set_command, offset);
|
||||
status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
|
||||
}
|
||||
|
||||
out:
|
||||
nvmet_req_complete(req, status);
|
||||
}
|
||||
|
||||
@ -69,6 +72,14 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
|
||||
}
|
||||
}
|
||||
|
||||
if (status && req->cmd->prop_get.attrib & 1) {
|
||||
req->error_loc =
|
||||
offsetof(struct nvmf_property_get_command, offset);
|
||||
} else {
|
||||
req->error_loc =
|
||||
offsetof(struct nvmf_property_get_command, attrib);
|
||||
}
|
||||
|
||||
req->rsp->result.u64 = cpu_to_le64(val);
|
||||
nvmet_req_complete(req, status);
|
||||
}
|
||||
@ -89,6 +100,7 @@ u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
|
||||
default:
|
||||
pr_err("received unknown capsule type 0x%x\n",
|
||||
cmd->fabrics.fctype);
|
||||
req->error_loc = offsetof(struct nvmf_common_command, fctype);
|
||||
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
|
||||
}
|
||||
|
||||
@ -105,10 +117,12 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
|
||||
old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
|
||||
if (old) {
|
||||
pr_warn("queue already connected!\n");
|
||||
req->error_loc = offsetof(struct nvmf_connect_command, opcode);
|
||||
return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
|
||||
}
|
||||
if (!sqsize) {
|
||||
pr_warn("queue size zero!\n");
|
||||
req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
|
||||
return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
|
||||
}
|
||||
|
||||
@ -121,6 +135,16 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
|
||||
req->rsp->sq_head = cpu_to_le16(0xffff);
|
||||
}
|
||||
|
||||
if (ctrl->ops->install_queue) {
|
||||
u16 ret = ctrl->ops->install_queue(req->sq);
|
||||
|
||||
if (ret) {
|
||||
pr_err("failed to install queue %d cntlid %d ret %x\n",
|
||||
qid, ret, ctrl->cntlid);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -147,6 +171,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
|
||||
if (c->recfmt != 0) {
|
||||
pr_warn("invalid connect version (%d).\n",
|
||||
le16_to_cpu(c->recfmt));
|
||||
req->error_loc = offsetof(struct nvmf_connect_command, recfmt);
|
||||
status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
|
||||
goto out;
|
||||
}
|
||||
@ -161,8 +186,13 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
|
||||
|
||||
status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
|
||||
le32_to_cpu(c->kato), &ctrl);
|
||||
if (status)
|
||||
if (status) {
|
||||
if (status == (NVME_SC_INVALID_FIELD | NVME_SC_DNR))
|
||||
req->error_loc =
|
||||
offsetof(struct nvme_common_command, opcode);
|
||||
goto out;
|
||||
}
|
||||
|
||||
uuid_copy(&ctrl->hostid, &d->hostid);
|
||||
|
||||
status = nvmet_install_queue(ctrl, req);
|
||||
@ -249,11 +279,13 @@ u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
|
||||
if (cmd->common.opcode != nvme_fabrics_command) {
|
||||
pr_err("invalid command 0x%x on unconnected queue.\n",
|
||||
cmd->fabrics.opcode);
|
||||
req->error_loc = offsetof(struct nvme_common_command, opcode);
|
||||
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
|
||||
}
|
||||
if (cmd->fabrics.fctype != nvme_fabrics_type_connect) {
|
||||
pr_err("invalid capsule type 0x%x on unconnected queue.\n",
|
||||
cmd->fabrics.fctype);
|
||||
req->error_loc = offsetof(struct nvmf_common_command, fctype);
|
||||
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
|
||||
}
|
||||
|
||||
|
@ -44,13 +44,69 @@ void nvmet_bdev_ns_disable(struct nvmet_ns *ns)
|
||||
}
|
||||
}
|
||||
|
||||
static u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
|
||||
{
|
||||
u16 status = NVME_SC_SUCCESS;
|
||||
|
||||
if (likely(blk_sts == BLK_STS_OK))
|
||||
return status;
|
||||
/*
|
||||
* Right now there exists M : 1 mapping between block layer error
|
||||
* to the NVMe status code (see nvme_error_status()). For consistency,
|
||||
* when we reverse map we use most appropriate NVMe Status code from
|
||||
* the group of the NVMe staus codes used in the nvme_error_status().
|
||||
*/
|
||||
switch (blk_sts) {
|
||||
case BLK_STS_NOSPC:
|
||||
status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
|
||||
req->error_loc = offsetof(struct nvme_rw_command, length);
|
||||
break;
|
||||
case BLK_STS_TARGET:
|
||||
status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
|
||||
req->error_loc = offsetof(struct nvme_rw_command, slba);
|
||||
break;
|
||||
case BLK_STS_NOTSUPP:
|
||||
req->error_loc = offsetof(struct nvme_common_command, opcode);
|
||||
switch (req->cmd->common.opcode) {
|
||||
case nvme_cmd_dsm:
|
||||
case nvme_cmd_write_zeroes:
|
||||
status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
|
||||
break;
|
||||
default:
|
||||
status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
|
||||
}
|
||||
break;
|
||||
case BLK_STS_MEDIUM:
|
||||
status = NVME_SC_ACCESS_DENIED;
|
||||
req->error_loc = offsetof(struct nvme_rw_command, nsid);
|
||||
break;
|
||||
case BLK_STS_IOERR:
|
||||
/* fallthru */
|
||||
default:
|
||||
status = NVME_SC_INTERNAL | NVME_SC_DNR;
|
||||
req->error_loc = offsetof(struct nvme_common_command, opcode);
|
||||
}
|
||||
|
||||
switch (req->cmd->common.opcode) {
|
||||
case nvme_cmd_read:
|
||||
case nvme_cmd_write:
|
||||
req->error_slba = le64_to_cpu(req->cmd->rw.slba);
|
||||
break;
|
||||
case nvme_cmd_write_zeroes:
|
||||
req->error_slba =
|
||||
le64_to_cpu(req->cmd->write_zeroes.slba);
|
||||
break;
|
||||
default:
|
||||
req->error_slba = 0;
|
||||
}
|
||||
return status;
|
||||
}
|
||||
|
||||
static void nvmet_bio_done(struct bio *bio)
|
||||
{
|
||||
struct nvmet_req *req = bio->bi_private;
|
||||
|
||||
nvmet_req_complete(req,
|
||||
bio->bi_status ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
|
||||
|
||||
nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status));
|
||||
if (bio != &req->b.inline_bio)
|
||||
bio_put(bio);
|
||||
}
|
||||
@ -61,7 +117,6 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
|
||||
struct bio *bio;
|
||||
struct scatterlist *sg;
|
||||
sector_t sector;
|
||||
blk_qc_t cookie;
|
||||
int op, op_flags = 0, i;
|
||||
|
||||
if (!req->sg_cnt) {
|
||||
@ -114,7 +169,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
|
||||
sg_cnt--;
|
||||
}
|
||||
|
||||
cookie = submit_bio(bio);
|
||||
submit_bio(bio);
|
||||
}
|
||||
|
||||
static void nvmet_bdev_execute_flush(struct nvmet_req *req)
|
||||
@ -137,18 +192,21 @@ u16 nvmet_bdev_flush(struct nvmet_req *req)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static u16 nvmet_bdev_discard_range(struct nvmet_ns *ns,
|
||||
static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
|
||||
struct nvme_dsm_range *range, struct bio **bio)
|
||||
{
|
||||
struct nvmet_ns *ns = req->ns;
|
||||
int ret;
|
||||
|
||||
ret = __blkdev_issue_discard(ns->bdev,
|
||||
le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
|
||||
le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
|
||||
GFP_KERNEL, 0, bio);
|
||||
if (ret && ret != -EOPNOTSUPP)
|
||||
return NVME_SC_INTERNAL | NVME_SC_DNR;
|
||||
return 0;
|
||||
|
||||
if (ret)
|
||||
req->error_slba = le64_to_cpu(range->slba);
|
||||
|
||||
return blk_to_nvme_status(req, errno_to_blk_status(ret));
|
||||
}
|
||||
|
||||
static void nvmet_bdev_execute_discard(struct nvmet_req *req)
|
||||
@ -164,7 +222,7 @@ static void nvmet_bdev_execute_discard(struct nvmet_req *req)
|
||||
if (status)
|
||||
break;
|
||||
|
||||
status = nvmet_bdev_discard_range(req->ns, &range, &bio);
|
||||
status = nvmet_bdev_discard_range(req, &range, &bio);
|
||||
if (status)
|
||||
break;
|
||||
}
|
||||
@ -205,16 +263,16 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
|
||||
u16 status = NVME_SC_SUCCESS;
|
||||
sector_t sector;
|
||||
sector_t nr_sector;
|
||||
int ret;
|
||||
|
||||
sector = le64_to_cpu(write_zeroes->slba) <<
|
||||
(req->ns->blksize_shift - 9);
|
||||
nr_sector = (((sector_t)le16_to_cpu(write_zeroes->length) + 1) <<
|
||||
(req->ns->blksize_shift - 9));
|
||||
|
||||
if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
|
||||
GFP_KERNEL, &bio, 0))
|
||||
status = NVME_SC_INTERNAL | NVME_SC_DNR;
|
||||
|
||||
ret = __blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
|
||||
GFP_KERNEL, &bio, 0);
|
||||
status = blk_to_nvme_status(req, errno_to_blk_status(ret));
|
||||
if (bio) {
|
||||
bio->bi_private = req;
|
||||
bio->bi_end_io = nvmet_bio_done;
|
||||
@ -249,6 +307,7 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
|
||||
default:
|
||||
pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
|
||||
req->sq->qid);
|
||||
req->error_loc = offsetof(struct nvme_common_command, opcode);
|
||||
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
|
||||
}
|
||||
}
|
||||
|
@ -112,6 +112,7 @@ static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
|
||||
static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
|
||||
{
|
||||
struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
|
||||
u16 status = NVME_SC_SUCCESS;
|
||||
|
||||
if (req->f.bvec != req->inline_bvec) {
|
||||
if (likely(req->f.mpool_alloc == false))
|
||||
@ -120,8 +121,9 @@ static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
|
||||
mempool_free(req->f.bvec, req->ns->bvec_pool);
|
||||
}
|
||||
|
||||
nvmet_req_complete(req, ret != req->data_len ?
|
||||
NVME_SC_INTERNAL | NVME_SC_DNR : 0);
|
||||
if (unlikely(ret != req->data_len))
|
||||
status = errno_to_nvme_status(req, ret);
|
||||
nvmet_req_complete(req, status);
|
||||
}
|
||||
|
||||
static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
|
||||
@ -140,7 +142,7 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
|
||||
|
||||
pos = le64_to_cpu(req->cmd->rw.slba) << req->ns->blksize_shift;
|
||||
if (unlikely(pos + req->data_len > req->ns->size)) {
|
||||
nvmet_req_complete(req, NVME_SC_LBA_RANGE | NVME_SC_DNR);
|
||||
nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -254,9 +256,7 @@ static void nvmet_file_execute_rw(struct nvmet_req *req)
|
||||
|
||||
u16 nvmet_file_flush(struct nvmet_req *req)
|
||||
{
|
||||
if (vfs_fsync(req->ns->file, 1) < 0)
|
||||
return NVME_SC_INTERNAL | NVME_SC_DNR;
|
||||
return 0;
|
||||
return errno_to_nvme_status(req, vfs_fsync(req->ns->file, 1));
|
||||
}
|
||||
|
||||
static void nvmet_file_flush_work(struct work_struct *w)
|
||||
@ -277,30 +277,34 @@ static void nvmet_file_execute_discard(struct nvmet_req *req)
|
||||
int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;
|
||||
struct nvme_dsm_range range;
|
||||
loff_t offset, len;
|
||||
u16 ret;
|
||||
u16 status = 0;
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
for (i = 0; i <= le32_to_cpu(req->cmd->dsm.nr); i++) {
|
||||
ret = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
|
||||
status = nvmet_copy_from_sgl(req, i * sizeof(range), &range,
|
||||
sizeof(range));
|
||||
if (ret)
|
||||
if (status)
|
||||
break;
|
||||
|
||||
offset = le64_to_cpu(range.slba) << req->ns->blksize_shift;
|
||||
len = le32_to_cpu(range.nlb);
|
||||
len <<= req->ns->blksize_shift;
|
||||
if (offset + len > req->ns->size) {
|
||||
ret = NVME_SC_LBA_RANGE | NVME_SC_DNR;
|
||||
req->error_slba = le64_to_cpu(range.slba);
|
||||
status = errno_to_nvme_status(req, -ENOSPC);
|
||||
break;
|
||||
}
|
||||
|
||||
if (vfs_fallocate(req->ns->file, mode, offset, len)) {
|
||||
ret = NVME_SC_INTERNAL | NVME_SC_DNR;
|
||||
ret = vfs_fallocate(req->ns->file, mode, offset, len);
|
||||
if (ret) {
|
||||
req->error_slba = le64_to_cpu(range.slba);
|
||||
status = errno_to_nvme_status(req, ret);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
nvmet_req_complete(req, ret);
|
||||
nvmet_req_complete(req, status);
|
||||
}
|
||||
|
||||
static void nvmet_file_dsm_work(struct work_struct *w)
|
||||
@ -340,12 +344,12 @@ static void nvmet_file_write_zeroes_work(struct work_struct *w)
|
||||
req->ns->blksize_shift);
|
||||
|
||||
if (unlikely(offset + len > req->ns->size)) {
|
||||
nvmet_req_complete(req, NVME_SC_LBA_RANGE | NVME_SC_DNR);
|
||||
nvmet_req_complete(req, errno_to_nvme_status(req, -ENOSPC));
|
||||
return;
|
||||
}
|
||||
|
||||
ret = vfs_fallocate(req->ns->file, mode, offset, len);
|
||||
nvmet_req_complete(req, ret < 0 ? NVME_SC_INTERNAL | NVME_SC_DNR : 0);
|
||||
nvmet_req_complete(req, ret < 0 ? errno_to_nvme_status(req, ret) : 0);
|
||||
}
|
||||
|
||||
static void nvmet_file_execute_write_zeroes(struct nvmet_req *req)
|
||||
@ -380,6 +384,7 @@ u16 nvmet_file_parse_io_cmd(struct nvmet_req *req)
|
||||
default:
|
||||
pr_err("unhandled cmd for file ns %d on qid %d\n",
|
||||
cmd->common.opcode, req->sq->qid);
|
||||
req->error_loc = offsetof(struct nvme_common_command, opcode);
|
||||
return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
|
||||
}
|
||||
}
|
||||
|
@ -202,6 +202,10 @@ struct nvmet_ctrl {
|
||||
|
||||
struct device *p2p_client;
|
||||
struct radix_tree_root p2p_ns_map;
|
||||
|
||||
spinlock_t error_lock;
|
||||
u64 err_counter;
|
||||
struct nvme_error_slot slots[NVMET_ERROR_LOG_SLOTS];
|
||||
};
|
||||
|
||||
struct nvmet_subsys {
|
||||
@ -279,6 +283,7 @@ struct nvmet_fabrics_ops {
|
||||
void (*delete_ctrl)(struct nvmet_ctrl *ctrl);
|
||||
void (*disc_traddr)(struct nvmet_req *req,
|
||||
struct nvmet_port *port, char *traddr);
|
||||
u16 (*install_queue)(struct nvmet_sq *nvme_sq);
|
||||
};
|
||||
|
||||
#define NVMET_MAX_INLINE_BIOVEC 8
|
||||
@ -316,15 +321,12 @@ struct nvmet_req {
|
||||
|
||||
struct pci_dev *p2p_dev;
|
||||
struct device *p2p_client;
|
||||
u16 error_loc;
|
||||
u64 error_slba;
|
||||
};
|
||||
|
||||
extern struct workqueue_struct *buffered_io_wq;
|
||||
|
||||
static inline void nvmet_set_status(struct nvmet_req *req, u16 status)
|
||||
{
|
||||
req->rsp->status = cpu_to_le16(status << 1);
|
||||
}
|
||||
|
||||
static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
|
||||
{
|
||||
req->rsp->result.u32 = cpu_to_le32(result);
|
||||
@ -348,7 +350,7 @@ struct nvmet_async_event {
|
||||
|
||||
static inline void nvmet_clear_aen_bit(struct nvmet_req *req, u32 bn)
|
||||
{
|
||||
int rae = le32_to_cpu(req->cmd->common.cdw10[0]) & 1 << 15;
|
||||
int rae = le32_to_cpu(req->cmd->common.cdw10) & 1 << 15;
|
||||
|
||||
if (!rae)
|
||||
clear_bit(bn, &req->sq->ctrl->aen_masked);
|
||||
@ -492,4 +494,6 @@ static inline u32 nvmet_rw_len(struct nvmet_req *req)
|
||||
return ((u32)le16_to_cpu(req->cmd->rw.length) + 1) <<
|
||||
req->ns->blksize_shift;
|
||||
}
|
||||
|
||||
u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
|
||||
#endif /* _NVMET_H */
|
||||
|
@ -630,8 +630,11 @@ static u16 nvmet_rdma_map_sgl_inline(struct nvmet_rdma_rsp *rsp)
|
||||
u64 off = le64_to_cpu(sgl->addr);
|
||||
u32 len = le32_to_cpu(sgl->length);
|
||||
|
||||
if (!nvme_is_write(rsp->req.cmd))
|
||||
if (!nvme_is_write(rsp->req.cmd)) {
|
||||
rsp->req.error_loc =
|
||||
offsetof(struct nvme_common_command, opcode);
|
||||
return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
|
||||
}
|
||||
|
||||
if (off + len > rsp->queue->dev->inline_data_size) {
|
||||
pr_err("invalid inline data offset!\n");
|
||||
@ -696,6 +699,8 @@ static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
|
||||
return nvmet_rdma_map_sgl_inline(rsp);
|
||||
default:
|
||||
pr_err("invalid SGL subtype: %#x\n", sgl->type);
|
||||
rsp->req.error_loc =
|
||||
offsetof(struct nvme_common_command, dptr);
|
||||
return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
|
||||
}
|
||||
case NVME_KEY_SGL_FMT_DATA_DESC:
|
||||
@ -706,10 +711,13 @@ static u16 nvmet_rdma_map_sgl(struct nvmet_rdma_rsp *rsp)
|
||||
return nvmet_rdma_map_sgl_keyed(rsp, sgl, false);
|
||||
default:
|
||||
pr_err("invalid SGL subtype: %#x\n", sgl->type);
|
||||
rsp->req.error_loc =
|
||||
offsetof(struct nvme_common_command, dptr);
|
||||
return NVME_SC_INVALID_FIELD | NVME_SC_DNR;
|
||||
}
|
||||
default:
|
||||
pr_err("invalid SGL type: %#x\n", sgl->type);
|
||||
rsp->req.error_loc = offsetof(struct nvme_common_command, dptr);
|
||||
return NVME_SC_SGL_INVALID_TYPE | NVME_SC_DNR;
|
||||
}
|
||||
}
|
||||
|
drivers/nvme/target/tcp.c (new file, 1737 lines; diff suppressed because it is too large)
@@ -4,7 +4,7 @@
 struct blk_mq_tag_set;
 struct ib_device;
 
-int blk_mq_rdma_map_queues(struct blk_mq_tag_set *set,
+int blk_mq_rdma_map_queues(struct blk_mq_queue_map *map,
 		struct ib_device *dev, int first_vec);
 
 #endif /* _LINUX_BLK_MQ_RDMA_H */

include/linux/nvme-tcp.h (new file, 189 lines)
@@ -0,0 +1,189 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* NVMe over Fabrics TCP protocol header.
|
||||
* Copyright (c) 2018 Lightbits Labs. All rights reserved.
|
||||
*/
|
||||
|
||||
#ifndef _LINUX_NVME_TCP_H
|
||||
#define _LINUX_NVME_TCP_H
|
||||
|
||||
#include <linux/nvme.h>
|
||||
|
||||
#define NVME_TCP_DISC_PORT 8009
|
||||
#define NVME_TCP_ADMIN_CCSZ SZ_8K
|
||||
#define NVME_TCP_DIGEST_LENGTH 4
|
||||
|
||||
enum nvme_tcp_pfv {
|
||||
NVME_TCP_PFV_1_0 = 0x0,
|
||||
};
|
||||
|
||||
enum nvme_tcp_fatal_error_status {
|
||||
NVME_TCP_FES_INVALID_PDU_HDR = 0x01,
|
||||
NVME_TCP_FES_PDU_SEQ_ERR = 0x02,
|
||||
NVME_TCP_FES_HDR_DIGEST_ERR = 0x03,
|
||||
NVME_TCP_FES_DATA_OUT_OF_RANGE = 0x04,
|
||||
NVME_TCP_FES_R2T_LIMIT_EXCEEDED = 0x05,
|
||||
NVME_TCP_FES_DATA_LIMIT_EXCEEDED = 0x05,
|
||||
NVME_TCP_FES_UNSUPPORTED_PARAM = 0x06,
|
||||
};
|
||||
|
||||
enum nvme_tcp_digest_option {
|
||||
NVME_TCP_HDR_DIGEST_ENABLE = (1 << 0),
|
||||
NVME_TCP_DATA_DIGEST_ENABLE = (1 << 1),
|
||||
};
|
||||
|
||||
enum nvme_tcp_pdu_type {
|
||||
nvme_tcp_icreq = 0x0,
|
||||
nvme_tcp_icresp = 0x1,
|
||||
nvme_tcp_h2c_term = 0x2,
|
||||
nvme_tcp_c2h_term = 0x3,
|
||||
nvme_tcp_cmd = 0x4,
|
||||
nvme_tcp_rsp = 0x5,
|
||||
nvme_tcp_h2c_data = 0x6,
|
||||
nvme_tcp_c2h_data = 0x7,
|
||||
nvme_tcp_r2t = 0x9,
|
||||
};
|
||||
|
||||
enum nvme_tcp_pdu_flags {
|
||||
NVME_TCP_F_HDGST = (1 << 0),
|
||||
NVME_TCP_F_DDGST = (1 << 1),
|
||||
NVME_TCP_F_DATA_LAST = (1 << 2),
|
||||
NVME_TCP_F_DATA_SUCCESS = (1 << 3),
|
||||
};
|
||||
|
||||
/**
|
||||
* struct nvme_tcp_hdr - nvme tcp pdu common header
|
||||
*
|
||||
* @type: pdu type
|
||||
* @flags: pdu specific flags
|
||||
* @hlen: pdu header length
|
||||
* @pdo: pdu data offset
|
||||
* @plen: pdu wire byte length
|
||||
*/
|
||||
struct nvme_tcp_hdr {
|
||||
__u8 type;
|
||||
__u8 flags;
|
||||
__u8 hlen;
|
||||
__u8 pdo;
|
||||
__le32 plen;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct nvme_tcp_icreq_pdu - nvme tcp initialize connection request pdu
|
||||
*
|
||||
* @hdr: pdu generic header
|
||||
* @pfv: pdu version format
|
||||
* @hpda: host pdu data alignment (dwords, 0's based)
|
||||
* @digest: digest types enabled
|
||||
* @maxr2t: maximum r2ts per request supported
|
||||
*/
|
||||
struct nvme_tcp_icreq_pdu {
|
||||
struct nvme_tcp_hdr hdr;
|
||||
__le16 pfv;
|
||||
__u8 hpda;
|
||||
__u8 digest;
|
||||
__le32 maxr2t;
|
||||
__u8 rsvd2[112];
|
||||
};
|
||||
|
||||
/**
|
||||
* struct nvme_tcp_icresp_pdu - nvme tcp initialize connection response pdu
|
||||
*
|
||||
* @hdr: pdu common header
|
||||
* @pfv: pdu version format
|
||||
* @cpda: controller pdu data alignment (dowrds, 0's based)
|
||||
* @digest: digest types enabled
|
||||
* @maxdata: maximum data capsules per r2t supported
|
||||
*/
|
||||
struct nvme_tcp_icresp_pdu {
|
||||
struct nvme_tcp_hdr hdr;
|
||||
__le16 pfv;
|
||||
__u8 cpda;
|
||||
__u8 digest;
|
||||
__le32 maxdata;
|
||||
__u8 rsvd[112];
|
||||
};
|
||||
|
||||
/**
|
||||
* struct nvme_tcp_term_pdu - nvme tcp terminate connection pdu
|
||||
*
|
||||
* @hdr: pdu common header
|
||||
* @fes: fatal error status
|
||||
* @fei: fatal error information
|
||||
*/
|
||||
struct nvme_tcp_term_pdu {
|
||||
struct nvme_tcp_hdr hdr;
|
||||
__le16 fes;
|
||||
__le32 fei;
|
||||
__u8 rsvd[8];
|
||||
};
|
||||
|
||||
/**
|
||||
* struct nvme_tcp_cmd_pdu - nvme tcp command capsule pdu
|
||||
*
|
||||
* @hdr: pdu common header
|
||||
* @cmd: nvme command
|
||||
*/
|
||||
struct nvme_tcp_cmd_pdu {
|
||||
struct nvme_tcp_hdr hdr;
|
||||
struct nvme_command cmd;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct nvme_tcp_rsp_pdu - nvme tcp response capsule pdu
|
||||
*
|
||||
* @hdr: pdu common header
|
||||
* @hdr: nvme-tcp generic header
|
||||
* @cqe: nvme completion queue entry
|
||||
*/
|
||||
struct nvme_tcp_rsp_pdu {
|
||||
struct nvme_tcp_hdr hdr;
|
||||
struct nvme_completion cqe;
|
||||
};
|
||||
|
||||
/**
|
||||
* struct nvme_tcp_r2t_pdu - nvme tcp ready-to-transfer pdu
|
||||
*
|
||||
* @hdr: pdu common header
|
||||
* @command_id: nvme command identifier which this relates to
|
||||
* @ttag: transfer tag (controller generated)
|
||||
* @r2t_offset: offset from the start of the command data
|
||||
* @r2t_length: length the host is allowed to send
|
||||
*/
|
||||
struct nvme_tcp_r2t_pdu {
|
||||
struct nvme_tcp_hdr hdr;
|
||||
__u16 command_id;
|
||||
__u16 ttag;
|
||||
__le32 r2t_offset;
|
||||
__le32 r2t_length;
|
||||
__u8 rsvd[4];
|
||||
};
|
||||
|
||||
/**
|
||||
* struct nvme_tcp_data_pdu - nvme tcp data pdu
|
||||
*
|
||||
* @hdr: pdu common header
|
||||
* @command_id: nvme command identifier which this relates to
|
||||
* @ttag: transfer tag (controller generated)
|
||||
* @data_offset: offset from the start of the command data
|
||||
* @data_length: length of the data stream
|
||||
*/
|
||||
struct nvme_tcp_data_pdu {
|
||||
struct nvme_tcp_hdr hdr;
|
||||
__u16 command_id;
|
||||
__u16 ttag;
|
||||
__le32 data_offset;
|
||||
__le32 data_length;
|
||||
__u8 rsvd[4];
|
||||
};
|
||||
|
||||
union nvme_tcp_pdu {
|
||||
struct nvme_tcp_icreq_pdu icreq;
|
||||
struct nvme_tcp_icresp_pdu icresp;
|
||||
struct nvme_tcp_cmd_pdu cmd;
|
||||
struct nvme_tcp_rsp_pdu rsp;
|
||||
struct nvme_tcp_r2t_pdu r2t;
|
||||
struct nvme_tcp_data_pdu data;
|
||||
};
|
||||
|
||||
#endif /* _LINUX_NVME_TCP_H */
|
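As a reading aid, here is a small sketch (not part of the patch) of how a host might fill in an initialize-connection request PDU before writing it to the socket; the example_ function is hypothetical, everything it touches comes from the header above.

/* Illustrative sketch: prepare an ICReq PDU with optional digests. */
static void example_init_icreq(struct nvme_tcp_icreq_pdu *icreq,
			       bool hdr_digest, bool data_digest)
{
	memset(icreq, 0, sizeof(*icreq));
	icreq->hdr.type = nvme_tcp_icreq;
	icreq->hdr.hlen = sizeof(*icreq);
	icreq->hdr.pdo = 0;
	icreq->hdr.plen = cpu_to_le32(sizeof(*icreq));
	icreq->pfv = cpu_to_le16(NVME_TCP_PFV_1_0);
	icreq->hpda = 0;	/* no extra data alignment */
	icreq->maxr2t = 0;	/* 0's based: one outstanding r2t */
	if (hdr_digest)
		icreq->digest |= NVME_TCP_HDR_DIGEST_ENABLE;
	if (data_digest)
		icreq->digest |= NVME_TCP_DATA_DIGEST_ENABLE;
}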
@ -52,6 +52,7 @@ enum {
enum {
NVMF_TRTYPE_RDMA = 1, /* RDMA */
NVMF_TRTYPE_FC = 2, /* Fibre Channel */
NVMF_TRTYPE_TCP = 3, /* TCP/IP */
NVMF_TRTYPE_LOOP = 254, /* Reserved for host usage */
NVMF_TRTYPE_MAX,
};
@ -661,7 +662,12 @@ struct nvme_common_command {
__le32 cdw2[2];
__le64 metadata;
union nvme_data_ptr dptr;
__le32 cdw10[6];
__le32 cdw10;
__le32 cdw11;
__le32 cdw12;
__le32 cdw13;
__le32 cdw14;
__le32 cdw15;
};

struct nvme_rw_command {
@ -1162,6 +1168,20 @@ struct nvme_command {
};
};

struct nvme_error_slot {
__le64 error_count;
__le16 sqid;
__le16 cmdid;
__le16 status_field;
__le16 param_error_location;
__le64 lba;
__le32 nsid;
__u8 vs;
__u8 resv[3];
__le64 cs;
__u8 resv2[24];
};

static inline bool nvme_is_write(struct nvme_command *cmd)
{
/*
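A minimal sketch of how one error-log slot could be filled for a failed command follows; only struct nvme_error_slot and its fields come from the hunk above, the helper and its arguments are hypothetical, and real implementations keep the slots and the running error count in their own controller state.

/* Illustrative sketch: record a failed command in an error log slot. */
static void example_fill_error_slot(struct nvme_error_slot *slot,
				    u64 error_count, u16 sqid, u16 cmdid,
				    u16 status, u16 location, u64 lba, u32 nsid)
{
	memset(slot, 0, sizeof(*slot));
	slot->error_count = cpu_to_le64(error_count);
	slot->sqid = cpu_to_le16(sqid);
	slot->cmdid = cpu_to_le16(cmdid);
	slot->status_field = cpu_to_le16(status << 1); /* bit 0 is the phase tag */
	slot->param_error_location = cpu_to_le16(location);
	slot->lba = cpu_to_le64(lba);
	slot->nsid = cpu_to_le32(nsid);
}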
@ -3325,6 +3325,9 @@ static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
}
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb, int hlen,
struct msghdr *msg);
int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
struct iov_iter *to, int len,
struct ahash_request *hash);
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
struct iov_iter *from, int len);
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *frm);
@ -11,6 +11,7 @@

#include <linux/kernel.h>
#include <linux/thread_info.h>
#include <crypto/hash.h>
#include <uapi/linux/uio.h>

struct page;
@ -266,9 +267,11 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
{
i->count = count;
}
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
struct iov_iter *i);

int import_iovec(int type, const struct iovec __user * uvector,
unsigned nr_segs, unsigned fast_segs,
@ -6,6 +6,7 @@
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <net/checksum.h>
#include <linux/scatterlist.h>

#define PIPE_PARANOIA /* for now */

@ -1464,10 +1465,11 @@ bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum,
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
struct iov_iter *i)
{
const char *from = addr;
__wsum *csum = csump;
__wsum sum, next;
size_t off = 0;

@ -1510,6 +1512,21 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
struct iov_iter *i)
{
struct ahash_request *hash = hashp;
struct scatterlist sg;
size_t copied;

copied = copy_to_iter(addr, bytes, i);
sg_init_one(&sg, addr, copied);
ahash_request_set_crypt(hash, &sg, NULL, copied);
crypto_ahash_update(hash);
return copied;
}
EXPORT_SYMBOL(hash_and_copy_to_iter);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
size_t size = i->count;
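For context, a sketch of how hash_and_copy_to_iter() can be driven with a crc32c ahash to accumulate a data digest while copying; the allocation and error handling are deliberately simplified and the example_ function is made up, while crypto_alloc_ahash(), ahash_request_alloc() and the other calls below are the existing crypto API.

/*
 * Illustrative sketch: copy a buffer into an iov_iter while feeding the
 * same bytes into a crc32c ahash, then read back the 4-byte digest.
 */
static int example_copy_with_digest(const void *buf, size_t len,
				    struct iov_iter *iter, __le32 *digest)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	size_t copied;

	tfm = crypto_alloc_ahash("crc32c", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	ahash_request_set_callback(req, 0, NULL, NULL);
	crypto_ahash_init(req);

	/* copy and hash in one pass */
	copied = hash_and_copy_to_iter(buf, len, req, iter);

	/* finalize the digest into the caller's buffer */
	ahash_request_set_crypt(req, NULL, (u8 *)digest, 0);
	crypto_ahash_final(req);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return copied == len ? 0 : -EFAULT;
}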
@ -408,27 +408,20 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
}
EXPORT_SYMBOL(skb_kill_datagram);

/**
* skb_copy_datagram_iter - Copy a datagram to an iovec iterator.
* @skb: buffer to copy
* @offset: offset in the buffer to start copying from
* @to: iovec iterator to copy to
* @len: amount of data to copy from buffer to iovec
*/
int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
struct iov_iter *to, int len)
int __skb_datagram_iter(const struct sk_buff *skb, int offset,
struct iov_iter *to, int len, bool fault_short,
size_t (*cb)(const void *, size_t, void *, struct iov_iter *),
void *data)
{
int start = skb_headlen(skb);
int i, copy = start - offset, start_off = offset, n;
struct sk_buff *frag_iter;

trace_skb_copy_datagram_iovec(skb, len);

/* Copy header. */
if (copy > 0) {
if (copy > len)
copy = len;
n = copy_to_iter(skb->data + offset, copy, to);
n = cb(skb->data + offset, copy, data, to);
offset += n;
if (n != copy)
goto short_copy;
@ -445,11 +438,14 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,

end = start + skb_frag_size(frag);
if ((copy = end - offset) > 0) {
struct page *page = skb_frag_page(frag);
u8 *vaddr = kmap(page);

if (copy > len)
copy = len;
n = copy_page_to_iter(skb_frag_page(frag),
frag->page_offset + offset -
start, copy, to);
n = cb(vaddr + frag->page_offset +
offset - start, copy, data, to);
kunmap(page);
offset += n;
if (n != copy)
goto short_copy;
@ -468,8 +464,8 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
if ((copy = end - offset) > 0) {
if (copy > len)
copy = len;
if (skb_copy_datagram_iter(frag_iter, offset - start,
to, copy))
if (__skb_datagram_iter(frag_iter, offset - start,
to, copy, fault_short, cb, data))
goto fault;
if ((len -= copy) == 0)
return 0;
@ -490,11 +486,50 @@ int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
return -EFAULT;

short_copy:
if (iov_iter_count(to))
if (fault_short || iov_iter_count(to))
goto fault;

return 0;
}
/**
* skb_copy_and_hash_datagram_iter - Copy datagram to an iovec iterator
* and update a hash.
* @skb: buffer to copy
* @offset: offset in the buffer to start copying from
* @to: iovec iterator to copy to
* @len: amount of data to copy from buffer to iovec
* @hash: hash request to update
*/
int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
struct iov_iter *to, int len,
struct ahash_request *hash)
{
return __skb_datagram_iter(skb, offset, to, len, true,
hash_and_copy_to_iter, hash);
}
EXPORT_SYMBOL(skb_copy_and_hash_datagram_iter);
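A short usage sketch (not from the patch): a receive path that copies skb payload into an iterator while updating a previously initialized crc32c ahash request, the pattern a data-digest-capable transport can use; the example_ function and its arguments are hypothetical.

/*
 * Illustrative sketch: copy 'len' bytes from an skb into the caller's
 * iov_iter while feeding the same bytes into 'req', which the caller
 * initialized with crypto_ahash_init(); finalize once all data arrived.
 */
static int example_recv_with_digest(const struct sk_buff *skb, int offset,
				    struct iov_iter *to, int len, bool last,
				    struct ahash_request *req, __le32 *digest)
{
	int ret;

	ret = skb_copy_and_hash_datagram_iter(skb, offset, to, len, req);
	if (ret)
		return ret;	/* short copy or fault */

	if (last) {
		ahash_request_set_crypt(req, NULL, (u8 *)digest, 0);
		crypto_ahash_final(req);
	}
	return 0;
}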
static size_t simple_copy_to_iter(const void *addr, size_t bytes,
void *data __always_unused, struct iov_iter *i)
{
return copy_to_iter(addr, bytes, i);
}

/**
* skb_copy_datagram_iter - Copy a datagram to an iovec iterator.
* @skb: buffer to copy
* @offset: offset in the buffer to start copying from
* @to: iovec iterator to copy to
* @len: amount of data to copy from buffer to iovec
*/
int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
struct iov_iter *to, int len)
{
trace_skb_copy_datagram_iovec(skb, len);
return __skb_datagram_iter(skb, offset, to, len, false,
simple_copy_to_iter, NULL);
}
EXPORT_SYMBOL(skb_copy_datagram_iter);

/**
@ -645,87 +680,21 @@ int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from)
}
EXPORT_SYMBOL(zerocopy_sg_from_iter);

/**
* skb_copy_and_csum_datagram_iter - Copy datagram to an iovec iterator
* and update a checksum.
* @skb: buffer to copy
* @offset: offset in the buffer to start copying from
* @to: iovec iterator to copy to
* @len: amount of data to copy from buffer to iovec
* @csump: checksum pointer
*/
static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
struct iov_iter *to, int len,
__wsum *csump)
{
int start = skb_headlen(skb);
int i, copy = start - offset, start_off = offset;
struct sk_buff *frag_iter;
int pos = 0;
int n;

/* Copy header. */
if (copy > 0) {
if (copy > len)
copy = len;
n = csum_and_copy_to_iter(skb->data + offset, copy, csump, to);
offset += n;
if (n != copy)
goto fault;
if ((len -= copy) == 0)
return 0;
pos = copy;
}

for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
int end;
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

WARN_ON(start > offset + len);

end = start + skb_frag_size(frag);
if ((copy = end - offset) > 0) {
__wsum csum2 = 0;
struct page *page = skb_frag_page(frag);
u8 *vaddr = kmap(page);

if (copy > len)
copy = len;
n = csum_and_copy_to_iter(vaddr + frag->page_offset +
offset - start, copy,
&csum2, to);
kunmap(page);
offset += n;
if (n != copy)
goto fault;
*csump = csum_block_add(*csump, csum2, pos);
if (!(len -= copy))
return 0;
pos += copy;
}
start = end;
}

skb_walk_frags(skb, frag_iter) {
int end;

WARN_ON(start > offset + len);

end = start + frag_iter->len;
if ((copy = end - offset) > 0) {
__wsum csum2 = 0;
if (copy > len)
copy = len;
if (skb_copy_and_csum_datagram(frag_iter,
offset - start,
to, copy,
&csum2))
goto fault;
*csump = csum_block_add(*csump, csum2, pos);
if ((len -= copy) == 0)
return 0;
offset += copy;
pos += copy;
}
start = end;
}
if (!len)
return 0;

fault:
iov_iter_revert(to, offset - start_off);
return -EFAULT;
return __skb_datagram_iter(skb, offset, to, len, true,
csum_and_copy_to_iter, csump);
}

__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)