Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-11-24 02:40:52 +07:00)
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block layer fixes from Jens Axboe:
 "A set of fixes for the current series, one fixing a regression with
  block size < page cache size in the alias series from Jan. Outside of
  that, two small cleanups for wbt from Bart, an nvme pull request from
  Christoph, and a few small documentation fixes."

* 'for-linus' of git://git.kernel.dk/linux-block:
  block: fix up io_poll documentation
  block: Avoid that sparse complains about context imbalance in __wbt_wait()
  block: Make wbt_wait() definition consistent with declaration
  clean_bdev_aliases: Prevent cleaning blocks that are not in block range
  genhd: remove dead and duplicated scsi code
  block: add back plugging in __blkdev_direct_IO
  nvmet/fcloop: remove some logically dead code performing redundant ret checks
  nvmet: fix KATO offset in Set Features
  nvme/fc: simplify error handling of nvme_fc_create_hw_io_queues
  nvme/fc: correct some printk information
  nvme/scsi: Remove START STOP emulation
  nvme/pci: Delete misleading queue-wrap comment
  nvme/pci: Fix whitespace problem
  nvme: simplify stripe quirk
  nvme: update maintainers information
commit 62f8c40592
@@ -54,9 +54,9 @@ This is the hardware sector size of the device, in bytes.

 io_poll (RW)
 ------------
-When read, this file shows the total number of block IO polls and how
-many returned success. Writing '0' to this file will disable polling
-for this device. Writing any non-zero value will enable this feature.
+When read, this file shows whether polling is enabled (1) or disabled
+(0). Writing '0' to this file will disable polling for this device.
+Writing any non-zero value will enable this feature.

 io_poll_delay (RW)
 ------------------
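The io_poll attribute described above can be exercised from user space through sysfs. A minimal C sketch, assuming a hypothetical device at /sys/block/nvme0n1/queue/io_poll (the device name is illustrative, not taken from this series):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Hypothetical device path, for illustration only. */
	const char *path = "/sys/block/nvme0n1/queue/io_poll";
	FILE *f = fopen(path, "r");
	int enabled;

	if (!f) {
		perror("open io_poll for reading");
		return EXIT_FAILURE;
	}
	if (fscanf(f, "%d", &enabled) != 1) {
		fclose(f);
		fprintf(stderr, "could not parse %s\n", path);
		return EXIT_FAILURE;
	}
	fclose(f);
	printf("polling is %s\n", enabled ? "enabled (1)" : "disabled (0)");

	/* Writing '0' disables polling, any non-zero value enables it. */
	f = fopen(path, "w");
	if (!f) {
		perror("open io_poll for writing");
		return EXIT_FAILURE;
	}
	fprintf(f, "%d\n", !enabled);
	fclose(f);
	return EXIT_SUCCESS;
}

Toggling the value needs write access to the sysfs file, which typically means root.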
@@ -8854,17 +8854,22 @@ F: drivers/video/fbdev/nvidia/
 NVM EXPRESS DRIVER
 M:	Keith Busch <keith.busch@intel.com>
 M:	Jens Axboe <axboe@fb.com>
+M:	Christoph Hellwig <hch@lst.de>
+M:	Sagi Grimberg <sagi@grimberg.me>
 L:	linux-nvme@lists.infradead.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
-W:	https://kernel.googlesource.com/pub/scm/linux/kernel/git/axboe/linux-block/
+T:	git://git.infradead.org/nvme.git
+W:	http://git.infradead.org/nvme.git
 S:	Supported
 F:	drivers/nvme/host/
 F:	include/linux/nvme.h
 F:	include/uapi/linux/nvme_ioctl.h

 NVM EXPRESS TARGET DRIVER
 M:	Christoph Hellwig <hch@lst.de>
 M:	Sagi Grimberg <sagi@grimberg.me>
 L:	linux-nvme@lists.infradead.org
+T:	git://git.infradead.org/nvme.git
+W:	http://git.infradead.org/nvme.git
 S:	Supported
 F:	drivers/nvme/target/
@@ -544,6 +544,8 @@ static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw,
  * the timer to kick off queuing again.
  */
 static void __wbt_wait(struct rq_wb *rwb, unsigned long rw, spinlock_t *lock)
+	__releases(lock)
+	__acquires(lock)
 {
 	struct rq_wait *rqw = get_rq_wait(rwb, current_is_kswapd());
 	DEFINE_WAIT(wait);
@@ -558,13 +560,12 @@ static void __wbt_wait(struct rq_wb *rwb, unsigned long rw, spinlock_t *lock)
 		if (may_queue(rwb, rqw, &wait, rw))
 			break;

-		if (lock)
+		if (lock) {
 			spin_unlock_irq(lock);
-
-		io_schedule();
-
-		if (lock)
+			io_schedule();
 			spin_lock_irq(lock);
+		} else
+			io_schedule();
 	} while (1);

 	finish_wait(&rqw->wait, &wait);
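The two __wbt_wait() hunks above add sparse context annotations and restructure the code so the lock is dropped and re-taken around the sleep in a single branch. Below is a self-contained sketch of the same shape, with the __releases()/__acquires() annotations stubbed to no-ops and a pthread mutex standing in for the spinlock; all names here are illustrative, not kernel code:

#include <pthread.h>
#include <unistd.h>

/* Outside the kernel (no sparse), the context annotations become no-ops. */
#define __releases(x)
#define __acquires(x)

/* Drop the lock only around the blocking step, and only when a lock was
 * actually handed in, mirroring the structure the fix gives __wbt_wait(). */
static void wait_with_optional_lock(pthread_mutex_t *lock)
	__releases(lock)
	__acquires(lock)
{
	if (lock) {
		pthread_mutex_unlock(lock);
		sleep(1);		/* stand-in for io_schedule() */
		pthread_mutex_lock(lock);
	} else {
		sleep(1);
	}
}

int main(void)
{
	pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

	pthread_mutex_lock(&m);
	wait_with_optional_lock(&m);	/* returns with the lock still held */
	pthread_mutex_unlock(&m);
	wait_with_optional_lock(NULL);	/* no lock involved */
	return 0;
}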
@@ -595,7 +596,7 @@ static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
  * in an irq held spinlock, if it holds one when calling this function.
  * If we do sleep, we'll release and re-grab it.
  */
-unsigned int wbt_wait(struct rq_wb *rwb, struct bio *bio, spinlock_t *lock)
+enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio, spinlock_t *lock)
 {
 	unsigned int ret = 0;

@@ -1193,8 +1193,8 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
 		blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
 		blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
 	}
-	if (ctrl->stripe_size)
-		blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
+	if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
+		blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
 	blk_queue_virt_boundary(q, ctrl->page_size - 1);
 	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
 		vwc = true;
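With the quirk simplified, a device flagged NVME_QUIRK_STRIPE_SIZE has its chunk boundary set straight to max_hw_sectors rather than a separately computed stripe size. The block layer uses chunk_sectors to keep requests from crossing chunk boundaries; the standalone sketch below illustrates that splitting behaviour with made-up numbers (it is an analogy, not the kernel splitting code):

#include <stdio.h>

/* Split a request so no piece crosses a chunk boundary, which is the
 * effect blk_queue_chunk_sectors() has on bios in the block layer. */
static void split_at_chunks(unsigned long long sector, unsigned int nr_sectors,
			    unsigned int chunk_sectors)
{
	while (nr_sectors) {
		unsigned int to_boundary =
			chunk_sectors - (unsigned int)(sector % chunk_sectors);
		unsigned int len = nr_sectors < to_boundary ? nr_sectors
							    : to_boundary;

		printf("piece: sector %llu, %u sectors\n", sector, len);
		sector += len;
		nr_sectors -= len;
	}
}

int main(void)
{
	/* e.g. a 1 MiB request starting 64 KiB before a 256 KiB boundary */
	split_at_chunks(384, 2048, 512);
	return 0;
}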
@@ -1250,19 +1250,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
 		ctrl->max_hw_sectors =
 			min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);

-	if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && id->vs[3]) {
-		unsigned int max_hw_sectors;
-
-		ctrl->stripe_size = 1 << (id->vs[3] + page_shift);
-		max_hw_sectors = ctrl->stripe_size >> (page_shift - 9);
-		if (ctrl->max_hw_sectors) {
-			ctrl->max_hw_sectors = min(max_hw_sectors,
-						   ctrl->max_hw_sectors);
-		} else {
-			ctrl->max_hw_sectors = max_hw_sectors;
-		}
-	}
-
 	nvme_set_queue_limits(ctrl, ctrl->admin_q);
 	ctrl->sgls = le32_to_cpu(id->sgls);
 	ctrl->kas = le16_to_cpu(id->kas);
@@ -1491,19 +1491,20 @@ static int
 nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
 {
 	struct nvme_fc_queue *queue = &ctrl->queues[1];
-	int i, j, ret;
+	int i, ret;

 	for (i = 1; i < ctrl->queue_count; i++, queue++) {
 		ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
-		if (ret) {
-			for (j = i-1; j >= 0; j--)
-				__nvme_fc_delete_hw_queue(ctrl,
-						&ctrl->queues[j], j);
-			return ret;
-		}
+		if (ret)
+			goto delete_queues;
 	}

 	return 0;
+
+delete_queues:
+	for (; i >= 0; i--)
+		__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
+	return ret;
 }

 static int
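The nvme_fc_create_hw_io_queues() hunk above replaces the nested clean-up loop with a single goto label that unwinds whatever was set up before the failure. A standalone sketch of that create-then-unwind shape with invented stub helpers (the unwind bounds here only walk the fully created entries, so they differ slightly from the hunk):

#include <stdio.h>

#define NR_QUEUES 4

static int create_queue(int idx)
{
	/* Pretend queue 2 fails so the unwind path runs. */
	if (idx == 2)
		return -1;
	printf("created queue %d\n", idx);
	return 0;
}

static void delete_queue(int idx)
{
	printf("deleted queue %d\n", idx);
}

static int create_all_queues(void)
{
	int i, ret;

	for (i = 1; i < NR_QUEUES; i++) {
		ret = create_queue(i);
		if (ret)
			goto delete_queues;
	}

	return 0;

delete_queues:
	/* Tear down everything created so far, like the hunk above does. */
	for (i--; i > 0; i--)
		delete_queue(i);
	return ret;
}

int main(void)
{
	return create_all_queues() ? 1 : 0;
}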
@@ -2401,8 +2402,8 @@ __nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 	WARN_ON_ONCE(!changed);

 	dev_info(ctrl->ctrl.device,
-		"NVME-FC{%d}: new ctrl: NQN \"%s\" (%p)\n",
-		ctrl->cnum, ctrl->ctrl.opts->subsysnqn, &ctrl);
+		"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
+		ctrl->cnum, ctrl->ctrl.opts->subsysnqn);

 	kref_get(&ctrl->ctrl.kref);

@@ -135,7 +135,6 @@ struct nvme_ctrl {

 	u32 page_size;
 	u32 max_hw_sectors;
-	u32 stripe_size;
 	u16 oncs;
 	u16 vid;
 	atomic_t abort_limit;
@@ -712,15 +712,8 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
 		req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
 		nvme_req(req)->result = cqe.result;
 		blk_mq_complete_request(req, le16_to_cpu(cqe.status) >> 1);
-
 	}

-	/* If the controller ignores the cq head doorbell and continuously
-	 * writes to the queue, it is theoretically possible to wrap around
-	 * the queue twice and mistakenly return IRQ_NONE. Linux only
-	 * requires that 0.1% of your interrupts are handled, so this isn't
-	 * a big problem.
-	 */
 	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
 		return;

@@ -1909,10 +1902,10 @@ static int nvme_dev_map(struct nvme_dev *dev)
 	if (!dev->bar)
 		goto release;

-       return 0;
+	return 0;
  release:
-       pci_release_mem_regions(pdev);
-       return -ENODEV;
+	pci_release_mem_regions(pdev);
+	return -ENODEV;
 }

 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -2160,30 +2160,6 @@ static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
 	return nvme_trans_status_code(hdr, nvme_sc);
 }

-static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
-							u8 *cmd)
-{
-	u8 immed, no_flush;
-
-	immed = cmd[1] & 0x01;
-	no_flush = cmd[4] & 0x04;
-
-	if (immed != 0) {
-		return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
-					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
-					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-	} else {
-		if (no_flush == 0) {
-			/* Issue NVME FLUSH command prior to START STOP UNIT */
-			int res = nvme_trans_synchronize_cache(ns, hdr);
-			if (res)
-				return res;
-		}
-
-		return 0;
-	}
-}
-
 static int nvme_trans_format_unit(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 							u8 *cmd)
 {
@@ -2439,9 +2415,6 @@ static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
 	case SECURITY_PROTOCOL_OUT:
 		retcode = nvme_trans_security_protocol(ns, hdr, cmd);
 		break;
-	case START_STOP:
-		retcode = nvme_trans_start_stop(ns, hdr, cmd);
-		break;
 	case SYNCHRONIZE_CACHE:
 		retcode = nvme_trans_synchronize_cache(ns, hdr);
 		break;
@@ -382,7 +382,6 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
 {
 	struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
 	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
-	u64 val;
 	u32 val32;
 	u16 status = 0;

@@ -392,8 +391,7 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
 			(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
 		break;
 	case NVME_FEAT_KATO:
-		val = le64_to_cpu(req->cmd->prop_set.value);
-		val32 = val & 0xffff;
+		val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
 		req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
 		nvmet_set_result(req, req->sq->ctrl->kato);
 		break;
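The Set Features fix above takes the keep-alive timeout from cdw11 (common.cdw10[1]) rather than the property-set payload, then rounds the millisecond value up to whole seconds. A small worked example of that conversion, using made-up KATO values:

#include <stdio.h>
#include <stdint.h>

/* Same rounding-up division the hunk performs via the kernel's DIV_ROUND_UP(). */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* KATO is given in milliseconds in Set Features; sample values only. */
	uint32_t kato_ms[] = { 15000, 2500, 1 };

	for (unsigned int i = 0; i < sizeof(kato_ms) / sizeof(kato_ms[0]); i++)
		printf("%u ms -> %u s\n", kato_ms[i],
		       DIV_ROUND_UP(kato_ms[i], 1000));
	return 0;
}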
@@ -845,7 +845,7 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
 	rport->lport = nport->lport;
 	nport->rport = rport;

-	return ret ? ret : count;
+	return count;
 }

@@ -952,7 +952,7 @@ fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
 	tport->lport = nport->lport;
 	nport->tport = tport;

-	return ret ? ret : count;
+	return count;
 }

@@ -328,6 +328,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = bdev_file_inode(file);
 	struct block_device *bdev = I_BDEV(inode);
+	struct blk_plug plug;
 	struct blkdev_dio *dio;
 	struct bio *bio;
 	bool is_read = (iov_iter_rw(iter) == READ);
@@ -353,6 +354,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 	dio->multi_bio = false;
 	dio->should_dirty = is_read && (iter->type == ITER_IOVEC);

+	blk_start_plug(&plug);
 	for (;;) {
 		bio->bi_bdev = bdev;
 		bio->bi_iter.bi_sector = pos >> 9;
@@ -394,6 +396,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 		submit_bio(bio);
 		bio = bio_alloc(GFP_KERNEL, nr_pages);
 	}
+	blk_finish_plug(&plug);

 	if (!dio->is_sync)
 		return -EIOCBQUEUED;
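The three __blkdev_direct_IO() hunks above declare a plug, start it before the bio submission loop and finish it after the loop, so submissions can be batched. As a loose userland analogy only (not the block-layer API), the sketch below queues work while "plugged" and dispatches it in one batch at the end:

#include <stdio.h>

#define MAX_BATCH 16

/* A toy plug: submissions are queued while plugged and dispatched together
 * when the plug is finished, loosely analogous to blk_start_plug() /
 * blk_finish_plug() around the bio loop above. */
struct plug {
	int pending[MAX_BATCH];
	int count;
};

static void start_plug(struct plug *p)
{
	p->count = 0;
}

static void submit(struct plug *p, int io)
{
	if (p->count < MAX_BATCH)
		p->pending[p->count++] = io;
}

static void finish_plug(struct plug *p)
{
	printf("dispatching %d queued I/Os:", p->count);
	for (int i = 0; i < p->count; i++)
		printf(" %d", p->pending[i]);
	printf("\n");
	p->count = 0;
}

int main(void)
{
	struct plug plug;

	start_plug(&plug);
	for (int io = 0; io < 5; io++)
		submit(&plug, io);	/* queued, not dispatched yet */
	finish_plug(&plug);		/* one batched dispatch */
	return 0;
}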
@@ -1660,7 +1660,7 @@ void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
 		head = page_buffers(page);
 		bh = head;
 		do {
-			if (!buffer_mapped(bh))
+			if (!buffer_mapped(bh) || (bh->b_blocknr < block))
 				goto next;
 			if (bh->b_blocknr >= block + len)
 				break;
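The clean_bdev_aliases() fix above adds a lower-bound check so buffers whose block number falls before the requested range are skipped; only blocks in [block, block + len) are cleaned. A tiny standalone check of that predicate with made-up numbers:

#include <stdio.h>

typedef unsigned long long sector_t;

/* Mirrors the range test the fixed loop applies to each buffer head. */
static int in_cleaned_range(sector_t blocknr, sector_t block, sector_t len)
{
	return blocknr >= block && blocknr < block + len;
}

int main(void)
{
	sector_t block = 100, len = 8;	/* made-up range: blocks 100..107 */

	for (sector_t b = 98; b < 110; b++)
		printf("block %llu: %s\n", b,
		       in_cleaned_range(b, block, len) ? "clean" : "skip");
	return 0;
}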
@@ -146,15 +146,6 @@ enum {
 	DISK_EVENT_EJECT_REQUEST	= 1 << 1, /* eject requested */
 };

-#define BLK_SCSI_MAX_CMDS	(256)
-#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
-
-struct blk_scsi_cmd_filter {
-	unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
-	unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
-	struct kobject kobj;
-};
-
 struct disk_part_tbl {
 	struct rcu_head rcu_head;
 	int len;