Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-11-24 19:30:52 +07:00
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
 "A collection of fixes for this series. This contains:

   - NVMe pull request from Christoph: one uuid attribute fix, and one
     fix for the controller memory buffer address for remapped BARs.

   - use-after-free fix for bsg, from Benjamin Block.

   - bcache race/use-after-free fix for a list traversal, fixing a
     regression in this merge window. From Coly Li.

   - null_blk configfs dependency change from a 'depends' to a
     'select'. This is a change from this merge window as well. From me.

   - nbd signal fix from Josef, fixing a regression introduced with the
     status code changes.

   - nbd MAINTAINERS mailing list entry update.

   - blk-throttle stall fix from Joseph Qi.

   - blk-mq-debugfs fix from Omar, fixing an issue where we don't
     register the IO scheduler debugfs directory if the driver is loaded
     with it. Only shows up if you switch schedulers through the sysfs
     interface"

* 'for-linus' of git://git.kernel.dk/linux-block:
  bsg-lib: fix use-after-free under memory-pressure
  nvme-pci: Use PCI bus address for data/queues in CMB
  blk-mq-debugfs: fix device sched directory for default scheduler
  null_blk: change configfs dependency to select
  blk-throttle: fix possible io stall when upgrade to max
  MAINTAINERS: update list for NBD
  nbd: fix -ERESTARTSYS handling
  nvme: fix visibility of "uuid" ns attribute
  bcache: use llist_for_each_entry_safe() in __closure_wake_up()
This commit is contained in: 17d084c8d1
diff --git a/MAINTAINERS b/MAINTAINERS
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9360,7 +9360,7 @@ NETWORK BLOCK DEVICE (NBD)
 M: Josef Bacik <jbacik@fb.com>
 S: Maintained
 L: linux-block@vger.kernel.org
-L: nbd-general@lists.sourceforge.net
+L: nbd@other.debian.org
 F: Documentation/blockdev/nbd.txt
 F: drivers/block/nbd.c
 F: include/uapi/linux/nbd.h
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -815,10 +815,14 @@ int blk_mq_debugfs_register(struct request_queue *q)
                 goto err;
 
         /*
-         * blk_mq_init_hctx() attempted to do this already, but q->debugfs_dir
+         * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
          * didn't exist yet (because we don't know what to name the directory
          * until the queue is registered to a gendisk).
          */
+        if (q->elevator && !q->sched_debugfs_dir)
+                blk_mq_debugfs_register_sched(q);
+
+        /* Similarly, blk_mq_init_hctx() couldn't do this previously. */
         queue_for_each_hw_ctx(q, hctx, i) {
                 if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx))
                         goto err;
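Note on the blk-mq-debugfs hunk: as the (now corrected) comment says, the scheduler's debugfs directory cannot be created when the elevator is initialized, because q->debugfs_dir does not exist until the queue is registered to a gendisk, so blk_mq_debugfs_register() retries the registration here. Below is a minimal userspace sketch of this deferred-registration pattern; the names and the plain booleans standing in for debugfs dentries are made up for illustration, this is not the blk-mq-debugfs API.

/*
 * Userspace model of deferred registration: a child entry can only be
 * created once its parent directory exists, so a registration that
 * arrives too early is retried when the parent appears.
 */
#include <stdbool.h>
#include <stdio.h>

struct queue {
        bool debugfs_dir;       /* parent dir, created at registration time */
        bool has_elevator;      /* an I/O scheduler is attached */
        bool sched_debugfs_dir; /* child dir for the scheduler */
};

/* Called when the scheduler is set up; may run before the parent exists. */
static void register_sched(struct queue *q)
{
        if (!q->debugfs_dir)
                return;         /* nothing to attach to yet */
        q->sched_debugfs_dir = true;
}

/* Called when the queue is registered: create the parent, then retry
 * anything that could not be registered earlier. */
static void register_queue(struct queue *q)
{
        q->debugfs_dir = true;
        if (q->has_elevator && !q->sched_debugfs_dir)
                register_sched(q);
}

int main(void)
{
        struct queue q = { .has_elevator = true };

        register_sched(&q);     /* too early: parent dir does not exist yet */
        register_queue(&q);     /* parent created, sched dir registered now */
        printf("sched dir registered: %d\n", q.sched_debugfs_dir);
        return 0;
}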
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1911,11 +1911,11 @@ static void throtl_upgrade_state(struct throtl_data *td)
 
                 tg->disptime = jiffies - 1;
                 throtl_select_dispatch(sq);
-                throtl_schedule_next_dispatch(sq, false);
+                throtl_schedule_next_dispatch(sq, true);
         }
         rcu_read_unlock();
         throtl_select_dispatch(&td->service_queue);
-        throtl_schedule_next_dispatch(&td->service_queue, false);
+        throtl_schedule_next_dispatch(&td->service_queue, true);
         queue_work(kthrotld_workqueue, &td->dispatch_work);
 }
 
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -207,20 +207,34 @@ static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp)
         struct bsg_job *job = blk_mq_rq_to_pdu(req);
         struct scsi_request *sreq = &job->sreq;
 
-        memset(job, 0, sizeof(*job));
+        /* called right after the request is allocated for the request_queue */
 
-        scsi_req_init(sreq);
-        sreq->sense_len = SCSI_SENSE_BUFFERSIZE;
-        sreq->sense = kzalloc(sreq->sense_len, gfp);
+        sreq->sense = kzalloc(SCSI_SENSE_BUFFERSIZE, gfp);
         if (!sreq->sense)
                 return -ENOMEM;
 
+        return 0;
+}
+
+static void bsg_initialize_rq(struct request *req)
+{
+        struct bsg_job *job = blk_mq_rq_to_pdu(req);
+        struct scsi_request *sreq = &job->sreq;
+        void *sense = sreq->sense;
+
+        /* called right before the request is given to the request_queue user */
+
+        memset(job, 0, sizeof(*job));
+
+        scsi_req_init(sreq);
+
+        sreq->sense = sense;
+        sreq->sense_len = SCSI_SENSE_BUFFERSIZE;
+
         job->req = req;
-        job->reply = sreq->sense;
+        job->reply = sense;
         job->reply_len = sreq->sense_len;
         job->dd_data = job + 1;
-
-        return 0;
 }
 
 static void bsg_exit_rq(struct request_queue *q, struct request *req)
@@ -251,6 +265,7 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
         q->cmd_size = sizeof(struct bsg_job) + dd_job_size;
         q->init_rq_fn = bsg_init_rq;
         q->exit_rq_fn = bsg_exit_rq;
+        q->initialize_rq_fn = bsg_initialize_rq;
         q->request_fn = bsg_request_fn;
 
         ret = blk_init_allocated_queue(q);
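Note on the bsg-lib hunks: the sense buffer is allocated once per request in bsg_init_rq() (called right after the request is allocated for the request_queue), while everything that must be reset on every reuse moves into the new bsg_initialize_rq() callback (called right before the request is handed to a user). The callback saves the preallocated sense pointer across the memset() so the buffer is neither leaked nor looked up again after it could have been freed. A rough userspace sketch of that split, using simplified stand-in structs rather than the kernel's bsg_job/scsi_request:

/*
 * One-time allocation vs. per-use initialization, modeled in userspace.
 */
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>

#define SENSE_SIZE 96

struct fake_job {
        void *sense;            /* allocated once, reused for the queue's lifetime */
        unsigned int sense_len;
        void *reply;
        int state;              /* stands in for all the per-use fields */
};

/* Called once, when the request is allocated for the queue. */
static int init_rq(struct fake_job *job)
{
        job->sense = calloc(1, SENSE_SIZE);
        return job->sense ? 0 : -ENOMEM;
}

/* Called every time the request is handed to a new user: wipe the
 * per-use state but keep the preallocated sense buffer. */
static void initialize_rq(struct fake_job *job)
{
        void *sense = job->sense;

        memset(job, 0, sizeof(*job));
        job->sense = sense;
        job->sense_len = SENSE_SIZE;
        job->reply = sense;
}

int main(void)
{
        struct fake_job job = { 0 };
        void *first;

        if (init_rq(&job) != 0)
                return 1;
        first = job.sense;

        for (int i = 0; i < 3; i++) {   /* the request gets reused */
                initialize_rq(&job);
                job.state = i;
                assert(job.sense == first);     /* buffer survives re-init */
        }
        free(job.sense);
        return 0;
}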
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -17,7 +17,7 @@ if BLK_DEV
 
 config BLK_DEV_NULL_BLK
         tristate "Null test block driver"
-        depends on CONFIGFS_FS
+        select CONFIGFS_FS
 
 config BLK_DEV_FD
         tristate "Normal floppy disk support"
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -820,9 +820,13 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
          * appropriate.
          */
         ret = nbd_handle_cmd(cmd, hctx->queue_num);
+        if (ret < 0)
+                ret = BLK_STS_IOERR;
+        else if (!ret)
+                ret = BLK_STS_OK;
         complete(&cmd->send_complete);
 
-        return ret < 0 ? BLK_STS_IOERR : BLK_STS_OK;
+        return ret;
 }
 
 static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
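Note on the nbd hunk: the new code maps a negative errno to BLK_STS_IOERR, maps zero to BLK_STS_OK, and passes any other value through unchanged, instead of collapsing every non-negative return into BLK_STS_OK. That matters because an already-translated status such as BLK_STS_RESOURCE, which blk-mq uses to requeue a request, would otherwise be reported as success. A userspace sketch of the translation; the enum values are illustrative, not the kernel's blk_status_t.

/*
 * Old vs. fixed return-value translation in nbd_queue_rq(), modeled
 * with plain ints in userspace.
 */
#include <assert.h>
#include <errno.h>

enum blk_status {
        BLK_STS_OK = 0,
        BLK_STS_RESOURCE = 9,   /* "try again later": blk-mq requeues */
        BLK_STS_IOERR = 10,
};

/* Old behaviour: anything non-negative became BLK_STS_OK, so a
 * "requeue me" status was silently turned into success. */
static enum blk_status queue_rq_old(int ret)
{
        return ret < 0 ? BLK_STS_IOERR : BLK_STS_OK;
}

/* Fixed behaviour: negative errno -> IOERR, 0 -> OK, and an already
 * translated status is passed through untouched. */
static enum blk_status queue_rq_new(int ret)
{
        if (ret < 0)
                return BLK_STS_IOERR;
        else if (!ret)
                return BLK_STS_OK;
        return (enum blk_status)ret;
}

int main(void)
{
        assert(queue_rq_old(BLK_STS_RESOURCE) == BLK_STS_OK);           /* the bug */
        assert(queue_rq_new(BLK_STS_RESOURCE) == BLK_STS_RESOURCE);
        assert(queue_rq_new(-EIO) == BLK_STS_IOERR);
        assert(queue_rq_new(0) == BLK_STS_OK);
        return 0;
}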
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -64,7 +64,7 @@ EXPORT_SYMBOL(closure_put);
 void __closure_wake_up(struct closure_waitlist *wait_list)
 {
         struct llist_node *list;
-        struct closure *cl;
+        struct closure *cl, *t;
         struct llist_node *reverse = NULL;
 
         list = llist_del_all(&wait_list->list);
@@ -73,7 +73,7 @@ void __closure_wake_up(struct closure_waitlist *wait_list)
         reverse = llist_reverse_order(list);
 
         /* Then do the wakeups */
-        llist_for_each_entry(cl, reverse, list) {
+        llist_for_each_entry_safe(cl, t, reverse, list) {
                 closure_set_waiting(cl, 0);
                 closure_sub(cl, CLOSURE_WAITING + 1);
         }
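Note on the bcache hunk: closure_sub() can drop the last reference and free (or requeue) the closure, so the plain llist_for_each_entry() would read cl's next pointer after cl is already gone; the _safe variant caches the next pointer before the loop body runs. A small userspace model of that use-after-free pattern, using a toy singly linked list rather than the kernel's llist API:

/*
 * Iterating a singly linked list while the loop body frees the current
 * node: the "_safe" form saves the next pointer up front.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
        struct node *next;
        int id;
};

/* Unsafe (shown only for contrast): reads pos->next after the body
 * may have freed pos. */
#define list_for_each(pos, head) \
        for (pos = (head); pos; pos = pos->next)

/* Safe: next is cached before the body, so freeing pos is fine. */
#define list_for_each_safe(pos, n, head) \
        for (pos = (head); pos && ((n = pos->next), 1); pos = n)

int main(void)
{
        struct node *head = NULL, *pos, *n;

        for (int i = 0; i < 3; i++) {
                struct node *new = malloc(sizeof(*new));
                new->id = i;
                new->next = head;
                head = new;
        }

        /* Like __closure_wake_up(): the "wakeup" may free the entry. */
        list_for_each_safe(pos, n, head) {
                printf("waking closure %d\n", pos->id);
                free(pos);      /* with list_for_each() this would be a use-after-free */
        }
        return 0;
}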
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2136,7 +2136,7 @@ static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj,
         struct nvme_ns *ns = nvme_get_ns_from_dev(dev);
 
         if (a == &dev_attr_uuid.attr) {
-                if (uuid_is_null(&ns->uuid) ||
+                if (uuid_is_null(&ns->uuid) &&
                     !memchr_inv(ns->nguid, 0, sizeof(ns->nguid)))
                         return 0;
         }
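Note on the nvme core hunk: the "uuid" attribute should stay visible when either identifier is present. With ||, a namespace that reported an NGUID but a null UUID (or vice versa) wrongly hid the attribute; hiding is now limited to the case where the UUID is null and the NGUID is all zeroes. A tiny userspace sketch of the corrected predicate, using hypothetical 16-byte arrays rather than the driver's structures:

/*
 * Hide the attribute only when *both* identifiers are absent.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool all_zero(const unsigned char *p, size_t len)
{
        while (len--)
                if (*p++)
                        return false;
        return true;
}

static bool uuid_attr_visible(const unsigned char uuid[16],
                              const unsigned char nguid[16])
{
        return !(all_zero(uuid, 16) && all_zero(nguid, 16));
}

int main(void)
{
        unsigned char zero[16] = { 0 };
        unsigned char nguid[16] = { 0 };

        nguid[15] = 1;  /* namespace has an NGUID but no UUID */

        /* With the old "||" logic the first case was wrongly hidden. */
        printf("visible: %d\n", uuid_attr_visible(zero, nguid));        /* 1 */
        printf("visible: %d\n", uuid_attr_visible(zero, zero));         /* 0 */
        return 0;
}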
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -94,7 +94,7 @@ struct nvme_dev {
         struct mutex shutdown_lock;
         bool subsystem;
         void __iomem *cmb;
-        dma_addr_t cmb_dma_addr;
+        pci_bus_addr_t cmb_bus_addr;
         u64 cmb_size;
         u32 cmbsz;
         u32 cmbloc;
@@ -1226,7 +1226,7 @@ static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
         if (qid && dev->cmb && use_cmb_sqes && NVME_CMB_SQS(dev->cmbsz)) {
                 unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
                                                       dev->ctrl.page_size);
-                nvmeq->sq_dma_addr = dev->cmb_dma_addr + offset;
+                nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
                 nvmeq->sq_cmds_io = dev->cmb + offset;
         } else {
                 nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
@@ -1527,7 +1527,7 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
         resource_size_t bar_size;
         struct pci_dev *pdev = to_pci_dev(dev->dev);
         void __iomem *cmb;
-        dma_addr_t dma_addr;
+        int bar;
 
         dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ);
         if (!(NVME_CMB_SZ(dev->cmbsz)))
@@ -1540,7 +1540,8 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
         szu = (u64)1 << (12 + 4 * NVME_CMB_SZU(dev->cmbsz));
         size = szu * NVME_CMB_SZ(dev->cmbsz);
         offset = szu * NVME_CMB_OFST(dev->cmbloc);
-        bar_size = pci_resource_len(pdev, NVME_CMB_BIR(dev->cmbloc));
+        bar = NVME_CMB_BIR(dev->cmbloc);
+        bar_size = pci_resource_len(pdev, bar);
 
         if (offset > bar_size)
                 return NULL;
@@ -1553,12 +1554,11 @@ static void __iomem *nvme_map_cmb(struct nvme_dev *dev)
         if (size > bar_size - offset)
                 size = bar_size - offset;
 
-        dma_addr = pci_resource_start(pdev, NVME_CMB_BIR(dev->cmbloc)) + offset;
-        cmb = ioremap_wc(dma_addr, size);
+        cmb = ioremap_wc(pci_resource_start(pdev, bar) + offset, size);
         if (!cmb)
                 return NULL;
 
-        dev->cmb_dma_addr = dma_addr;
+        dev->cmb_bus_addr = pci_bus_address(pdev, bar) + offset;
         dev->cmb_size = size;
         return cmb;
 }
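Note on the nvme-pci hunks: ioremap_wc() wants the CPU physical address of the BAR, but the submission-queue addresses programmed into the controller must be in the device's own address space, so they are now derived from pci_bus_address() instead of pci_resource_start(); the two views differ whenever the BAR is remapped between the CPU and the bus. A toy userspace illustration with made-up addresses (real code would query both views through the PCI core, as the patch does):

/*
 * CPU vs. bus view of a remapped BAR: the host maps one address, the
 * device must be told the other.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct fake_bar {
        uint64_t cpu_start;     /* what the CPU uses to map the region */
        uint64_t bus_start;     /* what the device sees on the bus */
};

int main(void)
{
        /* Hypothetical remapped BAR: CPU and bus views differ. */
        struct fake_bar bar = {
                .cpu_start = 0x600000000000ULL,
                .bus_start = 0x80000000ULL,
        };
        uint64_t offset = 0x2000;       /* CMB offset of this submission queue */

        /* Host-side mapping uses the CPU address... */
        printf("map at      0x%" PRIx64 "\n", bar.cpu_start + offset);
        /* ...but the address handed to the controller must be the bus
         * address, or the device accesses the wrong location. */
        printf("tell device 0x%" PRIx64 "\n", bar.bus_start + offset);
        return 0;
}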