Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
Merge tag 'block-5.9-2020-09-11' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - Fix a regression in bdev partition locking (Christoph)

 - NVMe pull request from Christoph:
     - cancel async events before freeing them (David Milburn)
     - revert a broken race fix (James Smart)
     - fix command processing during resets (Sagi Grimberg)

 - Fix a kyber crash with requeued flushes (Omar)

 - Fix __bio_try_merge_page() same_page error for no merging (Ritesh)

* tag 'block-5.9-2020-09-11' of git://git.kernel.dk/linux-block:
  block: Set same_page to false in __bio_try_merge_page if ret is false
  nvme-fabrics: allow to queue requests for live queues
  block: only call sched requeue_request() for scheduled requests
  nvme-tcp: cancel async events before freeing event struct
  nvme-rdma: cancel async events before freeing event struct
  nvme-fc: cancel async events before freeing event struct
  nvme: Revert: Fix controller creation races with teardown flow
  block: restore a specific error code in bdev_del_partition
commit 7b8731d958
block/bfq-iosched.c
@@ -5895,18 +5895,6 @@ static void bfq_finish_requeue_request(struct request *rq)
         struct bfq_queue *bfqq = RQ_BFQQ(rq);
         struct bfq_data *bfqd;

-        /*
-         * Requeue and finish hooks are invoked in blk-mq without
-         * checking whether the involved request is actually still
-         * referenced in the scheduler. To handle this fact, the
-         * following two checks make this function exit in case of
-         * spurious invocations, for which there is nothing to do.
-         *
-         * First, check whether rq has nothing to do with an elevator.
-         */
-        if (unlikely(!(rq->rq_flags & RQF_ELVPRIV)))
-                return;
-
         /*
          * rq either is not associated with any icq, or is an already
          * requeued request that has not (yet) been re-inserted into
block/bio.c
@@ -879,8 +879,10 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
                 struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];

                 if (page_is_mergeable(bv, page, len, off, same_page)) {
-                        if (bio->bi_iter.bi_size > UINT_MAX - len)
+                        if (bio->bi_iter.bi_size > UINT_MAX - len) {
+                                *same_page = false;
                                 return false;
+                        }
                         bv->bv_len += len;
                         bio->bi_iter.bi_size += len;
                         return true;
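The point of also clearing *same_page on the overflow path: callers of __bio_try_merge_page() may act on the out-parameter even when the merge is refused, so a stale true left over from page_is_mergeable() can make them skip work they still need to do. Below is a minimal userspace sketch of that out-parameter contract; try_merge() and its caller are hypothetical stand-ins, not the kernel code.

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of a merge helper with a "same_page" out-parameter. */
static bool try_merge(unsigned int cur_size, unsigned int len,
                      bool contiguous, bool *same_page)
{
        *same_page = contiguous;
        if (cur_size > UINT_MAX - len) {
                *same_page = false;     /* refused merge: report no reuse */
                return false;
        }
        return contiguous;
}

int main(void)
{
        bool same_page;

        /* Merge refused by the size cap: the caller must treat the data as
         * a fresh segment, which only works if *same_page was reset. */
        if (!try_merge(UINT_MAX - 2U, 8U, true, &same_page))
                printf("not merged, same_page=%d -> caller adds a new segment\n",
                       same_page);
        return 0;
}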
block/blk-mq-sched.h
@@ -66,7 +66,7 @@ static inline void blk_mq_sched_requeue_request(struct request *rq)
         struct request_queue *q = rq->q;
         struct elevator_queue *e = q->elevator;

-        if (e && e->type->ops.requeue_request)
+        if ((rq->rq_flags & RQF_ELVPRIV) && e && e->type->ops.requeue_request)
                 e->type->ops.requeue_request(rq);
 }

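Read together with the block/bfq-iosched.c hunk above, this moves the "does the scheduler actually own this request" test into the dispatch layer: requests without RQF_ELVPRIV (for example requeued flushes that bypassed the elevator) no longer reach any scheduler's requeue hook, which is what was crashing kyber, and per-scheduler checks like the one deleted from bfq become redundant. A small userspace model of that guard follows; the ELVPRIV flag and the request/sched_ops structs are simplified stand-ins for the blk-mq types, not the kernel API.

#include <stdbool.h>
#include <stdio.h>

#define ELVPRIV (1u << 0)       /* "owned by the elevator/scheduler" */

struct request { unsigned int flags; };

struct sched_ops {
        void (*requeue)(struct request *rq);    /* may assume it inserted rq */
};

static void kyber_like_requeue(struct request *rq)
{
        /* A real scheduler may dereference per-request state set up at
         * insert time; calling this for a request it never saw can crash. */
        printf("scheduler requeue hook called\n");
}

/* The guard now lives in the dispatch layer, once, for every scheduler. */
static void sched_requeue_request(struct sched_ops *ops, struct request *rq)
{
        if ((rq->flags & ELVPRIV) && ops && ops->requeue)
                ops->requeue(rq);
}

int main(void)
{
        struct sched_ops ops = { .requeue = kyber_like_requeue };
        struct request flush = { .flags = 0 };          /* bypassed the scheduler */
        struct request normal = { .flags = ELVPRIV };   /* inserted normally */

        sched_requeue_request(&ops, &flush);    /* hook is skipped */
        sched_requeue_request(&ops, &normal);   /* hook runs */
        return 0;
}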
block/partitions/core.c
@@ -537,7 +537,7 @@ int bdev_del_partition(struct block_device *bdev, int partno)

         bdevp = bdget_disk(bdev->bd_disk, partno);
         if (!bdevp)
-                return -ENOMEM;
+                return -ENXIO;

         mutex_lock(&bdevp->bd_mutex);
         mutex_lock_nested(&bdev->bd_mutex, 1);
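Restoring -ENXIO matters to userspace: partition tools reach this path through the BLKPG ioctl and use the errno to tell "no such partition" apart from a genuine failure such as -ENOMEM. A hedged sketch of such a caller, assuming the standard BLKPG_DEL_PARTITION interface from <linux/blkpg.h>; the device path and partition number are placeholders and error handling is trimmed.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>
#include <linux/blkpg.h>

int main(int argc, char **argv)
{
        struct blkpg_partition part = { .pno = 3 };     /* placeholder partition number */
        struct blkpg_ioctl_arg arg = {
                .op = BLKPG_DEL_PARTITION,
                .datalen = sizeof(part),
                .data = &part,
        };
        int fd = open(argc > 1 ? argv[1] : "/dev/sda", O_RDONLY);

        if (fd < 0)
                return 1;
        if (ioctl(fd, BLKPG, &arg) < 0)
                /* With the fix, a missing partition reports ENXIO again rather
                 * than looking like an allocation failure. */
                printf("BLKPG_DEL_PARTITION failed: %s\n", strerror(errno));
        close(fd);
        return 0;
}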
drivers/nvme/host/core.c
@@ -3525,10 +3525,6 @@ static ssize_t nvme_sysfs_delete(struct device *dev,
 {
         struct nvme_ctrl *ctrl = dev_get_drvdata(dev);

-        /* Can't delete non-created controllers */
-        if (!ctrl->created)
-                return -EBUSY;
-
         if (device_remove_file_self(dev, attr))
                 nvme_delete_ctrl_sync(ctrl);
         return count;
@@ -4403,7 +4399,6 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
                 nvme_queue_scan(ctrl);
                 nvme_start_queues(ctrl);
         }
-        ctrl->created = true;
 }
 EXPORT_SYMBOL_GPL(nvme_start_ctrl);

drivers/nvme/host/fabrics.c
@@ -565,10 +565,14 @@ bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
         struct nvme_request *req = nvme_req(rq);

         /*
-         * If we are in some state of setup or teardown only allow
-         * internally generated commands.
+         * currently we have a problem sending passthru commands
+         * on the admin_q if the controller is not LIVE because we can't
+         * make sure that they are going out after the admin connect,
+         * controller enable and/or other commands in the initialization
+         * sequence. until the controller will be LIVE, fail with
+         * BLK_STS_RESOURCE so that they will be rescheduled.
          */
-        if (!blk_rq_is_passthrough(rq) || (req->flags & NVME_REQ_USERCMD))
+        if (rq->q == ctrl->admin_q && (req->flags & NVME_REQ_USERCMD))
                 return false;

         /*
@@ -577,7 +581,7 @@ bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
          */
         switch (ctrl->state) {
         case NVME_CTRL_CONNECTING:
-                if (nvme_is_fabrics(req->cmd) &&
+                if (blk_rq_is_passthrough(rq) && nvme_is_fabrics(req->cmd) &&
                     req->cmd->fabrics.fctype == nvme_fabrics_type_connect)
                         return true;
                 break;
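Taken together, the two fabrics.c hunks relax the gate: instead of refusing every command that is not internally generated while the controller is not LIVE, only user commands aimed at the admin queue are deferred, the fabrics connect command is still let through while CONNECTING, and requests for queues that are already live may proceed. Below is a rough userspace model of that decision, with hypothetical enum and struct names and without the DEAD/DELETING cases the real __nvmf_check_ready() also handles.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the nvme-fabrics state and request attributes. */
enum ctrl_state { STATE_LIVE, STATE_CONNECTING, STATE_RESETTING };

struct cmd {
        bool user_issued;       /* passthrough command from userspace     */
        bool on_admin_queue;    /* targets the controller's admin queue   */
        bool is_connect;        /* the fabrics "connect" command itself   */
};

/* Rough model of the new policy: true = issue now, false = requeue and
 * retry later (the kernel returns BLK_STS_RESOURCE for that case).      */
static bool check_ready(enum ctrl_state state, bool queue_live,
                        const struct cmd *c)
{
        if (state == STATE_LIVE && queue_live)
                return true;                    /* fast path: fully up */

        /* User admin commands cannot be ordered against connect/enable,
         * so they wait until the controller is LIVE. */
        if (c->on_admin_queue && c->user_issued)
                return false;

        /* While connecting, the connect command itself must go through. */
        if (state == STATE_CONNECTING && c->is_connect)
                return true;

        return queue_live;
}

int main(void)
{
        struct cmd connect = { .on_admin_queue = true, .is_connect = true };
        struct cmd user_id = { .on_admin_queue = true, .user_issued = true };

        printf("CONNECTING + connect  -> %d\n",
               check_ready(STATE_CONNECTING, false, &connect));
        printf("CONNECTING + user cmd -> %d\n",
               check_ready(STATE_CONNECTING, false, &user_id));
        return 0;
}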
drivers/nvme/host/fc.c
@@ -2160,6 +2160,7 @@ nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
         struct nvme_fc_fcp_op *aen_op;
         int i;

+        cancel_work_sync(&ctrl->ctrl.async_event_work);
         aen_op = ctrl->aen_ops;
         for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
                 __nvme_fc_exit_request(ctrl, aen_op);
drivers/nvme/host/nvme.h
@@ -307,7 +307,6 @@ struct nvme_ctrl {
         struct nvme_command ka_cmd;
         struct work_struct fw_act_work;
         unsigned long events;
-        bool created;

 #ifdef CONFIG_NVME_MULTIPATH
         /* asymmetric namespace access: */
drivers/nvme/host/rdma.c
@@ -835,6 +835,7 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
                 blk_mq_free_tag_set(ctrl->ctrl.admin_tagset);
         }
         if (ctrl->async_event_sqe.data) {
+                cancel_work_sync(&ctrl->ctrl.async_event_work);
                 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
                                 sizeof(struct nvme_command), DMA_TO_DEVICE);
                 ctrl->async_event_sqe.data = NULL;
drivers/nvme/host/tcp.c
@@ -1596,6 +1596,7 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
 static void nvme_tcp_free_admin_queue(struct nvme_ctrl *ctrl)
 {
         if (to_tcp_ctrl(ctrl)->async_req.pdu) {
+                cancel_work_sync(&ctrl->async_event_work);
                 nvme_tcp_free_async_req(to_tcp_ctrl(ctrl));
                 to_tcp_ctrl(ctrl)->async_req.pdu = NULL;
         }
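The fc.c, rdma.c and tcp.c hunks above all apply the same ordering rule: cancel the async-event work, and wait for any in-flight handler to finish, before freeing the AEN request or PDU that the handler dereferences; freeing first leaves a use-after-free window. Below is a minimal pthread sketch of that ordering, where pthread_join() stands in for the "wait until the handler is done" half of cancel_work_sync(); struct async_ctx and the worker are hypothetical, not the kernel workqueue API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for the per-controller async-event request/PDU. */
struct async_ctx {
        char payload[64];
};

static void *async_event_worker(void *arg)
{
        struct async_ctx *ctx = arg;

        /* The handler dereferences the context it was queued with. */
        printf("worker handled: %s\n", ctx->payload);
        return NULL;
}

int main(void)
{
        struct async_ctx *ctx = malloc(sizeof(*ctx));
        pthread_t worker;

        if (!ctx)
                return 1;
        strcpy(ctx->payload, "async event");

        if (pthread_create(&worker, NULL, async_event_worker, ctx) != 0) {
                free(ctx);
                return 1;
        }

        /* The fix's ordering: wait for the handler to finish before freeing
         * the context it uses. */
        pthread_join(worker, NULL);
        free(ctx);      /* safe: nothing can still dereference ctx */
        return 0;
}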