nvme: rename and document nvme_end_request

nvme_end_request is a bit misnamed, as it wraps around the
blk_mq_complete_* API.  Its semantics are also non-trivial, so give it
a more descriptive name and add a comment explaining the semantics.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Mike Snitzer <snitzer@redhat.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 2eb81a3364 (parent c41ad98beb)
Author: Christoph Hellwig <hch@lst.de>, 2020-08-18 09:11:29 +02:00
Committer: Jens Axboe <axboe@kernel.dk>
7 files changed, 14 insertions(+), 8 deletions(-)
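
As the changelog notes, the helper's return value is the subtle part. The sketch
below illustrates the calling convention that every transport hunk in this commit
follows; example_transport_handle_cqe() and example_transport_complete_rq() are
invented names for illustration, not code from this commit:

/*
 * Hypothetical transport completion handler showing how the renamed
 * helper is meant to be used (kernel context assumed; names invented).
 */
static void example_transport_handle_cqe(struct request *rq,
					 struct nvme_completion *cqe)
{
	/*
	 * nvme_try_complete_req() records status/result in the nvme_request
	 * and returns true if blk-mq took over completion (possibly via IPI).
	 */
	if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
		example_transport_complete_rq(rq);	/* finish it ourselves */
}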

Documentation/fault-injection/nvme-fault-injection.rst

@@ -3,7 +3,7 @@ NVMe Fault Injection
 Linux's fault injection framework provides a systematic way to support
 error injection via debugfs in the /sys/kernel/debug directory. When
 enabled, the default NVME_SC_INVALID_OPCODE with no retry will be
-injected into the nvme_end_request. Users can change the default status
+injected into the nvme_try_complete_req. Users can change the default status
 code and no retry flag via the debugfs. The list of Generic Command
 Status can be found in include/linux/nvme.h

drivers/nvme/host/fc.c

@@ -2035,7 +2035,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
 	}
 	__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
-	if (!nvme_end_request(rq, status, result))
+	if (!nvme_try_complete_req(rq, status, result))
 		nvme_fc_complete_rq(rq);
 check_error:

drivers/nvme/host/nvme.h

@@ -523,7 +523,13 @@ static inline u32 nvme_bytes_to_numd(size_t len)
 	return (len >> 2) - 1;
 }
-static inline bool nvme_end_request(struct request *req, __le16 status,
+/*
+ * Fill in the status and result information from the CQE, and then figure out
+ * if blk-mq will need to use IPI magic to complete the request, and if yes do
+ * so.  If not let the caller complete the request without an indirect function
+ * call.
+ */
+static inline bool nvme_try_complete_req(struct request *req, __le16 status,
 		union nvme_result result)
 {
 	struct nvme_request *rq = nvme_req(req);
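
The hunk above is cut off after the first line of the body. For reference, here is
a sketch of the full helper as it looks in this era of the tree; the calls to
nvme_should_fail(), blk_should_fake_timeout() and blk_mq_complete_request_remote()
are recalled from the surrounding kernel, not shown in this hunk, so treat the
details as approximate:

static inline bool nvme_try_complete_req(struct request *req, __le16 status,
		union nvme_result result)
{
	struct nvme_request *rq = nvme_req(req);

	/* record the CQE status (phase bit stripped) and result */
	rq->status = le16_to_cpu(status) >> 1;
	rq->result = result;
	/* inject error when permitted by fault injection framework */
	nvme_should_fail(req);
	if (unlikely(blk_should_fake_timeout(req->q)))
		return true;
	/* true if blk-mq completed the request, e.g. via an IPI */
	return blk_mq_complete_request_remote(req);
}

A false return is the caller's cue to invoke its own *_complete_rq() directly,
avoiding an indirect call; that is exactly the pattern in the transport hunks below.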

drivers/nvme/host/pci.c

@@ -961,7 +961,7 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
 	req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
 	trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
-	if (!nvme_end_request(req, cqe->status, cqe->result))
+	if (!nvme_try_complete_req(req, cqe->status, cqe->result))
 		nvme_pci_complete_rq(req);
 }

drivers/nvme/host/rdma.c

@@ -1189,7 +1189,7 @@ static void nvme_rdma_end_request(struct nvme_rdma_request *req)
 	if (!refcount_dec_and_test(&req->ref))
 		return;
-	if (!nvme_end_request(rq, req->status, req->result))
+	if (!nvme_try_complete_req(rq, req->status, req->result))
 		nvme_rdma_complete_rq(rq);
 }

drivers/nvme/host/tcp.c

@@ -481,7 +481,7 @@ static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
 		return -EINVAL;
 	}
-	if (!nvme_end_request(rq, cqe->status, cqe->result))
+	if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
 		nvme_complete_rq(rq);
 	queue->nr_cqe++;
@@ -672,7 +672,7 @@ static inline void nvme_tcp_end_request(struct request *rq, u16 status)
 {
	union nvme_result res = {};
-	if (!nvme_end_request(rq, cpu_to_le16(status << 1), res))
+	if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
 		nvme_complete_rq(rq);
 }
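
A note on the status << 1 in nvme_tcp_end_request() above: in the 16-bit CQE status
word, bit 0 is the phase tag, so a bare NVME_SC_* code has to be shifted up before it
is handed to the helper, which in turn stores it shifted back down. A tiny user-space
illustration of that round trip (standalone C, not kernel code):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t code = 0x2;		/* e.g. NVME_SC_INVALID_FIELD */
	uint16_t wire = code << 1;	/* bit 0 reserved for the phase tag */
	uint16_t stored = wire >> 1;	/* what nvme_try_complete_req() records */

	printf("code=0x%x wire=0x%x stored=0x%x\n",
	       (unsigned)code, (unsigned)wire, (unsigned)stored);
	return 0;
}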

drivers/nvme/target/loop.c

@@ -115,7 +115,7 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
 			return;
 		}
-		if (!nvme_end_request(rq, cqe->status, cqe->result))
+		if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
 			nvme_loop_complete_rq(rq);
 	}
 }