io_uring: make submission ref putting consistent
The rule is simple: any async handler gets a submission ref and should
put it at the end. Make them all follow it, and so be more consistent.

This is a preparation patch, and as io_wq_assign_next() currently won't
ever work, it doesn't bother to use io_put_req_find_next() instead of
io_put_req().

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>

refcount_inc_not_zero() -> refcount_inc() fix.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent a2100672f3
commit 594506fec5
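As a rough illustration of that rule (a standalone sketch with made-up types, not the io_uring code itself): an async handler owns the submission reference handed to it and drops it itself, at the end of the handler, instead of the submit path putting it early.

/*
 * Minimal sketch of the refcounting rule, with made-up types; only the
 * references are modelled, none of the real io_uring machinery.
 */
#include <stdio.h>
#include <stdlib.h>

struct request {
	int refs;			/* stand-in for req->refs */
};

static void put_req(struct request *req)
{
	if (--req->refs == 0) {
		printf("request freed\n");
		free(req);
	}
}

/* Async handler: it owns the submission ref and puts it at the end. */
static void async_finish(struct request *req)
{
	printf("doing the blocking work\n");
	put_req(req);			/* drop submission reference */
}

int main(void)
{
	struct request *req = malloc(sizeof(*req));

	req->refs = 2;			/* submission ref + completion ref */
	async_finish(req);		/* handler drops the submission ref */
	put_req(req);			/* completion side drops the last ref */
	return 0;
}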
@@ -2550,7 +2550,7 @@ static bool io_req_cancelled(struct io_kiocb *req)
 	if (req->work.flags & IO_WQ_WORK_CANCEL) {
 		req_set_fail_links(req);
 		io_cqring_add_event(req, -ECANCELED);
-		io_put_req(req);
+		io_double_put_req(req);
 		return true;
 	}
 
@@ -2600,6 +2600,7 @@ static void io_fsync_finish(struct io_wq_work **workptr)
 	if (io_req_cancelled(req))
 		return;
 	__io_fsync(req, &nxt);
+	io_put_req(req); /* drop submission reference */
 	if (nxt)
 		io_wq_assign_next(workptr, nxt);
 }
@@ -2609,7 +2610,6 @@ static int io_fsync(struct io_kiocb *req, struct io_kiocb **nxt,
 {
 	/* fsync always requires a blocking context */
 	if (force_nonblock) {
-		io_put_req(req);
 		req->work.func = io_fsync_finish;
 		return -EAGAIN;
 	}
@@ -2621,9 +2621,6 @@ static void __io_fallocate(struct io_kiocb *req, struct io_kiocb **nxt)
 {
 	int ret;
 
-	if (io_req_cancelled(req))
-		return;
-
 	ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
 				req->sync.len);
 	if (ret < 0)
@@ -2637,7 +2634,10 @@ static void io_fallocate_finish(struct io_wq_work **workptr)
 	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
 	struct io_kiocb *nxt = NULL;
 
+	if (io_req_cancelled(req))
+		return;
 	__io_fallocate(req, &nxt);
+	io_put_req(req); /* drop submission reference */
 	if (nxt)
 		io_wq_assign_next(workptr, nxt);
 }
@@ -2659,7 +2659,6 @@ static int io_fallocate(struct io_kiocb *req, struct io_kiocb **nxt,
 {
 	/* fallocate always requiring blocking context */
 	if (force_nonblock) {
-		io_put_req(req);
 		req->work.func = io_fallocate_finish;
 		return -EAGAIN;
 	}
@@ -3015,6 +3014,7 @@ static void io_close_finish(struct io_wq_work **workptr)
 
 	/* not cancellable, don't do io_req_cancelled() */
 	__io_close_finish(req, &nxt);
+	io_put_req(req); /* drop submission reference */
 	if (nxt)
 		io_wq_assign_next(workptr, nxt);
 }
@@ -3031,6 +3031,9 @@ static int io_close(struct io_kiocb *req, struct io_kiocb **nxt,
 
 	/* if the file has a flush method, be safe and punt to async */
 	if (req->close.put_file->f_op->flush && force_nonblock) {
+		/* submission ref will be dropped, take it for async */
+		refcount_inc(&req->refs);
+
 		req->work.func = io_close_finish;
 		/*
 		 * Do manual async queue here to avoid grabbing files - we don't
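The io_close hunk above is the one place a reference is taken rather than a put removed: the request is punted to async manually while the submit path still drops the submission ref, so an extra reference is needed for the async handler to put. A rough sketch of that hand-off, again with made-up names and only the submission reference modelled:

/* Illustrative hand-off only; not the io_uring close path itself. */
#include <stdio.h>
#include <stdlib.h>

struct request {
	int refs;
};

static void put_req(struct request *req)
{
	if (--req->refs == 0) {
		printf("request freed\n");
		free(req);
	}
}

/* Async handler: owns one reference and puts it when it is done. */
static void close_finish(struct request *req)
{
	printf("flush and close the file\n");
	put_req(req);			/* drop the async-side reference */
}

/* Submit path: punts to async, then drops its own submission ref. */
static void submit_close(struct request *req)
{
	/* submission ref will be dropped below, take one for async */
	req->refs++;
	close_finish(req);		/* stands in for queueing async work */
	put_req(req);			/* submit path drops the submission ref */
}

int main(void)
{
	struct request *req = malloc(sizeof(*req));

	req->refs = 1;			/* submission reference only */
	submit_close(req);
	return 0;
}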
@@ -3088,6 +3091,7 @@ static void io_sync_file_range_finish(struct io_wq_work **workptr)
 	if (io_req_cancelled(req))
 		return;
 	__io_sync_file_range(req, &nxt);
+	io_put_req(req); /* put submission ref */
 	if (nxt)
 		io_wq_assign_next(workptr, nxt);
 }
@@ -3097,7 +3101,6 @@ static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt,
 {
 	/* sync_file_range always requires a blocking context */
 	if (force_nonblock) {
-		io_put_req(req);
 		req->work.func = io_sync_file_range_finish;
 		return -EAGAIN;
 	}
@@ -3464,11 +3467,10 @@ static void io_accept_finish(struct io_wq_work **workptr)
 	struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
 	struct io_kiocb *nxt = NULL;
 
-	io_put_req(req);
-
 	if (io_req_cancelled(req))
 		return;
 	__io_accept(req, &nxt, false);
+	io_put_req(req); /* drop submission reference */
 	if (nxt)
 		io_wq_assign_next(workptr, nxt);
 }
@@ -4734,17 +4736,14 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
 		} while (1);
 	}
 
-	/* drop submission reference */
-	io_put_req(req);
-
 	if (ret) {
 		req_set_fail_links(req);
 		io_cqring_add_event(req, ret);
 		io_put_req(req);
 	}
 
-	/* if a dependent link is ready, pass it back */
-	if (!ret && nxt)
+	io_put_req(req); /* drop submission reference */
+	if (nxt)
 		io_wq_assign_next(workptr, nxt);
 }
 