aio: keep io_event in aio_kiocb

We want to separate forming the resulting io_event from putting it
into the ring buffer.
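
In other words, the identifying half of the event (obj and data) is known at
submission time and the status half (res and res2) at completion time; once
both halves live in the request itself, delivering the event becomes a plain
struct copy.  A rough sketch of the resulting flow (field layout follows
struct io_event from include/uapi/linux/aio_abi.h; the fragments are condensed
from the hunks below, with comments added here purely for illustration):

	struct io_event {
		__u64	data;	/* the data field from the iocb */
		__u64	obj;	/* what iocb this event came from */
		__s64	res;	/* result code for this event */
		__s64	res2;	/* secondary result */
	};

	/* __io_submit_one(): record which iocb the event belongs to */
	req->ki_res.obj  = (u64)(unsigned long)user_iocb;
	req->ki_res.data = iocb->aio_data;
	req->ki_res.res  = 0;
	req->ki_res.res2 = 0;

	/* aio_complete(): record the outcome ... */
	iocb->ki_res.res = res;
	iocb->ki_res.res2 = res2;

	/* ... and copy the finished event into the ring slot */
	*event = iocb->ki_res;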

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

Author: Al Viro
Date:   2019-03-07 19:43:45 -05:00
parent 833f4154ed
commit a9339b7855

--- a/fs/aio.c
+++ b/fs/aio.c
@@ -204,8 +204,7 @@ struct aio_kiocb {
 	struct kioctx		*ki_ctx;
 	kiocb_cancel_fn		*ki_cancel;
 
-	struct iocb __user	*ki_user_iocb;	/* user's aiocb */
-	__u64			ki_user_data;	/* user's data for completion */
+	struct io_event		ki_res;
 
 	struct list_head	ki_list;	/* the aio core uses this
 						 * for cancellation */
@@ -1084,15 +1083,6 @@ static inline void iocb_put(struct aio_kiocb *iocb)
 		iocb_destroy(iocb);
 }
 
-static void aio_fill_event(struct io_event *ev, struct aio_kiocb *iocb,
-			   long res, long res2)
-{
-	ev->obj = (u64)(unsigned long)iocb->ki_user_iocb;
-	ev->data = iocb->ki_user_data;
-	ev->res = res;
-	ev->res2 = res2;
-}
-
 /* aio_complete
  *	Called when the io request on the given iocb is complete.
  */
@@ -1104,6 +1094,8 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
 	unsigned tail, pos, head;
 	unsigned long	flags;
 
+	iocb->ki_res.res = res;
+	iocb->ki_res.res2 = res2;
 	/*
 	 * Add a completion event to the ring buffer. Must be done holding
 	 * ctx->completion_lock to prevent other code from messing with the tail
@@ -1120,14 +1112,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
 	ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
 	event = ev_page + pos % AIO_EVENTS_PER_PAGE;
 
-	aio_fill_event(event, iocb, res, res2);
+	*event = iocb->ki_res;
 
 	kunmap_atomic(ev_page);
 	flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
 
-	pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
-		 ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data,
-		 res, res2);
+	pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
+		 (void __user *)(unsigned long)iocb->ki_res.obj,
+		 iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2);
 
 	/* after flagging the request as done, we
 	 * must never even look at it again
@@ -1844,8 +1836,10 @@ static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
 		goto out_put_req;
 	}
 
-	req->ki_user_iocb = user_iocb;
-	req->ki_user_data = iocb->aio_data;
+	req->ki_res.obj = (u64)(unsigned long)user_iocb;
+	req->ki_res.data = iocb->aio_data;
+	req->ki_res.res = 0;
+	req->ki_res.res2 = 0;
 
 	switch (iocb->aio_lio_opcode) {
 	case IOCB_CMD_PREAD:
@@ -2019,6 +2013,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
 	struct aio_kiocb *kiocb;
 	int ret = -EINVAL;
 	u32 key;
+	u64 obj = (u64)(unsigned long)iocb;
 
 	if (unlikely(get_user(key, &iocb->aio_key)))
 		return -EFAULT;
@@ -2032,7 +2027,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
 	spin_lock_irq(&ctx->ctx_lock);
 	/* TODO: use a hash or array, this sucks. */
 	list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
-		if (kiocb->ki_user_iocb == iocb) {
+		if (kiocb->ki_res.obj == obj) {
 			ret = kiocb->ki_cancel(&kiocb->rw);
 			list_del_init(&kiocb->ki_list);
 			break;