epoll: do not insert into poll queues until all sanity checks are done

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
This commit is contained in:
Al Viro 2020-09-09 22:25:06 -04:00
parent 77f4689de1
commit f8d4f44df0

View File

@@ -1522,6 +1522,22 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
RCU_INIT_POINTER(epi->ws, NULL);
}
/* Add the current item to the list of active epoll hook for this file */
spin_lock(&tfile->f_lock);
list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
spin_unlock(&tfile->f_lock);
/*
* Add the current item to the RB tree. All RB tree operations are
* protected by "mtx", and ep_insert() is called with "mtx" held.
*/
ep_rbtree_insert(ep, epi);
/* now check if we've created too many backpaths */
error = -EINVAL;
if (full_check && reverse_path_check())
goto error_remove_epi;
/* Initialize the poll table using the queue callback */
epq.epi = epi;
init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
@@ -1544,22 +1560,6 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
if (epi->nwait < 0)
	goto error_unregister;
/* Add the current item to the list of active epoll hook for this file */
spin_lock(&tfile->f_lock);
list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links);
spin_unlock(&tfile->f_lock);
/*
* Add the current item to the RB tree. All RB tree operations are
* protected by "mtx", and ep_insert() is called with "mtx" held.
*/
ep_rbtree_insert(ep, epi);
/* now check if we've created too many backpaths */
error = -EINVAL;
if (full_check && reverse_path_check())
goto error_remove_epi;
/* We have to drop the new item inside our item list to keep track of it */
write_lock_irq(&ep->lock);
@@ -1588,6 +1588,8 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
return 0;
error_unregister:
ep_unregister_pollwait(ep, epi);
error_remove_epi:
spin_lock(&tfile->f_lock);
list_del_rcu(&epi->fllink);
@@ -1595,9 +1597,6 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
rb_erase_cached(&epi->rbn, &ep->rbr);
error_unregister:
ep_unregister_pollwait(ep, epi);
/*
 * We need to do this because an event could have been arrived on some
 * allocated wait queue. Note that we don't care about the ep->ovflist