mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-11-24 02:10:52 +07:00
task_work: cleanup notification modes
A previous commit changed the notification mode from true/false to an
int, allowing notify-no, notify-yes, or signal-notify. This was
backwards compatible in the sense that any existing true/false user
would translate to either 0 (no notification sent) or 1, the latter
which mapped to TWA_RESUME. TWA_SIGNAL was assigned a value of 2.
Clean this up properly, and define a proper enum for the notification
mode. Now we have:
- TWA_NONE. This is 0, same as before the original change, meaning no
notification requested.
- TWA_RESUME. This is 1, same as before the original change, meaning
that we use TIF_NOTIFY_RESUME.
- TWA_SIGNAL. This uses TIF_SIGPENDING/JOBCTL_TASK_WORK for the
notification.
Clean up all the callers, switching their 0/1/false/true to using the
appropriate TWA_* mode for notifications.
Fixes: e91b481623
("task_work: teach task_work_add() to do signal_wake_up()")
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
3c532798ec
commit
91989c7078
@ -1277,7 +1277,7 @@ static void queue_task_work(struct mce *m, int kill_it)
|
|||||||
else
|
else
|
||||||
current->mce_kill_me.func = kill_me_maybe;
|
current->mce_kill_me.func = kill_me_maybe;
|
||||||
|
|
||||||
task_work_add(current, ¤t->mce_kill_me, true);
|
task_work_add(current, ¤t->mce_kill_me, TWA_RESUME);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -561,7 +561,7 @@ static int __rdtgroup_move_task(struct task_struct *tsk,
|
|||||||
* callback has been invoked.
|
* callback has been invoked.
|
||||||
*/
|
*/
|
||||||
atomic_inc(&rdtgrp->waitcount);
|
atomic_inc(&rdtgrp->waitcount);
|
||||||
ret = task_work_add(tsk, &callback->work, true);
|
ret = task_work_add(tsk, &callback->work, TWA_RESUME);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
/*
|
/*
|
||||||
* Task is exiting. Drop the refcount and free the callback.
|
* Task is exiting. Drop the refcount and free the callback.
|
||||||
|
@ -879,7 +879,7 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
|
|||||||
estatus_node->task_work.func = ghes_kick_task_work;
|
estatus_node->task_work.func = ghes_kick_task_work;
|
||||||
estatus_node->task_work_cpu = smp_processor_id();
|
estatus_node->task_work_cpu = smp_processor_id();
|
||||||
ret = task_work_add(current, &estatus_node->task_work,
|
ret = task_work_add(current, &estatus_node->task_work,
|
||||||
true);
|
TWA_RESUME);
|
||||||
if (ret)
|
if (ret)
|
||||||
estatus_node->task_work.func = NULL;
|
estatus_node->task_work.func = NULL;
|
||||||
}
|
}
|
||||||
|
@ -2229,7 +2229,7 @@ static void binder_deferred_fd_close(int fd)
|
|||||||
__close_fd_get_file(fd, &twcb->file);
|
__close_fd_get_file(fd, &twcb->file);
|
||||||
if (twcb->file) {
|
if (twcb->file) {
|
||||||
filp_close(twcb->file, current->files);
|
filp_close(twcb->file, current->files);
|
||||||
task_work_add(current, &twcb->twork, true);
|
task_work_add(current, &twcb->twork, TWA_RESUME);
|
||||||
} else {
|
} else {
|
||||||
kfree(twcb);
|
kfree(twcb);
|
||||||
}
|
}
|
||||||
|
@ -339,7 +339,7 @@ void fput_many(struct file *file, unsigned int refs)
|
|||||||
|
|
||||||
if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
|
if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
|
||||||
init_task_work(&file->f_u.fu_rcuhead, ____fput);
|
init_task_work(&file->f_u.fu_rcuhead, ____fput);
|
||||||
if (!task_work_add(task, &file->f_u.fu_rcuhead, true))
|
if (!task_work_add(task, &file->f_u.fu_rcuhead, TWA_RESUME))
|
||||||
return;
|
return;
|
||||||
/*
|
/*
|
||||||
* After this task has run exit_task_work(),
|
* After this task has run exit_task_work(),
|
||||||
|
@ -1976,7 +1976,8 @@ static int io_req_task_work_add(struct io_kiocb *req, bool twa_signal_ok)
|
|||||||
{
|
{
|
||||||
struct task_struct *tsk = req->task;
|
struct task_struct *tsk = req->task;
|
||||||
struct io_ring_ctx *ctx = req->ctx;
|
struct io_ring_ctx *ctx = req->ctx;
|
||||||
int ret, notify;
|
enum task_work_notify_mode notify;
|
||||||
|
int ret;
|
||||||
|
|
||||||
if (tsk->flags & PF_EXITING)
|
if (tsk->flags & PF_EXITING)
|
||||||
return -ESRCH;
|
return -ESRCH;
|
||||||
@ -1987,7 +1988,7 @@ static int io_req_task_work_add(struct io_kiocb *req, bool twa_signal_ok)
|
|||||||
* processing task_work. There's no reliable way to tell if TWA_RESUME
|
* processing task_work. There's no reliable way to tell if TWA_RESUME
|
||||||
* will do the job.
|
* will do the job.
|
||||||
*/
|
*/
|
||||||
notify = 0;
|
notify = TWA_NONE;
|
||||||
if (!(ctx->flags & IORING_SETUP_SQPOLL) && twa_signal_ok)
|
if (!(ctx->flags & IORING_SETUP_SQPOLL) && twa_signal_ok)
|
||||||
notify = TWA_SIGNAL;
|
notify = TWA_SIGNAL;
|
||||||
|
|
||||||
@ -2056,7 +2057,7 @@ static void io_req_task_queue(struct io_kiocb *req)
|
|||||||
|
|
||||||
init_task_work(&req->task_work, io_req_task_cancel);
|
init_task_work(&req->task_work, io_req_task_cancel);
|
||||||
tsk = io_wq_get_task(req->ctx->io_wq);
|
tsk = io_wq_get_task(req->ctx->io_wq);
|
||||||
task_work_add(tsk, &req->task_work, 0);
|
task_work_add(tsk, &req->task_work, TWA_NONE);
|
||||||
wake_up_process(tsk);
|
wake_up_process(tsk);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -2177,7 +2178,7 @@ static void io_free_req_deferred(struct io_kiocb *req)
|
|||||||
struct task_struct *tsk;
|
struct task_struct *tsk;
|
||||||
|
|
||||||
tsk = io_wq_get_task(req->ctx->io_wq);
|
tsk = io_wq_get_task(req->ctx->io_wq);
|
||||||
task_work_add(tsk, &req->task_work, 0);
|
task_work_add(tsk, &req->task_work, TWA_NONE);
|
||||||
wake_up_process(tsk);
|
wake_up_process(tsk);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -3291,7 +3292,7 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
|
|||||||
/* queue just for cancelation */
|
/* queue just for cancelation */
|
||||||
init_task_work(&req->task_work, io_req_task_cancel);
|
init_task_work(&req->task_work, io_req_task_cancel);
|
||||||
tsk = io_wq_get_task(req->ctx->io_wq);
|
tsk = io_wq_get_task(req->ctx->io_wq);
|
||||||
task_work_add(tsk, &req->task_work, 0);
|
task_work_add(tsk, &req->task_work, TWA_NONE);
|
||||||
wake_up_process(tsk);
|
wake_up_process(tsk);
|
||||||
}
|
}
|
||||||
return 1;
|
return 1;
|
||||||
@ -4857,7 +4858,7 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
|
|||||||
|
|
||||||
WRITE_ONCE(poll->canceled, true);
|
WRITE_ONCE(poll->canceled, true);
|
||||||
tsk = io_wq_get_task(req->ctx->io_wq);
|
tsk = io_wq_get_task(req->ctx->io_wq);
|
||||||
task_work_add(tsk, &req->task_work, 0);
|
task_work_add(tsk, &req->task_work, TWA_NONE);
|
||||||
wake_up_process(tsk);
|
wake_up_process(tsk);
|
||||||
}
|
}
|
||||||
return 1;
|
return 1;
|
||||||
|
@ -1191,7 +1191,7 @@ static void mntput_no_expire(struct mount *mnt)
|
|||||||
struct task_struct *task = current;
|
struct task_struct *task = current;
|
||||||
if (likely(!(task->flags & PF_KTHREAD))) {
|
if (likely(!(task->flags & PF_KTHREAD))) {
|
||||||
init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
|
init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
|
||||||
if (!task_work_add(task, &mnt->mnt_rcu, true))
|
if (!task_work_add(task, &mnt->mnt_rcu, TWA_RESUME))
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
|
if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
|
||||||
|
@ -13,9 +13,14 @@ init_task_work(struct callback_head *twork, task_work_func_t func)
|
|||||||
twork->func = func;
|
twork->func = func;
|
||||||
}
|
}
|
||||||
|
|
||||||
#define TWA_RESUME 1
|
enum task_work_notify_mode {
|
||||||
#define TWA_SIGNAL 2
|
TWA_NONE,
|
||||||
int task_work_add(struct task_struct *task, struct callback_head *twork, int);
|
TWA_RESUME,
|
||||||
|
TWA_SIGNAL,
|
||||||
|
};
|
||||||
|
|
||||||
|
int task_work_add(struct task_struct *task, struct callback_head *twork,
|
||||||
|
enum task_work_notify_mode mode);
|
||||||
|
|
||||||
struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t);
|
struct callback_head *task_work_cancel(struct task_struct *, task_work_func_t);
|
||||||
void task_work_run(void);
|
void task_work_run(void);
|
||||||
|
@ -1823,7 +1823,7 @@ void uprobe_copy_process(struct task_struct *t, unsigned long flags)
|
|||||||
|
|
||||||
t->utask->dup_xol_addr = area->vaddr;
|
t->utask->dup_xol_addr = area->vaddr;
|
||||||
init_task_work(&t->utask->dup_xol_work, dup_xol_work);
|
init_task_work(&t->utask->dup_xol_work, dup_xol_work);
|
||||||
task_work_add(t, &t->utask->dup_xol_work, true);
|
task_work_add(t, &t->utask->dup_xol_work, TWA_RESUME);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -1162,7 +1162,7 @@ static int irq_thread(void *data)
|
|||||||
handler_fn = irq_thread_fn;
|
handler_fn = irq_thread_fn;
|
||||||
|
|
||||||
init_task_work(&on_exit_work, irq_thread_dtor);
|
init_task_work(&on_exit_work, irq_thread_dtor);
|
||||||
task_work_add(current, &on_exit_work, false);
|
task_work_add(current, &on_exit_work, TWA_NONE);
|
||||||
|
|
||||||
irq_thread_check_affinity(desc, action);
|
irq_thread_check_affinity(desc, action);
|
||||||
|
|
||||||
|
@ -2928,7 +2928,7 @@ static void task_tick_numa(struct rq *rq, struct task_struct *curr)
|
|||||||
curr->node_stamp += period;
|
curr->node_stamp += period;
|
||||||
|
|
||||||
if (!time_before(jiffies, curr->mm->numa_next_scan))
|
if (!time_before(jiffies, curr->mm->numa_next_scan))
|
||||||
task_work_add(curr, work, true);
|
task_work_add(curr, work, TWA_RESUME);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -9,23 +9,28 @@ static struct callback_head work_exited; /* all we need is ->next == NULL */
|
|||||||
* task_work_add - ask the @task to execute @work->func()
|
* task_work_add - ask the @task to execute @work->func()
|
||||||
* @task: the task which should run the callback
|
* @task: the task which should run the callback
|
||||||
* @work: the callback to run
|
* @work: the callback to run
|
||||||
* @notify: send the notification if true
|
* @notify: how to notify the targeted task
|
||||||
*
|
*
|
||||||
* Queue @work for task_work_run() below and notify the @task if @notify.
|
* Queue @work for task_work_run() below and notify the @task if @notify
|
||||||
* Fails if the @task is exiting/exited and thus it can't process this @work.
|
* is @TWA_RESUME or @TWA_SIGNAL. @TWA_SIGNAL works like signals, in that the
|
||||||
* Otherwise @work->func() will be called when the @task returns from kernel
|
* it will interrupt the targeted task and run the task_work. @TWA_RESUME
|
||||||
* mode or exits.
|
* work is run only when the task exits the kernel and returns to user mode,
|
||||||
|
* or before entering guest mode. Fails if the @task is exiting/exited and thus
|
||||||
|
* it can't process this @work. Otherwise @work->func() will be called when the
|
||||||
|
* @task goes through one of the aforementioned transitions, or exits.
|
||||||
*
|
*
|
||||||
* This is like the signal handler which runs in kernel mode, but it doesn't
|
* If the targeted task is exiting, then an error is returned and the work item
|
||||||
* try to wake up the @task.
|
* is not queued. It's up to the caller to arrange for an alternative mechanism
|
||||||
|
* in that case.
|
||||||
*
|
*
|
||||||
* Note: there is no ordering guarantee on works queued here.
|
* Note: there is no ordering guarantee on works queued here. The task_work
|
||||||
|
* list is LIFO.
|
||||||
*
|
*
|
||||||
* RETURNS:
|
* RETURNS:
|
||||||
* 0 if succeeds or -ESRCH.
|
* 0 if succeeds or -ESRCH.
|
||||||
*/
|
*/
|
||||||
int
|
int task_work_add(struct task_struct *task, struct callback_head *work,
|
||||||
task_work_add(struct task_struct *task, struct callback_head *work, int notify)
|
enum task_work_notify_mode notify)
|
||||||
{
|
{
|
||||||
struct callback_head *head;
|
struct callback_head *head;
|
||||||
unsigned long flags;
|
unsigned long flags;
|
||||||
@ -38,6 +43,8 @@ task_work_add(struct task_struct *task, struct callback_head *work, int notify)
|
|||||||
} while (cmpxchg(&task->task_works, head, work) != head);
|
} while (cmpxchg(&task->task_works, head, work) != head);
|
||||||
|
|
||||||
switch (notify) {
|
switch (notify) {
|
||||||
|
case TWA_NONE:
|
||||||
|
break;
|
||||||
case TWA_RESUME:
|
case TWA_RESUME:
|
||||||
set_notify_resume(task);
|
set_notify_resume(task);
|
||||||
break;
|
break;
|
||||||
@ -54,6 +61,9 @@ task_work_add(struct task_struct *task, struct callback_head *work, int notify)
|
|||||||
unlock_task_sighand(task, &flags);
|
unlock_task_sighand(task, &flags);
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
|
default:
|
||||||
|
WARN_ON_ONCE(1);
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -1693,7 +1693,7 @@ long keyctl_session_to_parent(void)
|
|||||||
|
|
||||||
/* the replacement session keyring is applied just prior to userspace
|
/* the replacement session keyring is applied just prior to userspace
|
||||||
* restarting */
|
* restarting */
|
||||||
ret = task_work_add(parent, newwork, true);
|
ret = task_work_add(parent, newwork, TWA_RESUME);
|
||||||
if (!ret)
|
if (!ret)
|
||||||
newwork = NULL;
|
newwork = NULL;
|
||||||
unlock:
|
unlock:
|
||||||
|
@ -99,7 +99,7 @@ static void report_access(const char *access, struct task_struct *target,
|
|||||||
info->access = access;
|
info->access = access;
|
||||||
info->target = target;
|
info->target = target;
|
||||||
info->agent = agent;
|
info->agent = agent;
|
||||||
if (task_work_add(current, &info->work, true) == 0)
|
if (task_work_add(current, &info->work, TWA_RESUME) == 0)
|
||||||
return; /* success */
|
return; /* success */
|
||||||
|
|
||||||
WARN(1, "report_access called from exiting task");
|
WARN(1, "report_access called from exiting task");
|
||||||
|
Loading…
Reference in New Issue
Block a user