pidns: guarantee that the pidns init will be the last pidns process reaped

Today we have a twofold bug.  Sometimes release_task on pid == 1 in a pid
namespace can run before other processes in that pid namespace have had
release_task called, with the result that pid_ns_release_proc can be
called before the last proc_flush_task() is done using upid->ns->proc_mnt,
resulting in the use of a stale pointer.  The same set of circumstances
can lead to waitpid(...) returning for a process started with
clone(CLONE_NEWPID) before every process in the pid namespace has
actually exited.
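
For illustration only (this program is not part of the patch; the helper
name ns_init, the stack size, and the sleep interval are arbitrary choices
of this sketch), the waitpid() half of the bug can be pictured with a
small userspace program.  It needs privilege (CAP_SYS_ADMIN) and a kernel
built with CONFIG_PID_NS:

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static char init_stack[64 * 1024];      /* stack for the namespace init */

/* Runs as pid 1 of the new pid namespace. */
static int ns_init(void *arg)
{
        (void)arg;
        if (fork() == 0) {
                /* Grandchild: outlives the namespace init for a moment;
                 * the kernel kills and reaps it once the init exits. */
                usleep(100 * 1000);
                _exit(0);
        }
        _exit(0);       /* namespace init exits without waiting */
}

int main(void)
{
        pid_t pid = clone(ns_init, init_stack + sizeof(init_stack),
                          CLONE_NEWPID | SIGCHLD, NULL);

        if (pid < 0) {
                perror("clone");
                return 1;
        }
        if (waitpid(pid, NULL, 0) < 0)
                perror("waitpid");
        /*
         * Before the fix this point could be reached while tasks from the
         * namespace were still being torn down; with it, every task in the
         * namespace has gone through release_task() by the time the
         * namespace init is reaped.
         */
        printf("pid namespace init %d reaped\n", pid);
        return 0;
}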

To fix this, modify zap_pid_ns_processes to wait until all other processes
in the pid namespace have exited, even EXIT_DEAD zombies.

The delay_group_leader and related tests ensure that the thread group
leader will be the last thread of a thread group to be reaped, or to
become EXIT_DEAD and self reap.  With the change to zap_pid_ns_processes,
we get the guarantee that pid == 1 in a pid namespace will be the last
task that release_task is called on.
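
For context, this is a paraphrase of the test that paragraph refers to (a
sketch of the helper in kernel/exit.c, not a verbatim quote of this tree):

/*
 * A thread group leader with live subthreads is not reaped yet: the wait
 * paths (e.g. wait_consider_task()) skip such a leader, so release_task()
 * reaches the leader only after every other thread in the group is gone.
 */
#define delay_group_leader(p) \
        (thread_group_leader(p) && !thread_group_empty(p))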

With pid == 1 being the last task to pass through release_task,
pid_ns_release_proc can no longer be called too early, nor can wait return
before all of the EXIT_DEAD tasks in a pid namespace have exited.

Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: Louis Rilling <louis.rilling@kerlabs.com>
Cc: Mike Galbraith <efault@gmx.de>
Acked-by: Pavel Emelyanov <xemul@parallels.com>
Tested-by: Andrew Wagin <avagin@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author: Eric W. Biederman, 2012-06-20 12:53:03 -07:00, committed by Linus Torvalds
commit 6347e90091, parent f39cdaebb8
2 changed files with 33 additions and 1 deletion

kernel/exit.c

@@ -64,7 +64,6 @@ static void exit_mm(struct task_struct * tsk);
 static void __unhash_process(struct task_struct *p, bool group_dead)
 {
        nr_threads--;
-       detach_pid(p, PIDTYPE_PID);
        if (group_dead) {
                detach_pid(p, PIDTYPE_PGID);
                detach_pid(p, PIDTYPE_SID);
@@ -72,7 +71,20 @@ static void __unhash_process(struct task_struct *p, bool group_dead)
                list_del_rcu(&p->tasks);
                list_del_init(&p->sibling);
                __this_cpu_dec(process_counts);
+               /*
+                * If we are the last child process in a pid namespace to be
+                * reaped, notify the reaper sleeping zap_pid_ns_processes().
+                */
+               if (IS_ENABLED(CONFIG_PID_NS)) {
+                       struct task_struct *parent = p->real_parent;
+
+                       if ((task_active_pid_ns(p)->child_reaper == parent) &&
+                           list_empty(&parent->children) &&
+                           (parent->flags & PF_EXITING))
+                               wake_up_process(parent);
+               }
        }
+       detach_pid(p, PIDTYPE_PID);
        list_del_rcu(&p->thread_group);
 }

kernel/pid_namespace.c

@@ -184,11 +184,31 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
        }
        read_unlock(&tasklist_lock);

+       /* Firstly reap the EXIT_ZOMBIE children we may have. */
        do {
                clear_thread_flag(TIF_SIGPENDING);
                rc = sys_wait4(-1, NULL, __WALL, NULL);
        } while (rc != -ECHILD);

+       /*
+        * sys_wait4() above can't reap the TASK_DEAD children.
+        * Make sure they all go away, see __unhash_process().
+        */
+       for (;;) {
+               bool need_wait = false;
+
+               read_lock(&tasklist_lock);
+               if (!list_empty(&current->children)) {
+                       __set_current_state(TASK_UNINTERRUPTIBLE);
+                       need_wait = true;
+               }
+               read_unlock(&tasklist_lock);
+
+               if (!need_wait)
+                       break;
+               schedule();
+       }
+
        if (pid_ns->reboot)
                current->signal->group_exit_code = pid_ns->reboot;