/*
 * Copyright 1997-1998 Transmeta Corporation -- All Rights Reserved
 * Copyright 2001-2006 Ian Kent <raven@themaw.net>
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 */

#include <linux/slab.h>
#include <linux/time.h>
#include <linux/signal.h>
#include <linux/file.h>
#include "autofs_i.h"

/* We make this a static variable rather than a part of the superblock; it
 * is better if we don't reassign numbers easily even across filesystems
 */
static autofs_wqt_t autofs4_next_wait_queue = 1;

/* These are the signals we allow interrupting a pending mount */
#define SHUTDOWN_SIGS (sigmask(SIGKILL) | sigmask(SIGINT) | sigmask(SIGQUIT))

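/*
 * Put the filesystem into catatonic mode: fail and wake every pending
 * wait, close the daemon pipe and mark the superblock so that any new
 * wait requests are refused.
 */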
void autofs4_catatonic_mode(struct autofs_sb_info *sbi)
{
	struct autofs_wait_queue *wq, *nwq;

	mutex_lock(&sbi->wq_mutex);
	if (sbi->catatonic) {
		mutex_unlock(&sbi->wq_mutex);
		return;
	}

	pr_debug("entering catatonic mode\n");

	sbi->catatonic = 1;
	wq = sbi->queues;
	sbi->queues = NULL;	/* Erase all wait queues */
	while (wq) {
		nwq = wq->next;
		wq->status = -ENOENT; /* Magic is gone - report failure */
		kfree(wq->name.name);
		wq->name.name = NULL;
		wq->wait_ctr--;
		wake_up_interruptible(&wq->queue);
		wq = nwq;
	}
	fput(sbi->pipe);	/* Close the pipe */
	sbi->pipe = NULL;
	sbi->pipefd = -1;
	mutex_unlock(&sbi->wq_mutex);
}

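/*
 * Write a packet to the daemon's pipe, retrying on short writes.
 * A SIGPIPE raised by the write is discarded unless the process
 * already had one pending, so a dead daemon does not kill the caller.
 * Returns non-zero if the full packet could not be written.
 */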
static int autofs4_write(struct autofs_sb_info *sbi,
			 struct file *file, const void *addr, int bytes)
{
	unsigned long sigpipe, flags;
	mm_segment_t fs;
	const char *data = (const char *)addr;
	ssize_t wr = 0;

	sigpipe = sigismember(&current->pending.signal, SIGPIPE);

	/* Save pointer to user space and point back to kernel space */
	fs = get_fs();
	set_fs(KERNEL_DS);

	mutex_lock(&sbi->pipe_mutex);
	while (bytes) {
		wr = __vfs_write(file, data, bytes, &file->f_pos);
		if (wr <= 0)
			break;
		data += wr;
		bytes -= wr;
	}
	mutex_unlock(&sbi->pipe_mutex);

	set_fs(fs);

	/* Keep the currently executing process from receiving a
	 * SIGPIPE unless it was already supposed to get one
	 */
	if (wr == -EPIPE && !sigpipe) {
		spin_lock_irqsave(&current->sighand->siglock, flags);
		sigdelset(&current->pending.signal, SIGPIPE);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, flags);
	}

	return (bytes > 0);
}

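/*
 * Build a missing/expire packet matching the daemon's protocol version
 * and send it down the pipe. Called with wq_mutex held; the mutex is
 * dropped before writing. If the write fails the fs is made catatonic.
 */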
static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
				  struct autofs_wait_queue *wq,
				  int type)
{
	union {
		struct autofs_packet_hdr hdr;
		union autofs_packet_union v4_pkt;
		union autofs_v5_packet_union v5_pkt;
	} pkt;
	struct file *pipe = NULL;
	size_t pktsz;

	pr_debug("wait id = 0x%08lx, name = %.*s, type=%d\n",
		 (unsigned long) wq->wait_queue_token,
		 wq->name.len, wq->name.name, type);

	memset(&pkt, 0, sizeof(pkt)); /* For security reasons */

	pkt.hdr.proto_version = sbi->version;
	pkt.hdr.type = type;

	switch (type) {
	/* Kernel protocol v4 missing and expire packets */
	case autofs_ptype_missing:
	{
		struct autofs_packet_missing *mp = &pkt.v4_pkt.missing;

		pktsz = sizeof(*mp);

		mp->wait_queue_token = wq->wait_queue_token;
		mp->len = wq->name.len;
		memcpy(mp->name, wq->name.name, wq->name.len);
		mp->name[wq->name.len] = '\0';
		break;
	}
	case autofs_ptype_expire_multi:
	{
		struct autofs_packet_expire_multi *ep =
			&pkt.v4_pkt.expire_multi;

		pktsz = sizeof(*ep);

		ep->wait_queue_token = wq->wait_queue_token;
		ep->len = wq->name.len;
		memcpy(ep->name, wq->name.name, wq->name.len);
		ep->name[wq->name.len] = '\0';
		break;
	}
	/*
	 * Kernel protocol v5 packet for handling indirect and direct
	 * mount missing and expire requests
	 */
	case autofs_ptype_missing_indirect:
	case autofs_ptype_expire_indirect:
	case autofs_ptype_missing_direct:
	case autofs_ptype_expire_direct:
	{
		struct autofs_v5_packet *packet = &pkt.v5_pkt.v5_packet;
		struct user_namespace *user_ns = sbi->pipe->f_cred->user_ns;

		pktsz = sizeof(*packet);

		packet->wait_queue_token = wq->wait_queue_token;
		packet->len = wq->name.len;
		memcpy(packet->name, wq->name.name, wq->name.len);
		packet->name[wq->name.len] = '\0';
		packet->dev = wq->dev;
		packet->ino = wq->ino;
		packet->uid = from_kuid_munged(user_ns, wq->uid);
		packet->gid = from_kgid_munged(user_ns, wq->gid);
		packet->pid = wq->pid;
		packet->tgid = wq->tgid;
		break;
	}
	default:
		pr_warn("bad type %d!\n", type);
		mutex_unlock(&sbi->wq_mutex);
		return;
	}

	pipe = get_file(sbi->pipe);

	mutex_unlock(&sbi->wq_mutex);

	if (autofs4_write(sbi, pipe, &pkt, pktsz))
		autofs4_catatonic_mode(sbi);
	fput(pipe);
}

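/*
 * Build the path of @dentry relative to the autofs root into *name.
 * The walk is retried if a concurrent rename changes the tree.
 * Returns the path length, or 0 if it is empty or exceeds NAME_MAX.
 */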
static int autofs4_getpath(struct autofs_sb_info *sbi,
			   struct dentry *dentry, char **name)
{
	struct dentry *root = sbi->sb->s_root;
	struct dentry *tmp;
	char *buf;
	char *p;
	int len;
	unsigned seq;

rename_retry:
	buf = *name;
	len = 0;

	seq = read_seqbegin(&rename_lock);
	rcu_read_lock();
	spin_lock(&sbi->fs_lock);
	for (tmp = dentry ; tmp != root ; tmp = tmp->d_parent)
		len += tmp->d_name.len + 1;

	if (!len || --len > NAME_MAX) {
		spin_unlock(&sbi->fs_lock);
		rcu_read_unlock();
		if (read_seqretry(&rename_lock, seq))
			goto rename_retry;
		return 0;
	}

	*(buf + len) = '\0';
	p = buf + len - dentry->d_name.len;
	strncpy(p, dentry->d_name.name, dentry->d_name.len);

	for (tmp = dentry->d_parent; tmp != root ; tmp = tmp->d_parent) {
		*(--p) = '/';
		p -= tmp->d_name.len;
		strncpy(p, tmp->d_name.name, tmp->d_name.len);
	}
	spin_unlock(&sbi->fs_lock);
	rcu_read_unlock();
	if (read_seqretry(&rename_lock, seq))
		goto rename_retry;

	return len;
}

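/*
 * Look up an existing wait queue entry for the given name.
 * Called with wq_mutex held. Returns NULL if none is queued.
 */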
static struct autofs_wait_queue *
autofs4_find_wait(struct autofs_sb_info *sbi, const struct qstr *qstr)
{
	struct autofs_wait_queue *wq;

	for (wq = sbi->queues; wq; wq = wq->next) {
		if (wq->name.hash == qstr->hash &&
		    wq->name.len == qstr->len &&
		    wq->name.name &&
		    !memcmp(wq->name.name, qstr->name, qstr->len))
			break;
	}
	return wq;
}

/*
 * Check if we have a valid request.
 * Returns
 * 1 if the request should continue.
 * In this case we can return an autofs_wait_queue entry if one is
 * found or NULL to indicate a new wait needs to be created.
 * 0 or a negative errno if the request shouldn't continue.
 */
static int validate_request(struct autofs_wait_queue **wait,
			    struct autofs_sb_info *sbi,
			    const struct qstr *qstr,
			    struct dentry *dentry, enum autofs_notify notify)
{
	struct autofs_wait_queue *wq;
	struct autofs_info *ino;

	if (sbi->catatonic)
		return -ENOENT;

	/* Wait in progress, continue; */
	wq = autofs4_find_wait(sbi, qstr);
	if (wq) {
		*wait = wq;
		return 1;
	}

	*wait = NULL;

	/* If we don't yet have any info this is a new request */
	ino = autofs4_dentry_ino(dentry);
	if (!ino)
		return 1;

	/*
	 * If we've been asked to wait on an existing expire (NFY_NONE)
	 * but there is no wait in the queue ...
	 */
	if (notify == NFY_NONE) {
		/*
		 * Either we've beaten the pending expire to post its
		 * wait or it finished while we waited on the mutex.
		 * So we need to wait till either the wait appears
		 * or the expire finishes.
		 */
		while (ino->flags & AUTOFS_INF_EXPIRING) {
			mutex_unlock(&sbi->wq_mutex);
			schedule_timeout_interruptible(HZ/10);
			if (mutex_lock_interruptible(&sbi->wq_mutex))
				return -EINTR;

			if (sbi->catatonic)
				return -ENOENT;

			wq = autofs4_find_wait(sbi, qstr);
			if (wq) {
				*wait = wq;
				return 1;
			}
		}

		/*
		 * Not ideal but the status has already gone. Of the two
		 * cases where we wait on NFY_NONE neither depend on the
		 * return status of the wait.
		 */
		return 0;
	}

	/*
	 * If we've been asked to trigger a mount and the request
	 * completed while we waited on the mutex ...
	 */
	if (notify == NFY_MOUNT) {
		struct dentry *new = NULL;
		int valid = 1;

		/*
		 * If the dentry was successfully mounted while we slept
		 * on the wait queue mutex we can return success. If it
		 * isn't mounted (doesn't have submounts for the case of
		 * a multi-mount with no mount at its base) we can
		 * continue on and create a new request.
		 */
		if (!IS_ROOT(dentry)) {
			if (d_unhashed(dentry) &&
			    d_really_is_positive(dentry)) {
				struct dentry *parent = dentry->d_parent;

				new = d_lookup(parent, &dentry->d_name);
				if (new)
					dentry = new;
			}
		}
		if (have_submounts(dentry))
			valid = 0;

		if (new)
			dput(new);
		return valid;
	}

	return 1;
}

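/*
 * Wait for a mount or expire request on @path to be serviced by the
 * daemon. An existing wait for the same name is joined, otherwise a
 * new wait queue entry is created and the daemon is notified. Returns
 * the status posted by autofs4_wait_release(), or an errno.
 */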
int autofs4_wait(struct autofs_sb_info *sbi,
		 const struct path *path, enum autofs_notify notify)
{
	struct dentry *dentry = path->dentry;
	struct autofs_wait_queue *wq;
	struct qstr qstr;
	char *name;
	int status, ret, type;
	pid_t pid;
	pid_t tgid;

	/* In catatonic mode, we don't wait for anybody */
	if (sbi->catatonic)
		return -ENOENT;

	/*
	 * Try translating pids to the namespace of the daemon.
	 *
	 * Zero means failure: we are in an unrelated pid namespace.
	 */
	pid = task_pid_nr_ns(current, ns_of_pid(sbi->oz_pgrp));
	tgid = task_tgid_nr_ns(current, ns_of_pid(sbi->oz_pgrp));
	if (pid == 0 || tgid == 0)
		return -ENOENT;

	if (d_really_is_negative(dentry)) {
		/*
		 * A wait for a negative dentry is invalid for certain
		 * cases. A direct or offset mount "always" has its mount
		 * point directory created and so the request dentry must
		 * be positive or the map key doesn't exist. The situation
		 * is very similar for indirect mounts except only dentries
		 * in the root of the autofs file system may be negative.
		 */
		if (autofs_type_trigger(sbi->type))
			return -ENOENT;
		else if (!IS_ROOT(dentry->d_parent))
			return -ENOENT;
	}

	name = kmalloc(NAME_MAX + 1, GFP_KERNEL);
	if (!name)
		return -ENOMEM;

	/* If this is a direct mount request create a dummy name */
	if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
		qstr.len = sprintf(name, "%p", dentry);
	else {
		qstr.len = autofs4_getpath(sbi, dentry, &name);
		if (!qstr.len) {
			kfree(name);
			return -ENOENT;
		}
	}
	qstr.name = name;
	qstr.hash = full_name_hash(dentry, name, qstr.len);

	if (mutex_lock_interruptible(&sbi->wq_mutex)) {
		kfree(qstr.name);
		return -EINTR;
	}

	ret = validate_request(&wq, sbi, &qstr, dentry, notify);
	if (ret <= 0) {
		if (ret != -EINTR)
			mutex_unlock(&sbi->wq_mutex);
		kfree(qstr.name);
		return ret;
	}

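	/*
	 * No existing wait for this name: queue a new entry and notify
	 * the daemon, otherwise join the wait that is already pending.
	 */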
	if (!wq) {
		/* Create a new wait queue */
		wq = kmalloc(sizeof(struct autofs_wait_queue), GFP_KERNEL);
		if (!wq) {
			kfree(qstr.name);
			mutex_unlock(&sbi->wq_mutex);
			return -ENOMEM;
		}

		wq->wait_queue_token = autofs4_next_wait_queue;
		if (++autofs4_next_wait_queue == 0)
			autofs4_next_wait_queue = 1;
		wq->next = sbi->queues;
		sbi->queues = wq;
		init_waitqueue_head(&wq->queue);
		memcpy(&wq->name, &qstr, sizeof(struct qstr));
		wq->dev = autofs4_get_dev(sbi);
		wq->ino = autofs4_get_ino(sbi);
		wq->uid = current_real_cred()->uid;
		wq->gid = current_real_cred()->gid;
		wq->pid = pid;
		wq->tgid = tgid;
		wq->status = -EINTR; /* Status return if interrupted */
		wq->wait_ctr = 2;

		if (sbi->version < 5) {
			if (notify == NFY_MOUNT)
				type = autofs_ptype_missing;
			else
				type = autofs_ptype_expire_multi;
		} else {
			if (notify == NFY_MOUNT)
				type = autofs_type_trigger(sbi->type) ?
					autofs_ptype_missing_direct :
					autofs_ptype_missing_indirect;
			else
				type = autofs_type_trigger(sbi->type) ?
					autofs_ptype_expire_direct :
					autofs_ptype_expire_indirect;
		}

		pr_debug("new wait id = 0x%08lx, name = %.*s, nfy=%d\n",
			 (unsigned long) wq->wait_queue_token, wq->name.len,
			 wq->name.name, notify);

		/*
		 * autofs4_notify_daemon() may block; it will unlock ->wq_mutex
		 */
		autofs4_notify_daemon(sbi, wq, type);
	} else {
		wq->wait_ctr++;
		pr_debug("existing wait id = 0x%08lx, name = %.*s, nfy=%d\n",
			 (unsigned long) wq->wait_queue_token, wq->name.len,
			 wq->name.name, notify);
		mutex_unlock(&sbi->wq_mutex);
		kfree(qstr.name);
	}

	/*
	 * wq->name.name is NULL iff the lock is already released
	 * or the mount has been made catatonic.
	 */
	if (wq->name.name) {
		/* Block all but "shutdown" signals while waiting */
		unsigned long shutdown_sigs_mask;
		unsigned long irqflags;
		sigset_t oldset;

		spin_lock_irqsave(&current->sighand->siglock, irqflags);
		oldset = current->blocked;
		shutdown_sigs_mask = SHUTDOWN_SIGS & ~oldset.sig[0];
		siginitsetinv(&current->blocked, shutdown_sigs_mask);
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, irqflags);

		wait_event_interruptible(wq->queue, wq->name.name == NULL);

		spin_lock_irqsave(&current->sighand->siglock, irqflags);
		current->blocked = oldset;
		recalc_sigpending();
		spin_unlock_irqrestore(&current->sighand->siglock, irqflags);
	} else {
		pr_debug("skipped sleeping\n");
	}

	status = wq->status;

	/*
	 * For direct and offset mounts we need to track the requester's
	 * uid and gid in the dentry info struct. This is so it can be
	 * supplied, on request, by the misc device ioctl interface.
	 * This is needed during daemon restart when reconnecting
	 * to existing, active, autofs mounts. The uid and gid (and
	 * related string values) may be used for macro substitution
	 * in autofs mount maps.
	 */
	if (!status) {
		struct autofs_info *ino;
		struct dentry *de = NULL;

		/* direct mount or browsable map */
		ino = autofs4_dentry_ino(dentry);
		if (!ino) {
			/* If not lookup actual dentry used */
			de = d_lookup(dentry->d_parent, &dentry->d_name);
			if (de)
				ino = autofs4_dentry_ino(de);
		}

		/* Set mount requester */
		if (ino) {
			spin_lock(&sbi->fs_lock);
			ino->uid = wq->uid;
			ino->gid = wq->gid;
			spin_unlock(&sbi->fs_lock);
		}

		if (de)
			dput(de);
	}

	/* Are we the last process to need status? */
	mutex_lock(&sbi->wq_mutex);
	if (!--wq->wait_ctr)
		kfree(wq);
	mutex_unlock(&sbi->wq_mutex);

	return status;
}

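/*
 * Called by the daemon (via ioctl) to report the result of a request.
 * The matching wait queue entry is unlinked, its status is set and any
 * sleepers are woken. Returns -EINVAL if the token is unknown.
 */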
int autofs4_wait_release(struct autofs_sb_info *sbi, autofs_wqt_t wait_queue_token, int status)
{
	struct autofs_wait_queue *wq, **wql;

	mutex_lock(&sbi->wq_mutex);
	for (wql = &sbi->queues; (wq = *wql) != NULL; wql = &wq->next) {
		if (wq->wait_queue_token == wait_queue_token)
			break;
	}

	if (!wq) {
		mutex_unlock(&sbi->wq_mutex);
		return -EINVAL;
	}

	*wql = wq->next;	/* Unlink from chain */
	kfree(wq->name.name);
	wq->name.name = NULL;	/* Do not wait on this queue */
	wq->status = status;
	wake_up_interruptible(&wq->queue);
	if (!--wq->wait_ctr)
		kfree(wq);
	mutex_unlock(&sbi->wq_mutex);

	return 0;
}