/*
 *  linux/fs/nfs/dir.c
 *
 *  Copyright (C) 1992  Rick Sladkey
 *
 *  nfs directory handling functions
 *
 * 10 Apr 1996	Added silly rename for unlink	--okir
 * 28 Sep 1996	Improved directory cache --okir
 * 23 Aug 1997	Claus Heine claus@momo.math.rwth-aachen.de
 *		Re-implemented silly rename for unlink, newly implemented
 *		silly rename for nfs_rename() following the suggestions
 *		of Olaf Kirch (okir) found in this file.
 *		Following Linus comments on my original hack, this version
 *		depends only on the dcache stuff and doesn't touch the inode
 *		layer (iput() and friends).
 *  6 Jun 1999	Cache readdir lookups in the page cache. -DaveM
 */

#include <linux/time.h>
#include <linux/errno.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/pagevec.h>
#include <linux/namei.h>
#include <linux/mount.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "iostat.h"

#define NFS_PARANOIA 1
/* #define NFS_DEBUG_VERBOSE 1 */

static int nfs_opendir(struct inode *, struct file *);
static int nfs_readdir(struct file *, void *, filldir_t);
static struct dentry *nfs_lookup(struct inode *, struct dentry *, struct nameidata *);
static int nfs_create(struct inode *, struct dentry *, int, struct nameidata *);
static int nfs_mkdir(struct inode *, struct dentry *, int);
static int nfs_rmdir(struct inode *, struct dentry *);
static int nfs_unlink(struct inode *, struct dentry *);
static int nfs_symlink(struct inode *, struct dentry *, const char *);
static int nfs_link(struct dentry *, struct inode *, struct dentry *);
static int nfs_mknod(struct inode *, struct dentry *, int, dev_t);
static int nfs_rename(struct inode *, struct dentry *,
		      struct inode *, struct dentry *);
static int nfs_fsync_dir(struct file *, struct dentry *, int);
static loff_t nfs_llseek_dir(struct file *, loff_t, int);

const struct file_operations nfs_dir_operations = {
	.llseek		= nfs_llseek_dir,
	.read		= generic_read_dir,
	.readdir	= nfs_readdir,
	.open		= nfs_opendir,
	.release	= nfs_release,
	.fsync		= nfs_fsync_dir,
};

const struct inode_operations nfs_dir_inode_operations = {
	.create		= nfs_create,
	.lookup		= nfs_lookup,
	.link		= nfs_link,
	.unlink		= nfs_unlink,
	.symlink	= nfs_symlink,
	.mkdir		= nfs_mkdir,
	.rmdir		= nfs_rmdir,
	.mknod		= nfs_mknod,
	.rename		= nfs_rename,
	.permission	= nfs_permission,
	.getattr	= nfs_getattr,
	.setattr	= nfs_setattr,
};

#ifdef CONFIG_NFS_V3
const struct inode_operations nfs3_dir_inode_operations = {
	.create		= nfs_create,
	.lookup		= nfs_lookup,
	.link		= nfs_link,
	.unlink		= nfs_unlink,
	.symlink	= nfs_symlink,
	.mkdir		= nfs_mkdir,
	.rmdir		= nfs_rmdir,
	.mknod		= nfs_mknod,
	.rename		= nfs_rename,
	.permission	= nfs_permission,
	.getattr	= nfs_getattr,
	.setattr	= nfs_setattr,
	.listxattr	= nfs3_listxattr,
	.getxattr	= nfs3_getxattr,
	.setxattr	= nfs3_setxattr,
	.removexattr	= nfs3_removexattr,
};
#endif  /* CONFIG_NFS_V3 */

#ifdef CONFIG_NFS_V4

static struct dentry *nfs_atomic_lookup(struct inode *, struct dentry *, struct nameidata *);
const struct inode_operations nfs4_dir_inode_operations = {
	.create		= nfs_create,
	.lookup		= nfs_atomic_lookup,
	.link		= nfs_link,
	.unlink		= nfs_unlink,
	.symlink	= nfs_symlink,
	.mkdir		= nfs_mkdir,
	.rmdir		= nfs_rmdir,
	.mknod		= nfs_mknod,
	.rename		= nfs_rename,
	.permission	= nfs_permission,
	.getattr	= nfs_getattr,
	.setattr	= nfs_setattr,
	.getxattr	= nfs4_getxattr,
	.setxattr	= nfs4_setxattr,
	.listxattr	= nfs4_listxattr,
};

#endif /* CONFIG_NFS_V4 */

/*
 * Open file
 */
static int
nfs_opendir(struct inode *inode, struct file *filp)
{
	int res;

	dfprintk(VFS, "NFS: opendir(%s/%ld)\n",
			inode->i_sb->s_id, inode->i_ino);

	lock_kernel();
	/* Call generic open code in order to cache credentials */
	res = nfs_open(inode, filp);
	unlock_kernel();
	return res;
}

typedef __be32 * (*decode_dirent_t)(__be32 *, struct nfs_entry *, int);
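
/*
 * The readdir descriptor tracks one directory read in progress: the
 * file and page-cache position, the server-side cookie being searched
 * for, and the XDR decode routine for this NFS version.
 */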
typedef struct {
	struct file	*file;
	struct page	*page;
	unsigned long	page_index;
	__be32		*ptr;
	u64		*dir_cookie;
	loff_t		current_index;
	struct nfs_entry *entry;
	decode_dirent_t	decode;
	int		plus;
	int		error;
	unsigned long	timestamp;
	int		timestamp_valid;
} nfs_readdir_descriptor_t;

/* Now we cache directories properly, by stuffing the dirent
 * data directly in the page cache.
 *
 * Inode invalidation due to refresh etc. takes care of
 * _everything_, no sloppy entry flushing logic, no extraneous
 * copying, network direct to page cache, the way it was meant
 * to be.
 *
 * NOTE: Dirent information verification is done always by the
 *	 page-in of the RPC reply, nowhere else, this simplifies
 *	 things substantially.
 */
static
int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page *page)
{
	struct file	*file = desc->file;
	struct inode	*inode = file->f_path.dentry->d_inode;
	struct rpc_cred	*cred = nfs_file_cred(file);
	unsigned long	timestamp;
	int		error;

	dfprintk(DIRCACHE, "NFS: %s: reading cookie %Lu into page %lu\n",
			__FUNCTION__, (long long)desc->entry->cookie,
			page->index);

 again:
	timestamp = jiffies;
	error = NFS_PROTO(inode)->readdir(file->f_path.dentry, cred, desc->entry->cookie, page,
					  NFS_SERVER(inode)->dtsize, desc->plus);
	if (error < 0) {
		/* We requested READDIRPLUS, but the server doesn't grok it */
		if (error == -ENOTSUPP && desc->plus) {
			NFS_SERVER(inode)->caps &= ~NFS_CAP_READDIRPLUS;
			clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_FLAGS(inode));
			desc->plus = 0;
			goto again;
		}
		goto error;
	}
	desc->timestamp = timestamp;
	desc->timestamp_valid = 1;
	SetPageUptodate(page);
	spin_lock(&inode->i_lock);
	NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATIME;
	spin_unlock(&inode->i_lock);
	/* Ensure consistent page alignment of the data.
	 * Note: assumes we have exclusive access to this mapping either
	 *	 through inode->i_mutex or some other mechanism.
	 */
	if (page->index == 0 && invalidate_inode_pages2_range(inode->i_mapping, PAGE_CACHE_SIZE, -1) < 0) {
		/* Should never happen */
		nfs_zap_mapping(inode, inode->i_mapping);
	}
	unlock_page(page);
	return 0;
 error:
	SetPageError(page);
	unlock_page(page);
	nfs_zap_caches(inode);
	desc->error = error;
	return -EIO;
}
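
/*
 * Decode one dirent from the current position in the page and advance
 * desc->ptr past it.  The decoded attributes are stamped with the time
 * the readdir RPC was sent, so readdirplus data can be aged properly.
 */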
static inline
int dir_decode(nfs_readdir_descriptor_t *desc)
{
	__be32	*p = desc->ptr;
	p = desc->decode(p, desc->entry, desc->plus);
	if (IS_ERR(p))
		return PTR_ERR(p);
	desc->ptr = p;
	if (desc->timestamp_valid)
		desc->entry->fattr->time_start = desc->timestamp;
	else
		desc->entry->fattr->valid &= ~NFS_ATTR_FATTR;
	return 0;
}

static inline
void dir_page_release(nfs_readdir_descriptor_t *desc)
{
	kunmap(desc->page);
	page_cache_release(desc->page);
	desc->page = NULL;
	desc->ptr = NULL;
}

/*
 * Given a pointer to a buffer that has already been filled by a call
 * to readdir, find the next entry with cookie '*desc->dir_cookie'.
 *
 * If the end of the buffer has been reached, return -EAGAIN, if not,
 * return the offset within the buffer of the next entry to be
 * read.
 */
static inline
int find_dirent(nfs_readdir_descriptor_t *desc)
{
	struct nfs_entry *entry = desc->entry;
	int		loop_count = 0,
			status;

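	/* Yield the CPU every couple of hundred entries so that scanning
	 * a very large directory does not hog it (we may hold the BKL). */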
	while((status = dir_decode(desc)) == 0) {
		dfprintk(DIRCACHE, "NFS: %s: examining cookie %Lu\n",
				__FUNCTION__, (unsigned long long)entry->cookie);
		if (entry->prev_cookie == *desc->dir_cookie)
			break;
		if (loop_count++ > 200) {
			loop_count = 0;
			schedule();
		}
	}
	return status;
}

/*
 * Given a pointer to a buffer that has already been filled by a call
 * to readdir, find the entry at offset 'desc->file->f_pos'.
 *
 * If the end of the buffer has been reached, return -EAGAIN, if not,
 * return the offset within the buffer of the next entry to be
 * read.
 */
static inline
int find_dirent_index(nfs_readdir_descriptor_t *desc)
{
	struct nfs_entry *entry = desc->entry;
	int		loop_count = 0,
			status;

	for(;;) {
		status = dir_decode(desc);
		if (status)
			break;

		dfprintk(DIRCACHE, "NFS: found cookie %Lu at index %Ld\n",
				(unsigned long long)entry->cookie, desc->current_index);

		if (desc->file->f_pos == desc->current_index) {
			*desc->dir_cookie = entry->cookie;
			break;
		}
		desc->current_index++;
		if (loop_count++ > 200) {
			loop_count = 0;
			schedule();
		}
	}
	return status;
}

/*
 * Find the given page, and call find_dirent() or find_dirent_index() in
 * order to try to return the next entry.
 */
static inline
int find_dirent_page(nfs_readdir_descriptor_t *desc)
{
	struct inode	*inode = desc->file->f_path.dentry->d_inode;
	struct page	*page;
	int		status;

	dfprintk(DIRCACHE, "NFS: %s: searching page %ld for target %Lu\n",
			__FUNCTION__, desc->page_index,
			(long long) *desc->dir_cookie);

	/* If we find the page in the page_cache, we cannot be sure
	 * how fresh the data is, so we will ignore readdir_plus attributes.
	 */
	desc->timestamp_valid = 0;
	page = read_cache_page(inode->i_mapping, desc->page_index,
			       (filler_t *)nfs_readdir_filler, desc);
	if (IS_ERR(page)) {
		status = PTR_ERR(page);
		goto out;
	}
	if (!PageUptodate(page))
		goto read_error;

	/* NOTE: Someone else may have changed the READDIRPLUS flag */
	desc->page = page;
	desc->ptr = kmap(page);		/* matching kunmap in nfs_do_filldir */
	if (*desc->dir_cookie != 0)
		status = find_dirent(desc);
	else
		status = find_dirent_index(desc);
	if (status < 0)
		dir_page_release(desc);
 out:
	dfprintk(DIRCACHE, "NFS: %s: returns %d\n", __FUNCTION__, status);
	return status;
 read_error:
	page_cache_release(page);
	return -EIO;
}

/*
 * Recurse through the page cache pages, and return a
 * filled nfs_entry structure of the next directory entry if possible.
 *
 * The target for the search is '*desc->dir_cookie' if non-0,
 * 'desc->file->f_pos' otherwise
 */
static inline
int readdir_search_pagecache(nfs_readdir_descriptor_t *desc)
{
	int loop_count = 0;
	int res;

	/* Always search-by-index from the beginning of the cache */
	if (*desc->dir_cookie == 0) {
		dfprintk(DIRCACHE, "NFS: readdir_search_pagecache() searching for offset %Ld\n",
				(long long)desc->file->f_pos);
		desc->page_index = 0;
		desc->entry->cookie = desc->entry->prev_cookie = 0;
		desc->entry->eof = 0;
		desc->current_index = 0;
	} else
		dfprintk(DIRCACHE, "NFS: readdir_search_pagecache() searching for cookie %Lu\n",
				(unsigned long long)*desc->dir_cookie);

	for (;;) {
		res = find_dirent_page(desc);
		if (res != -EAGAIN)
			break;
		/* Align to beginning of next page */
		desc->page_index ++;
		if (loop_count++ > 200) {
			loop_count = 0;
			schedule();
		}
	}

	dfprintk(DIRCACHE, "NFS: %s: returns %d\n", __FUNCTION__, res);
	return res;
}
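
/*
 * i_mode stores the file type in its top four bits (the S_IFMT field);
 * shifting right by 12 yields the matching DT_* value that filldir
 * expects (e.g. S_IFDIR >> 12 == DT_DIR).
 */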
static inline unsigned int dt_type(struct inode *inode)
{
	return (inode->i_mode >> 12) & 15;
}

static struct dentry *nfs_readdir_lookup(nfs_readdir_descriptor_t *desc);

/*
 * Once we've found the start of the dirent within a page: fill 'er up...
 */
static
int nfs_do_filldir(nfs_readdir_descriptor_t *desc, void *dirent,
		   filldir_t filldir)
{
	struct file	*file = desc->file;
	struct nfs_entry *entry = desc->entry;
	struct dentry	*dentry = NULL;
	unsigned long	fileid;
	int		loop_count = 0,
			res;

	dfprintk(DIRCACHE, "NFS: nfs_do_filldir() filling starting @ cookie %Lu\n",
			(unsigned long long)entry->cookie);

	for(;;) {
		unsigned d_type = DT_UNKNOWN;
		/* Note: entry->prev_cookie contains the cookie for
		 * retrieving the current dirent on the server */
		fileid = nfs_fileid_to_ino_t(entry->ino);

		/* Get a dentry if we have one */
		if (dentry != NULL)
			dput(dentry);
		dentry = nfs_readdir_lookup(desc);

		/* Use readdirplus info */
		if (dentry != NULL && dentry->d_inode != NULL) {
			d_type = dt_type(dentry->d_inode);
			fileid = dentry->d_inode->i_ino;
		}

		res = filldir(dirent, entry->name, entry->len,
			      file->f_pos, fileid, d_type);
		if (res < 0)
			break;
		file->f_pos++;
		*desc->dir_cookie = entry->cookie;
		if (dir_decode(desc) != 0) {
			desc->page_index ++;
			break;
		}
		if (loop_count++ > 200) {
			loop_count = 0;
			schedule();
		}
	}
	dir_page_release(desc);
	if (dentry != NULL)
		dput(dentry);
	dfprintk(DIRCACHE, "NFS: nfs_do_filldir() filling ended @ cookie %Lu; returning = %d\n",
			(unsigned long long)*desc->dir_cookie, res);
	return res;
}

/*
 * If we cannot find a cookie in our cache, we suspect that this is
 * because it points to a deleted file, so we ask the server to return
 * whatever it thinks is the next entry. We then feed this to filldir.
 * If all goes well, we should then be able to find our way round the
 * cache on the next call to readdir_search_pagecache();
 *
 * NOTE: we cannot add the anonymous page to the pagecache because
 *	 the data it contains might not be page aligned. Besides,
 *	 we should already have a complete representation of the
 *	 directory in the page cache by the time we get here.
 */
static inline
int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
		     filldir_t filldir)
{
	struct file	*file = desc->file;
	struct inode	*inode = file->f_path.dentry->d_inode;
	struct rpc_cred	*cred = nfs_file_cred(file);
	struct page	*page = NULL;
	int		status;
	unsigned long	timestamp;

	dfprintk(DIRCACHE, "NFS: uncached_readdir() searching for cookie %Lu\n",
			(unsigned long long)*desc->dir_cookie);

	page = alloc_page(GFP_HIGHUSER);
	if (!page) {
		status = -ENOMEM;
		goto out;
	}
	timestamp = jiffies;
	desc->error = NFS_PROTO(inode)->readdir(file->f_path.dentry, cred, *desc->dir_cookie,
						page,
						NFS_SERVER(inode)->dtsize,
						desc->plus);
	spin_lock(&inode->i_lock);
	NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATIME;
	spin_unlock(&inode->i_lock);
	desc->page = page;
	desc->ptr = kmap(page);		/* matching kunmap in nfs_do_filldir */
	if (desc->error >= 0) {
		desc->timestamp = timestamp;
		desc->timestamp_valid = 1;
		if ((status = dir_decode(desc)) == 0)
			desc->entry->prev_cookie = *desc->dir_cookie;
	} else
		status = -EIO;
	if (status < 0)
		goto out_release;

	status = nfs_do_filldir(desc, dirent, filldir);

	/* Reset read descriptor so it searches the page cache from
	 * the start upon the next call to readdir_search_pagecache() */
	desc->page_index = 0;
	desc->entry->cookie = desc->entry->prev_cookie = 0;
	desc->entry->eof = 0;
 out:
	dfprintk(DIRCACHE, "NFS: %s: returns %d\n",
			__FUNCTION__, status);
	return status;
 out_release:
	dir_page_release(desc);
	goto out;
}

/* The file offset position represents the dirent entry number.  A
   last cookie cache takes care of the common case of reading the
   whole directory.
 */
static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct dentry	*dentry = filp->f_path.dentry;
	struct inode	*inode = dentry->d_inode;
	nfs_readdir_descriptor_t my_desc,
			*desc = &my_desc;
	struct nfs_entry my_entry;
	struct nfs_fh	 fh;
	struct nfs_fattr fattr;
	long		res;

	dfprintk(VFS, "NFS: readdir(%s/%s) starting at cookie %Lu\n",
			dentry->d_parent->d_name.name, dentry->d_name.name,
			(long long)filp->f_pos);
	nfs_inc_stats(inode, NFSIOS_VFSGETDENTS);

	lock_kernel();

	res = nfs_revalidate_mapping_nolock(inode, filp->f_mapping);
	if (res < 0) {
		unlock_kernel();
		return res;
	}

	/*
	 * filp->f_pos points to the dirent entry number.
	 * *desc->dir_cookie has the cookie for the next entry. We have
	 * to either find the entry with the appropriate number or
	 * revalidate the cookie.
	 */
	memset(desc, 0, sizeof(*desc));

	desc->file = filp;
	desc->dir_cookie = &((struct nfs_open_context *)filp->private_data)->dir_cookie;
	desc->decode = NFS_PROTO(inode)->decode_dirent;
	desc->plus = NFS_USE_READDIRPLUS(inode);

	my_entry.cookie = my_entry.prev_cookie = 0;
	my_entry.eof = 0;
	my_entry.fh = &fh;
	my_entry.fattr = &fattr;
	nfs_fattr_init(&fattr);
	desc->entry = &my_entry;

	while(!desc->entry->eof) {
		res = readdir_search_pagecache(desc);

		if (res == -EBADCOOKIE) {
			/* This means either end of directory */
			if (*desc->dir_cookie && desc->entry->cookie != *desc->dir_cookie) {
				/* Or that the server has 'lost' a cookie */
				res = uncached_readdir(desc, dirent, filldir);
				if (res >= 0)
					continue;
			}
			res = 0;
			break;
		}
		if (res == -ETOOSMALL && desc->plus) {
			clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_FLAGS(inode));
			nfs_zap_caches(inode);
			desc->plus = 0;
			desc->entry->eof = 0;
			continue;
		}
		if (res < 0)
			break;

		res = nfs_do_filldir(desc, dirent, filldir);
		if (res < 0) {
			res = 0;
			break;
		}
	}
	unlock_kernel();
	if (res > 0)
		res = 0;
	dfprintk(VFS, "NFS: readdir(%s/%s) returns %ld\n",
			dentry->d_parent->d_name.name, dentry->d_name.name,
			res);
	return res;
}
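
/*
 * Note: for directory seeks, origin 1 is SEEK_CUR and 0 is SEEK_SET;
 * SEEK_END has no sensible meaning here, so anything else gets -EINVAL.
 * Moving to a new offset also resets the cached readdir cookie, forcing
 * the next nfs_readdir() to re-search the page cache from the start.
 */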
loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int origin)
{
	mutex_lock(&filp->f_path.dentry->d_inode->i_mutex);
	switch (origin) {
		case 1:
			offset += filp->f_pos;
		case 0:
			if (offset >= 0)
				break;
		default:
			offset = -EINVAL;
			goto out;
	}
	if (offset != filp->f_pos) {
		filp->f_pos = offset;
		((struct nfs_open_context *)filp->private_data)->dir_cookie = 0;
	}
out:
	mutex_unlock(&filp->f_path.dentry->d_inode->i_mutex);
	return offset;
}

/*
 * All directory operations under NFS are synchronous, so fsync()
 * is a dummy operation.
 */
int nfs_fsync_dir(struct file *filp, struct dentry *dentry, int datasync)
{
	dfprintk(VFS, "NFS: fsync_dir(%s/%s) datasync %d\n",
			dentry->d_parent->d_name.name, dentry->d_name.name,
			datasync);

	return 0;
}

/*
 * A check for whether or not the parent directory has changed.
 * In the case it has, we assume that the dentries are untrustworthy
 * and may need to be looked up again.
 */
static int nfs_check_verifier(struct inode *dir, struct dentry *dentry)
{
	if (IS_ROOT(dentry))
		return 1;
	if ((NFS_I(dir)->cache_validity & NFS_INO_INVALID_ATTR) != 0
			|| nfs_attribute_timeout(dir))
		return 0;
	return nfs_verify_change_attribute(dir, (unsigned long)dentry->d_fsdata);
}
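
/*
 * The verifier is the parent directory's change attribute, stashed in
 * dentry->d_fsdata as a plain unsigned long so nfs_check_verifier() can
 * compare it later without another server round trip.
 */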
static inline void nfs_set_verifier(struct dentry * dentry, unsigned long verf)
{
	dentry->d_fsdata = (void *)verf;
}

static void nfs_refresh_verifier(struct dentry * dentry, unsigned long verf)
{
	if (time_after(verf, (unsigned long)dentry->d_fsdata))
		nfs_set_verifier(dentry, verf);
}

/*
 * Whenever an NFS operation succeeds, we know that the dentry
 * is valid, so we update the revalidation timestamp.
 */
static inline void nfs_renew_times(struct dentry * dentry)
{
	dentry->d_time = jiffies;
}

/*
 * Return the intent data that applies to this particular path component
 *
 * Note that the current set of intents only apply to the very last
 * component of the path.
 * We check for this using LOOKUP_CONTINUE and LOOKUP_PARENT.
 */
static inline unsigned int nfs_lookup_check_intent(struct nameidata *nd, unsigned int mask)
{
	if (nd->flags & (LOOKUP_CONTINUE|LOOKUP_PARENT))
		return 0;
	return nd->flags & mask;
}

/*
 * Inode and filehandle revalidation for lookups.
 *
 * We force revalidation in the cases where the VFS sets LOOKUP_REVAL,
 * or if the intent information indicates that we're about to open this
 * particular file and the "nocto" mount flag is not set.
 *
 */
static inline
int nfs_lookup_verify_inode(struct inode *inode, struct nameidata *nd)
{
	struct nfs_server *server = NFS_SERVER(inode);

	if (nd != NULL) {
		/* VFS wants an on-the-wire revalidation */
		if (nd->flags & LOOKUP_REVAL)
			goto out_force;
		/* This is an open(2) */
		if (nfs_lookup_check_intent(nd, LOOKUP_OPEN) != 0 &&
				!(server->flags & NFS_MOUNT_NOCTO) &&
				(S_ISREG(inode->i_mode) ||
				 S_ISDIR(inode->i_mode)))
			goto out_force;
	}
	return nfs_revalidate_inode(server, inode);
out_force:
	return __nfs_revalidate_inode(server, inode);
}

/*
 * We judge how long we want to trust negative
 * dentries by looking at the parent inode mtime.
 *
 * If parent mtime has changed, we revalidate, else we wait for a
 * period corresponding to the parent's attribute cache timeout value.
 */
static inline
int nfs_neg_need_reval(struct inode *dir, struct dentry *dentry,
		       struct nameidata *nd)
{
	/* Don't revalidate a negative dentry if we're creating a new file */
	if (nd != NULL && nfs_lookup_check_intent(nd, LOOKUP_CREATE) != 0)
		return 0;
	return !nfs_check_verifier(dir, dentry);
}

/*
 * This is called every time the dcache has a lookup hit,
 * and we should check whether we can really trust that
 * lookup.
 *
 * NOTE! The hit can be a negative hit too, don't assume
 * we have an inode!
 *
 * If the parent directory is seen to have changed, we throw out the
 * cached dentry and do a new lookup.
 */
static int nfs_lookup_revalidate(struct dentry * dentry, struct nameidata *nd)
{
	struct inode *dir;
	struct inode *inode;
	struct dentry *parent;
	int error;
	struct nfs_fh fhandle;
	struct nfs_fattr fattr;
	unsigned long verifier;

	parent = dget_parent(dentry);
	lock_kernel();
	dir = parent->d_inode;
	nfs_inc_stats(dir, NFSIOS_DENTRYREVALIDATE);
	inode = dentry->d_inode;

	if (!inode) {
		if (nfs_neg_need_reval(dir, dentry, nd))
			goto out_bad;
		goto out_valid;
	}

	if (is_bad_inode(inode)) {
		dfprintk(LOOKUPCACHE, "%s: %s/%s has dud inode\n",
				__FUNCTION__, dentry->d_parent->d_name.name,
				dentry->d_name.name);
		goto out_bad;
	}

	/* Revalidate parent directory attribute cache */
	if (nfs_revalidate_inode(NFS_SERVER(dir), dir) < 0)
		goto out_zap_parent;

	/* Force a full look up iff the parent directory has changed */
	if (nfs_check_verifier(dir, dentry)) {
		if (nfs_lookup_verify_inode(inode, nd))
			goto out_zap_parent;
		goto out_valid;
	}

	if (NFS_STALE(inode))
		goto out_bad;

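	/* The cached dentry may be stale: redo the LOOKUP over the wire
	 * and check that the filehandle and attributes still match. */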
	verifier = nfs_save_change_attribute(dir);
	error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, &fhandle, &fattr);
	if (error)
		goto out_bad;
	if (nfs_compare_fh(NFS_FH(inode), &fhandle))
		goto out_bad;
	if ((error = nfs_refresh_inode(inode, &fattr)) != 0)
		goto out_bad;

	nfs_renew_times(dentry);
	nfs_refresh_verifier(dentry, verifier);
 out_valid:
	unlock_kernel();
	dput(parent);
	dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) is valid\n",
			__FUNCTION__, dentry->d_parent->d_name.name,
			dentry->d_name.name);
	return 1;
out_zap_parent:
	nfs_zap_caches(dir);
 out_bad:
	NFS_CACHEINV(dir);
	if (inode && S_ISDIR(inode->i_mode)) {
		/* Purge readdir caches. */
		nfs_zap_caches(inode);
		/* If we have submounts, don't unhash ! */
		if (have_submounts(dentry))
			goto out_valid;
		shrink_dcache_parent(dentry);
	}
	d_drop(dentry);
	unlock_kernel();
	dput(parent);
	dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) is invalid\n",
			__FUNCTION__, dentry->d_parent->d_name.name,
			dentry->d_name.name);
	return 0;
}

/*
 * This is called from dput() when d_count is going to 0.
 */
static int nfs_dentry_delete(struct dentry *dentry)
{
	dfprintk(VFS, "NFS: dentry_delete(%s/%s, %x)\n",
		dentry->d_parent->d_name.name, dentry->d_name.name,
		dentry->d_flags);

	if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
		/* Unhash it, so that ->d_iput() would be called */
		return 1;
	}
	if (!(dentry->d_sb->s_flags & MS_ACTIVE)) {
		/* Unhash it, so that ancestors of killed async unlink
		 * files will be cleaned up during umount */
		return 1;
	}
	return 0;

}

/*
 * Called when the dentry loses inode.
 * We use it to clean up silly-renamed files.
 */
static void nfs_dentry_iput(struct dentry *dentry, struct inode *inode)
{
	nfs_inode_return_delegation(inode);
	if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
		lock_kernel();
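		/* Completing the unlink deletes the silly-renamed file on
		 * the server, so account for the lost link right away. */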
		drop_nlink(inode);
		nfs_complete_unlink(dentry);
		unlock_kernel();
	}
	/* When creating a negative dentry, we want to renew d_time */
	nfs_renew_times(dentry);
	iput(inode);
}

struct dentry_operations nfs_dentry_operations = {
	.d_revalidate	= nfs_lookup_revalidate,
	.d_delete	= nfs_dentry_delete,
	.d_iput		= nfs_dentry_iput,
};

/*
 * Use intent information to check whether or not we're going to do
 * an O_EXCL create using this path component.
 */
static inline
int nfs_is_exclusive_create(struct inode *dir, struct nameidata *nd)
{
	if (NFS_PROTO(dir)->version == 2)
		return 0;
	if (nd == NULL || nfs_lookup_check_intent(nd, LOOKUP_CREATE) == 0)
		return 0;
	return (nd->intent.open.flags & O_EXCL) != 0;
}
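
/*
 * A lookup result whose fsid differs from the parent's suggests we have
 * crossed a mountpoint on the server; revalidate this mount's root
 * inode so that its attributes get refreshed.
 */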
static inline int nfs_reval_fsid(struct vfsmount *mnt, struct inode *dir,
				 struct nfs_fh *fh, struct nfs_fattr *fattr)
{
	struct nfs_server *server = NFS_SERVER(dir);

	if (!nfs_fsid_equal(&server->fsid, &fattr->fsid))
		/* Revalidate fsid on root dir */
		return __nfs_revalidate_inode(server, mnt->mnt_root->d_inode);
	return 0;
}
2005-04-17 05:20:36 +07:00
|
|
|
static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *nd)
|
|
|
|
{
|
|
|
|
struct dentry *res;
|
|
|
|
struct inode *inode = NULL;
|
|
|
|
int error;
|
|
|
|
struct nfs_fh fhandle;
|
|
|
|
struct nfs_fattr fattr;
|
|
|
|
|
|
|
|
dfprintk(VFS, "NFS: lookup(%s/%s)\n",
|
|
|
|
dentry->d_parent->d_name.name, dentry->d_name.name);
|
2006-03-21 01:44:14 +07:00
|
|
|
nfs_inc_stats(dir, NFSIOS_VFSLOOKUP);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
res = ERR_PTR(-ENAMETOOLONG);
|
|
|
|
if (dentry->d_name.len > NFS_SERVER(dir)->namelen)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
res = ERR_PTR(-ENOMEM);
|
|
|
|
dentry->d_op = NFS_PROTO(dir)->dentry_ops;
|
|
|
|
|
|
|
|
lock_kernel();
|
|
|
|
|
2006-09-05 23:27:44 +07:00
|
|
|
/*
|
|
|
|
* If we're doing an exclusive create, optimize away the lookup
|
|
|
|
* but don't hash the dentry.
|
|
|
|
*/
|
|
|
|
if (nfs_is_exclusive_create(dir, nd)) {
|
|
|
|
d_instantiate(dentry, NULL);
|
|
|
|
res = NULL;
|
|
|
|
goto out_unlock;
|
|
|
|
}
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, &fhandle, &fattr);
|
|
|
|
if (error == -ENOENT)
|
|
|
|
goto no_entry;
|
|
|
|
if (error < 0) {
|
|
|
|
res = ERR_PTR(error);
|
|
|
|
goto out_unlock;
|
|
|
|
}
|
        error = nfs_reval_fsid(nd->mnt, dir, &fhandle, &fattr);
        if (error < 0) {
                res = ERR_PTR(error);
                goto out_unlock;
        }
        inode = nfs_fhget(dentry->d_sb, &fhandle, &fattr);
        res = (struct dentry *)inode;
        if (IS_ERR(res))
                goto out_unlock;

no_entry:
        res = d_materialise_unique(dentry, inode);
        if (res != NULL) {
                struct dentry *parent;
                if (IS_ERR(res))
                        goto out_unlock;
                /* Was a directory renamed? */
                parent = dget_parent(res);
                if (!IS_ROOT(parent))
                        nfs_mark_for_revalidate(parent->d_inode);
                dput(parent);
                dentry = res;
        }
        nfs_renew_times(dentry);
        nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
out_unlock:
        unlock_kernel();
out:
        return res;
}
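
/*
 * d_materialise_unique() above may hand back an existing alias in place
 * of the dentry we passed in; that can happen when a directory was
 * renamed on the server behind our back. The alias's parent is then
 * marked for revalidation so a stale directory listing does not linger.
 */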

#ifdef CONFIG_NFS_V4
static int nfs_open_revalidate(struct dentry *, struct nameidata *);

struct dentry_operations nfs4_dentry_operations = {
        .d_revalidate   = nfs_open_revalidate,
        .d_delete       = nfs_dentry_delete,
        .d_iput         = nfs_dentry_iput,
};

/*
 * Use intent information to determine whether we need to substitute
 * the NFSv4-style stateful OPEN for the LOOKUP call.
 */
static int is_atomic_open(struct inode *dir, struct nameidata *nd)
{
        if (nd == NULL || nfs_lookup_check_intent(nd, LOOKUP_OPEN) == 0)
                return 0;
        /* NFS does not (yet) have a stateful open for directories */
        if (nd->flags & LOOKUP_DIRECTORY)
                return 0;
        /* Are we trying to write to a read only partition? */
        if (IS_RDONLY(dir) && (nd->intent.open.flags & (O_CREAT|O_TRUNC|FMODE_WRITE)))
                return 0;
        return 1;
}
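
/*
 * Note on the write check above: the intent open flags use the VFS
 * encoding in which the O_ACCMODE bits have been incremented (O_WRONLY
 * becomes 2, O_RDWR becomes 3), so masking with FMODE_WRITE (0x2) should
 * catch any open for writing, not just O_RDWR.
 */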

static struct dentry *nfs_atomic_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
        struct dentry *res = NULL;
        int error;

        dfprintk(VFS, "NFS: atomic_lookup(%s/%ld), %s\n",
                        dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);

        /* Check that we are indeed trying to open this file */
        if (!is_atomic_open(dir, nd))
                goto no_open;

        if (dentry->d_name.len > NFS_SERVER(dir)->namelen) {
                res = ERR_PTR(-ENAMETOOLONG);
                goto out;
        }
        dentry->d_op = NFS_PROTO(dir)->dentry_ops;

        /* Let vfs_create() deal with O_EXCL */
        if (nd->intent.open.flags & O_EXCL) {
                d_add(dentry, NULL);
                goto out;
        }

        /* Open the file on the server */
        lock_kernel();
        /* Revalidate parent directory attribute cache */
        error = nfs_revalidate_inode(NFS_SERVER(dir), dir);
        if (error < 0) {
                res = ERR_PTR(error);
                unlock_kernel();
                goto out;
        }

        if (nd->intent.open.flags & O_CREAT) {
                nfs_begin_data_update(dir);
                res = nfs4_atomic_open(dir, dentry, nd);
                nfs_end_data_update(dir);
        } else
                res = nfs4_atomic_open(dir, dentry, nd);
        unlock_kernel();
        if (IS_ERR(res)) {
                error = PTR_ERR(res);
                switch (error) {
                        /* Make a negative dentry */
                        case -ENOENT:
                                res = NULL;
                                goto out;
                        /* This turned out not to be a regular file */
                        case -EISDIR:
                        case -ENOTDIR:
                                goto no_open;
                        case -ELOOP:
                                if (!(nd->intent.open.flags & O_NOFOLLOW))
                                        goto no_open;
                        /* case -EINVAL: */
                        default:
                                goto out;
                }
        } else if (res != NULL)
                dentry = res;
        nfs_renew_times(dentry);
        nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
out:
        return res;
no_open:
        return nfs_lookup(dir, dentry, nd);
}

static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd)
{
        struct dentry *parent = NULL;
        struct inode *inode = dentry->d_inode;
        struct inode *dir;
        unsigned long verifier;
        int openflags, ret = 0;

        parent = dget_parent(dentry);
        dir = parent->d_inode;
        if (!is_atomic_open(dir, nd))
                goto no_open;
        /* We can't create new files in nfs_open_revalidate(), so we
         * optimize away revalidation of negative dentries.
         */
        if (inode == NULL)
                goto out;
        /* NFS only supports OPEN on regular files */
        if (!S_ISREG(inode->i_mode))
                goto no_open;
        openflags = nd->intent.open.flags;
        /* We cannot do exclusive creation on a positive dentry */
        if ((openflags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
                goto no_open;
        /* We can't create new files, or truncate existing ones here */
        openflags &= ~(O_CREAT|O_TRUNC);

        /*
         * Note: we're not holding inode->i_mutex and so may be racing with
         * operations that change the directory. We therefore save the
         * change attribute *before* we do the RPC call.
         */
        lock_kernel();
        verifier = nfs_save_change_attribute(dir);
        ret = nfs4_open_revalidate(dir, dentry, openflags, nd);
        if (!ret)
                nfs_refresh_verifier(dentry, verifier);
        unlock_kernel();
out:
        dput(parent);
        if (!ret)
                d_drop(dentry);
        return ret;
no_open:
        dput(parent);
        if (inode != NULL && nfs_have_delegation(inode, FMODE_READ))
                return 1;
        return nfs_lookup_revalidate(dentry, nd);
}
#endif /* CONFIG_NFS_V4 */

static struct dentry *nfs_readdir_lookup(nfs_readdir_descriptor_t *desc)
{
        struct dentry *parent = desc->file->f_path.dentry;
        struct inode *dir = parent->d_inode;
        struct nfs_entry *entry = desc->entry;
        struct dentry *dentry, *alias;
        struct qstr name = {
                .name = entry->name,
                .len = entry->len,
        };
        struct inode *inode;

        switch (name.len) {
                case 2:
                        if (name.name[0] == '.' && name.name[1] == '.')
                                return dget_parent(parent);
                        break;
                case 1:
                        if (name.name[0] == '.')
                                return dget(parent);
        }
        name.hash = full_name_hash(name.name, name.len);
        dentry = d_lookup(parent, &name);
        if (dentry != NULL) {
                /* Is this a positive dentry that matches the readdir info? */
                if (dentry->d_inode != NULL &&
                                (NFS_FILEID(dentry->d_inode) == entry->ino ||
                                d_mountpoint(dentry))) {
                        if (!desc->plus || entry->fh->size == 0)
                                return dentry;
                        if (nfs_compare_fh(NFS_FH(dentry->d_inode),
                                                entry->fh) == 0)
                                goto out_renew;
                }
                /* No, so d_drop to allow one to be created */
                d_drop(dentry);
                dput(dentry);
        }
        if (!desc->plus || !(entry->fattr->valid & NFS_ATTR_FATTR))
                return NULL;
        /* Note: caller is already holding the dir->i_mutex! */
        dentry = d_alloc(parent, &name);
        if (dentry == NULL)
                return NULL;
        dentry->d_op = NFS_PROTO(dir)->dentry_ops;
        inode = nfs_fhget(dentry->d_sb, entry->fh, entry->fattr);
        if (IS_ERR(inode)) {
                dput(dentry);
                return NULL;
        }

        alias = d_materialise_unique(dentry, inode);
        if (alias != NULL) {
                dput(dentry);
                if (IS_ERR(alias))
                        return NULL;
                dentry = alias;
        }

        nfs_renew_times(dentry);
        nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
        return dentry;
out_renew:
        nfs_renew_times(dentry);
        nfs_refresh_verifier(dentry, nfs_save_change_attribute(dir));
        return dentry;
}
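
/*
 * Note that nfs_readdir_lookup() only instantiates a new dentry when the
 * entry carries valid attributes, i.e. when the server answered a
 * READDIRPLUS request (desc->plus). A plain READDIR entry yields NULL
 * here and the dentry is created later by an ordinary lookup.
 */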

/*
 * Code common to create, mkdir, and mknod.
 */
int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle,
                                struct nfs_fattr *fattr)
{
        struct inode *inode;
        int error = -EACCES;

        /* We may have been initialized further down */
        if (dentry->d_inode)
                return 0;
        if (fhandle->size == 0) {
                struct inode *dir = dentry->d_parent->d_inode;
                error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr);
                if (error)
                        return error;
        }
        if (!(fattr->valid & NFS_ATTR_FATTR)) {
                struct nfs_server *server = NFS_SB(dentry->d_sb);
                error = server->nfs_client->rpc_ops->getattr(server, fhandle, fattr);
                if (error < 0)
                        return error;
        }
        inode = nfs_fhget(dentry->d_sb, fhandle, fattr);
        error = PTR_ERR(inode);
        if (IS_ERR(inode))
                return error;
        d_instantiate(dentry, inode);
        if (d_unhashed(dentry))
                d_rehash(dentry);
        return 0;
}
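
/*
 * The two fill-in steps in nfs_instantiate() cover replies that omit
 * data: an empty file handle (fhandle->size == 0) is fetched with an
 * extra LOOKUP, and missing attributes with an extra GETATTR. An NFSv3
 * exclusive CREATE reply, for example, need not include either.
 */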

/*
 * Following a failed create operation, we drop the dentry rather
 * than retain a negative dentry. This avoids a problem in the event
 * that the operation succeeded on the server, but an error in the
 * reply path made it appear to have failed.
 */
static int nfs_create(struct inode *dir, struct dentry *dentry, int mode,
                struct nameidata *nd)
{
        struct iattr attr;
        int error;
        int open_flags = 0;

        dfprintk(VFS, "NFS: create(%s/%ld), %s\n",
                        dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);

        attr.ia_mode = mode;
        attr.ia_valid = ATTR_MODE;

        if (nd && (nd->flags & LOOKUP_CREATE))
                open_flags = nd->intent.open.flags;

        lock_kernel();
        nfs_begin_data_update(dir);
        error = NFS_PROTO(dir)->create(dir, dentry, &attr, open_flags, nd);
        nfs_end_data_update(dir);
        if (error != 0)
                goto out_err;
        nfs_renew_times(dentry);
        nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
        unlock_kernel();
        return 0;
out_err:
        unlock_kernel();
        d_drop(dentry);
        return error;
}

/*
 * See comments for nfs_proc_create regarding failed operations.
 */
static int
nfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev)
{
        struct iattr attr;
        int status;

        dfprintk(VFS, "NFS: mknod(%s/%ld), %s\n",
                        dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);

        if (!new_valid_dev(rdev))
                return -EINVAL;

        attr.ia_mode = mode;
        attr.ia_valid = ATTR_MODE;

        lock_kernel();
        nfs_begin_data_update(dir);
        status = NFS_PROTO(dir)->mknod(dir, dentry, &attr, rdev);
        nfs_end_data_update(dir);
        if (status != 0)
                goto out_err;
        nfs_renew_times(dentry);
        nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
        unlock_kernel();
        return 0;
out_err:
        unlock_kernel();
        d_drop(dentry);
        return status;
}

/*
 * See comments for nfs_proc_create regarding failed operations.
 */
static int nfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
        struct iattr attr;
        int error;

        dfprintk(VFS, "NFS: mkdir(%s/%ld), %s\n",
                        dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);

        attr.ia_valid = ATTR_MODE;
        attr.ia_mode = mode | S_IFDIR;

        lock_kernel();
        nfs_begin_data_update(dir);
        error = NFS_PROTO(dir)->mkdir(dir, dentry, &attr);
        nfs_end_data_update(dir);
        if (error != 0)
                goto out_err;
        nfs_renew_times(dentry);
        nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
        unlock_kernel();
        return 0;
out_err:
        d_drop(dentry);
        unlock_kernel();
        return error;
}

static int nfs_rmdir(struct inode *dir, struct dentry *dentry)
{
        int error;

        dfprintk(VFS, "NFS: rmdir(%s/%ld), %s\n",
                        dir->i_sb->s_id, dir->i_ino, dentry->d_name.name);

        lock_kernel();
        nfs_begin_data_update(dir);
        error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
        /* Ensure the VFS deletes this inode */
        if (error == 0 && dentry->d_inode != NULL)
                clear_nlink(dentry->d_inode);
        nfs_end_data_update(dir);
        unlock_kernel();

        return error;
}

static int nfs_sillyrename(struct inode *dir, struct dentry *dentry)
{
        static unsigned int sillycounter;
        const int i_inosize = sizeof(dir->i_ino)*2;
        const int countersize = sizeof(sillycounter)*2;
        const int slen = sizeof(".nfs") + i_inosize + countersize - 1;
        char silly[slen+1];
        struct qstr qsilly;
        struct dentry *sdentry;
        int error = -EIO;

        dfprintk(VFS, "NFS: silly-rename(%s/%s, ct=%d)\n",
                dentry->d_parent->d_name.name, dentry->d_name.name,
                atomic_read(&dentry->d_count));
        nfs_inc_stats(dir, NFSIOS_SILLYRENAME);

#ifdef NFS_PARANOIA
        if (!dentry->d_inode)
                printk("NFS: silly-renaming %s/%s, negative dentry??\n",
                        dentry->d_parent->d_name.name, dentry->d_name.name);
#endif
        /*
         * We don't allow a dentry to be silly-renamed twice.
         */
        error = -EBUSY;
        if (dentry->d_flags & DCACHE_NFSFS_RENAMED)
                goto out;

        sprintf(silly, ".nfs%*.*lx",
                i_inosize, i_inosize, dentry->d_inode->i_ino);

        /* Return delegation in anticipation of the rename */
        nfs_inode_return_delegation(dentry->d_inode);

        sdentry = NULL;
        do {
                char *suffix = silly + slen - countersize;

                dput(sdentry);
                sillycounter++;
                sprintf(suffix, "%*.*x", countersize, countersize, sillycounter);

                dfprintk(VFS, "NFS: trying to rename %s to %s\n",
                                dentry->d_name.name, silly);

                sdentry = lookup_one_len(silly, dentry->d_parent, slen);
                /*
                 * N.B. Better to return EBUSY here ... it could be
                 * dangerous to delete the file while it's in use.
                 */
                if (IS_ERR(sdentry))
                        goto out;
        } while (sdentry->d_inode != NULL); /* need negative lookup */

        qsilly.name = silly;
        qsilly.len = strlen(silly);
        nfs_begin_data_update(dir);
        if (dentry->d_inode) {
                nfs_begin_data_update(dentry->d_inode);
                error = NFS_PROTO(dir)->rename(dir, &dentry->d_name,
                                dir, &qsilly);
                nfs_mark_for_revalidate(dentry->d_inode);
                nfs_end_data_update(dentry->d_inode);
        } else
                error = NFS_PROTO(dir)->rename(dir, &dentry->d_name,
                                dir, &qsilly);
        nfs_end_data_update(dir);
        if (!error) {
                nfs_renew_times(dentry);
                nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
                d_move(dentry, sdentry);
                error = nfs_async_unlink(dentry);
                /* If we return 0 we don't unlink */
        }
        dput(sdentry);
out:
        return error;
}
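
/*
 * The silly name built above is ".nfs" followed by the inode number and
 * a global counter, both in fixed-width hex. On a host with 4-byte inode
 * numbers and a 4-byte counter that gives a 20-character name such as
 * ".nfs001a2b3c00000002" for inode 0x1a2b3c with the counter at 2.
 */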

/*
 * Remove a file after making sure there are no pending writes,
 * and after checking that the file has only one user.
 *
 * We invalidate the attribute cache and free the inode prior to the operation
 * to avoid possible races if the server reuses the inode.
 */
static int nfs_safe_remove(struct dentry *dentry)
{
        struct inode *dir = dentry->d_parent->d_inode;
        struct inode *inode = dentry->d_inode;
        int error = -EBUSY;

        dfprintk(VFS, "NFS: safe_remove(%s/%s)\n",
                dentry->d_parent->d_name.name, dentry->d_name.name);

        /* If the dentry was sillyrenamed, we simply call d_delete() */
        if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
                error = 0;
                goto out;
        }

        nfs_begin_data_update(dir);
        if (inode != NULL) {
                nfs_inode_return_delegation(inode);
                nfs_begin_data_update(inode);
                error = NFS_PROTO(dir)->remove(dir, &dentry->d_name);
                /* The VFS may want to delete this inode */
                if (error == 0)
                        drop_nlink(inode);
                nfs_mark_for_revalidate(inode);
                nfs_end_data_update(inode);
        } else
                error = NFS_PROTO(dir)->remove(dir, &dentry->d_name);
        nfs_end_data_update(dir);
out:
        return error;
}

/* We do silly rename. In case sillyrename() returns -EBUSY, the inode
 * belongs to an active ".nfs..." file and we return -EBUSY.
 *
 * If sillyrename() returns 0, we do nothing, otherwise we unlink.
 */
static int nfs_unlink(struct inode *dir, struct dentry *dentry)
{
        int error;
        int need_rehash = 0;

        dfprintk(VFS, "NFS: unlink(%s/%ld, %s)\n", dir->i_sb->s_id,
                dir->i_ino, dentry->d_name.name);

        lock_kernel();
        spin_lock(&dcache_lock);
        spin_lock(&dentry->d_lock);
        if (atomic_read(&dentry->d_count) > 1) {
                spin_unlock(&dentry->d_lock);
                spin_unlock(&dcache_lock);
                /* Start asynchronous writeout of the inode */
                write_inode_now(dentry->d_inode, 0);
                error = nfs_sillyrename(dir, dentry);
                unlock_kernel();
                return error;
        }
        if (!d_unhashed(dentry)) {
                __d_drop(dentry);
                need_rehash = 1;
        }
        spin_unlock(&dentry->d_lock);
        spin_unlock(&dcache_lock);
        error = nfs_safe_remove(dentry);
        if (!error) {
                nfs_renew_times(dentry);
                nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
        } else if (need_rehash)
                d_rehash(dentry);
        unlock_kernel();
        return error;
}
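
/*
 * In nfs_unlink() above, a d_count greater than one means someone other
 * than this unlink path still holds the dentry, typically through an
 * open file descriptor, so the file is silly-renamed rather than removed
 * and the real unlink is deferred until the last user goes away.
 */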

/*
 * To create a symbolic link, most file systems instantiate a new inode,
 * add a page to it containing the path, then write it out to the disk
 * using prepare_write/commit_write.
 *
 * Unfortunately the NFS client can't create the in-core inode first
 * because it needs a file handle to create an in-core inode (see
 * fs/nfs/inode.c:nfs_fhget).  We only have a file handle *after* the
 * symlink request has completed on the server.
 *
 * So instead we allocate a raw page, copy the symname into it, then do
 * the SYMLINK request with the page as the buffer.  If it succeeds, we
 * now have a new file handle and can instantiate an in-core NFS inode
 * and move the raw page into its mapping.
 */
static int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
        struct pagevec lru_pvec;
        struct page *page;
        char *kaddr;
        struct iattr attr;
        unsigned int pathlen = strlen(symname);
        int error;

        dfprintk(VFS, "NFS: symlink(%s/%ld, %s, %s)\n", dir->i_sb->s_id,
                dir->i_ino, dentry->d_name.name, symname);

        if (pathlen > PAGE_SIZE)
                return -ENAMETOOLONG;

        attr.ia_mode = S_IFLNK | S_IRWXUGO;
        attr.ia_valid = ATTR_MODE;

        lock_kernel();

        page = alloc_page(GFP_KERNEL);
        if (!page) {
                unlock_kernel();
                return -ENOMEM;
        }

        kaddr = kmap_atomic(page, KM_USER0);
        memcpy(kaddr, symname, pathlen);
        if (pathlen < PAGE_SIZE)
                memset(kaddr + pathlen, 0, PAGE_SIZE - pathlen);
        kunmap_atomic(kaddr, KM_USER0);

        nfs_begin_data_update(dir);
        error = NFS_PROTO(dir)->symlink(dir, dentry, page, pathlen, &attr);
        nfs_end_data_update(dir);
        if (error != 0) {
                dfprintk(VFS, "NFS: symlink(%s/%ld, %s, %s) error %d\n",
                        dir->i_sb->s_id, dir->i_ino,
                        dentry->d_name.name, symname, error);
                d_drop(dentry);
                __free_page(page);
                unlock_kernel();
                return error;
        }

        /*
         * No big deal if we can't add this page to the page cache here.
         * READLINK will get the missing page from the server if needed.
         */
        pagevec_init(&lru_pvec, 0);
        if (!add_to_page_cache(page, dentry->d_inode->i_mapping, 0,
                                                        GFP_KERNEL)) {
                pagevec_add(&lru_pvec, page);
                pagevec_lru_add(&lru_pvec);
                SetPageUptodate(page);
                unlock_page(page);
        } else
                __free_page(page);

        unlock_kernel();
        return 0;
}
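
/*
 * The SYMLINK request sends the page together with an explicit pathlen,
 * so the target string need not be NUL-terminated on the wire; the
 * memset above only zeroes the unused tail of the page, presumably so
 * that stale page contents never leave the machine.
 */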

static int
nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
        struct inode *inode = old_dentry->d_inode;
        int error;

        dfprintk(VFS, "NFS: link(%s/%s -> %s/%s)\n",
                old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
                dentry->d_parent->d_name.name, dentry->d_name.name);

        lock_kernel();
        nfs_begin_data_update(dir);
        nfs_begin_data_update(inode);
        error = NFS_PROTO(dir)->link(inode, dir, &dentry->d_name);
        if (error == 0) {
                atomic_inc(&inode->i_count);
                d_instantiate(dentry, inode);
        }
        nfs_end_data_update(inode);
        nfs_end_data_update(dir);
        unlock_kernel();
        return error;
}

/*
 * RENAME
 * FIXME: Some nfsds, like the Linux user space nfsd, may generate a
 * different file handle for the same inode after a rename (e.g. when
 * moving to a different directory). A fail-safe way to handle this would
 * be to look up old_dir/old_name, create a link to new_dir/new_name and
 * rename the old file using the sillyrename stuff. This way, the original
 * file in old_dir will go away when the last process iput()s the inode.
 *
 * FIXED.
 *
 * It actually works quite well. One needs to have the possibility for
 * at least one ".nfs..." file in each directory the file ever gets
 * moved or linked to, which happens automagically with the new
 * implementation that only depends on the dcache stuff instead of
 * using the inode layer.
 *
 * Unfortunately, things are a little more complicated than indicated
 * above. For a cross-directory move, we want to make sure we can get
 * rid of the old inode after the operation. This means there must be
 * no pending writes (if it's a file), and the use count must be 1.
 * If these conditions are met, we can drop the dentries before doing
 * the rename.
 */
static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                      struct inode *new_dir, struct dentry *new_dentry)
{
        struct inode *old_inode = old_dentry->d_inode;
        struct inode *new_inode = new_dentry->d_inode;
        struct dentry *dentry = NULL, *rehash = NULL;
        int error = -EBUSY;

        /*
         * To prevent any new references to the target during the rename,
         * we unhash the dentry and free the inode in advance.
         */
        lock_kernel();
        if (!d_unhashed(new_dentry)) {
                d_drop(new_dentry);
                rehash = new_dentry;
        }

        dfprintk(VFS, "NFS: rename(%s/%s -> %s/%s, ct=%d)\n",
                 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
                 new_dentry->d_parent->d_name.name, new_dentry->d_name.name,
                 atomic_read(&new_dentry->d_count));

        /*
         * First check whether the target is busy ... we can't
         * safely do _any_ rename if the target is in use.
         *
         * For files, make a copy of the dentry and then do a
         * silly-rename. If the silly-rename succeeds, the
         * copied dentry is hashed and becomes the new target.
         */
        if (!new_inode)
                goto go_ahead;
        if (S_ISDIR(new_inode->i_mode)) {
                error = -EISDIR;
                if (!S_ISDIR(old_inode->i_mode))
                        goto out;
        } else if (atomic_read(&new_dentry->d_count) > 2) {
                int err;
                /* copy the target dentry's name */
                dentry = d_alloc(new_dentry->d_parent,
                                 &new_dentry->d_name);
                if (!dentry)
                        goto out;

                /* silly-rename the existing target ... */
                err = nfs_sillyrename(new_dir, new_dentry);
                if (!err) {
                        new_dentry = rehash = dentry;
                        new_inode = NULL;
                        /* instantiate the replacement target */
                        d_instantiate(new_dentry, NULL);
                } else if (atomic_read(&new_dentry->d_count) > 1) {
                        /* dentry still busy? */
#ifdef NFS_PARANOIA
                        printk("nfs_rename: target %s/%s busy, d_count=%d\n",
                               new_dentry->d_parent->d_name.name,
                               new_dentry->d_name.name,
                               atomic_read(&new_dentry->d_count));
#endif
                        goto out;
                }
        } else
                drop_nlink(new_inode);

go_ahead:
        /*
         * ... prune child dentries and writebacks if needed.
         */
        if (atomic_read(&old_dentry->d_count) > 1) {
                if (S_ISREG(old_inode->i_mode))
                        nfs_wb_all(old_inode);
                shrink_dcache_parent(old_dentry);
        }
        nfs_inode_return_delegation(old_inode);

        if (new_inode != NULL) {
                nfs_inode_return_delegation(new_inode);
                d_delete(new_dentry);
        }

        nfs_begin_data_update(old_dir);
        nfs_begin_data_update(new_dir);
        nfs_begin_data_update(old_inode);
        error = NFS_PROTO(old_dir)->rename(old_dir, &old_dentry->d_name,
                                           new_dir, &new_dentry->d_name);
        nfs_mark_for_revalidate(old_inode);
        nfs_end_data_update(old_inode);
        nfs_end_data_update(new_dir);
        nfs_end_data_update(old_dir);
out:
        if (rehash)
                d_rehash(rehash);
        if (!error) {
                d_move(old_dentry, new_dentry);
                nfs_renew_times(new_dentry);
                nfs_refresh_verifier(new_dentry, nfs_save_change_attribute(new_dir));
        }

        /* new dentry created? */
        if (dentry)
                dput(dentry);
        unlock_kernel();
        return error;
}
|
|
|
|
|
2006-07-25 22:28:18 +07:00
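/*
 * The ACCESS cache: each inode keeps the rights the server granted to
 * recently used credentials in an rb-tree, and inodes with cached
 * entries are strung on a global LRU list so that memory pressure can
 * reclaim the oldest entries through the shrinker below.
 */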
static DEFINE_SPINLOCK(nfs_access_lru_lock);
static LIST_HEAD(nfs_access_lru_list);
static atomic_long_t nfs_access_nr_entries;

static void nfs_access_free_entry(struct nfs_access_entry *entry)
{
	put_rpccred(entry->cred);
	kfree(entry);
	smp_mb__before_atomic_dec();
	atomic_long_dec(&nfs_access_nr_entries);
	smp_mb__after_atomic_dec();
}

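/*
 * Scan up to @nr_to_scan inodes off the global LRU, reclaiming the
 * least-recently-used ACCESS entry of each.  The return value is the
 * usual shrinker estimate: the remaining entry count scaled by
 * sysctl_vfs_cache_pressure.
 */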
int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask)
{
	LIST_HEAD(head);
	struct nfs_inode *nfsi;
	struct nfs_access_entry *cache;

	spin_lock(&nfs_access_lru_lock);
restart:
	list_for_each_entry(nfsi, &nfs_access_lru_list, access_cache_inode_lru) {
		struct inode *inode;

		if (nr_to_scan-- == 0)
			break;
		inode = igrab(&nfsi->vfs_inode);
		if (inode == NULL)
			continue;
		spin_lock(&inode->i_lock);
		if (list_empty(&nfsi->access_cache_entry_lru))
			goto remove_lru_entry;
		cache = list_entry(nfsi->access_cache_entry_lru.next,
				   struct nfs_access_entry, lru);
		list_move(&cache->lru, &head);
		rb_erase(&cache->rb_node, &nfsi->access_cache);
		if (!list_empty(&nfsi->access_cache_entry_lru))
			list_move_tail(&nfsi->access_cache_inode_lru,
				       &nfs_access_lru_list);
		else {
remove_lru_entry:
			list_del_init(&nfsi->access_cache_inode_lru);
			clear_bit(NFS_INO_ACL_LRU_SET, &nfsi->flags);
		}
		spin_unlock(&inode->i_lock);
		iput(inode);
		goto restart;
	}
	spin_unlock(&nfs_access_lru_lock);
	while (!list_empty(&head)) {
		cache = list_entry(head.next, struct nfs_access_entry, lru);
		list_del(&cache->lru);
		nfs_access_free_entry(cache);
	}
	return (atomic_long_read(&nfs_access_nr_entries) / 100) * sysctl_vfs_cache_pressure;
}

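/*
 * Empty an inode's ACCESS cache.  The caller must hold inode->i_lock;
 * it is dropped once the entries have been unhooked, so they can be
 * freed without the lock held.
 */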
static void __nfs_access_zap_cache(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct rb_root *root_node = &nfsi->access_cache;
	struct rb_node *n, *dispose = NULL;
	struct nfs_access_entry *entry;

	/* Unhook entries from the cache */
	while ((n = rb_first(root_node)) != NULL) {
		entry = rb_entry(n, struct nfs_access_entry, rb_node);
		rb_erase(n, root_node);
		list_del(&entry->lru);
		n->rb_left = dispose;
		dispose = n;
	}
	nfsi->cache_validity &= ~NFS_INO_INVALID_ACCESS;
	spin_unlock(&inode->i_lock);

	/* Now kill them all! */
	while (dispose != NULL) {
		n = dispose;
		dispose = n->rb_left;
		nfs_access_free_entry(rb_entry(n, struct nfs_access_entry, rb_node));
	}
}

void nfs_access_zap_cache(struct inode *inode)
{
	/* Remove from the global LRU list */
	if (test_and_clear_bit(NFS_INO_ACL_LRU_SET, &NFS_FLAGS(inode))) {
		spin_lock(&nfs_access_lru_lock);
		list_del_init(&NFS_I(inode)->access_cache_inode_lru);
		spin_unlock(&nfs_access_lru_lock);
	}

	spin_lock(&inode->i_lock);
	/* This will release the spinlock */
	__nfs_access_zap_cache(inode);
}

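/*
 * Look up the cached entry for @cred.  Entries are ordered simply by
 * the credential's pointer value; the caller must hold inode->i_lock.
 */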
static struct nfs_access_entry *nfs_access_search_rbtree(struct inode *inode, struct rpc_cred *cred)
{
	struct rb_node *n = NFS_I(inode)->access_cache.rb_node;
	struct nfs_access_entry *entry;

	while (n != NULL) {
		entry = rb_entry(n, struct nfs_access_entry, rb_node);

		if (cred < entry->cred)
			n = n->rb_left;
		else if (cred > entry->cred)
			n = n->rb_right;
		else
			return entry;
	}
	return NULL;
}

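/*
 * Copy the cached ACCESS result for @cred into @res, or return -ENOENT
 * if there is none.  A stale entry is dropped on the way out, and an
 * invalidated cache is zapped wholesale.
 */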
int nfs_access_get_cached(struct inode *inode, struct rpc_cred *cred, struct nfs_access_entry *res)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_access_entry *cache;
	int err = -ENOENT;

	spin_lock(&inode->i_lock);
	if (nfsi->cache_validity & NFS_INO_INVALID_ACCESS)
		goto out_zap;
	cache = nfs_access_search_rbtree(inode, cred);
	if (cache == NULL)
		goto out;
	if (time_after(jiffies, cache->jiffies + NFS_ATTRTIMEO(inode)))
		goto out_stale;
	res->jiffies = cache->jiffies;
	res->cred = cache->cred;
	res->mask = cache->mask;
	list_move_tail(&cache->lru, &nfsi->access_cache_entry_lru);
	err = 0;
out:
	spin_unlock(&inode->i_lock);
	return err;
out_stale:
	rb_erase(&cache->rb_node, &nfsi->access_cache);
	list_del(&cache->lru);
	spin_unlock(&inode->i_lock);
	nfs_access_free_entry(cache);
	return -ENOENT;
out_zap:
	/* This will release the spinlock */
	__nfs_access_zap_cache(inode);
	return -ENOENT;
}

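/*
 * Insert @set into the inode's rb-tree.  If an entry for the same
 * credential already exists, it is replaced and freed, so at most one
 * entry per credential is kept.
 */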
static void nfs_access_add_rbtree(struct inode *inode, struct nfs_access_entry *set)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct rb_root *root_node = &nfsi->access_cache;
	struct rb_node **p = &root_node->rb_node;
	struct rb_node *parent = NULL;
	struct nfs_access_entry *entry;

	spin_lock(&inode->i_lock);
	while (*p != NULL) {
		parent = *p;
		entry = rb_entry(parent, struct nfs_access_entry, rb_node);

		if (set->cred < entry->cred)
			p = &parent->rb_left;
		else if (set->cred > entry->cred)
			p = &parent->rb_right;
		else
			goto found;
	}
	rb_link_node(&set->rb_node, parent, p);
	rb_insert_color(&set->rb_node, root_node);
	list_add_tail(&set->lru, &nfsi->access_cache_entry_lru);
	spin_unlock(&inode->i_lock);
	return;
found:
	rb_replace_node(parent, &set->rb_node, root_node);
	list_add_tail(&set->lru, &nfsi->access_cache_entry_lru);
	list_del(&entry->lru);
	spin_unlock(&inode->i_lock);
	nfs_access_free_entry(entry);
}

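/*
 * Cache a fresh ACCESS result: copy it into a new entry, hook the
 * entry into the per-inode tree and LRU, and make sure the inode
 * itself is on the global LRU so the shrinker can find it.
 */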
void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set)
{
	struct nfs_access_entry *cache = kmalloc(sizeof(*cache), GFP_KERNEL);
	if (cache == NULL)
		return;
	RB_CLEAR_NODE(&cache->rb_node);
	cache->jiffies = set->jiffies;
	cache->cred = get_rpccred(set->cred);
	cache->mask = set->mask;

	nfs_access_add_rbtree(inode, cache);

	/* Update accounting */
	smp_mb__before_atomic_inc();
	atomic_long_inc(&nfs_access_nr_entries);
	smp_mb__after_atomic_inc();

	/* Add inode to global LRU list */
	if (!test_and_set_bit(NFS_INO_ACL_LRU_SET, &NFS_FLAGS(inode))) {
		spin_lock(&nfs_access_lru_lock);
		list_add_tail(&NFS_I(inode)->access_cache_inode_lru, &nfs_access_lru_list);
		spin_unlock(&nfs_access_lru_lock);
	}
}

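/*
 * Check @mask against the rights cached for @cred, asking the server
 * only on a cache miss.
 */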
static int nfs_do_access(struct inode *inode, struct rpc_cred *cred, int mask)
{
	struct nfs_access_entry cache;
	int status;

	status = nfs_access_get_cached(inode, cred, &cache);
	if (status == 0)
		goto out;

	/* Be clever: ask server to check for all possible rights */
	cache.mask = MAY_EXEC | MAY_WRITE | MAY_READ;
	cache.cred = cred;
	cache.jiffies = jiffies;
	status = NFS_PROTO(inode)->access(inode, &cache);
	if (status != 0)
		return status;
	nfs_access_add_cache(inode, &cache);
out:
	if ((cache.mask & mask) == mask)
		return 0;
	return -EACCES;
}

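/*
 * VFS permission check.  Cheap cases (symlinks, NFSv4 atomic opens,
 * pure directory writes) are decided locally; everything else goes
 * through the ACCESS cache.
 */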
int nfs_permission(struct inode *inode, int mask, struct nameidata *nd)
{
	struct rpc_cred *cred;
	int res = 0;

	nfs_inc_stats(inode, NFSIOS_VFSACCESS);

	if (mask == 0)
		goto out;
	/* Is this sys_access() ? */
	if (nd != NULL && (nd->flags & LOOKUP_ACCESS))
		goto force_lookup;

	switch (inode->i_mode & S_IFMT) {
		case S_IFLNK:
			goto out;
		case S_IFREG:
			/* NFSv4 has atomic_open... */
			if (nfs_server_capable(inode, NFS_CAP_ATOMIC_OPEN)
					&& nd != NULL
					&& (nd->flags & LOOKUP_OPEN))
				goto out;
			break;
		case S_IFDIR:
			/*
			 * Optimize away all write operations, since the server
			 * will check permissions when we perform the op.
			 */
			if ((mask & MAY_WRITE) && !(mask & MAY_READ))
				goto out;
	}

force_lookup:
	lock_kernel();

	if (!NFS_PROTO(inode)->access)
		goto out_notsup;

	cred = rpcauth_lookupcred(NFS_CLIENT(inode)->cl_auth, 0);
	if (!IS_ERR(cred)) {
		res = nfs_do_access(inode, cred, mask);
		put_rpccred(cred);
	} else
		res = PTR_ERR(cred);
	unlock_kernel();
out:
	dfprintk(VFS, "NFS: permission(%s/%ld), mask=0x%x, res=%d\n",
		inode->i_sb->s_id, inode->i_ino, mask, res);
	return res;
out_notsup:
	res = nfs_revalidate_inode(NFS_SERVER(inode), inode);
	if (res == 0)
		res = generic_permission(inode, mask, NULL);
	unlock_kernel();
	goto out;
}

/*
 * Local variables:
 *  version-control: t
 *  kept-new-versions: 5
 * End:
 */