linux_dsm_epyc7002/fs/nfs/inode.c

2136 lines
58 KiB
C
Raw Normal View History

/*
* linux/fs/nfs/inode.c
*
* Copyright (C) 1992 Rick Sladkey
*
* nfs inode and superblock handling functions
*
* Modularised by Alan Cox <alan@lxorguk.ukuu.org.uk>, while hacking some
* experimental NFS changes. Modularisation taken straight from SYS5 fs.
*
* Change to nfs_read_super() to permit NFS mounts to multi-homed hosts.
* J.S.Peatfield@damtp.cam.ac.uk
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/metrics.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs4_mount.h>
#include <linux/lockd/bind.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/vfs.h>
#include <linux/inet.h>
#include <linux/nfs_xdr.h>
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h percpu.h is included by sched.h and module.h and thus ends up being included when building most .c files. percpu.h includes slab.h which in turn includes gfp.h making everything defined by the two files universally available and complicating inclusion dependencies. percpu.h -> slab.h dependency is about to be removed. Prepare for this change by updating users of gfp and slab facilities include those headers directly instead of assuming availability. As this conversion needs to touch large number of source files, the following script is used as the basis of conversion. http://userweb.kernel.org/~tj/misc/slabh-sweep.py The script does the followings. * Scan files for gfp and slab usages and update includes such that only the necessary includes are there. ie. if only gfp is used, gfp.h, if slab is used, slab.h. * When the script inserts a new include, it looks at the include blocks and try to put the new include such that its order conforms to its surrounding. It's put in the include block which contains core kernel includes, in the same order that the rest are ordered - alphabetical, Christmas tree, rev-Xmas-tree or at the end if there doesn't seem to be any matching order. * If the script can't find a place to put a new include (mostly because the file doesn't have fitting include block), it prints out an error message indicating which .h file needs to be added to the file. The conversion was done in the following steps. 1. The initial automatic conversion of all .c files updated slightly over 4000 files, deleting around 700 includes and adding ~480 gfp.h and ~3000 slab.h inclusions. The script emitted errors for ~400 files. 2. Each error was manually checked. Some didn't need the inclusion, some needed manual addition while adding it to implementation .h or embedding .c file was more appropriate for others. This step added inclusions to around 150 files. 3. The script was run again and the output was compared to the edits from #2 to make sure no file was left behind. 4. Several build tests were done and a couple of problems were fixed. e.g. lib/decompress_*.c used malloc/free() wrappers around slab APIs requiring slab.h to be added manually. 5. The script was run on all .h files but without automatically editing them as sprinkling gfp.h and slab.h inclusions around .h files could easily lead to inclusion dependency hell. Most gfp.h inclusion directives were ignored as stuff from gfp.h was usually wildly available and often used in preprocessor macros. Each slab.h inclusion directive was examined and added manually as necessary. 6. percpu.h was updated not to include slab.h. 7. Build test were done on the following configurations and failures were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my distributed build env didn't work with gcov compiles) and a few more options had to be turned off depending on archs to make things build (like ipr on powerpc/64 which failed due to missing writeq). * x86 and x86_64 UP and SMP allmodconfig and a custom test config. * powerpc and powerpc64 SMP allmodconfig * sparc and sparc64 SMP allmodconfig * ia64 SMP allmodconfig * s390 SMP allmodconfig * alpha SMP allmodconfig * um on x86_64 SMP allmodconfig 8. percpu.h modifications were reverted so that it could be applied as a separate patch and serve as bisection point. Given the fact that I had only a couple of failures from tests on step 6, I'm fairly confident about the coverage of this conversion patch. If there is a breakage, it's likely to be something in one of the arch headers which should be easily discoverable easily on most builds of the specific arch. Signed-off-by: Tejun Heo <tj@kernel.org> Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org> Cc: Ingo Molnar <mingo@redhat.com> Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 15:04:11 +07:00
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/freezer.h>
#include <asm/uaccess.h>
#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#include "iostat.h"
#include "internal.h"
#include "fscache.h"
#include "pnfs.h"
#include "nfs.h"
#include "netns.h"
#include "nfstrace.h"
#define NFSDBG_FACILITY NFSDBG_VFS
#define NFS_64_BIT_INODE_NUMBERS_ENABLED 1
/* Default is to see 64-bit inode numbers */
static bool enable_ino64 = NFS_64_BIT_INODE_NUMBERS_ENABLED;
static void nfs_invalidate_inode(struct inode *);
static int nfs_update_inode(struct inode *, struct nfs_fattr *);
static struct kmem_cache * nfs_inode_cachep;
static inline unsigned long
nfs_fattr_to_ino_t(struct nfs_fattr *fattr)
{
return nfs_fileid_to_ino_t(fattr->fileid);
}
static int nfs_wait_killable(int mode)
2009-03-12 01:10:30 +07:00
{
freezable_schedule_unsafe();
if (signal_pending_state(mode, current))
return -ERESTARTSYS;
2009-03-12 01:10:30 +07:00
return 0;
}
int nfs_wait_bit_killable(struct wait_bit_key *key, int mode)
{
return nfs_wait_killable(mode);
}
EXPORT_SYMBOL_GPL(nfs_wait_bit_killable);
2009-03-12 01:10:30 +07:00
int nfs_wait_atomic_killable(atomic_t *p)
{
return nfs_wait_killable(TASK_KILLABLE);
}
/**
* nfs_compat_user_ino64 - returns the user-visible inode number
* @fileid: 64-bit fileid
*
* This function returns a 32-bit inode number if the boot parameter
* nfs.enable_ino64 is zero.
*/
u64 nfs_compat_user_ino64(u64 fileid)
{
#ifdef CONFIG_COMPAT
compat_ulong_t ino;
#else
unsigned long ino;
#endif
if (enable_ino64)
return fileid;
ino = fileid;
if (sizeof(ino) < sizeof(fileid))
ino ^= fileid >> (sizeof(fileid)-sizeof(ino)) * 8;
return ino;
}
int nfs_drop_inode(struct inode *inode)
{
return NFS_STALE(inode) || generic_drop_inode(inode);
}
EXPORT_SYMBOL_GPL(nfs_drop_inode);
void nfs_clear_inode(struct inode *inode)
{
/*
* The following should never happen...
*/
WARN_ON_ONCE(nfs_have_writebacks(inode));
WARN_ON_ONCE(!list_empty(&NFS_I(inode)->open_files));
nfs_zap_acl_cache(inode);
nfs_access_zap_cache(inode);
NFS: Use i_writecount to control whether to get an fscache cookie in nfs_open() Use i_writecount to control whether to get an fscache cookie in nfs_open() as NFS does not do write caching yet. I *think* this is the cause of a problem encountered by Mark Moseley whereby __fscache_uncache_page() gets a NULL pointer dereference because cookie->def is NULL: BUG: unable to handle kernel NULL pointer dereference at 0000000000000010 IP: [<ffffffff812a1903>] __fscache_uncache_page+0x23/0x160 PGD 0 Thread overran stack, or stack corrupted Oops: 0000 [#1] SMP Modules linked in: ... CPU: 7 PID: 18993 Comm: php Not tainted 3.11.1 #1 Hardware name: Dell Inc. PowerEdge R420/072XWF, BIOS 1.3.5 08/21/2012 task: ffff8804203460c0 ti: ffff880420346640 RIP: 0010:[<ffffffff812a1903>] __fscache_uncache_page+0x23/0x160 RSP: 0018:ffff8801053af878 EFLAGS: 00210286 RAX: 0000000000000000 RBX: ffff8800be2f8780 RCX: ffff88022ffae5e8 RDX: 0000000000004c66 RSI: ffffea00055ff440 RDI: ffff8800be2f8780 RBP: ffff8801053af898 R08: 0000000000000001 R09: 0000000000000003 R10: 0000000000000000 R11: 0000000000000000 R12: ffffea00055ff440 R13: 0000000000001000 R14: ffff8800c50be538 R15: 0000000000000000 FS: 0000000000000000(0000) GS:ffff88042fc60000(0063) knlGS:00000000e439c700 CS: 0010 DS: 002b ES: 002b CR0: 0000000080050033 CR2: 0000000000000010 CR3: 0000000001d8f000 CR4: 00000000000607f0 Stack: ... Call Trace: [<ffffffff81365a72>] __nfs_fscache_invalidate_page+0x42/0x70 [<ffffffff813553d5>] nfs_invalidate_page+0x75/0x90 [<ffffffff811b8f5e>] truncate_inode_page+0x8e/0x90 [<ffffffff811b90ad>] truncate_inode_pages_range.part.12+0x14d/0x620 [<ffffffff81d6387d>] ? __mutex_lock_slowpath+0x1fd/0x2e0 [<ffffffff811b95d3>] truncate_inode_pages_range+0x53/0x70 [<ffffffff811b969d>] truncate_inode_pages+0x2d/0x40 [<ffffffff811b96ff>] truncate_pagecache+0x4f/0x70 [<ffffffff81356840>] nfs_setattr_update_inode+0xa0/0x120 [<ffffffff81368de4>] nfs3_proc_setattr+0xc4/0xe0 [<ffffffff81357f78>] nfs_setattr+0xc8/0x150 [<ffffffff8122d95b>] notify_change+0x1cb/0x390 [<ffffffff8120a55b>] do_truncate+0x7b/0xc0 [<ffffffff8121f96c>] do_last+0xa4c/0xfd0 [<ffffffff8121ffbc>] path_openat+0xcc/0x670 [<ffffffff81220a0e>] do_filp_open+0x4e/0xb0 [<ffffffff8120ba1f>] do_sys_open+0x13f/0x2b0 [<ffffffff8126aaf6>] compat_SyS_open+0x36/0x50 [<ffffffff81d7204c>] sysenter_dispatch+0x7/0x24 The code at the instruction pointer was disassembled: > (gdb) disas __fscache_uncache_page > Dump of assembler code for function __fscache_uncache_page: > ... > 0xffffffff812a18ff <+31>: mov 0x48(%rbx),%rax > 0xffffffff812a1903 <+35>: cmpb $0x0,0x10(%rax) > 0xffffffff812a1907 <+39>: je 0xffffffff812a19cd <__fscache_uncache_page+237> These instructions make up: ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); That cmpb is the faulting instruction (%rax is 0). So cookie->def is NULL - which presumably means that the cookie has already been at least partway through __fscache_relinquish_cookie(). What I think may be happening is something like a three-way race on the same file: PROCESS 1 PROCESS 2 PROCESS 3 =============== =============== =============== open(O_TRUNC|O_WRONLY) open(O_RDONLY) open(O_WRONLY) -->nfs_open() -->nfs_fscache_set_inode_cookie() nfs_fscache_inode_lock() nfs_fscache_disable_inode_cookie() __fscache_relinquish_cookie() nfs_inode->fscache = NULL <--nfs_fscache_set_inode_cookie() -->nfs_open() -->nfs_fscache_set_inode_cookie() nfs_fscache_inode_lock() nfs_fscache_enable_inode_cookie() __fscache_acquire_cookie() nfs_inode->fscache = cookie <--nfs_fscache_set_inode_cookie() <--nfs_open() -->nfs_setattr() ... ... -->nfs_invalidate_page() -->__nfs_fscache_invalidate_page() cookie = nfsi->fscache -->nfs_open() -->nfs_fscache_set_inode_cookie() nfs_fscache_inode_lock() nfs_fscache_disable_inode_cookie() -->__fscache_relinquish_cookie() -->__fscache_uncache_page(cookie) <crash> <--__fscache_relinquish_cookie() nfs_inode->fscache = NULL <--nfs_fscache_set_inode_cookie() What is needed is something to prevent process #2 from reacquiring the cookie - and I think checking i_writecount should do the trick. It's also possible to have a two-way race on this if the file is opened O_TRUNC|O_RDONLY instead. Reported-by: Mark Moseley <moseleymark@gmail.com> Signed-off-by: David Howells <dhowells@redhat.com>
2013-09-27 17:20:03 +07:00
nfs_fscache_clear_inode(inode);
}
EXPORT_SYMBOL_GPL(nfs_clear_inode);
void nfs_evict_inode(struct inode *inode)
{
mm + fs: store shadow entries in page cache Reclaim will be leaving shadow entries in the page cache radix tree upon evicting the real page. As those pages are found from the LRU, an iput() can lead to the inode being freed concurrently. At this point, reclaim must no longer install shadow pages because the inode freeing code needs to ensure the page tree is really empty. Add an address_space flag, AS_EXITING, that the inode freeing code sets under the tree lock before doing the final truncate. Reclaim will check for this flag before installing shadow pages. Signed-off-by: Johannes Weiner <hannes@cmpxchg.org> Reviewed-by: Rik van Riel <riel@redhat.com> Reviewed-by: Minchan Kim <minchan@kernel.org> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Bob Liu <bob.liu@oracle.com> Cc: Christoph Hellwig <hch@infradead.org> Cc: Dave Chinner <david@fromorbit.com> Cc: Greg Thelen <gthelen@google.com> Cc: Hugh Dickins <hughd@google.com> Cc: Jan Kara <jack@suse.cz> Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Cc: Luigi Semenzato <semenzato@google.com> Cc: Mel Gorman <mgorman@suse.de> Cc: Metin Doslu <metin@citusdata.com> Cc: Michel Lespinasse <walken@google.com> Cc: Ozgun Erdogan <ozgun@citusdata.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Roman Gushchin <klamm@yandex-team.ru> Cc: Ryan Mallon <rmallon@gmail.com> Cc: Tejun Heo <tj@kernel.org> Cc: Vlastimil Babka <vbabka@suse.cz> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2014-04-04 04:47:49 +07:00
truncate_inode_pages_final(&inode->i_data);
clear_inode(inode);
nfs_clear_inode(inode);
}
int nfs_sync_inode(struct inode *inode)
{
inode_dio_wait(inode);
return nfs_wb_all(inode);
}
EXPORT_SYMBOL_GPL(nfs_sync_inode);
/**
* nfs_sync_mapping - helper to flush all mmapped dirty data to disk
*/
int nfs_sync_mapping(struct address_space *mapping)
{
int ret = 0;
if (mapping->nrpages != 0) {
unmap_mapping_range(mapping, 0, 0, 0);
ret = nfs_wb_all(mapping->host);
}
return ret;
}
static void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
{
struct nfs_inode *nfsi = NFS_I(inode);
if (inode->i_mapping->nrpages == 0)
flags &= ~NFS_INO_INVALID_DATA;
nfsi->cache_validity |= flags;
if (flags & NFS_INO_INVALID_DATA)
nfs_fscache_invalidate(inode);
}
/*
* Invalidate the local caches
*/
static void nfs_zap_caches_locked(struct inode *inode)
{
struct nfs_inode *nfsi = NFS_I(inode);
int mode = inode->i_mode;
nfs_inc_stats(inode, NFSIOS_ATTRINVALIDATE);
nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
nfsi->attrtimeo_timestamp = jiffies;
memset(NFS_I(inode)->cookieverf, 0, sizeof(NFS_I(inode)->cookieverf));
NFS: Use FS-Cache invalidation Use the new FS-Cache invalidation facility from NFS to deal with foreign changes being detected on the server rather than attempting to retire the old cookie and get a new one. The problem with the old method was that NFS did not wait for all outstanding storage and retrieval ops on the cache to complete. There was no automatic wait between the calls to ->readpages() and calls to invalidate_inode_pages2() as the latter can only wait on locked pages that have been added to the pagecache (which they haven't yet on entry to ->readpages()). This was leading to oopses like the one below when an outstanding read got cut off from its cookie by a premature release. BUG: unable to handle kernel NULL pointer dereference at 00000000000000a8 IP: [<ffffffffa0075118>] __fscache_read_or_alloc_pages+0x1dd/0x315 [fscache] PGD 15889067 PUD 15890067 PMD 0 Oops: 0000 [#1] SMP CPU 0 Modules linked in: cachefiles nfs fscache auth_rpcgss nfs_acl lockd sunrpc Pid: 4544, comm: tar Not tainted 3.1.0-rc4-fsdevel+ #1064 /DG965RY RIP: 0010:[<ffffffffa0075118>] [<ffffffffa0075118>] __fscache_read_or_alloc_pages+0x1dd/0x315 [fscache] RSP: 0018:ffff8800158799e8 EFLAGS: 00010246 RAX: 0000000000000000 RBX: ffff8800070d41e0 RCX: ffff8800083dc1b0 RDX: 0000000000000000 RSI: ffff880015879960 RDI: ffff88003e627b90 RBP: ffff880015879a28 R08: 0000000000000002 R09: 0000000000000002 R10: 0000000000000001 R11: ffff880015879950 R12: ffff880015879aa4 R13: 0000000000000000 R14: ffff8800083dc158 R15: ffff880015879be8 FS: 00007f671e9d87c0(0000) GS:ffff88003bc00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b CR2: 00000000000000a8 CR3: 000000001587f000 CR4: 00000000000006f0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400 Process tar (pid: 4544, threadinfo ffff880015878000, task ffff880015875040) Stack: ffffffffa00b1759 ffff8800070dc158 ffff8800000213da ffff88002a286508 ffff880015879aa4 ffff880015879be8 0000000000000001 ffff88002a2866e8 ffff880015879a88 ffffffffa00b20be 00000000000200da ffff880015875040 Call Trace: [<ffffffffa00b1759>] ? nfs_fscache_wait_bit+0xd/0xd [nfs] [<ffffffffa00b20be>] __nfs_readpages_from_fscache+0x7e/0x13f [nfs] [<ffffffff81095fe7>] ? __alloc_pages_nodemask+0x156/0x662 [<ffffffffa0098763>] nfs_readpages+0xee/0x187 [nfs] [<ffffffff81098a5e>] __do_page_cache_readahead+0x1be/0x267 [<ffffffff81098942>] ? __do_page_cache_readahead+0xa2/0x267 [<ffffffff81098d7b>] ra_submit+0x1c/0x20 [<ffffffff8109900a>] ondemand_readahead+0x28b/0x29a [<ffffffff810990ce>] page_cache_sync_readahead+0x38/0x3a [<ffffffff81091d8a>] generic_file_aio_read+0x2ab/0x67e [<ffffffffa008cfbe>] nfs_file_read+0xa4/0xc9 [nfs] [<ffffffff810c22c4>] do_sync_read+0xba/0xfa [<ffffffff810a62c9>] ? might_fault+0x4e/0x9e [<ffffffff81177a47>] ? security_file_permission+0x7b/0x84 [<ffffffff810c25dd>] ? rw_verify_area+0xab/0xc8 [<ffffffff810c29a4>] vfs_read+0xaa/0x13a [<ffffffff810c2a79>] sys_read+0x45/0x6c [<ffffffff813ac37b>] system_call_fastpath+0x16/0x1b Reported-by: Mark Moseley <moseleymark@gmail.com> Signed-off-by: David Howells <dhowells@redhat.com>
2012-12-21 04:52:38 +07:00
if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR
| NFS_INO_INVALID_DATA
| NFS_INO_INVALID_ACCESS
| NFS_INO_INVALID_ACL
| NFS_INO_REVAL_PAGECACHE);
} else
nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR
| NFS_INO_INVALID_ACCESS
| NFS_INO_INVALID_ACL
| NFS_INO_REVAL_PAGECACHE);
nfs_zap_label_cache_locked(nfsi);
}
void nfs_zap_caches(struct inode *inode)
{
spin_lock(&inode->i_lock);
nfs_zap_caches_locked(inode);
spin_unlock(&inode->i_lock);
}
void nfs_zap_mapping(struct inode *inode, struct address_space *mapping)
{
if (mapping->nrpages != 0) {
spin_lock(&inode->i_lock);
nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA);
spin_unlock(&inode->i_lock);
}
}
void nfs_zap_acl_cache(struct inode *inode)
{
void (*clear_acl_cache)(struct inode *);
clear_acl_cache = NFS_PROTO(inode)->clear_acl_cache;
if (clear_acl_cache != NULL)
clear_acl_cache(inode);
spin_lock(&inode->i_lock);
NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_ACL;
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL_GPL(nfs_zap_acl_cache);
void nfs_invalidate_atime(struct inode *inode)
{
spin_lock(&inode->i_lock);
nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATIME);
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL_GPL(nfs_invalidate_atime);
/*
* Invalidate, but do not unhash, the inode.
* NB: must be called with inode->i_lock held!
*/
static void nfs_invalidate_inode(struct inode *inode)
{
set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
nfs_zap_caches_locked(inode);
}
struct nfs_find_desc {
struct nfs_fh *fh;
struct nfs_fattr *fattr;
};
/*
* In NFSv3 we can have 64bit inode numbers. In order to support
* this, and re-exported directories (also seen in NFSv2)
* we are forced to allow 2 different inodes to have the same
* i_ino.
*/
static int
nfs_find_actor(struct inode *inode, void *opaque)
{
struct nfs_find_desc *desc = (struct nfs_find_desc *)opaque;
struct nfs_fh *fh = desc->fh;
struct nfs_fattr *fattr = desc->fattr;
if (NFS_FILEID(inode) != fattr->fileid)
return 0;
nfs: don't allow nfs_find_actor to match inodes of the wrong type Benny Halevy reported the following oops when testing RHEL6: <7>nfs_update_inode: inode 892950 mode changed, 0040755 to 0100644 <1>BUG: unable to handle kernel NULL pointer dereference at (null) <1>IP: [<ffffffffa02a52c5>] nfs_closedir+0x15/0x30 [nfs] <4>PGD 81448a067 PUD 831632067 PMD 0 <4>Oops: 0000 [#1] SMP <4>last sysfs file: /sys/kernel/mm/redhat_transparent_hugepage/enabled <4>CPU 6 <4>Modules linked in: fuse bonding 8021q garp ebtable_nat ebtables be2iscsi iscsi_boot_sysfs bnx2i cnic uio cxgb4i cxgb4 cxgb3i libcxgbi cxgb3 mdio ib_iser rdma_cm ib_cm iw_cm ib_sa ib_mad ib_core ib_addr iscsi_tcp libiscsi_tcp libiscsi scsi_transport_iscsi softdog bridge stp llc xt_physdev ipt_REJECT nf_conntrack_ipv4 nf_defrag_ipv4 xt_multiport iptable_filter ip_tables ip6t_REJECT nf_conntrack_ipv6 nf_defrag_ipv6 xt_state nf_conntrack ip6table_filter ip6_tables ipv6 dm_round_robin dm_multipath objlayoutdriver2(U) nfs(U) lockd fscache auth_rpcgss nfs_acl sunrpc vhost_net macvtap macvlan tun kvm_intel kvm be2net igb dca ptp pps_core microcode serio_raw sg iTCO_wdt iTCO_vendor_support i7core_edac edac_core shpchp ext4 mbcache jbd2 sd_mod crc_t10dif ahci dm_mirror dm_region_hash dm_log dm_mod [last unloaded: scsi_wait_scan] <4> <4>Pid: 6332, comm: dd Not tainted 2.6.32-358.el6.x86_64 #1 HP ProLiant DL170e G6 /ProLiant DL170e G6 <4>RIP: 0010:[<ffffffffa02a52c5>] [<ffffffffa02a52c5>] nfs_closedir+0x15/0x30 [nfs] <4>RSP: 0018:ffff88081458bb98 EFLAGS: 00010292 <4>RAX: ffffffffa02a52b0 RBX: 0000000000000000 RCX: 0000000000000003 <4>RDX: ffffffffa02e45a0 RSI: ffff88081440b300 RDI: ffff88082d5f5760 <4>RBP: ffff88081458bba8 R08: 0000000000000000 R09: 0000000000000000 <4>R10: 0000000000000772 R11: 0000000000400004 R12: 0000000040000008 <4>R13: ffff88082d5f5760 R14: ffff88082d6e8800 R15: ffff88082f12d780 <4>FS: 00007f728f37e700(0000) GS:ffff8800456c0000(0000) knlGS:0000000000000000 <4>CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b <4>CR2: 0000000000000000 CR3: 0000000831279000 CR4: 00000000000007e0 <4>DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 <4>DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400 <4>Process dd (pid: 6332, threadinfo ffff88081458a000, task ffff88082fa0e040) <4>Stack: <4> 0000000040000008 ffff88081440b300 ffff88081458bbf8 ffffffff81182745 <4><d> ffff88082d5f5760 ffff88082d6e8800 ffff88081458bbf8 ffffffffffffffea <4><d> ffff88082f12d780 ffff88082d6e8800 ffffffffa02a50a0 ffff88082d5f5760 <4>Call Trace: <4> [<ffffffff81182745>] __fput+0xf5/0x210 <4> [<ffffffffa02a50a0>] ? do_open+0x0/0x20 [nfs] <4> [<ffffffff81182885>] fput+0x25/0x30 <4> [<ffffffff8117e23e>] __dentry_open+0x27e/0x360 <4> [<ffffffff811c397a>] ? inotify_d_instantiate+0x2a/0x60 <4> [<ffffffff8117e4b9>] lookup_instantiate_filp+0x69/0x90 <4> [<ffffffffa02a6679>] nfs_intent_set_file+0x59/0x90 [nfs] <4> [<ffffffffa02a686b>] nfs_atomic_lookup+0x1bb/0x310 [nfs] <4> [<ffffffff8118e0c2>] __lookup_hash+0x102/0x160 <4> [<ffffffff81225052>] ? selinux_inode_permission+0x72/0xb0 <4> [<ffffffff8118e76a>] lookup_hash+0x3a/0x50 <4> [<ffffffff81192a4b>] do_filp_open+0x2eb/0xdd0 <4> [<ffffffff8104757c>] ? __do_page_fault+0x1ec/0x480 <4> [<ffffffff8119f562>] ? alloc_fd+0x92/0x160 <4> [<ffffffff8117de79>] do_sys_open+0x69/0x140 <4> [<ffffffff811811f6>] ? sys_lseek+0x66/0x80 <4> [<ffffffff8117df90>] sys_open+0x20/0x30 <4> [<ffffffff8100b072>] system_call_fastpath+0x16/0x1b <4>Code: 65 48 8b 04 25 c8 cb 00 00 83 a8 44 e0 ff ff 01 5b 41 5c c9 c3 90 55 48 89 e5 53 48 83 ec 08 0f 1f 44 00 00 48 8b 9e a0 00 00 00 <48> 8b 3b e8 13 0c f7 ff 48 89 df e8 ab 3d ec e0 48 83 c4 08 31 <1>RIP [<ffffffffa02a52c5>] nfs_closedir+0x15/0x30 [nfs] <4> RSP <ffff88081458bb98> <4>CR2: 0000000000000000 I think this is ultimately due to a bug on the server. The client had previously found a directory dentry. It then later tried to do an atomic open on a new (regular file) dentry. The attributes it got back had the same filehandle as the previously found directory inode. It then tried to put the filp because it failed the aops tests for O_DIRECT opens, and oopsed here because the ctx was still NULL. Obviously the root cause here is a server issue, but we can take steps to mitigate this on the client. When nfs_fhget is called, we always know what type of inode it is. In the event that there's a broken or malicious server on the other end of the wire, the client can end up crashing because the wrong ops are set on it. Have nfs_find_actor check that the inode type is correct after checking the fileid. The fileid check should rarely ever match, so it should only rarely ever get to this check. In the case where we have a broken server, we may see two different inodes with the same i_ino, but the client should be able to cope with them without crashing. This should fix the oops reported here: https://bugzilla.redhat.com/show_bug.cgi?id=913660 Reported-by: Benny Halevy <bhalevy@tonian.com> Signed-off-by: Jeff Layton <jlayton@redhat.com> Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2013-02-28 08:10:34 +07:00
if ((S_IFMT & inode->i_mode) != (S_IFMT & fattr->mode))
return 0;
if (nfs_compare_fh(NFS_FH(inode), fh))
return 0;
if (is_bad_inode(inode) || NFS_STALE(inode))
return 0;
return 1;
}
static int
nfs_init_locked(struct inode *inode, void *opaque)
{
struct nfs_find_desc *desc = (struct nfs_find_desc *)opaque;
struct nfs_fattr *fattr = desc->fattr;
set_nfs_fileid(inode, fattr->fileid);
nfs_copy_fh(NFS_FH(inode), desc->fh);
return 0;
}
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
static void nfs_clear_label_invalid(struct inode *inode)
{
spin_lock(&inode->i_lock);
NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_LABEL;
spin_unlock(&inode->i_lock);
}
void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
struct nfs4_label *label)
{
int error;
if (label == NULL)
return;
if ((fattr->valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL) && inode->i_security) {
error = security_inode_notifysecctx(inode, label->label,
label->len);
if (error)
printk(KERN_ERR "%s() %s %d "
"security_inode_notifysecctx() %d\n",
__func__,
(char *)label->label,
label->len, error);
nfs_clear_label_invalid(inode);
}
}
struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags)
{
struct nfs4_label *label = NULL;
int minor_version = server->nfs_client->cl_minorversion;
if (minor_version < 2)
return label;
if (!(server->caps & NFS_CAP_SECURITY_LABEL))
return label;
label = kzalloc(sizeof(struct nfs4_label), flags);
if (label == NULL)
return ERR_PTR(-ENOMEM);
label->label = kzalloc(NFS4_MAXLABELLEN, flags);
if (label->label == NULL) {
kfree(label);
return ERR_PTR(-ENOMEM);
}
label->len = NFS4_MAXLABELLEN;
return label;
}
EXPORT_SYMBOL_GPL(nfs4_label_alloc);
#else
void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
struct nfs4_label *label)
{
}
#endif
EXPORT_SYMBOL_GPL(nfs_setsecurity);
/*
* This is our front-end to iget that looks up inodes by file handle
* instead of inode number.
*/
struct inode *
nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, struct nfs4_label *label)
{
struct nfs_find_desc desc = {
.fh = fh,
.fattr = fattr
};
struct inode *inode = ERR_PTR(-ENOENT);
unsigned long hash;
nfs_attr_check_mountpoint(sb, fattr);
if (nfs_attr_use_mounted_on_fileid(fattr))
fattr->fileid = fattr->mounted_on_fileid;
else if ((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0)
goto out_no_inode;
if ((fattr->valid & NFS_ATTR_FATTR_TYPE) == 0)
goto out_no_inode;
hash = nfs_fattr_to_ino_t(fattr);
inode = iget5_locked(sb, hash, nfs_find_actor, nfs_init_locked, &desc);
if (inode == NULL) {
inode = ERR_PTR(-ENOMEM);
goto out_no_inode;
}
if (inode->i_state & I_NEW) {
struct nfs_inode *nfsi = NFS_I(inode);
unsigned long now = jiffies;
/* We set i_ino for the few things that still rely on it,
* such as stat(2) */
inode->i_ino = hash;
/* We can't support update_atime(), since the server will reset it */
inode->i_flags |= S_NOATIME|S_NOCMTIME;
inode->i_mode = fattr->mode;
if ((fattr->valid & NFS_ATTR_FATTR_MODE) == 0
&& nfs_server_capable(inode, NFS_CAP_MODE))
nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
/* Why so? Because we want revalidate for devices/FIFOs, and
* that's precisely what we have in nfs_file_inode_operations.
*/
inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->file_inode_ops;
if (S_ISREG(inode->i_mode)) {
inode->i_fop = NFS_SB(sb)->nfs_client->rpc_ops->file_ops;
inode->i_data.a_ops = &nfs_file_aops;
} else if (S_ISDIR(inode->i_mode)) {
inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->dir_inode_ops;
inode->i_fop = &nfs_dir_operations;
inode->i_data.a_ops = &nfs_dir_aops;
/* Deal with crossing mountpoints */
if (fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT ||
fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) {
if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)
inode->i_op = &nfs_referral_inode_operations;
else
inode->i_op = &nfs_mountpoint_inode_operations;
inode->i_fop = NULL;
inode->i_flags |= S_AUTOMOUNT;
}
} else if (S_ISLNK(inode->i_mode)) {
inode->i_op = &nfs_symlink_inode_operations;
inode_nohighmem(inode);
} else
init_special_inode(inode, inode->i_mode, fattr->rdev);
memset(&inode->i_atime, 0, sizeof(inode->i_atime));
memset(&inode->i_mtime, 0, sizeof(inode->i_mtime));
memset(&inode->i_ctime, 0, sizeof(inode->i_ctime));
inode->i_version = 0;
inode->i_size = 0;
clear_nlink(inode);
inode->i_uid = make_kuid(&init_user_ns, -2);
inode->i_gid = make_kgid(&init_user_ns, -2);
inode->i_blocks = 0;
memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf));
nfsi->write_io = 0;
nfsi->read_io = 0;
nfsi->read_cache_jiffies = fattr->time_start;
nfsi->attr_gencount = fattr->gencount;
if (fattr->valid & NFS_ATTR_FATTR_ATIME)
inode->i_atime = fattr->atime;
else if (nfs_server_capable(inode, NFS_CAP_ATIME))
nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
if (fattr->valid & NFS_ATTR_FATTR_MTIME)
inode->i_mtime = fattr->mtime;
else if (nfs_server_capable(inode, NFS_CAP_MTIME))
nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
if (fattr->valid & NFS_ATTR_FATTR_CTIME)
inode->i_ctime = fattr->ctime;
else if (nfs_server_capable(inode, NFS_CAP_CTIME))
nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
inode->i_version = fattr->change_attr;
else
nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR
| NFS_INO_REVAL_PAGECACHE);
if (fattr->valid & NFS_ATTR_FATTR_SIZE)
inode->i_size = nfs_size_to_loff_t(fattr->size);
else
nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR
| NFS_INO_REVAL_PAGECACHE);
if (fattr->valid & NFS_ATTR_FATTR_NLINK)
set_nlink(inode, fattr->nlink);
else if (nfs_server_capable(inode, NFS_CAP_NLINK))
nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
if (fattr->valid & NFS_ATTR_FATTR_OWNER)
inode->i_uid = fattr->uid;
else if (nfs_server_capable(inode, NFS_CAP_OWNER))
nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
if (fattr->valid & NFS_ATTR_FATTR_GROUP)
inode->i_gid = fattr->gid;
else if (nfs_server_capable(inode, NFS_CAP_OWNER_GROUP))
nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
inode->i_blocks = fattr->du.nfs2.blocks;
if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
/*
* report the blocks in 512byte units
*/
inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used);
}
nfs_setsecurity(inode, fattr, label);
nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
nfsi->attrtimeo_timestamp = now;
nfsi->access_cache = RB_ROOT;
NFS: Use i_writecount to control whether to get an fscache cookie in nfs_open() Use i_writecount to control whether to get an fscache cookie in nfs_open() as NFS does not do write caching yet. I *think* this is the cause of a problem encountered by Mark Moseley whereby __fscache_uncache_page() gets a NULL pointer dereference because cookie->def is NULL: BUG: unable to handle kernel NULL pointer dereference at 0000000000000010 IP: [<ffffffff812a1903>] __fscache_uncache_page+0x23/0x160 PGD 0 Thread overran stack, or stack corrupted Oops: 0000 [#1] SMP Modules linked in: ... CPU: 7 PID: 18993 Comm: php Not tainted 3.11.1 #1 Hardware name: Dell Inc. PowerEdge R420/072XWF, BIOS 1.3.5 08/21/2012 task: ffff8804203460c0 ti: ffff880420346640 RIP: 0010:[<ffffffff812a1903>] __fscache_uncache_page+0x23/0x160 RSP: 0018:ffff8801053af878 EFLAGS: 00210286 RAX: 0000000000000000 RBX: ffff8800be2f8780 RCX: ffff88022ffae5e8 RDX: 0000000000004c66 RSI: ffffea00055ff440 RDI: ffff8800be2f8780 RBP: ffff8801053af898 R08: 0000000000000001 R09: 0000000000000003 R10: 0000000000000000 R11: 0000000000000000 R12: ffffea00055ff440 R13: 0000000000001000 R14: ffff8800c50be538 R15: 0000000000000000 FS: 0000000000000000(0000) GS:ffff88042fc60000(0063) knlGS:00000000e439c700 CS: 0010 DS: 002b ES: 002b CR0: 0000000080050033 CR2: 0000000000000010 CR3: 0000000001d8f000 CR4: 00000000000607f0 Stack: ... Call Trace: [<ffffffff81365a72>] __nfs_fscache_invalidate_page+0x42/0x70 [<ffffffff813553d5>] nfs_invalidate_page+0x75/0x90 [<ffffffff811b8f5e>] truncate_inode_page+0x8e/0x90 [<ffffffff811b90ad>] truncate_inode_pages_range.part.12+0x14d/0x620 [<ffffffff81d6387d>] ? __mutex_lock_slowpath+0x1fd/0x2e0 [<ffffffff811b95d3>] truncate_inode_pages_range+0x53/0x70 [<ffffffff811b969d>] truncate_inode_pages+0x2d/0x40 [<ffffffff811b96ff>] truncate_pagecache+0x4f/0x70 [<ffffffff81356840>] nfs_setattr_update_inode+0xa0/0x120 [<ffffffff81368de4>] nfs3_proc_setattr+0xc4/0xe0 [<ffffffff81357f78>] nfs_setattr+0xc8/0x150 [<ffffffff8122d95b>] notify_change+0x1cb/0x390 [<ffffffff8120a55b>] do_truncate+0x7b/0xc0 [<ffffffff8121f96c>] do_last+0xa4c/0xfd0 [<ffffffff8121ffbc>] path_openat+0xcc/0x670 [<ffffffff81220a0e>] do_filp_open+0x4e/0xb0 [<ffffffff8120ba1f>] do_sys_open+0x13f/0x2b0 [<ffffffff8126aaf6>] compat_SyS_open+0x36/0x50 [<ffffffff81d7204c>] sysenter_dispatch+0x7/0x24 The code at the instruction pointer was disassembled: > (gdb) disas __fscache_uncache_page > Dump of assembler code for function __fscache_uncache_page: > ... > 0xffffffff812a18ff <+31>: mov 0x48(%rbx),%rax > 0xffffffff812a1903 <+35>: cmpb $0x0,0x10(%rax) > 0xffffffff812a1907 <+39>: je 0xffffffff812a19cd <__fscache_uncache_page+237> These instructions make up: ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); That cmpb is the faulting instruction (%rax is 0). So cookie->def is NULL - which presumably means that the cookie has already been at least partway through __fscache_relinquish_cookie(). What I think may be happening is something like a three-way race on the same file: PROCESS 1 PROCESS 2 PROCESS 3 =============== =============== =============== open(O_TRUNC|O_WRONLY) open(O_RDONLY) open(O_WRONLY) -->nfs_open() -->nfs_fscache_set_inode_cookie() nfs_fscache_inode_lock() nfs_fscache_disable_inode_cookie() __fscache_relinquish_cookie() nfs_inode->fscache = NULL <--nfs_fscache_set_inode_cookie() -->nfs_open() -->nfs_fscache_set_inode_cookie() nfs_fscache_inode_lock() nfs_fscache_enable_inode_cookie() __fscache_acquire_cookie() nfs_inode->fscache = cookie <--nfs_fscache_set_inode_cookie() <--nfs_open() -->nfs_setattr() ... ... -->nfs_invalidate_page() -->__nfs_fscache_invalidate_page() cookie = nfsi->fscache -->nfs_open() -->nfs_fscache_set_inode_cookie() nfs_fscache_inode_lock() nfs_fscache_disable_inode_cookie() -->__fscache_relinquish_cookie() -->__fscache_uncache_page(cookie) <crash> <--__fscache_relinquish_cookie() nfs_inode->fscache = NULL <--nfs_fscache_set_inode_cookie() What is needed is something to prevent process #2 from reacquiring the cookie - and I think checking i_writecount should do the trick. It's also possible to have a two-way race on this if the file is opened O_TRUNC|O_RDONLY instead. Reported-by: Mark Moseley <moseleymark@gmail.com> Signed-off-by: David Howells <dhowells@redhat.com>
2013-09-27 17:20:03 +07:00
nfs_fscache_init_inode(inode);
unlock_new_inode(inode);
} else
nfs_refresh_inode(inode, fattr);
dprintk("NFS: nfs_fhget(%s/%Lu fh_crc=0x%08x ct=%d)\n",
inode->i_sb->s_id,
(unsigned long long)NFS_FILEID(inode),
nfs_display_fhandle_hash(fh),
atomic_read(&inode->i_count));
out:
return inode;
out_no_inode:
dprintk("nfs_fhget: iget failed with error %ld\n", PTR_ERR(inode));
goto out;
}
EXPORT_SYMBOL_GPL(nfs_fhget);
#define NFS_VALID_ATTRS (ATTR_MODE|ATTR_UID|ATTR_GID|ATTR_SIZE|ATTR_ATIME|ATTR_ATIME_SET|ATTR_MTIME|ATTR_MTIME_SET|ATTR_FILE|ATTR_OPEN)
int
nfs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
struct nfs_fattr *fattr;
int error = 0;
nfs_inc_stats(inode, NFSIOS_VFSSETATTR);
/* skip mode change if it's just for clearing setuid/setgid */
if (attr->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID))
attr->ia_valid &= ~ATTR_MODE;
if (attr->ia_valid & ATTR_SIZE) {
BUG_ON(!S_ISREG(inode->i_mode));
error = inode_newsize_ok(inode, attr->ia_size);
if (error)
return error;
if (attr->ia_size == i_size_read(inode))
attr->ia_valid &= ~ATTR_SIZE;
}
/* Optimization: if the end result is no change, don't RPC */
attr->ia_valid &= NFS_VALID_ATTRS;
if ((attr->ia_valid & ~(ATTR_FILE|ATTR_OPEN)) == 0)
return 0;
trace_nfs_setattr_enter(inode);
/* Write all dirty data */
if (S_ISREG(inode->i_mode))
nfs_sync_inode(inode);
fattr = nfs_alloc_fattr();
if (fattr == NULL) {
error = -ENOMEM;
goto out;
}
/*
* Return any delegations if we're going to change ACLs
*/
if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0)
NFS_PROTO(inode)->return_delegation(inode);
error = NFS_PROTO(inode)->setattr(dentry, fattr, attr);
if (error == 0)
error = nfs_refresh_inode(inode, fattr);
nfs_free_fattr(fattr);
out:
trace_nfs_setattr_exit(inode, error);
return error;
}
EXPORT_SYMBOL_GPL(nfs_setattr);
/**
* nfs_vmtruncate - unmap mappings "freed" by truncate() syscall
* @inode: inode of the file used
* @offset: file offset to start truncating
*
* This is a copy of the common vmtruncate, but with the locking
* corrected to take into account the fact that NFS requires
* inode->i_size to be updated under the inode->i_lock.
* Note: must be called with inode->i_lock held!
*/
static int nfs_vmtruncate(struct inode * inode, loff_t offset)
{
int err;
err = inode_newsize_ok(inode, offset);
if (err)
goto out;
i_size_write(inode, offset);
/* Optimisation */
if (offset == 0)
NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_DATA;
spin_unlock(&inode->i_lock);
truncate_pagecache(inode, offset);
spin_lock(&inode->i_lock);
out:
return err;
}
/**
* nfs_setattr_update_inode - Update inode metadata after a setattr call.
* @inode: pointer to struct inode
* @attr: pointer to struct iattr
*
* Note: we do this in the *proc.c in order to ensure that
* it works for things like exclusive creates too.
*/
void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr,
struct nfs_fattr *fattr)
{
/* Barrier: bump the attribute generation count. */
nfs_fattr_set_barrier(fattr);
spin_lock(&inode->i_lock);
NFS_I(inode)->attr_gencount = fattr->gencount;
if ((attr->ia_valid & (ATTR_MODE|ATTR_UID|ATTR_GID)) != 0) {
if ((attr->ia_valid & ATTR_MODE) != 0) {
int mode = attr->ia_mode & S_IALLUGO;
mode |= inode->i_mode & ~S_IALLUGO;
inode->i_mode = mode;
}
if ((attr->ia_valid & ATTR_UID) != 0)
inode->i_uid = attr->ia_uid;
if ((attr->ia_valid & ATTR_GID) != 0)
inode->i_gid = attr->ia_gid;
nfs_set_cache_invalid(inode, NFS_INO_INVALID_ACCESS
| NFS_INO_INVALID_ACL);
}
if ((attr->ia_valid & ATTR_SIZE) != 0) {
nfs_inc_stats(inode, NFSIOS_SETATTRTRUNC);
nfs_vmtruncate(inode, attr->ia_size);
}
if (fattr->valid)
nfs_update_inode(inode, fattr);
else
NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATTR;
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL_GPL(nfs_setattr_update_inode);
static void nfs_request_parent_use_readdirplus(struct dentry *dentry)
{
struct dentry *parent;
parent = dget_parent(dentry);
nfs_force_use_readdirplus(d_inode(parent));
dput(parent);
}
static bool nfs_need_revalidate_inode(struct inode *inode)
{
if (NFS_I(inode)->cache_validity &
(NFS_INO_INVALID_ATTR|NFS_INO_INVALID_LABEL))
return true;
if (nfs_attribute_cache_expired(inode))
return true;
return false;
}
int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
struct inode *inode = d_inode(dentry);
int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME;
int err = 0;
trace_nfs_getattr_enter(inode);
/* Flush out writes to the server in order to update c/mtime. */
if (S_ISREG(inode->i_mode)) {
inode_lock(inode);
err = nfs_sync_inode(inode);
inode_unlock(inode);
if (err)
goto out;
}
/*
* We may force a getattr if the user cares about atime.
*
* Note that we only have to check the vfsmount flags here:
* - NFS always sets S_NOATIME by so checking it would give a
* bogus result
* - NFS never sets MS_NOATIME or MS_NODIRATIME so there is
* no point in checking those.
*/
if ((mnt->mnt_flags & MNT_NOATIME) ||
((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
need_atime = 0;
if (need_atime || nfs_need_revalidate_inode(inode)) {
struct nfs_server *server = NFS_SERVER(inode);
if (server->caps & NFS_CAP_READDIRPLUS)
nfs_request_parent_use_readdirplus(dentry);
err = __nfs_revalidate_inode(server, inode);
}
if (!err) {
generic_fillattr(inode, stat);
stat->ino = nfs_compat_user_ino64(NFS_FILEID(inode));
if (S_ISDIR(inode->i_mode))
stat->blksize = NFS_SERVER(inode)->dtsize;
}
out:
trace_nfs_getattr_exit(inode, err);
return err;
}
EXPORT_SYMBOL_GPL(nfs_getattr);
static void nfs_init_lock_context(struct nfs_lock_context *l_ctx)
{
atomic_set(&l_ctx->count, 1);
l_ctx->lockowner.l_owner = current->files;
l_ctx->lockowner.l_pid = current->tgid;
INIT_LIST_HEAD(&l_ctx->list);
atomic_set(&l_ctx->io_count, 0);
}
static struct nfs_lock_context *__nfs_find_lock_context(struct nfs_open_context *ctx)
{
struct nfs_lock_context *head = &ctx->lock_context;
struct nfs_lock_context *pos = head;
do {
if (pos->lockowner.l_owner != current->files)
continue;
if (pos->lockowner.l_pid != current->tgid)
continue;
atomic_inc(&pos->count);
return pos;
} while ((pos = list_entry(pos->list.next, typeof(*pos), list)) != head);
return NULL;
}
struct nfs_lock_context *nfs_get_lock_context(struct nfs_open_context *ctx)
{
struct nfs_lock_context *res, *new = NULL;
struct inode *inode = d_inode(ctx->dentry);
spin_lock(&inode->i_lock);
res = __nfs_find_lock_context(ctx);
if (res == NULL) {
spin_unlock(&inode->i_lock);
new = kmalloc(sizeof(*new), GFP_KERNEL);
if (new == NULL)
return ERR_PTR(-ENOMEM);
nfs_init_lock_context(new);
spin_lock(&inode->i_lock);
res = __nfs_find_lock_context(ctx);
if (res == NULL) {
list_add_tail(&new->list, &ctx->lock_context.list);
new->open_context = ctx;
res = new;
new = NULL;
}
}
spin_unlock(&inode->i_lock);
kfree(new);
return res;
}
EXPORT_SYMBOL_GPL(nfs_get_lock_context);
void nfs_put_lock_context(struct nfs_lock_context *l_ctx)
{
struct nfs_open_context *ctx = l_ctx->open_context;
struct inode *inode = d_inode(ctx->dentry);
if (!atomic_dec_and_lock(&l_ctx->count, &inode->i_lock))
return;
list_del(&l_ctx->list);
spin_unlock(&inode->i_lock);
kfree(l_ctx);
}
EXPORT_SYMBOL_GPL(nfs_put_lock_context);
/**
* nfs_close_context - Common close_context() routine NFSv2/v3
* @ctx: pointer to context
* @is_sync: is this a synchronous close
*
* Ensure that the attributes are up to date if we're mounted
* with close-to-open semantics and we have cached data that will
* need to be revalidated on open.
*/
void nfs_close_context(struct nfs_open_context *ctx, int is_sync)
{
struct nfs_inode *nfsi;
struct inode *inode;
struct nfs_server *server;
if (!(ctx->mode & FMODE_WRITE))
return;
if (!is_sync)
return;
inode = d_inode(ctx->dentry);
nfsi = NFS_I(inode);
if (inode->i_mapping->nrpages == 0)
return;
if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
return;
if (!list_empty(&nfsi->open_files))
return;
server = NFS_SERVER(inode);
if (server->flags & NFS_MOUNT_NOCTO)
return;
nfs_revalidate_inode(server, inode);
}
EXPORT_SYMBOL_GPL(nfs_close_context);
struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f_mode)
{
struct nfs_open_context *ctx;
struct rpc_cred *cred = rpc_lookup_cred();
if (IS_ERR(cred))
return ERR_CAST(cred);
ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx) {
put_rpccred(cred);
return ERR_PTR(-ENOMEM);
}
nfs_sb_active(dentry->d_sb);
ctx->dentry = dget(dentry);
ctx->cred = cred;
ctx->state = NULL;
ctx->mode = f_mode;
ctx->flags = 0;
ctx->error = 0;
nfs_init_lock_context(&ctx->lock_context);
ctx->lock_context.open_context = ctx;
INIT_LIST_HEAD(&ctx->list);
ctx->mdsthreshold = NULL;
return ctx;
}
EXPORT_SYMBOL_GPL(alloc_nfs_open_context);
struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx)
{
if (ctx != NULL)
atomic_inc(&ctx->lock_context.count);
return ctx;
}
EXPORT_SYMBOL_GPL(get_nfs_open_context);
static void __put_nfs_open_context(struct nfs_open_context *ctx, int is_sync)
{
struct inode *inode = d_inode(ctx->dentry);
struct super_block *sb = ctx->dentry->d_sb;
if (!list_empty(&ctx->list)) {
if (!atomic_dec_and_lock(&ctx->lock_context.count, &inode->i_lock))
return;
list_del(&ctx->list);
spin_unlock(&inode->i_lock);
} else if (!atomic_dec_and_test(&ctx->lock_context.count))
return;
if (inode != NULL)
NFS_PROTO(inode)->close_context(ctx, is_sync);
if (ctx->cred != NULL)
put_rpccred(ctx->cred);
dput(ctx->dentry);
nfs_sb_deactive(sb);
kfree(ctx->mdsthreshold);
kfree(ctx);
}
void put_nfs_open_context(struct nfs_open_context *ctx)
{
__put_nfs_open_context(ctx, 0);
}
EXPORT_SYMBOL_GPL(put_nfs_open_context);
static void put_nfs_open_context_sync(struct nfs_open_context *ctx)
{
__put_nfs_open_context(ctx, 1);
}
/*
* Ensure that mmap has a recent RPC credential for use when writing out
* shared pages
*/
void nfs_inode_attach_open_context(struct nfs_open_context *ctx)
{
struct inode *inode = d_inode(ctx->dentry);
struct nfs_inode *nfsi = NFS_I(inode);
spin_lock(&inode->i_lock);
list_add(&ctx->list, &nfsi->open_files);
spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL_GPL(nfs_inode_attach_open_context);
void nfs_file_set_open_context(struct file *filp, struct nfs_open_context *ctx)
{
filp->private_data = get_nfs_open_context(ctx);
if (list_empty(&ctx->list))
nfs_inode_attach_open_context(ctx);
}
EXPORT_SYMBOL_GPL(nfs_file_set_open_context);
/*
* Given an inode, search for an open context with the desired characteristics
*/
struct nfs_open_context *nfs_find_open_context(struct inode *inode, struct rpc_cred *cred, fmode_t mode)
{
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_open_context *pos, *ctx = NULL;
spin_lock(&inode->i_lock);
list_for_each_entry(pos, &nfsi->open_files, list) {
if (cred != NULL && pos->cred != cred)
continue;
if ((pos->mode & (FMODE_READ|FMODE_WRITE)) != mode)
continue;
ctx = get_nfs_open_context(pos);
break;
}
spin_unlock(&inode->i_lock);
return ctx;
}
void nfs_file_clear_open_context(struct file *filp)
{
struct nfs_open_context *ctx = nfs_file_open_context(filp);
if (ctx) {
struct inode *inode = d_inode(ctx->dentry);
/*
* We fatal error on write before. Try to writeback
* every page again.
*/
if (ctx->error < 0)
invalidate_inode_pages2(inode->i_mapping);
filp->private_data = NULL;
spin_lock(&inode->i_lock);
list_move_tail(&ctx->list, &NFS_I(inode)->open_files);
spin_unlock(&inode->i_lock);
put_nfs_open_context_sync(ctx);
}
}
/*
* These allocate and release file read/write context information.
*/
int nfs_open(struct inode *inode, struct file *filp)
{
struct nfs_open_context *ctx;
ctx = alloc_nfs_open_context(file_dentry(filp), filp->f_mode);
if (IS_ERR(ctx))
return PTR_ERR(ctx);
nfs_file_set_open_context(filp, ctx);
put_nfs_open_context(ctx);
NFS: Use i_writecount to control whether to get an fscache cookie in nfs_open() Use i_writecount to control whether to get an fscache cookie in nfs_open() as NFS does not do write caching yet. I *think* this is the cause of a problem encountered by Mark Moseley whereby __fscache_uncache_page() gets a NULL pointer dereference because cookie->def is NULL: BUG: unable to handle kernel NULL pointer dereference at 0000000000000010 IP: [<ffffffff812a1903>] __fscache_uncache_page+0x23/0x160 PGD 0 Thread overran stack, or stack corrupted Oops: 0000 [#1] SMP Modules linked in: ... CPU: 7 PID: 18993 Comm: php Not tainted 3.11.1 #1 Hardware name: Dell Inc. PowerEdge R420/072XWF, BIOS 1.3.5 08/21/2012 task: ffff8804203460c0 ti: ffff880420346640 RIP: 0010:[<ffffffff812a1903>] __fscache_uncache_page+0x23/0x160 RSP: 0018:ffff8801053af878 EFLAGS: 00210286 RAX: 0000000000000000 RBX: ffff8800be2f8780 RCX: ffff88022ffae5e8 RDX: 0000000000004c66 RSI: ffffea00055ff440 RDI: ffff8800be2f8780 RBP: ffff8801053af898 R08: 0000000000000001 R09: 0000000000000003 R10: 0000000000000000 R11: 0000000000000000 R12: ffffea00055ff440 R13: 0000000000001000 R14: ffff8800c50be538 R15: 0000000000000000 FS: 0000000000000000(0000) GS:ffff88042fc60000(0063) knlGS:00000000e439c700 CS: 0010 DS: 002b ES: 002b CR0: 0000000080050033 CR2: 0000000000000010 CR3: 0000000001d8f000 CR4: 00000000000607f0 Stack: ... Call Trace: [<ffffffff81365a72>] __nfs_fscache_invalidate_page+0x42/0x70 [<ffffffff813553d5>] nfs_invalidate_page+0x75/0x90 [<ffffffff811b8f5e>] truncate_inode_page+0x8e/0x90 [<ffffffff811b90ad>] truncate_inode_pages_range.part.12+0x14d/0x620 [<ffffffff81d6387d>] ? __mutex_lock_slowpath+0x1fd/0x2e0 [<ffffffff811b95d3>] truncate_inode_pages_range+0x53/0x70 [<ffffffff811b969d>] truncate_inode_pages+0x2d/0x40 [<ffffffff811b96ff>] truncate_pagecache+0x4f/0x70 [<ffffffff81356840>] nfs_setattr_update_inode+0xa0/0x120 [<ffffffff81368de4>] nfs3_proc_setattr+0xc4/0xe0 [<ffffffff81357f78>] nfs_setattr+0xc8/0x150 [<ffffffff8122d95b>] notify_change+0x1cb/0x390 [<ffffffff8120a55b>] do_truncate+0x7b/0xc0 [<ffffffff8121f96c>] do_last+0xa4c/0xfd0 [<ffffffff8121ffbc>] path_openat+0xcc/0x670 [<ffffffff81220a0e>] do_filp_open+0x4e/0xb0 [<ffffffff8120ba1f>] do_sys_open+0x13f/0x2b0 [<ffffffff8126aaf6>] compat_SyS_open+0x36/0x50 [<ffffffff81d7204c>] sysenter_dispatch+0x7/0x24 The code at the instruction pointer was disassembled: > (gdb) disas __fscache_uncache_page > Dump of assembler code for function __fscache_uncache_page: > ... > 0xffffffff812a18ff <+31>: mov 0x48(%rbx),%rax > 0xffffffff812a1903 <+35>: cmpb $0x0,0x10(%rax) > 0xffffffff812a1907 <+39>: je 0xffffffff812a19cd <__fscache_uncache_page+237> These instructions make up: ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX); That cmpb is the faulting instruction (%rax is 0). So cookie->def is NULL - which presumably means that the cookie has already been at least partway through __fscache_relinquish_cookie(). What I think may be happening is something like a three-way race on the same file: PROCESS 1 PROCESS 2 PROCESS 3 =============== =============== =============== open(O_TRUNC|O_WRONLY) open(O_RDONLY) open(O_WRONLY) -->nfs_open() -->nfs_fscache_set_inode_cookie() nfs_fscache_inode_lock() nfs_fscache_disable_inode_cookie() __fscache_relinquish_cookie() nfs_inode->fscache = NULL <--nfs_fscache_set_inode_cookie() -->nfs_open() -->nfs_fscache_set_inode_cookie() nfs_fscache_inode_lock() nfs_fscache_enable_inode_cookie() __fscache_acquire_cookie() nfs_inode->fscache = cookie <--nfs_fscache_set_inode_cookie() <--nfs_open() -->nfs_setattr() ... ... -->nfs_invalidate_page() -->__nfs_fscache_invalidate_page() cookie = nfsi->fscache -->nfs_open() -->nfs_fscache_set_inode_cookie() nfs_fscache_inode_lock() nfs_fscache_disable_inode_cookie() -->__fscache_relinquish_cookie() -->__fscache_uncache_page(cookie) <crash> <--__fscache_relinquish_cookie() nfs_inode->fscache = NULL <--nfs_fscache_set_inode_cookie() What is needed is something to prevent process #2 from reacquiring the cookie - and I think checking i_writecount should do the trick. It's also possible to have a two-way race on this if the file is opened O_TRUNC|O_RDONLY instead. Reported-by: Mark Moseley <moseleymark@gmail.com> Signed-off-by: David Howells <dhowells@redhat.com>
2013-09-27 17:20:03 +07:00
nfs_fscache_open_file(inode, filp);
return 0;
}
/*
* This function is called whenever some part of NFS notices that
* the cached attributes have to be refreshed.
*/
int
__nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
{
int status = -ESTALE;
struct nfs4_label *label = NULL;
struct nfs_fattr *fattr = NULL;
struct nfs_inode *nfsi = NFS_I(inode);
dfprintk(PAGECACHE, "NFS: revalidating (%s/%Lu)\n",
inode->i_sb->s_id, (unsigned long long)NFS_FILEID(inode));
trace_nfs_revalidate_inode_enter(inode);
if (is_bad_inode(inode))
goto out;
if (NFS_STALE(inode))
goto out;
status = -ENOMEM;
fattr = nfs_alloc_fattr();
if (fattr == NULL)
goto out;
nfs_inc_stats(inode, NFSIOS_INODEREVALIDATE);
label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
if (IS_ERR(label)) {
status = PTR_ERR(label);
goto out;
}
status = NFS_PROTO(inode)->getattr(server, NFS_FH(inode), fattr, label);
if (status != 0) {
dfprintk(PAGECACHE, "nfs_revalidate_inode: (%s/%Lu) getattr failed, error=%d\n",
inode->i_sb->s_id,
(unsigned long long)NFS_FILEID(inode), status);
if (status == -ESTALE) {
nfs_zap_caches(inode);
if (!S_ISDIR(inode->i_mode))
set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
}
goto err_out;
}
status = nfs_refresh_inode(inode, fattr);
if (status) {
dfprintk(PAGECACHE, "nfs_revalidate_inode: (%s/%Lu) refresh failed, error=%d\n",
inode->i_sb->s_id,
(unsigned long long)NFS_FILEID(inode), status);
goto err_out;
}
if (nfsi->cache_validity & NFS_INO_INVALID_ACL)
nfs_zap_acl_cache(inode);
nfs_setsecurity(inode, fattr, label);
dfprintk(PAGECACHE, "NFS: (%s/%Lu) revalidation complete\n",
inode->i_sb->s_id,
(unsigned long long)NFS_FILEID(inode));
err_out:
nfs4_label_free(label);
out:
nfs_free_fattr(fattr);
trace_nfs_revalidate_inode_exit(inode, status);
return status;
}
int nfs_attribute_timeout(struct inode *inode)
{
struct nfs_inode *nfsi = NFS_I(inode);
return !time_in_range_open(jiffies, nfsi->read_cache_jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo);
}
int nfs_attribute_cache_expired(struct inode *inode)
{
if (nfs_have_delegated_attributes(inode))
return 0;
return nfs_attribute_timeout(inode);
}
/**
* nfs_revalidate_inode - Revalidate the inode attributes
* @server - pointer to nfs_server struct
* @inode - pointer to inode struct
*
* Updates inode attribute information by retrieving the data from the server.
*/
int nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
{
if (!nfs_need_revalidate_inode(inode))
return NFS_STALE(inode) ? -ESTALE : 0;
return __nfs_revalidate_inode(server, inode);
}
EXPORT_SYMBOL_GPL(nfs_revalidate_inode);
int nfs_revalidate_inode_rcu(struct nfs_server *server, struct inode *inode)
{
if (!(NFS_I(inode)->cache_validity &
(NFS_INO_INVALID_ATTR|NFS_INO_INVALID_LABEL))
&& !nfs_attribute_cache_expired(inode))
return NFS_STALE(inode) ? -ESTALE : 0;
return -ECHILD;
}
static int nfs_invalidate_mapping(struct inode *inode, struct address_space *mapping)
{
struct nfs_inode *nfsi = NFS_I(inode);
int ret;
if (mapping->nrpages != 0) {
if (S_ISREG(inode->i_mode)) {
unmap_mapping_range(mapping, 0, 0, 0);
ret = nfs_sync_mapping(mapping);
if (ret < 0)
return ret;
}
ret = invalidate_inode_pages2(mapping);
if (ret < 0)
return ret;
}
NFS: fix the handling of NFS_INO_INVALID_DATA flag in nfs_revalidate_mapping There is a possible race in how the nfs_invalidate_mapping function is handled. Currently, we go and invalidate the pages in the file and then clear NFS_INO_INVALID_DATA. The problem is that it's possible for a stale page to creep into the mapping after the page was invalidated (i.e., via readahead). If another writer comes along and sets the flag after that happens but before invalidate_inode_pages2 returns then we could clear the flag without the cache having been properly invalidated. So, we must clear the flag first and then invalidate the pages. Doing this however, opens another race: It's possible to have two concurrent read() calls that end up in nfs_revalidate_mapping at the same time. The first one clears the NFS_INO_INVALID_DATA flag and then goes to call nfs_invalidate_mapping. Just before calling that though, the other task races in, checks the flag and finds it cleared. At that point, it trusts that the mapping is good and gets the lock on the page, allowing the read() to be satisfied from the cache even though the data is no longer valid. These effects are easily manifested by running diotest3 from the LTP test suite on NFS. That program does a series of DIO writes and buffered reads. The operations are serialized and page-aligned but the existing code fails the test since it occasionally allows a read to come out of the cache incorrectly. While mixing direct and buffered I/O isn't recommended, I believe it's possible to hit this in other ways that just use buffered I/O, though that situation is much harder to reproduce. The problem is that the checking/clearing of that flag and the invalidation of the mapping really need to be atomic. Fix this by serializing concurrent invalidations with a bitlock. At the same time, we also need to allow other places that check NFS_INO_INVALID_DATA to check whether we might be in the middle of invalidating the file, so fix up a couple of places that do that to look for the new NFS_INO_INVALIDATING flag. Doing this requires us to be careful not to set the bitlock unnecessarily, so this code only does that if it believes it will be doing an invalidation. Signed-off-by: Jeff Layton <jlayton@redhat.com> Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
2014-01-28 01:46:15 +07:00
if (S_ISDIR(inode->i_mode)) {
spin_lock(&inode->i_lock);
memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf));
NFS: fix the handling of NFS_INO_INVALID_DATA flag in nfs_revalidate_mapping There is a possible race in how the nfs_invalidate_mapping function is handled. Currently, we go and invalidate the pages in the file and then clear NFS_INO_INVALID_DATA. The problem is that it's possible for a stale page to creep into the mapping after the page was invalidated (i.e., via readahead). If another writer comes along and sets the flag after that happens but before invalidate_inode_pages2 returns then we could clear the flag without the cache having been properly invalidated. So, we must clear the flag first and then invalidate the pages. Doing this however, opens another race: It's possible to have two concurrent read() calls that end up in nfs_revalidate_mapping at the same time. The first one clears the NFS_INO_INVALID_DATA flag and then goes to call nfs_invalidate_mapping. Just before calling that though, the other task races in, checks the flag and finds it cleared. At that point, it trusts that the mapping is good and gets the lock on the page, allowing the read() to be satisfied from the cache even though the data is no longer valid. These effects are easily manifested by running diotest3 from the LTP test suite on NFS. That program does a series of DIO writes and buffered reads. The operations are serialized and page-aligned but the existing code fails the test since it occasionally allows a read to come out of the cache incorrectly. While mixing direct and buffered I/O isn't recommended, I believe it's possible to hit this in other ways that just use buffered I/O, though that situation is much harder to reproduce. The problem is that the checking/clearing of that flag and the invalidation of the mapping really need to be atomic. Fix this by serializing concurrent invalidations with a bitlock. At the same time, we also need to allow other places that check NFS_INO_INVALID_DATA to check whether we might be in the middle of invalidating the file, so fix up a couple of places that do that to look for the new NFS_INO_INVALIDATING flag. Doing this requires us to be careful not to set the bitlock unnecessarily, so this code only does that if it believes it will be doing an invalidation. Signed-off-by: Jeff Layton <jlayton@redhat.com> Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
2014-01-28 01:46:15 +07:00
spin_unlock(&inode->i_lock);
}
nfs_inc_stats(inode, NFSIOS_DATAINVALIDATE);
NFS: Use FS-Cache invalidation Use the new FS-Cache invalidation facility from NFS to deal with foreign changes being detected on the server rather than attempting to retire the old cookie and get a new one. The problem with the old method was that NFS did not wait for all outstanding storage and retrieval ops on the cache to complete. There was no automatic wait between the calls to ->readpages() and calls to invalidate_inode_pages2() as the latter can only wait on locked pages that have been added to the pagecache (which they haven't yet on entry to ->readpages()). This was leading to oopses like the one below when an outstanding read got cut off from its cookie by a premature release. BUG: unable to handle kernel NULL pointer dereference at 00000000000000a8 IP: [<ffffffffa0075118>] __fscache_read_or_alloc_pages+0x1dd/0x315 [fscache] PGD 15889067 PUD 15890067 PMD 0 Oops: 0000 [#1] SMP CPU 0 Modules linked in: cachefiles nfs fscache auth_rpcgss nfs_acl lockd sunrpc Pid: 4544, comm: tar Not tainted 3.1.0-rc4-fsdevel+ #1064 /DG965RY RIP: 0010:[<ffffffffa0075118>] [<ffffffffa0075118>] __fscache_read_or_alloc_pages+0x1dd/0x315 [fscache] RSP: 0018:ffff8800158799e8 EFLAGS: 00010246 RAX: 0000000000000000 RBX: ffff8800070d41e0 RCX: ffff8800083dc1b0 RDX: 0000000000000000 RSI: ffff880015879960 RDI: ffff88003e627b90 RBP: ffff880015879a28 R08: 0000000000000002 R09: 0000000000000002 R10: 0000000000000001 R11: ffff880015879950 R12: ffff880015879aa4 R13: 0000000000000000 R14: ffff8800083dc158 R15: ffff880015879be8 FS: 00007f671e9d87c0(0000) GS:ffff88003bc00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b CR2: 00000000000000a8 CR3: 000000001587f000 CR4: 00000000000006f0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400 Process tar (pid: 4544, threadinfo ffff880015878000, task ffff880015875040) Stack: ffffffffa00b1759 ffff8800070dc158 ffff8800000213da ffff88002a286508 ffff880015879aa4 ffff880015879be8 0000000000000001 ffff88002a2866e8 ffff880015879a88 ffffffffa00b20be 00000000000200da ffff880015875040 Call Trace: [<ffffffffa00b1759>] ? nfs_fscache_wait_bit+0xd/0xd [nfs] [<ffffffffa00b20be>] __nfs_readpages_from_fscache+0x7e/0x13f [nfs] [<ffffffff81095fe7>] ? __alloc_pages_nodemask+0x156/0x662 [<ffffffffa0098763>] nfs_readpages+0xee/0x187 [nfs] [<ffffffff81098a5e>] __do_page_cache_readahead+0x1be/0x267 [<ffffffff81098942>] ? __do_page_cache_readahead+0xa2/0x267 [<ffffffff81098d7b>] ra_submit+0x1c/0x20 [<ffffffff8109900a>] ondemand_readahead+0x28b/0x29a [<ffffffff810990ce>] page_cache_sync_readahead+0x38/0x3a [<ffffffff81091d8a>] generic_file_aio_read+0x2ab/0x67e [<ffffffffa008cfbe>] nfs_file_read+0xa4/0xc9 [nfs] [<ffffffff810c22c4>] do_sync_read+0xba/0xfa [<ffffffff810a62c9>] ? might_fault+0x4e/0x9e [<ffffffff81177a47>] ? security_file_permission+0x7b/0x84 [<ffffffff810c25dd>] ? rw_verify_area+0xab/0xc8 [<ffffffff810c29a4>] vfs_read+0xaa/0x13a [<ffffffff810c2a79>] sys_read+0x45/0x6c [<ffffffff813ac37b>] system_call_fastpath+0x16/0x1b Reported-by: Mark Moseley <moseleymark@gmail.com> Signed-off-by: David Howells <dhowells@redhat.com>
2012-12-21 04:52:38 +07:00
nfs_fscache_wait_on_invalidate(inode);
dfprintk(PAGECACHE, "NFS: (%s/%Lu) data cache invalidated\n",
inode->i_sb->s_id,
(unsigned long long)NFS_FILEID(inode));
return 0;
}
static bool nfs_mapping_need_revalidate_inode(struct inode *inode)
{
if (nfs_have_delegated_attributes(inode))
return false;
return (NFS_I(inode)->cache_validity & NFS_INO_REVAL_PAGECACHE)
|| nfs_attribute_timeout(inode)
|| NFS_STALE(inode);
}
int nfs_revalidate_mapping_rcu(struct inode *inode)
{
struct nfs_inode *nfsi = NFS_I(inode);
unsigned long *bitlock = &nfsi->flags;
int ret = 0;
if (IS_SWAPFILE(inode))
goto out;
if (nfs_mapping_need_revalidate_inode(inode)) {
ret = -ECHILD;
goto out;
}
spin_lock(&inode->i_lock);
if (test_bit(NFS_INO_INVALIDATING, bitlock) ||
(nfsi->cache_validity & NFS_INO_INVALID_DATA))
ret = -ECHILD;
spin_unlock(&inode->i_lock);
out:
return ret;
}
/**
* __nfs_revalidate_mapping - Revalidate the pagecache
* @inode - pointer to host inode
* @mapping - pointer to mapping
* @may_lock - take inode->i_mutex?
*/
static int __nfs_revalidate_mapping(struct inode *inode,
struct address_space *mapping,
bool may_lock)
{
struct nfs_inode *nfsi = NFS_I(inode);
NFS: fix the handling of NFS_INO_INVALID_DATA flag in nfs_revalidate_mapping There is a possible race in how the nfs_invalidate_mapping function is handled. Currently, we go and invalidate the pages in the file and then clear NFS_INO_INVALID_DATA. The problem is that it's possible for a stale page to creep into the mapping after the page was invalidated (i.e., via readahead). If another writer comes along and sets the flag after that happens but before invalidate_inode_pages2 returns then we could clear the flag without the cache having been properly invalidated. So, we must clear the flag first and then invalidate the pages. Doing this however, opens another race: It's possible to have two concurrent read() calls that end up in nfs_revalidate_mapping at the same time. The first one clears the NFS_INO_INVALID_DATA flag and then goes to call nfs_invalidate_mapping. Just before calling that though, the other task races in, checks the flag and finds it cleared. At that point, it trusts that the mapping is good and gets the lock on the page, allowing the read() to be satisfied from the cache even though the data is no longer valid. These effects are easily manifested by running diotest3 from the LTP test suite on NFS. That program does a series of DIO writes and buffered reads. The operations are serialized and page-aligned but the existing code fails the test since it occasionally allows a read to come out of the cache incorrectly. While mixing direct and buffered I/O isn't recommended, I believe it's possible to hit this in other ways that just use buffered I/O, though that situation is much harder to reproduce. The problem is that the checking/clearing of that flag and the invalidation of the mapping really need to be atomic. Fix this by serializing concurrent invalidations with a bitlock. At the same time, we also need to allow other places that check NFS_INO_INVALID_DATA to check whether we might be in the middle of invalidating the file, so fix up a couple of places that do that to look for the new NFS_INO_INVALIDATING flag. Doing this requires us to be careful not to set the bitlock unnecessarily, so this code only does that if it believes it will be doing an invalidation. Signed-off-by: Jeff Layton <jlayton@redhat.com> Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
2014-01-28 01:46:15 +07:00
unsigned long *bitlock = &nfsi->flags;
int ret = 0;
/* swapfiles are not supposed to be shared. */
if (IS_SWAPFILE(inode))
goto out;
if (nfs_mapping_need_revalidate_inode(inode)) {
ret = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
if (ret < 0)
goto out;
}
NFS: fix the handling of NFS_INO_INVALID_DATA flag in nfs_revalidate_mapping There is a possible race in how the nfs_invalidate_mapping function is handled. Currently, we go and invalidate the pages in the file and then clear NFS_INO_INVALID_DATA. The problem is that it's possible for a stale page to creep into the mapping after the page was invalidated (i.e., via readahead). If another writer comes along and sets the flag after that happens but before invalidate_inode_pages2 returns then we could clear the flag without the cache having been properly invalidated. So, we must clear the flag first and then invalidate the pages. Doing this however, opens another race: It's possible to have two concurrent read() calls that end up in nfs_revalidate_mapping at the same time. The first one clears the NFS_INO_INVALID_DATA flag and then goes to call nfs_invalidate_mapping. Just before calling that though, the other task races in, checks the flag and finds it cleared. At that point, it trusts that the mapping is good and gets the lock on the page, allowing the read() to be satisfied from the cache even though the data is no longer valid. These effects are easily manifested by running diotest3 from the LTP test suite on NFS. That program does a series of DIO writes and buffered reads. The operations are serialized and page-aligned but the existing code fails the test since it occasionally allows a read to come out of the cache incorrectly. While mixing direct and buffered I/O isn't recommended, I believe it's possible to hit this in other ways that just use buffered I/O, though that situation is much harder to reproduce. The problem is that the checking/clearing of that flag and the invalidation of the mapping really need to be atomic. Fix this by serializing concurrent invalidations with a bitlock. At the same time, we also need to allow other places that check NFS_INO_INVALID_DATA to check whether we might be in the middle of invalidating the file, so fix up a couple of places that do that to look for the new NFS_INO_INVALIDATING flag. Doing this requires us to be careful not to set the bitlock unnecessarily, so this code only does that if it believes it will be doing an invalidation. Signed-off-by: Jeff Layton <jlayton@redhat.com> Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
2014-01-28 01:46:15 +07:00
/*
* We must clear NFS_INO_INVALID_DATA first to ensure that
* invalidations that come in while we're shooting down the mappings
* are respected. But, that leaves a race window where one revalidator
* can clear the flag, and then another checks it before the mapping
* gets invalidated. Fix that by serializing access to this part of
* the function.
*
* At the same time, we need to allow other tasks to see whether we
* might be in the middle of invalidating the pages, so we only set
* the bit lock here if it looks like we're going to be doing that.
*/
for (;;) {
sched: Remove proliferation of wait_on_bit() action functions The current "wait_on_bit" interface requires an 'action' function to be provided which does the actual waiting. There are over 20 such functions, many of them identical. Most cases can be satisfied by one of just two functions, one which uses io_schedule() and one which just uses schedule(). So: Rename wait_on_bit and wait_on_bit_lock to wait_on_bit_action and wait_on_bit_lock_action to make it explicit that they need an action function. Introduce new wait_on_bit{,_lock} and wait_on_bit{,_lock}_io which are *not* given an action function but implicitly use a standard one. The decision to error-out if a signal is pending is now made based on the 'mode' argument rather than being encoded in the action function. All instances of the old wait_on_bit and wait_on_bit_lock which can use the new version have been changed accordingly and their action functions have been discarded. wait_on_bit{_lock} does not return any specific error code in the event of a signal so the caller must check for non-zero and interpolate their own error code as appropriate. The wait_on_bit() call in __fscache_wait_on_invalidate() was ambiguous as it specified TASK_UNINTERRUPTIBLE but used fscache_wait_bit_interruptible as an action function. David Howells confirms this should be uniformly "uninterruptible" The main remaining user of wait_on_bit{,_lock}_action is NFS which needs to use a freezer-aware schedule() call. A comment in fs/gfs2/glock.c notes that having multiple 'action' functions is useful as they display differently in the 'wchan' field of 'ps'. (and /proc/$PID/wchan). As the new bit_wait{,_io} functions are tagged "__sched", they will not show up at all, but something higher in the stack. So the distinction will still be visible, only with different function names (gds2_glock_wait versus gfs2_glock_dq_wait in the gfs2/glock.c case). Since first version of this patch (against 3.15) two new action functions appeared, on in NFS and one in CIFS. CIFS also now uses an action function that makes the same freezer aware schedule call as NFS. Signed-off-by: NeilBrown <neilb@suse.de> Acked-by: David Howells <dhowells@redhat.com> (fscache, keys) Acked-by: Steven Whitehouse <swhiteho@redhat.com> (gfs2) Acked-by: Peter Zijlstra <peterz@infradead.org> Cc: Oleg Nesterov <oleg@redhat.com> Cc: Steve French <sfrench@samba.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Link: http://lkml.kernel.org/r/20140707051603.28027.72349.stgit@notabene.brown Signed-off-by: Ingo Molnar <mingo@kernel.org>
2014-07-07 12:16:04 +07:00
ret = wait_on_bit_action(bitlock, NFS_INO_INVALIDATING,
nfs_wait_bit_killable, TASK_KILLABLE);
NFS: fix the handling of NFS_INO_INVALID_DATA flag in nfs_revalidate_mapping There is a possible race in how the nfs_invalidate_mapping function is handled. Currently, we go and invalidate the pages in the file and then clear NFS_INO_INVALID_DATA. The problem is that it's possible for a stale page to creep into the mapping after the page was invalidated (i.e., via readahead). If another writer comes along and sets the flag after that happens but before invalidate_inode_pages2 returns then we could clear the flag without the cache having been properly invalidated. So, we must clear the flag first and then invalidate the pages. Doing this however, opens another race: It's possible to have two concurrent read() calls that end up in nfs_revalidate_mapping at the same time. The first one clears the NFS_INO_INVALID_DATA flag and then goes to call nfs_invalidate_mapping. Just before calling that though, the other task races in, checks the flag and finds it cleared. At that point, it trusts that the mapping is good and gets the lock on the page, allowing the read() to be satisfied from the cache even though the data is no longer valid. These effects are easily manifested by running diotest3 from the LTP test suite on NFS. That program does a series of DIO writes and buffered reads. The operations are serialized and page-aligned but the existing code fails the test since it occasionally allows a read to come out of the cache incorrectly. While mixing direct and buffered I/O isn't recommended, I believe it's possible to hit this in other ways that just use buffered I/O, though that situation is much harder to reproduce. The problem is that the checking/clearing of that flag and the invalidation of the mapping really need to be atomic. Fix this by serializing concurrent invalidations with a bitlock. At the same time, we also need to allow other places that check NFS_INO_INVALID_DATA to check whether we might be in the middle of invalidating the file, so fix up a couple of places that do that to look for the new NFS_INO_INVALIDATING flag. Doing this requires us to be careful not to set the bitlock unnecessarily, so this code only does that if it believes it will be doing an invalidation. Signed-off-by: Jeff Layton <jlayton@redhat.com> Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
2014-01-28 01:46:15 +07:00
if (ret)
goto out;
spin_lock(&inode->i_lock);
if (test_bit(NFS_INO_INVALIDATING, bitlock)) {
spin_unlock(&inode->i_lock);
continue;
}
if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
NFS: fix the handling of NFS_INO_INVALID_DATA flag in nfs_revalidate_mapping There is a possible race in how the nfs_invalidate_mapping function is handled. Currently, we go and invalidate the pages in the file and then clear NFS_INO_INVALID_DATA. The problem is that it's possible for a stale page to creep into the mapping after the page was invalidated (i.e., via readahead). If another writer comes along and sets the flag after that happens but before invalidate_inode_pages2 returns then we could clear the flag without the cache having been properly invalidated. So, we must clear the flag first and then invalidate the pages. Doing this however, opens another race: It's possible to have two concurrent read() calls that end up in nfs_revalidate_mapping at the same time. The first one clears the NFS_INO_INVALID_DATA flag and then goes to call nfs_invalidate_mapping. Just before calling that though, the other task races in, checks the flag and finds it cleared. At that point, it trusts that the mapping is good and gets the lock on the page, allowing the read() to be satisfied from the cache even though the data is no longer valid. These effects are easily manifested by running diotest3 from the LTP test suite on NFS. That program does a series of DIO writes and buffered reads. The operations are serialized and page-aligned but the existing code fails the test since it occasionally allows a read to come out of the cache incorrectly. While mixing direct and buffered I/O isn't recommended, I believe it's possible to hit this in other ways that just use buffered I/O, though that situation is much harder to reproduce. The problem is that the checking/clearing of that flag and the invalidation of the mapping really need to be atomic. Fix this by serializing concurrent invalidations with a bitlock. At the same time, we also need to allow other places that check NFS_INO_INVALID_DATA to check whether we might be in the middle of invalidating the file, so fix up a couple of places that do that to look for the new NFS_INO_INVALIDATING flag. Doing this requires us to be careful not to set the bitlock unnecessarily, so this code only does that if it believes it will be doing an invalidation. Signed-off-by: Jeff Layton <jlayton@redhat.com> Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
2014-01-28 01:46:15 +07:00
break;
spin_unlock(&inode->i_lock);
goto out;
}
set_bit(NFS_INO_INVALIDATING, bitlock);
smp_wmb();
nfsi->cache_validity &= ~NFS_INO_INVALID_DATA;
spin_unlock(&inode->i_lock);
trace_nfs_invalidate_mapping_enter(inode);
if (may_lock) {
inode_lock(inode);
ret = nfs_invalidate_mapping(inode, mapping);
inode_unlock(inode);
} else
ret = nfs_invalidate_mapping(inode, mapping);
trace_nfs_invalidate_mapping_exit(inode, ret);
NFS: fix the handling of NFS_INO_INVALID_DATA flag in nfs_revalidate_mapping There is a possible race in how the nfs_invalidate_mapping function is handled. Currently, we go and invalidate the pages in the file and then clear NFS_INO_INVALID_DATA. The problem is that it's possible for a stale page to creep into the mapping after the page was invalidated (i.e., via readahead). If another writer comes along and sets the flag after that happens but before invalidate_inode_pages2 returns then we could clear the flag without the cache having been properly invalidated. So, we must clear the flag first and then invalidate the pages. Doing this however, opens another race: It's possible to have two concurrent read() calls that end up in nfs_revalidate_mapping at the same time. The first one clears the NFS_INO_INVALID_DATA flag and then goes to call nfs_invalidate_mapping. Just before calling that though, the other task races in, checks the flag and finds it cleared. At that point, it trusts that the mapping is good and gets the lock on the page, allowing the read() to be satisfied from the cache even though the data is no longer valid. These effects are easily manifested by running diotest3 from the LTP test suite on NFS. That program does a series of DIO writes and buffered reads. The operations are serialized and page-aligned but the existing code fails the test since it occasionally allows a read to come out of the cache incorrectly. While mixing direct and buffered I/O isn't recommended, I believe it's possible to hit this in other ways that just use buffered I/O, though that situation is much harder to reproduce. The problem is that the checking/clearing of that flag and the invalidation of the mapping really need to be atomic. Fix this by serializing concurrent invalidations with a bitlock. At the same time, we also need to allow other places that check NFS_INO_INVALID_DATA to check whether we might be in the middle of invalidating the file, so fix up a couple of places that do that to look for the new NFS_INO_INVALIDATING flag. Doing this requires us to be careful not to set the bitlock unnecessarily, so this code only does that if it believes it will be doing an invalidation. Signed-off-by: Jeff Layton <jlayton@redhat.com> Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
2014-01-28 01:46:15 +07:00
clear_bit_unlock(NFS_INO_INVALIDATING, bitlock);
smp_mb__after_atomic();
NFS: fix the handling of NFS_INO_INVALID_DATA flag in nfs_revalidate_mapping There is a possible race in how the nfs_invalidate_mapping function is handled. Currently, we go and invalidate the pages in the file and then clear NFS_INO_INVALID_DATA. The problem is that it's possible for a stale page to creep into the mapping after the page was invalidated (i.e., via readahead). If another writer comes along and sets the flag after that happens but before invalidate_inode_pages2 returns then we could clear the flag without the cache having been properly invalidated. So, we must clear the flag first and then invalidate the pages. Doing this however, opens another race: It's possible to have two concurrent read() calls that end up in nfs_revalidate_mapping at the same time. The first one clears the NFS_INO_INVALID_DATA flag and then goes to call nfs_invalidate_mapping. Just before calling that though, the other task races in, checks the flag and finds it cleared. At that point, it trusts that the mapping is good and gets the lock on the page, allowing the read() to be satisfied from the cache even though the data is no longer valid. These effects are easily manifested by running diotest3 from the LTP test suite on NFS. That program does a series of DIO writes and buffered reads. The operations are serialized and page-aligned but the existing code fails the test since it occasionally allows a read to come out of the cache incorrectly. While mixing direct and buffered I/O isn't recommended, I believe it's possible to hit this in other ways that just use buffered I/O, though that situation is much harder to reproduce. The problem is that the checking/clearing of that flag and the invalidation of the mapping really need to be atomic. Fix this by serializing concurrent invalidations with a bitlock. At the same time, we also need to allow other places that check NFS_INO_INVALID_DATA to check whether we might be in the middle of invalidating the file, so fix up a couple of places that do that to look for the new NFS_INO_INVALIDATING flag. Doing this requires us to be careful not to set the bitlock unnecessarily, so this code only does that if it believes it will be doing an invalidation. Signed-off-by: Jeff Layton <jlayton@redhat.com> Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
2014-01-28 01:46:15 +07:00
wake_up_bit(bitlock, NFS_INO_INVALIDATING);
out:
return ret;
}
/**
* nfs_revalidate_mapping - Revalidate the pagecache
* @inode - pointer to host inode
* @mapping - pointer to mapping
*/
int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
{
return __nfs_revalidate_mapping(inode, mapping, false);
}
/**
* nfs_revalidate_mapping_protected - Revalidate the pagecache
* @inode - pointer to host inode
* @mapping - pointer to mapping
*
* Differs from nfs_revalidate_mapping() in that it grabs the inode->i_mutex
* while invalidating the mapping.
*/
int nfs_revalidate_mapping_protected(struct inode *inode, struct address_space *mapping)
{
return __nfs_revalidate_mapping(inode, mapping, true);
}
static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
{
struct nfs_inode *nfsi = NFS_I(inode);
unsigned long ret = 0;
if ((fattr->valid & NFS_ATTR_FATTR_PRECHANGE)
&& (fattr->valid & NFS_ATTR_FATTR_CHANGE)
&& inode->i_version == fattr->pre_change_attr) {
inode->i_version = fattr->change_attr;
if (S_ISDIR(inode->i_mode))
nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA);
ret |= NFS_INO_INVALID_ATTR;
}
/* If we have atomic WCC data, we may update some attributes */
if ((fattr->valid & NFS_ATTR_FATTR_PRECTIME)
&& (fattr->valid & NFS_ATTR_FATTR_CTIME)
&& timespec_equal(&inode->i_ctime, &fattr->pre_ctime)) {
memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
ret |= NFS_INO_INVALID_ATTR;
}
if ((fattr->valid & NFS_ATTR_FATTR_PREMTIME)
&& (fattr->valid & NFS_ATTR_FATTR_MTIME)
&& timespec_equal(&inode->i_mtime, &fattr->pre_mtime)) {
memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
if (S_ISDIR(inode->i_mode))
nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA);
ret |= NFS_INO_INVALID_ATTR;
}
if ((fattr->valid & NFS_ATTR_FATTR_PRESIZE)
&& (fattr->valid & NFS_ATTR_FATTR_SIZE)
&& i_size_read(inode) == nfs_size_to_loff_t(fattr->pre_size)
&& nfsi->nrequests == 0) {
i_size_write(inode, nfs_size_to_loff_t(fattr->size));
ret |= NFS_INO_INVALID_ATTR;
}
NFS: Use FS-Cache invalidation Use the new FS-Cache invalidation facility from NFS to deal with foreign changes being detected on the server rather than attempting to retire the old cookie and get a new one. The problem with the old method was that NFS did not wait for all outstanding storage and retrieval ops on the cache to complete. There was no automatic wait between the calls to ->readpages() and calls to invalidate_inode_pages2() as the latter can only wait on locked pages that have been added to the pagecache (which they haven't yet on entry to ->readpages()). This was leading to oopses like the one below when an outstanding read got cut off from its cookie by a premature release. BUG: unable to handle kernel NULL pointer dereference at 00000000000000a8 IP: [<ffffffffa0075118>] __fscache_read_or_alloc_pages+0x1dd/0x315 [fscache] PGD 15889067 PUD 15890067 PMD 0 Oops: 0000 [#1] SMP CPU 0 Modules linked in: cachefiles nfs fscache auth_rpcgss nfs_acl lockd sunrpc Pid: 4544, comm: tar Not tainted 3.1.0-rc4-fsdevel+ #1064 /DG965RY RIP: 0010:[<ffffffffa0075118>] [<ffffffffa0075118>] __fscache_read_or_alloc_pages+0x1dd/0x315 [fscache] RSP: 0018:ffff8800158799e8 EFLAGS: 00010246 RAX: 0000000000000000 RBX: ffff8800070d41e0 RCX: ffff8800083dc1b0 RDX: 0000000000000000 RSI: ffff880015879960 RDI: ffff88003e627b90 RBP: ffff880015879a28 R08: 0000000000000002 R09: 0000000000000002 R10: 0000000000000001 R11: ffff880015879950 R12: ffff880015879aa4 R13: 0000000000000000 R14: ffff8800083dc158 R15: ffff880015879be8 FS: 00007f671e9d87c0(0000) GS:ffff88003bc00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b CR2: 00000000000000a8 CR3: 000000001587f000 CR4: 00000000000006f0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400 Process tar (pid: 4544, threadinfo ffff880015878000, task ffff880015875040) Stack: ffffffffa00b1759 ffff8800070dc158 ffff8800000213da ffff88002a286508 ffff880015879aa4 ffff880015879be8 0000000000000001 ffff88002a2866e8 ffff880015879a88 ffffffffa00b20be 00000000000200da ffff880015875040 Call Trace: [<ffffffffa00b1759>] ? nfs_fscache_wait_bit+0xd/0xd [nfs] [<ffffffffa00b20be>] __nfs_readpages_from_fscache+0x7e/0x13f [nfs] [<ffffffff81095fe7>] ? __alloc_pages_nodemask+0x156/0x662 [<ffffffffa0098763>] nfs_readpages+0xee/0x187 [nfs] [<ffffffff81098a5e>] __do_page_cache_readahead+0x1be/0x267 [<ffffffff81098942>] ? __do_page_cache_readahead+0xa2/0x267 [<ffffffff81098d7b>] ra_submit+0x1c/0x20 [<ffffffff8109900a>] ondemand_readahead+0x28b/0x29a [<ffffffff810990ce>] page_cache_sync_readahead+0x38/0x3a [<ffffffff81091d8a>] generic_file_aio_read+0x2ab/0x67e [<ffffffffa008cfbe>] nfs_file_read+0xa4/0xc9 [nfs] [<ffffffff810c22c4>] do_sync_read+0xba/0xfa [<ffffffff810a62c9>] ? might_fault+0x4e/0x9e [<ffffffff81177a47>] ? security_file_permission+0x7b/0x84 [<ffffffff810c25dd>] ? rw_verify_area+0xab/0xc8 [<ffffffff810c29a4>] vfs_read+0xaa/0x13a [<ffffffff810c2a79>] sys_read+0x45/0x6c [<ffffffff813ac37b>] system_call_fastpath+0x16/0x1b Reported-by: Mark Moseley <moseleymark@gmail.com> Signed-off-by: David Howells <dhowells@redhat.com>
2012-12-21 04:52:38 +07:00
return ret;
}
/**
* nfs_check_inode_attributes - verify consistency of the inode attribute cache
* @inode - pointer to inode
* @fattr - updated attributes
*
* Verifies the attribute cache. If we have just changed the attributes,
* so that fattr carries weak cache consistency data, then it may
* also update the ctime/mtime/change_attribute.
*/
static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fattr)
{
struct nfs_inode *nfsi = NFS_I(inode);
loff_t cur_size, new_isize;
unsigned long invalid = 0;
if (nfs_have_delegated_attributes(inode))
return 0;
/* Has the inode gone and changed behind our back? */
if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid)
return -EIO;
if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT))
return -EIO;
if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 &&
inode->i_version != fattr->change_attr)
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
/* Verify a few of the more important attributes */
if ((fattr->valid & NFS_ATTR_FATTR_MTIME) && !timespec_equal(&inode->i_mtime, &fattr->mtime))
invalid |= NFS_INO_INVALID_ATTR;
if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
cur_size = i_size_read(inode);
new_isize = nfs_size_to_loff_t(fattr->size);
if (cur_size != new_isize)
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
}
if (nfsi->nrequests != 0)
invalid &= ~NFS_INO_REVAL_PAGECACHE;
/* Have any file permissions changed? */
if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO))
invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL;
if ((fattr->valid & NFS_ATTR_FATTR_OWNER) && !uid_eq(inode->i_uid, fattr->uid))
invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL;
if ((fattr->valid & NFS_ATTR_FATTR_GROUP) && !gid_eq(inode->i_gid, fattr->gid))
invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL;
/* Has the link count changed? */
if ((fattr->valid & NFS_ATTR_FATTR_NLINK) && inode->i_nlink != fattr->nlink)
invalid |= NFS_INO_INVALID_ATTR;
if ((fattr->valid & NFS_ATTR_FATTR_ATIME) && !timespec_equal(&inode->i_atime, &fattr->atime))
invalid |= NFS_INO_INVALID_ATIME;
if (invalid != 0)
nfs_set_cache_invalid(inode, invalid);
nfsi->read_cache_jiffies = fattr->time_start;
return 0;
}
static atomic_long_t nfs_attr_generation_counter;
static unsigned long nfs_read_attr_generation_counter(void)
{
return atomic_long_read(&nfs_attr_generation_counter);
}
unsigned long nfs_inc_attr_generation_counter(void)
{
return atomic_long_inc_return(&nfs_attr_generation_counter);
}
EXPORT_SYMBOL_GPL(nfs_inc_attr_generation_counter);
void nfs_fattr_init(struct nfs_fattr *fattr)
{
fattr->valid = 0;
fattr->time_start = jiffies;
fattr->gencount = nfs_inc_attr_generation_counter();
fattr->owner_name = NULL;
fattr->group_name = NULL;
}
EXPORT_SYMBOL_GPL(nfs_fattr_init);
/**
* nfs_fattr_set_barrier
* @fattr: attributes
*
* Used to set a barrier after an attribute was updated. This
* barrier ensures that older attributes from RPC calls that may
* have raced with our update cannot clobber these new values.
* Note that you are still responsible for ensuring that other
* operations which change the attribute on the server do not
* collide.
*/
void nfs_fattr_set_barrier(struct nfs_fattr *fattr)
{
fattr->gencount = nfs_inc_attr_generation_counter();
}
struct nfs_fattr *nfs_alloc_fattr(void)
{
struct nfs_fattr *fattr;
fattr = kmalloc(sizeof(*fattr), GFP_NOFS);
if (fattr != NULL)
nfs_fattr_init(fattr);
return fattr;
}
EXPORT_SYMBOL_GPL(nfs_alloc_fattr);
struct nfs_fh *nfs_alloc_fhandle(void)
{
struct nfs_fh *fh;
fh = kmalloc(sizeof(struct nfs_fh), GFP_NOFS);
if (fh != NULL)
fh->size = 0;
return fh;
}
EXPORT_SYMBOL_GPL(nfs_alloc_fhandle);
#ifdef NFS_DEBUG
/*
* _nfs_display_fhandle_hash - calculate the crc32 hash for the filehandle
* in the same way that wireshark does
*
* @fh: file handle
*
* For debugging only.
*/
u32 _nfs_display_fhandle_hash(const struct nfs_fh *fh)
{
/* wireshark uses 32-bit AUTODIN crc and does a bitwise
* not on the result */
return nfs_fhandle_hash(fh);
}
EXPORT_SYMBOL_GPL(_nfs_display_fhandle_hash);
/*
* _nfs_display_fhandle - display an NFS file handle on the console
*
* @fh: file handle to display
* @caption: display caption
*
* For debugging only.
*/
void _nfs_display_fhandle(const struct nfs_fh *fh, const char *caption)
{
unsigned short i;
if (fh == NULL || fh->size == 0) {
printk(KERN_DEFAULT "%s at %p is empty\n", caption, fh);
return;
}
printk(KERN_DEFAULT "%s at %p is %u bytes, crc: 0x%08x:\n",
caption, fh, fh->size, _nfs_display_fhandle_hash(fh));
for (i = 0; i < fh->size; i += 16) {
__be32 *pos = (__be32 *)&fh->data[i];
switch ((fh->size - i - 1) >> 2) {
case 0:
printk(KERN_DEFAULT " %08x\n",
be32_to_cpup(pos));
break;
case 1:
printk(KERN_DEFAULT " %08x %08x\n",
be32_to_cpup(pos), be32_to_cpup(pos + 1));
break;
case 2:
printk(KERN_DEFAULT " %08x %08x %08x\n",
be32_to_cpup(pos), be32_to_cpup(pos + 1),
be32_to_cpup(pos + 2));
break;
default:
printk(KERN_DEFAULT " %08x %08x %08x %08x\n",
be32_to_cpup(pos), be32_to_cpup(pos + 1),
be32_to_cpup(pos + 2), be32_to_cpup(pos + 3));
}
}
}
EXPORT_SYMBOL_GPL(_nfs_display_fhandle);
#endif
/**
* nfs_inode_attrs_need_update - check if the inode attributes need updating
* @inode - pointer to inode
* @fattr - attributes
*
* Attempt to divine whether or not an RPC call reply carrying stale
* attributes got scheduled after another call carrying updated ones.
*
* To do so, the function first assumes that a more recent ctime means
* that the attributes in fattr are newer, however it also attempt to
* catch the case where ctime either didn't change, or went backwards
* (if someone reset the clock on the server) by looking at whether
* or not this RPC call was started after the inode was last updated.
* Note also the check for wraparound of 'attr_gencount'
*
* The function returns 'true' if it thinks the attributes in 'fattr' are
* more recent than the ones cached in the inode.
*
*/
static int nfs_inode_attrs_need_update(const struct inode *inode, const struct nfs_fattr *fattr)
{
const struct nfs_inode *nfsi = NFS_I(inode);
return ((long)fattr->gencount - (long)nfsi->attr_gencount) > 0 ||
((long)nfsi->attr_gencount - (long)nfs_read_attr_generation_counter() > 0);
}
/*
* Don't trust the change_attribute, mtime, ctime or size if
* a pnfs LAYOUTCOMMIT is outstanding
*/
static void nfs_inode_attrs_handle_layoutcommit(struct inode *inode,
struct nfs_fattr *fattr)
{
if (pnfs_layoutcommit_outstanding(inode))
fattr->valid &= ~(NFS_ATTR_FATTR_CHANGE |
NFS_ATTR_FATTR_MTIME |
NFS_ATTR_FATTR_CTIME |
NFS_ATTR_FATTR_SIZE);
}
static int nfs_refresh_inode_locked(struct inode *inode, struct nfs_fattr *fattr)
{
int ret;
trace_nfs_refresh_inode_enter(inode);
nfs_inode_attrs_handle_layoutcommit(inode, fattr);
if (nfs_inode_attrs_need_update(inode, fattr))
ret = nfs_update_inode(inode, fattr);
else
ret = nfs_check_inode_attributes(inode, fattr);
trace_nfs_refresh_inode_exit(inode, ret);
return ret;
}
/**
* nfs_refresh_inode - try to update the inode attribute cache
* @inode - pointer to inode
* @fattr - updated attributes
*
* Check that an RPC call that returned attributes has not overlapped with
* other recent updates of the inode metadata, then decide whether it is
* safe to do a full update of the inode attributes, or whether just to
* call nfs_check_inode_attributes.
*/
int nfs_refresh_inode(struct inode *inode, struct nfs_fattr *fattr)
{
int status;
if ((fattr->valid & NFS_ATTR_FATTR) == 0)
return 0;
spin_lock(&inode->i_lock);
status = nfs_refresh_inode_locked(inode, fattr);
spin_unlock(&inode->i_lock);
return status;
}
EXPORT_SYMBOL_GPL(nfs_refresh_inode);
static int nfs_post_op_update_inode_locked(struct inode *inode, struct nfs_fattr *fattr)
{
unsigned long invalid = NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
/*
* Don't revalidate the pagecache if we hold a delegation, but do
* force an attribute update
*/
if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
invalid = NFS_INO_INVALID_ATTR|NFS_INO_REVAL_FORCED;
if (S_ISDIR(inode->i_mode))
invalid |= NFS_INO_INVALID_DATA;
nfs_set_cache_invalid(inode, invalid);
if ((fattr->valid & NFS_ATTR_FATTR) == 0)
return 0;
return nfs_refresh_inode_locked(inode, fattr);
}
/**
* nfs_post_op_update_inode - try to update the inode attribute cache
* @inode - pointer to inode
* @fattr - updated attributes
*
* After an operation that has changed the inode metadata, mark the
* attribute cache as being invalid, then try to update it.
*
* NB: if the server didn't return any post op attributes, this
* function will force the retrieval of attributes before the next
* NFS request. Thus it should be used only for operations that
* are expected to change one or more attributes, to avoid
* unnecessary NFS requests and trips through nfs_update_inode().
*/
int nfs_post_op_update_inode(struct inode *inode, struct nfs_fattr *fattr)
{
int status;
spin_lock(&inode->i_lock);
nfs_fattr_set_barrier(fattr);
status = nfs_post_op_update_inode_locked(inode, fattr);
spin_unlock(&inode->i_lock);
return status;
}
EXPORT_SYMBOL_GPL(nfs_post_op_update_inode);
/**
* nfs_post_op_update_inode_force_wcc_locked - update the inode attribute cache
* @inode - pointer to inode
* @fattr - updated attributes
*
* After an operation that has changed the inode metadata, mark the
* attribute cache as being invalid, then try to update it. Fake up
* weak cache consistency data, if none exist.
*
* This function is mainly designed to be used by the ->write_done() functions.
*/
int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fattr *fattr)
{
int status;
/* Don't do a WCC update if these attributes are already stale */
if ((fattr->valid & NFS_ATTR_FATTR) == 0 ||
!nfs_inode_attrs_need_update(inode, fattr)) {
fattr->valid &= ~(NFS_ATTR_FATTR_PRECHANGE
| NFS_ATTR_FATTR_PRESIZE
| NFS_ATTR_FATTR_PREMTIME
| NFS_ATTR_FATTR_PRECTIME);
goto out_noforce;
}
if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 &&
(fattr->valid & NFS_ATTR_FATTR_PRECHANGE) == 0) {
fattr->pre_change_attr = inode->i_version;
fattr->valid |= NFS_ATTR_FATTR_PRECHANGE;
}
if ((fattr->valid & NFS_ATTR_FATTR_CTIME) != 0 &&
(fattr->valid & NFS_ATTR_FATTR_PRECTIME) == 0) {
memcpy(&fattr->pre_ctime, &inode->i_ctime, sizeof(fattr->pre_ctime));
fattr->valid |= NFS_ATTR_FATTR_PRECTIME;
}
if ((fattr->valid & NFS_ATTR_FATTR_MTIME) != 0 &&
(fattr->valid & NFS_ATTR_FATTR_PREMTIME) == 0) {
memcpy(&fattr->pre_mtime, &inode->i_mtime, sizeof(fattr->pre_mtime));
fattr->valid |= NFS_ATTR_FATTR_PREMTIME;
}
if ((fattr->valid & NFS_ATTR_FATTR_SIZE) != 0 &&
(fattr->valid & NFS_ATTR_FATTR_PRESIZE) == 0) {
fattr->pre_size = i_size_read(inode);
fattr->valid |= NFS_ATTR_FATTR_PRESIZE;
}
out_noforce:
status = nfs_post_op_update_inode_locked(inode, fattr);
return status;
}
/**
* nfs_post_op_update_inode_force_wcc - try to update the inode attribute cache
* @inode - pointer to inode
* @fattr - updated attributes
*
* After an operation that has changed the inode metadata, mark the
* attribute cache as being invalid, then try to update it. Fake up
* weak cache consistency data, if none exist.
*
* This function is mainly designed to be used by the ->write_done() functions.
*/
int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fattr)
{
int status;
spin_lock(&inode->i_lock);
nfs_fattr_set_barrier(fattr);
status = nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
spin_unlock(&inode->i_lock);
return status;
}
EXPORT_SYMBOL_GPL(nfs_post_op_update_inode_force_wcc);
2015-04-04 01:35:59 +07:00
static inline bool nfs_fileid_valid(struct nfs_inode *nfsi,
struct nfs_fattr *fattr)
{
bool ret1 = true, ret2 = true;
if (fattr->valid & NFS_ATTR_FATTR_FILEID)
ret1 = (nfsi->fileid == fattr->fileid);
if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
ret2 = (nfsi->fileid == fattr->mounted_on_fileid);
return ret1 || ret2;
}
/*
* Many nfs protocol calls return the new file attributes after
* an operation. Here we update the inode to reflect the state
* of the server's inode.
*
* This is a bit tricky because we have to make sure all dirty pages
* have been sent off to the server before calling invalidate_inode_pages.
* To make sure no other process adds more write requests while we try
* our best to flush them, we make them sleep during the attribute refresh.
*
* A very similar scenario holds for the dir cache.
*/
static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
{
struct nfs_server *server;
struct nfs_inode *nfsi = NFS_I(inode);
loff_t cur_isize, new_isize;
unsigned long invalid = 0;
unsigned long now = jiffies;
unsigned long save_cache_validity;
bool cache_revalidated = true;
dfprintk(VFS, "NFS: %s(%s/%lu fh_crc=0x%08x ct=%d info=0x%x)\n",
__func__, inode->i_sb->s_id, inode->i_ino,
nfs_display_fhandle_hash(NFS_FH(inode)),
atomic_read(&inode->i_count), fattr->valid);
2015-04-04 01:35:59 +07:00
if (!nfs_fileid_valid(nfsi, fattr)) {
printk(KERN_ERR "NFS: server %s error: fileid changed\n"
"fsid %s: expected fileid 0x%Lx, got 0x%Lx\n",
NFS_SERVER(inode)->nfs_client->cl_hostname,
inode->i_sb->s_id, (long long)nfsi->fileid,
(long long)fattr->fileid);
goto out_err;
}
/*
* Make sure the inode's type hasn't changed.
*/
if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT)) {
/*
* Big trouble! The inode has become a different object.
*/
printk(KERN_DEBUG "NFS: %s: inode %lu mode changed, %07o to %07o\n",
__func__, inode->i_ino, inode->i_mode, fattr->mode);
goto out_err;
}
server = NFS_SERVER(inode);
/* Update the fsid? */
if (S_ISDIR(inode->i_mode) && (fattr->valid & NFS_ATTR_FATTR_FSID) &&
!nfs_fsid_equal(&server->fsid, &fattr->fsid) &&
!IS_AUTOMOUNT(inode))
server->fsid = fattr->fsid;
/*
* Update the read time so we don't revalidate too often.
*/
nfsi->read_cache_jiffies = fattr->time_start;
save_cache_validity = nfsi->cache_validity;
nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR
| NFS_INO_INVALID_ATIME
| NFS_INO_REVAL_FORCED
| NFS_INO_REVAL_PAGECACHE);
/* Do atomic weak cache consistency updates */
invalid |= nfs_wcc_update_inode(inode, fattr);
/* More cache consistency checks */
if (fattr->valid & NFS_ATTR_FATTR_CHANGE) {
if (inode->i_version != fattr->change_attr) {
dprintk("NFS: change_attr change on server for file %s/%ld\n",
inode->i_sb->s_id, inode->i_ino);
invalid |= NFS_INO_INVALID_ATTR
| NFS_INO_INVALID_DATA
| NFS_INO_INVALID_ACCESS
| NFS_INO_INVALID_ACL;
if (S_ISDIR(inode->i_mode))
nfs_force_lookup_revalidate(inode);
inode->i_version = fattr->change_attr;
}
} else {
nfsi->cache_validity |= save_cache_validity;
cache_revalidated = false;
}
if (fattr->valid & NFS_ATTR_FATTR_MTIME) {
memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
} else if (server->caps & NFS_CAP_MTIME) {
nfsi->cache_validity |= save_cache_validity &
(NFS_INO_INVALID_ATTR
| NFS_INO_REVAL_FORCED);
cache_revalidated = false;
}
if (fattr->valid & NFS_ATTR_FATTR_CTIME) {
memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
} else if (server->caps & NFS_CAP_CTIME) {
nfsi->cache_validity |= save_cache_validity &
(NFS_INO_INVALID_ATTR
| NFS_INO_REVAL_FORCED);
cache_revalidated = false;
}
/* Check if our cached file size is stale */
if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
new_isize = nfs_size_to_loff_t(fattr->size);
cur_isize = i_size_read(inode);
if (new_isize != cur_isize) {
/* Do we perhaps have any outstanding writes, or has
* the file grown beyond our last write? */
if ((nfsi->nrequests == 0) || new_isize > cur_isize) {
i_size_write(inode, new_isize);
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
}
dprintk("NFS: isize change on server for file %s/%ld "
"(%Ld to %Ld)\n",
inode->i_sb->s_id,
inode->i_ino,
(long long)cur_isize,
(long long)new_isize);
}
} else {
nfsi->cache_validity |= save_cache_validity &
(NFS_INO_INVALID_ATTR
| NFS_INO_REVAL_PAGECACHE
| NFS_INO_REVAL_FORCED);
cache_revalidated = false;
}
if (fattr->valid & NFS_ATTR_FATTR_ATIME)
memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime));
else if (server->caps & NFS_CAP_ATIME) {
nfsi->cache_validity |= save_cache_validity &
(NFS_INO_INVALID_ATIME
| NFS_INO_REVAL_FORCED);
cache_revalidated = false;
}
if (fattr->valid & NFS_ATTR_FATTR_MODE) {
if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) {
umode_t newmode = inode->i_mode & S_IFMT;
newmode |= fattr->mode & S_IALLUGO;
inode->i_mode = newmode;
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
}
} else if (server->caps & NFS_CAP_MODE) {
nfsi->cache_validity |= save_cache_validity &
(NFS_INO_INVALID_ATTR
| NFS_INO_INVALID_ACCESS
| NFS_INO_INVALID_ACL
| NFS_INO_REVAL_FORCED);
cache_revalidated = false;
}
if (fattr->valid & NFS_ATTR_FATTR_OWNER) {
if (!uid_eq(inode->i_uid, fattr->uid)) {
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
inode->i_uid = fattr->uid;
}
} else if (server->caps & NFS_CAP_OWNER) {
nfsi->cache_validity |= save_cache_validity &
(NFS_INO_INVALID_ATTR
| NFS_INO_INVALID_ACCESS
| NFS_INO_INVALID_ACL
| NFS_INO_REVAL_FORCED);
cache_revalidated = false;
}
if (fattr->valid & NFS_ATTR_FATTR_GROUP) {
if (!gid_eq(inode->i_gid, fattr->gid)) {
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
inode->i_gid = fattr->gid;
}
} else if (server->caps & NFS_CAP_OWNER_GROUP) {
nfsi->cache_validity |= save_cache_validity &
(NFS_INO_INVALID_ATTR
| NFS_INO_INVALID_ACCESS
| NFS_INO_INVALID_ACL
| NFS_INO_REVAL_FORCED);
cache_revalidated = false;
}
if (fattr->valid & NFS_ATTR_FATTR_NLINK) {
if (inode->i_nlink != fattr->nlink) {
invalid |= NFS_INO_INVALID_ATTR;
if (S_ISDIR(inode->i_mode))
invalid |= NFS_INO_INVALID_DATA;
set_nlink(inode, fattr->nlink);
}
} else if (server->caps & NFS_CAP_NLINK) {
nfsi->cache_validity |= save_cache_validity &
(NFS_INO_INVALID_ATTR
| NFS_INO_REVAL_FORCED);
cache_revalidated = false;
}
if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
/*
* report the blocks in 512byte units
*/
inode->i_blocks = nfs_calc_block_size(fattr->du.nfs3.used);
} else if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
inode->i_blocks = fattr->du.nfs2.blocks;
else
cache_revalidated = false;
/* Update attrtimeo value if we're out of the unstable period */
if (invalid & NFS_INO_INVALID_ATTR) {
nfs_inc_stats(inode, NFSIOS_ATTRINVALIDATE);
nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
nfsi->attrtimeo_timestamp = now;
/* Set barrier to be more recent than all outstanding updates */
nfsi->attr_gencount = nfs_inc_attr_generation_counter();
} else {
if (cache_revalidated) {
if (!time_in_range_open(now, nfsi->attrtimeo_timestamp,
nfsi->attrtimeo_timestamp + nfsi->attrtimeo)) {
nfsi->attrtimeo <<= 1;
if (nfsi->attrtimeo > NFS_MAXATTRTIMEO(inode))
nfsi->attrtimeo = NFS_MAXATTRTIMEO(inode);
}
nfsi->attrtimeo_timestamp = now;
}
/* Set the barrier to be more recent than this fattr */
if ((long)fattr->gencount - (long)nfsi->attr_gencount > 0)
nfsi->attr_gencount = fattr->gencount;
}
/* Don't declare attrcache up to date if there were no attrs! */
if (cache_revalidated)
invalid &= ~NFS_INO_INVALID_ATTR;
/* Don't invalidate the data if we were to blame */
if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
|| S_ISLNK(inode->i_mode)))
invalid &= ~NFS_INO_INVALID_DATA;
if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ) ||
(save_cache_validity & NFS_INO_REVAL_FORCED))
nfs_set_cache_invalid(inode, invalid);
NFS: Use FS-Cache invalidation Use the new FS-Cache invalidation facility from NFS to deal with foreign changes being detected on the server rather than attempting to retire the old cookie and get a new one. The problem with the old method was that NFS did not wait for all outstanding storage and retrieval ops on the cache to complete. There was no automatic wait between the calls to ->readpages() and calls to invalidate_inode_pages2() as the latter can only wait on locked pages that have been added to the pagecache (which they haven't yet on entry to ->readpages()). This was leading to oopses like the one below when an outstanding read got cut off from its cookie by a premature release. BUG: unable to handle kernel NULL pointer dereference at 00000000000000a8 IP: [<ffffffffa0075118>] __fscache_read_or_alloc_pages+0x1dd/0x315 [fscache] PGD 15889067 PUD 15890067 PMD 0 Oops: 0000 [#1] SMP CPU 0 Modules linked in: cachefiles nfs fscache auth_rpcgss nfs_acl lockd sunrpc Pid: 4544, comm: tar Not tainted 3.1.0-rc4-fsdevel+ #1064 /DG965RY RIP: 0010:[<ffffffffa0075118>] [<ffffffffa0075118>] __fscache_read_or_alloc_pages+0x1dd/0x315 [fscache] RSP: 0018:ffff8800158799e8 EFLAGS: 00010246 RAX: 0000000000000000 RBX: ffff8800070d41e0 RCX: ffff8800083dc1b0 RDX: 0000000000000000 RSI: ffff880015879960 RDI: ffff88003e627b90 RBP: ffff880015879a28 R08: 0000000000000002 R09: 0000000000000002 R10: 0000000000000001 R11: ffff880015879950 R12: ffff880015879aa4 R13: 0000000000000000 R14: ffff8800083dc158 R15: ffff880015879be8 FS: 00007f671e9d87c0(0000) GS:ffff88003bc00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b CR2: 00000000000000a8 CR3: 000000001587f000 CR4: 00000000000006f0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400 Process tar (pid: 4544, threadinfo ffff880015878000, task ffff880015875040) Stack: ffffffffa00b1759 ffff8800070dc158 ffff8800000213da ffff88002a286508 ffff880015879aa4 ffff880015879be8 0000000000000001 ffff88002a2866e8 ffff880015879a88 ffffffffa00b20be 00000000000200da ffff880015875040 Call Trace: [<ffffffffa00b1759>] ? nfs_fscache_wait_bit+0xd/0xd [nfs] [<ffffffffa00b20be>] __nfs_readpages_from_fscache+0x7e/0x13f [nfs] [<ffffffff81095fe7>] ? __alloc_pages_nodemask+0x156/0x662 [<ffffffffa0098763>] nfs_readpages+0xee/0x187 [nfs] [<ffffffff81098a5e>] __do_page_cache_readahead+0x1be/0x267 [<ffffffff81098942>] ? __do_page_cache_readahead+0xa2/0x267 [<ffffffff81098d7b>] ra_submit+0x1c/0x20 [<ffffffff8109900a>] ondemand_readahead+0x28b/0x29a [<ffffffff810990ce>] page_cache_sync_readahead+0x38/0x3a [<ffffffff81091d8a>] generic_file_aio_read+0x2ab/0x67e [<ffffffffa008cfbe>] nfs_file_read+0xa4/0xc9 [nfs] [<ffffffff810c22c4>] do_sync_read+0xba/0xfa [<ffffffff810a62c9>] ? might_fault+0x4e/0x9e [<ffffffff81177a47>] ? security_file_permission+0x7b/0x84 [<ffffffff810c25dd>] ? rw_verify_area+0xab/0xc8 [<ffffffff810c29a4>] vfs_read+0xaa/0x13a [<ffffffff810c2a79>] sys_read+0x45/0x6c [<ffffffff813ac37b>] system_call_fastpath+0x16/0x1b Reported-by: Mark Moseley <moseleymark@gmail.com> Signed-off-by: David Howells <dhowells@redhat.com>
2012-12-21 04:52:38 +07:00
return 0;
out_err:
/*
* No need to worry about unhashing the dentry, as the
* lookup validation will know that the inode is bad.
* (But we fall through to invalidate the caches.)
*/
nfs_invalidate_inode(inode);
return -ESTALE;
}
struct inode *nfs_alloc_inode(struct super_block *sb)
{
struct nfs_inode *nfsi;
nfsi = kmem_cache_alloc(nfs_inode_cachep, GFP_KERNEL);
if (!nfsi)
return NULL;
nfsi->flags = 0UL;
nfsi->cache_validity = 0UL;
#if IS_ENABLED(CONFIG_NFS_V4)
nfsi->nfs4_acl = NULL;
#endif /* CONFIG_NFS_V4 */
return &nfsi->vfs_inode;
}
EXPORT_SYMBOL_GPL(nfs_alloc_inode);
2011-01-07 13:49:49 +07:00
static void nfs_i_callback(struct rcu_head *head)
{
2011-01-07 13:49:49 +07:00
struct inode *inode = container_of(head, struct inode, i_rcu);
kmem_cache_free(nfs_inode_cachep, NFS_I(inode));
}
2011-01-07 13:49:49 +07:00
void nfs_destroy_inode(struct inode *inode)
{
call_rcu(&inode->i_rcu, nfs_i_callback);
}
EXPORT_SYMBOL_GPL(nfs_destroy_inode);
2011-01-07 13:49:49 +07:00
git-nfs-build-fixes Fix various problems with nfs4 disabled. And various other things. In file included from fs/nfs/inode.c:50: fs/nfs/internal.h:24: error: static declaration of 'nfs_do_refmount' follows non-static declaration include/linux/nfs_fs.h:320: error: previous declaration of 'nfs_do_refmount' was here fs/nfs/internal.h:65: warning: 'struct nfs4_fs_locations' declared inside parameter list fs/nfs/internal.h:65: warning: its scope is only this definition or declaration, which is probably not what you want fs/nfs/internal.h: In function 'nfs4_path': fs/nfs/internal.h:97: error: 'struct nfs_server' has no member named 'mnt_path' fs/nfs/inode.c: In function 'init_once': fs/nfs/inode.c:1116: error: 'struct nfs_inode' has no member named 'open_states' fs/nfs/inode.c:1116: error: 'struct nfs_inode' has no member named 'delegation' fs/nfs/inode.c:1116: error: 'struct nfs_inode' has no member named 'delegation_state' fs/nfs/inode.c:1116: error: 'struct nfs_inode' has no member named 'rwsem' distcc[26452] ERROR: compile fs/nfs/inode.c on g5/64 failed make[1]: *** [fs/nfs/inode.o] Error 1 make: *** [fs/nfs/inode.o] Error 2 make: *** Waiting for unfinished jobs.... In file included from fs/nfs/nfs3xdr.c:26: fs/nfs/internal.h:24: error: static declaration of 'nfs_do_refmount' follows non-static declaration include/linux/nfs_fs.h:320: error: previous declaration of 'nfs_do_refmount' was here fs/nfs/internal.h:65: warning: 'struct nfs4_fs_locations' declared inside parameter list fs/nfs/internal.h:65: warning: its scope is only this definition or declaration, which is probably not what you want fs/nfs/internal.h: In function 'nfs4_path': fs/nfs/internal.h:97: error: 'struct nfs_server' has no member named 'mnt_path' distcc[26486] ERROR: compile fs/nfs/nfs3xdr.c on g5/64 failed make[1]: *** [fs/nfs/nfs3xdr.o] Error 1 make: *** [fs/nfs/nfs3xdr.o] Error 2 In file included from fs/nfs/nfs3proc.c:24: fs/nfs/internal.h:24: error: static declaration of 'nfs_do_refmount' follows non-static declaration include/linux/nfs_fs.h:320: error: previous declaration of 'nfs_do_refmount' was here fs/nfs/internal.h:65: warning: 'struct nfs4_fs_locations' declared inside parameter list fs/nfs/internal.h:65: warning: its scope is only this definition or declaration, which is probably not what you want fs/nfs/internal.h: In function 'nfs4_path': fs/nfs/internal.h:97: error: 'struct nfs_server' has no member named 'mnt_path' distcc[26469] ERROR: compile fs/nfs/nfs3proc.c on bix/32 failed make[1]: *** [fs/nfs/nfs3proc.o] Error 1 make: *** [fs/nfs/nfs3proc.o] Error 2 **FAILED** Cc: Alexey Dobriyan <adobriyan@gmail.com> Cc: Andreas Gruenbacher <agruen@suse.de> Cc: Andy Adamson <andros@citi.umich.edu> Cc: Chuck Lever <cel@netapp.com> Cc: David Howells <dhowells@redhat.com> Cc: J. Bruce Fields <bfields@fieldses.org> Cc: Manoj Naik <manoj@almaden.ibm.com> Cc: Marc Eshel <eshel@almaden.ibm.com> Cc: Trond Myklebust <Trond.Myklebust@netapp.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2006-06-25 16:41:26 +07:00
static inline void nfs4_init_once(struct nfs_inode *nfsi)
{
#if IS_ENABLED(CONFIG_NFS_V4)
git-nfs-build-fixes Fix various problems with nfs4 disabled. And various other things. In file included from fs/nfs/inode.c:50: fs/nfs/internal.h:24: error: static declaration of 'nfs_do_refmount' follows non-static declaration include/linux/nfs_fs.h:320: error: previous declaration of 'nfs_do_refmount' was here fs/nfs/internal.h:65: warning: 'struct nfs4_fs_locations' declared inside parameter list fs/nfs/internal.h:65: warning: its scope is only this definition or declaration, which is probably not what you want fs/nfs/internal.h: In function 'nfs4_path': fs/nfs/internal.h:97: error: 'struct nfs_server' has no member named 'mnt_path' fs/nfs/inode.c: In function 'init_once': fs/nfs/inode.c:1116: error: 'struct nfs_inode' has no member named 'open_states' fs/nfs/inode.c:1116: error: 'struct nfs_inode' has no member named 'delegation' fs/nfs/inode.c:1116: error: 'struct nfs_inode' has no member named 'delegation_state' fs/nfs/inode.c:1116: error: 'struct nfs_inode' has no member named 'rwsem' distcc[26452] ERROR: compile fs/nfs/inode.c on g5/64 failed make[1]: *** [fs/nfs/inode.o] Error 1 make: *** [fs/nfs/inode.o] Error 2 make: *** Waiting for unfinished jobs.... In file included from fs/nfs/nfs3xdr.c:26: fs/nfs/internal.h:24: error: static declaration of 'nfs_do_refmount' follows non-static declaration include/linux/nfs_fs.h:320: error: previous declaration of 'nfs_do_refmount' was here fs/nfs/internal.h:65: warning: 'struct nfs4_fs_locations' declared inside parameter list fs/nfs/internal.h:65: warning: its scope is only this definition or declaration, which is probably not what you want fs/nfs/internal.h: In function 'nfs4_path': fs/nfs/internal.h:97: error: 'struct nfs_server' has no member named 'mnt_path' distcc[26486] ERROR: compile fs/nfs/nfs3xdr.c on g5/64 failed make[1]: *** [fs/nfs/nfs3xdr.o] Error 1 make: *** [fs/nfs/nfs3xdr.o] Error 2 In file included from fs/nfs/nfs3proc.c:24: fs/nfs/internal.h:24: error: static declaration of 'nfs_do_refmount' follows non-static declaration include/linux/nfs_fs.h:320: error: previous declaration of 'nfs_do_refmount' was here fs/nfs/internal.h:65: warning: 'struct nfs4_fs_locations' declared inside parameter list fs/nfs/internal.h:65: warning: its scope is only this definition or declaration, which is probably not what you want fs/nfs/internal.h: In function 'nfs4_path': fs/nfs/internal.h:97: error: 'struct nfs_server' has no member named 'mnt_path' distcc[26469] ERROR: compile fs/nfs/nfs3proc.c on bix/32 failed make[1]: *** [fs/nfs/nfs3proc.o] Error 1 make: *** [fs/nfs/nfs3proc.o] Error 2 **FAILED** Cc: Alexey Dobriyan <adobriyan@gmail.com> Cc: Andreas Gruenbacher <agruen@suse.de> Cc: Andy Adamson <andros@citi.umich.edu> Cc: Chuck Lever <cel@netapp.com> Cc: David Howells <dhowells@redhat.com> Cc: J. Bruce Fields <bfields@fieldses.org> Cc: Manoj Naik <manoj@almaden.ibm.com> Cc: Marc Eshel <eshel@almaden.ibm.com> Cc: Trond Myklebust <Trond.Myklebust@netapp.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2006-06-25 16:41:26 +07:00
INIT_LIST_HEAD(&nfsi->open_states);
nfsi->delegation = NULL;
init_rwsem(&nfsi->rwsem);
nfsi->layout = NULL;
git-nfs-build-fixes Fix various problems with nfs4 disabled. And various other things. In file included from fs/nfs/inode.c:50: fs/nfs/internal.h:24: error: static declaration of 'nfs_do_refmount' follows non-static declaration include/linux/nfs_fs.h:320: error: previous declaration of 'nfs_do_refmount' was here fs/nfs/internal.h:65: warning: 'struct nfs4_fs_locations' declared inside parameter list fs/nfs/internal.h:65: warning: its scope is only this definition or declaration, which is probably not what you want fs/nfs/internal.h: In function 'nfs4_path': fs/nfs/internal.h:97: error: 'struct nfs_server' has no member named 'mnt_path' fs/nfs/inode.c: In function 'init_once': fs/nfs/inode.c:1116: error: 'struct nfs_inode' has no member named 'open_states' fs/nfs/inode.c:1116: error: 'struct nfs_inode' has no member named 'delegation' fs/nfs/inode.c:1116: error: 'struct nfs_inode' has no member named 'delegation_state' fs/nfs/inode.c:1116: error: 'struct nfs_inode' has no member named 'rwsem' distcc[26452] ERROR: compile fs/nfs/inode.c on g5/64 failed make[1]: *** [fs/nfs/inode.o] Error 1 make: *** [fs/nfs/inode.o] Error 2 make: *** Waiting for unfinished jobs.... In file included from fs/nfs/nfs3xdr.c:26: fs/nfs/internal.h:24: error: static declaration of 'nfs_do_refmount' follows non-static declaration include/linux/nfs_fs.h:320: error: previous declaration of 'nfs_do_refmount' was here fs/nfs/internal.h:65: warning: 'struct nfs4_fs_locations' declared inside parameter list fs/nfs/internal.h:65: warning: its scope is only this definition or declaration, which is probably not what you want fs/nfs/internal.h: In function 'nfs4_path': fs/nfs/internal.h:97: error: 'struct nfs_server' has no member named 'mnt_path' distcc[26486] ERROR: compile fs/nfs/nfs3xdr.c on g5/64 failed make[1]: *** [fs/nfs/nfs3xdr.o] Error 1 make: *** [fs/nfs/nfs3xdr.o] Error 2 In file included from fs/nfs/nfs3proc.c:24: fs/nfs/internal.h:24: error: static declaration of 'nfs_do_refmount' follows non-static declaration include/linux/nfs_fs.h:320: error: previous declaration of 'nfs_do_refmount' was here fs/nfs/internal.h:65: warning: 'struct nfs4_fs_locations' declared inside parameter list fs/nfs/internal.h:65: warning: its scope is only this definition or declaration, which is probably not what you want fs/nfs/internal.h: In function 'nfs4_path': fs/nfs/internal.h:97: error: 'struct nfs_server' has no member named 'mnt_path' distcc[26469] ERROR: compile fs/nfs/nfs3proc.c on bix/32 failed make[1]: *** [fs/nfs/nfs3proc.o] Error 1 make: *** [fs/nfs/nfs3proc.o] Error 2 **FAILED** Cc: Alexey Dobriyan <adobriyan@gmail.com> Cc: Andreas Gruenbacher <agruen@suse.de> Cc: Andy Adamson <andros@citi.umich.edu> Cc: Chuck Lever <cel@netapp.com> Cc: David Howells <dhowells@redhat.com> Cc: J. Bruce Fields <bfields@fieldses.org> Cc: Manoj Naik <manoj@almaden.ibm.com> Cc: Marc Eshel <eshel@almaden.ibm.com> Cc: Trond Myklebust <Trond.Myklebust@netapp.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2006-06-25 16:41:26 +07:00
#endif
}
static void init_once(void *foo)
{
struct nfs_inode *nfsi = (struct nfs_inode *) foo;
inode_init_once(&nfsi->vfs_inode);
INIT_LIST_HEAD(&nfsi->open_files);
INIT_LIST_HEAD(&nfsi->access_cache_entry_lru);
INIT_LIST_HEAD(&nfsi->access_cache_inode_lru);
INIT_LIST_HEAD(&nfsi->commit_info.list);
nfsi->nrequests = 0;
nfsi->commit_info.ncommit = 0;
atomic_set(&nfsi->commit_info.rpcs_out, 0);
init_rwsem(&nfsi->rmdir_sem);
nfs4_init_once(nfsi);
}
static int __init nfs_init_inodecache(void)
{
nfs_inode_cachep = kmem_cache_create("nfs_inode_cache",
sizeof(struct nfs_inode),
0, (SLAB_RECLAIM_ACCOUNT|
2016-01-15 06:18:21 +07:00
SLAB_MEM_SPREAD|SLAB_ACCOUNT),
init_once);
if (nfs_inode_cachep == NULL)
return -ENOMEM;
return 0;
}
static void nfs_destroy_inodecache(void)
{
/*
* Make sure all delayed rcu free inodes are flushed before we
* destroy cache.
*/
rcu_barrier();
kmem_cache_destroy(nfs_inode_cachep);
}
struct workqueue_struct *nfsiod_workqueue;
EXPORT_SYMBOL_GPL(nfsiod_workqueue);
/*
* start up the nfsiod workqueue
*/
static int nfsiod_start(void)
{
struct workqueue_struct *wq;
dprintk("RPC: creating workqueue nfsiod\n");
wq = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM, 0);
if (wq == NULL)
return -ENOMEM;
nfsiod_workqueue = wq;
return 0;
}
/*
* Destroy the nfsiod workqueue
*/
static void nfsiod_stop(void)
{
struct workqueue_struct *wq;
wq = nfsiod_workqueue;
if (wq == NULL)
return;
nfsiod_workqueue = NULL;
destroy_workqueue(wq);
}
int nfs_net_id;
EXPORT_SYMBOL_GPL(nfs_net_id);
static int nfs_net_init(struct net *net)
{
nfs_clients_init(net);
return nfs_fs_proc_net_init(net);
}
static void nfs_net_exit(struct net *net)
{
nfs_fs_proc_net_exit(net);
nfs_cleanup_cb_ident_idr(net);
}
static struct pernet_operations nfs_net_ops = {
.init = nfs_net_init,
.exit = nfs_net_exit,
.id = &nfs_net_id,
.size = sizeof(struct nfs_net),
};
/*
* Initialize NFS
*/
static int __init init_nfs_fs(void)
{
int err;
err = register_pernet_subsys(&nfs_net_ops);
if (err < 0)
goto out9;
err = nfs_fscache_register();
if (err < 0)
goto out8;
err = nfsiod_start();
if (err)
goto out7;
err = nfs_fs_proc_init();
if (err)
goto out6;
err = nfs_init_nfspagecache();
if (err)
goto out5;
err = nfs_init_inodecache();
if (err)
goto out4;
err = nfs_init_readpagecache();
if (err)
goto out3;
err = nfs_init_writepagecache();
if (err)
goto out2;
err = nfs_init_directcache();
if (err)
goto out1;
rpc_proc_register(&init_net, &nfs_rpcstat);
err = register_nfs_fs();
if (err)
goto out0;
return 0;
out0:
rpc_proc_unregister(&init_net, "nfs");
nfs_destroy_directcache();
out1:
nfs_destroy_writepagecache();
out2:
nfs_destroy_readpagecache();
out3:
nfs_destroy_inodecache();
out4:
nfs_destroy_nfspagecache();
out5:
nfs_fs_proc_exit();
out6:
nfsiod_stop();
out7:
nfs_fscache_unregister();
out8:
unregister_pernet_subsys(&nfs_net_ops);
out9:
return err;
}
static void __exit exit_nfs_fs(void)
{
nfs_destroy_directcache();
nfs_destroy_writepagecache();
nfs_destroy_readpagecache();
nfs_destroy_inodecache();
nfs_destroy_nfspagecache();
nfs_fscache_unregister();
unregister_pernet_subsys(&nfs_net_ops);
rpc_proc_unregister(&init_net, "nfs");
unregister_nfs_fs();
nfs_fs_proc_exit();
nfsiod_stop();
}
/* Not quite true; I just maintain it */
MODULE_AUTHOR("Olaf Kirch <okir@monad.swb.de>");
MODULE_LICENSE("GPL");
module_param(enable_ino64, bool, 0644);
module_init(init_nfs_fs)
module_exit(exit_nfs_fs)