/*
 * linux/fs/nfs/nfs4_fs.h
 *
 * Copyright (C) 2005 Trond Myklebust
 *
 * NFSv4-specific filesystem definitions and declarations
 */
|
|
|
|
|
|
|
|
#ifndef __LINUX_FS_NFS_NFS4_FS_H
|
|
|
|
#define __LINUX_FS_NFS_NFS4_FS_H
|
|
|
|
|
|
|
|
#ifdef CONFIG_NFS_V4
|
|
|
|
|
|
|
|
struct idmap;
|
|
|
|
|
|
|
|
/*
 * Per-nfs_client state-manager flag bits.
 * NOTE(review): individual bit semantics are implemented in nfs4state.c;
 * the names below are the authoritative reference.
 */
enum nfs4_client_state {
	NFS4CLNT_MANAGER_RUNNING  = 0,	/* state manager thread is active */
	NFS4CLNT_CHECK_LEASE,
	NFS4CLNT_LEASE_EXPIRED,
	NFS4CLNT_RECLAIM_REBOOT,
	NFS4CLNT_RECLAIM_NOGRACE,
	NFS4CLNT_DELEGRETURN,
	NFS4CLNT_LAYOUTRECALL,
	NFS4CLNT_SESSION_RESET,
	NFS4CLNT_RECALL_SLOT,
	NFS4CLNT_LEASE_CONFIRM,
	NFS4CLNT_SERVER_SCOPE_MISMATCH,
};
|
|
|
|
|
/* Run-time flag bits for an NFSv4.1 session. */
enum nfs4_session_state {
	NFS4_SESSION_INITING,	/* session is being initialized */
	NFS4_SESSION_DRAINING,	/* session is being drained of requests */
};
|
|
|
|
|
2011-08-25 02:07:37 +07:00
|
|
|
/* Reasons for kicking lease renewal (bit mask values). */
#define NFS4_RENEW_TIMEOUT		0x01
#define NFS4_RENEW_DELEGATION_CB	0x02
|
|
|
|
|
2010-06-16 20:52:26 +07:00
|
|
|
struct nfs4_minor_version_ops {
|
|
|
|
u32 minor_version;
|
|
|
|
|
2011-03-25 00:12:24 +07:00
|
|
|
int (*call_sync)(struct rpc_clnt *clnt,
|
|
|
|
struct nfs_server *server,
|
2010-06-16 20:52:26 +07:00
|
|
|
struct rpc_message *msg,
|
|
|
|
struct nfs4_sequence_args *args,
|
|
|
|
struct nfs4_sequence_res *res,
|
|
|
|
int cache_reply);
|
2010-06-16 20:52:27 +07:00
|
|
|
int (*validate_stateid)(struct nfs_delegation *,
|
|
|
|
const nfs4_stateid *);
|
2011-06-03 01:59:07 +07:00
|
|
|
int (*find_root_sec)(struct nfs_server *, struct nfs_fh *,
|
|
|
|
struct nfs_fsinfo *);
|
2010-06-16 20:52:27 +07:00
|
|
|
const struct nfs4_state_recovery_ops *reboot_recovery_ops;
|
|
|
|
const struct nfs4_state_recovery_ops *nograce_recovery_ops;
|
|
|
|
const struct nfs4_state_maintenance_ops *state_renewal_ops;
|
2010-06-16 20:52:26 +07:00
|
|
|
};
|
|
|
|
|
NFSv4: Add functions to order RPC calls
NFSv4 file state-changing functions such as OPEN, CLOSE, LOCK,... are all
labelled with "sequence identifiers" in order to prevent the server from
reordering RPC requests, as this could cause its file state to
become out of sync with the client.
Currently the NFS client code enforces this ordering locally using
semaphores to restrict access to structures until the RPC call is done.
This, of course, only works with synchronous RPC calls, since the
user process must first grab the semaphore.
By dropping semaphores, and instead teaching the RPC engine to hold
the RPC calls until they are ready to be sent, we can extend this
process to work nicely with asynchronous RPC calls too.
This patch adds a new list called "rpc_sequence" that defines the order
of the RPC calls to be sent. We add one such list for each state_owner.
When an RPC call is ready to be sent, it checks if it is top of the
rpc_sequence list. If so, it proceeds. If not, it goes back to sleep,
and loops until it hits top of the list.
Once the RPC call has completed, it can then bump the sequence id counter,
and remove itself from the rpc_sequence list, and then wake up the next
sleeper.
Note that the state_owner sequence ids and lock_owner sequence ids are
all indexed to the same rpc_sequence list, so OPEN, LOCK,... requests
are all ordered w.r.t. each other.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2005-10-19 04:20:12 +07:00
|
|
|
/*
|
|
|
|
* struct rpc_sequence ensures that RPC calls are sent in the exact
|
|
|
|
* order that they appear on the list.
|
|
|
|
*/
|
|
|
|
struct rpc_sequence {
|
|
|
|
struct rpc_wait_queue wait; /* RPC call delay queue */
|
|
|
|
spinlock_t lock; /* Protects the list */
|
|
|
|
struct list_head list; /* Defines sequence of RPC calls */
|
|
|
|
};
|
|
|
|
|
|
|
|
#define NFS_SEQID_CONFIRMED 1
|
|
|
|
struct nfs_seqid_counter {
|
|
|
|
struct rpc_sequence *sequence;
|
|
|
|
int flags;
|
|
|
|
u32 counter;
|
|
|
|
};
|
|
|
|
|
|
|
|
struct nfs_seqid {
|
|
|
|
struct nfs_seqid_counter *sequence;
|
2005-10-21 04:22:41 +07:00
|
|
|
struct list_head list;
|
NFSv4: Add functions to order RPC calls
NFSv4 file state-changing functions such as OPEN, CLOSE, LOCK,... are all
labelled with "sequence identifiers" in order to prevent the server from
reordering RPC requests, as this could cause its file state to
become out of sync with the client.
Currently the NFS client code enforces this ordering locally using
semaphores to restrict access to structures until the RPC call is done.
This, of course, only works with synchronous RPC calls, since the
user process must first grab the semaphore.
By dropping semaphores, and instead teaching the RPC engine to hold
the RPC calls until they are ready to be sent, we can extend this
process to work nicely with asynchronous RPC calls too.
This patch adds a new list called "rpc_sequence" that defines the order
of the RPC calls to be sent. We add one such list for each state_owner.
When an RPC call is ready to be sent, it checks if it is top of the
rpc_sequence list. If so, it proceeds. If not, it goes back to sleep,
and loops until it hits top of the list.
Once the RPC call has completed, it can then bump the sequence id counter,
and remove itself from the rpc_sequence list, and then wake up the next
sleeper.
Note that the state_owner sequence ids and lock_owner sequence ids are
all indexed to the same rpc_sequence list, so OPEN, LOCK,... requests
are all ordered w.r.t. each other.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2005-10-19 04:20:12 +07:00
|
|
|
};
|
|
|
|
|
|
|
|
static inline void nfs_confirm_seqid(struct nfs_seqid_counter *seqid, int status)
|
|
|
|
{
|
|
|
|
if (seqid_mutating_err(-status))
|
|
|
|
seqid->flags |= NFS_SEQID_CONFIRMED;
|
|
|
|
}
|
|
|
|
|
2005-06-23 00:16:21 +07:00
|
|
|
/*
|
|
|
|
* NFS4 state_owners and lock_owners are simply labels for ordered
|
|
|
|
* sequences of RPC calls. Their sole purpose is to provide once-only
|
|
|
|
* semantics by allowing the server to identify replayed requests.
|
|
|
|
*/
|
|
|
|
struct nfs4_state_owner {
|
2007-07-06 21:53:21 +07:00
|
|
|
struct nfs_server *so_server;
|
NFS: Cache state owners after files are closed
Servers have a finite amount of memory to store NFSv4 open and lock
owners. Moreover, servers may have a difficult time determining when
they can reap their state owner table, thanks to gray areas in the
NFSv4 protocol specification. Thus clients should be careful to reuse
state owners when possible.
Currently Linux is not too careful. When a user has closed all her
files on one mount point, the state owner's reference count goes to
zero, and it is released. The next OPEN allocates a new one. A
workload that serially opens and closes files can run through a large
number of open owners this way.
When a state owner's reference count goes to zero, slap it onto a free
list for that nfs_server, with an expiry time. Garbage collect before
looking for a state owner. This makes state owners for active users
available for re-use.
Now that there can be unused state owners remaining at umount time,
purge the state owner free list when a server is destroyed. Also be
sure not to reclaim unused state owners during state recovery.
This change has benefits for the client as well. For some workloads,
this approach drops the number of OPEN_CONFIRM calls from the same as
the number of OPEN calls, down to just one. This reduces wire traffic
and thus open(2) latency. Before this patch, untarring a kernel
source tarball shows the OPEN_CONFIRM call counter steadily increasing
through the test. With the patch, the OPEN_CONFIRM count remains at 1
throughout the entire untar.
As long as the expiry time is kept short, I don't think garbage
collection should be terribly expensive, although it does bounce the
clp->cl_lock around a bit.
[ At some point we should rationalize the use of the nfs_server
->destroy method. ]
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
[Trond: Fixed a garbage collection race and a few efficiency issues]
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2011-12-07 04:13:48 +07:00
|
|
|
struct list_head so_lru;
|
|
|
|
unsigned long so_expires;
|
2010-12-24 08:32:43 +07:00
|
|
|
struct rb_node so_server_node;
|
2005-06-23 00:16:21 +07:00
|
|
|
|
|
|
|
struct rpc_cred *so_cred; /* Associated cred */
|
2007-07-03 00:58:33 +07:00
|
|
|
|
|
|
|
spinlock_t so_lock;
|
|
|
|
atomic_t so_count;
|
2008-12-24 03:21:43 +07:00
|
|
|
unsigned long so_flags;
|
2005-06-23 00:16:21 +07:00
|
|
|
struct list_head so_states;
|
NFSv4: Add functions to order RPC calls
NFSv4 file state-changing functions such as OPEN, CLOSE, LOCK,... are all
labelled with "sequence identifiers" in order to prevent the server from
reordering RPC requests, as this could cause its file state to
become out of sync with the client.
Currently the NFS client code enforces this ordering locally using
semaphores to restrict access to structures until the RPC call is done.
This, of course, only works with synchronous RPC calls, since the
user process must first grab the semaphore.
By dropping semaphores, and instead teaching the RPC engine to hold
the RPC calls until they are ready to be sent, we can extend this
process to work nicely with asynchronous RPC calls too.
This patch adds a new list called "rpc_sequence" that defines the order
of the RPC calls to be sent. We add one such list for each state_owner.
When an RPC call is ready to be sent, it checks if it is top of the
rpc_sequence list. If so, it proceeds. If not, it goes back to sleep,
and loops until it hits top of the list.
Once the RPC call has completed, it can then bump the sequence id counter,
and remove itself from the rpc_sequence list, and then wake up the next
sleeper.
Note that the state_owner sequence ids and lock_owner sequence ids are
all indexed to the same rpc_sequence list, so OPEN, LOCK,... requests
are all ordered w.r.t. each other.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2005-10-19 04:20:12 +07:00
|
|
|
struct nfs_seqid_counter so_seqid;
|
|
|
|
struct rpc_sequence so_sequence;
|
2012-01-18 10:04:24 +07:00
|
|
|
int so_owner_id;
|
2005-06-23 00:16:21 +07:00
|
|
|
};
|
|
|
|
|
2008-12-24 03:21:43 +07:00
|
|
|
/* Reclaim phases for state owners (reboot vs. no-grace recovery). */
enum {
	NFS_OWNER_RECLAIM_REBOOT,
	NFS_OWNER_RECLAIM_NOGRACE
};
|
|
|
|
|
2009-12-09 16:50:14 +07:00
|
|
|
/* Lock recovery types passed through the lock code. */
#define NFS_LOCK_NEW		0
#define NFS_LOCK_RECLAIM	1
#define NFS_LOCK_EXPIRED	2
|
|
|
|
|
2005-06-23 00:16:21 +07:00
|
|
|
/*
 * struct nfs4_state maintains the client-side state for a given
 * (state_owner,inode) tuple (OPEN) or state_owner (LOCK).
 *
 * OPEN:
 * In order to know when to OPEN_DOWNGRADE or CLOSE the state on the server,
 * we need to know how many files are open for reading or writing on a
 * given inode. This information too is stored here.
 *
 * LOCK: one nfs4_state (LOCK) to hold the lock stateid nfs4_state(OPEN)
 */
|
|
|
|
|
2010-07-01 23:49:11 +07:00
|
|
|
struct nfs4_lock_owner {
|
|
|
|
unsigned int lo_type;
|
|
|
|
#define NFS4_ANY_LOCK_TYPE (0U)
|
|
|
|
#define NFS4_FLOCK_LOCK_TYPE (1U << 0)
|
|
|
|
#define NFS4_POSIX_LOCK_TYPE (1U << 1)
|
|
|
|
union {
|
|
|
|
fl_owner_t posix_owner;
|
|
|
|
pid_t flock_owner;
|
|
|
|
} lo_u;
|
|
|
|
};
|
|
|
|
|
2005-06-23 00:16:21 +07:00
|
|
|
struct nfs4_lock_state {
|
|
|
|
struct list_head ls_locks; /* Other lock stateids */
|
2005-06-23 00:16:32 +07:00
|
|
|
struct nfs4_state * ls_state; /* Pointer to open state */
|
2005-06-23 00:16:21 +07:00
|
|
|
#define NFS_LOCK_INITIALIZED 1
|
|
|
|
int ls_flags;
|
2012-01-18 10:04:25 +07:00
|
|
|
int ls_id;
|
NFSv4: Add functions to order RPC calls
NFSv4 file state-changing functions such as OPEN, CLOSE, LOCK,... are all
labelled with "sequence identifiers" in order to prevent the server from
reordering RPC requests, as this could cause its file state to
become out of sync with the client.
Currently the NFS client code enforces this ordering locally using
semaphores to restrict access to structures until the RPC call is done.
This, of course, only works with synchronous RPC calls, since the
user process must first grab the semaphore.
By dropping semaphores, and instead teaching the RPC engine to hold
the RPC calls until they are ready to be sent, we can extend this
process to work nicely with asynchronous RPC calls too.
This patch adds a new list called "rpc_sequence" that defines the order
of the RPC calls to be sent. We add one such list for each state_owner.
When an RPC call is ready to be sent, it checks if it is top of the
rpc_sequence list. If so, it proceeds. If not, it goes back to sleep,
and loops until it hits top of the list.
Once the RPC call has completed, it can then bump the sequence id counter,
and remove itself from the rpc_sequence list, and then wake up the next
sleeper.
Note that the state_owner sequence ids and lock_owner sequence ids are
all indexed to the same rpc_sequence list, so OPEN, LOCK,... requests
are all ordered w.r.t. each other.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2005-10-19 04:20:12 +07:00
|
|
|
struct nfs_seqid_counter ls_seqid;
|
2008-01-11 04:07:54 +07:00
|
|
|
struct rpc_sequence ls_sequence;
|
2005-06-23 00:16:21 +07:00
|
|
|
nfs4_stateid ls_stateid;
|
|
|
|
atomic_t ls_count;
|
2010-07-01 23:49:11 +07:00
|
|
|
struct nfs4_lock_owner ls_owner;
|
2005-06-23 00:16:21 +07:00
|
|
|
};
|
|
|
|
|
|
|
|
/* bits for nfs4_state->flags */
enum {
	LK_STATE_IN_USE,
	NFS_DELEGATED_STATE,		/* Current stateid is delegation */
	NFS_O_RDONLY_STATE,		/* OPEN stateid has read-only state */
	NFS_O_WRONLY_STATE,		/* OPEN stateid has write-only state */
	NFS_O_RDWR_STATE,		/* OPEN stateid has read/write state */
	NFS_STATE_RECLAIM_REBOOT,	/* OPEN stateid server rebooted */
	NFS_STATE_RECLAIM_NOGRACE,	/* OPEN stateid needs to recover state */
	NFS_STATE_POSIX_LOCKS,		/* Posix locks are supported */
};
|
|
|
|
|
|
|
|
struct nfs4_state {
|
|
|
|
struct list_head open_states; /* List of states for the same state_owner */
|
|
|
|
struct list_head inode_states; /* List of states for the same inode */
|
|
|
|
struct list_head lock_states; /* List of subservient lock stateids */
|
|
|
|
|
|
|
|
struct nfs4_state_owner *owner; /* Pointer to the open owner */
|
|
|
|
struct inode *inode; /* Pointer to the inode */
|
|
|
|
|
|
|
|
unsigned long flags; /* Do we hold any locks? */
|
2005-06-23 00:16:32 +07:00
|
|
|
spinlock_t state_lock; /* Protects the lock_states list */
|
2005-06-23 00:16:21 +07:00
|
|
|
|
2007-07-09 21:45:42 +07:00
|
|
|
seqlock_t seqlock; /* Protects the stateid/open_stateid */
|
2007-07-06 05:07:55 +07:00
|
|
|
nfs4_stateid stateid; /* Current stateid: may be delegation */
|
|
|
|
nfs4_stateid open_stateid; /* OPEN stateid */
|
2005-06-23 00:16:21 +07:00
|
|
|
|
2007-07-09 21:45:42 +07:00
|
|
|
/* The following 3 fields are protected by owner->so_lock */
|
2007-07-06 05:07:55 +07:00
|
|
|
unsigned int n_rdonly; /* Number of read-only references */
|
|
|
|
unsigned int n_wronly; /* Number of write-only references */
|
|
|
|
unsigned int n_rdwr; /* Number of read/write references */
|
2008-12-24 03:21:56 +07:00
|
|
|
fmode_t state; /* State on the server (R,W, or RW) */
|
2005-06-23 00:16:21 +07:00
|
|
|
atomic_t count;
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
/*
 * Retry bookkeeping for NFSv4 operations.
 * NOTE(review): the retry loop that consumes these fields lives in
 * nfs4proc.c -- confirm exact semantics there.
 */
struct nfs4_exception {
	long timeout;			/* current retry back-off delay */
	int retry;			/* nonzero => retry the operation */
	struct nfs4_state *state;	/* state the failing operation used */
};
|
|
|
|
|
|
|
|
/*
 * State recovery operations; one instance per recovery type
 * (reboot vs. no-grace) per minor version.
 */
struct nfs4_state_recovery_ops {
	int owner_flag_bit;	/* NFS_OWNER_RECLAIM_* bit tested on owners */
	int state_flag_bit;	/* NFS_STATE_RECLAIM_* bit tested on states */
	int (*recover_open)(struct nfs4_state_owner *, struct nfs4_state *);
	int (*recover_lock)(struct nfs4_state *, struct file_lock *);
	int (*establish_clid)(struct nfs_client *, struct rpc_cred *);
	struct rpc_cred * (*get_clid_cred)(struct nfs_client *);
	int (*reclaim_complete)(struct nfs_client *);
};
|
|
|
|
|
2009-04-01 20:22:44 +07:00
|
|
|
/* Lease renewal operations; one instance per minor version. */
struct nfs4_state_maintenance_ops {
	int (*sched_state_renewal)(struct nfs_client *, struct rpc_cred *, unsigned);
	struct rpc_cred * (*get_state_renewal_cred_locked)(struct nfs_client *);
	int (*renew_lease)(struct nfs_client *, struct rpc_cred *);
};
|
|
|
|
|
2009-02-20 12:51:22 +07:00
|
|
|
extern const struct dentry_operations nfs4_dentry_operations;
|
2007-02-12 15:55:39 +07:00
|
|
|
extern const struct inode_operations nfs4_dir_inode_operations;
|
2005-06-23 00:16:22 +07:00
|
|
|
|
2005-06-23 00:16:21 +07:00
|
|
|
/* nfs4proc.c */
|
2010-04-17 03:43:06 +07:00
|
|
|
extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *);
|
|
|
|
extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *);
|
2009-12-05 03:52:24 +07:00
|
|
|
extern int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred);
|
2009-04-01 20:22:47 +07:00
|
|
|
extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *);
|
2009-12-05 03:52:24 +07:00
|
|
|
extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *);
|
2011-06-23 05:20:23 +07:00
|
|
|
extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc);
|
2006-06-09 20:34:19 +07:00
|
|
|
extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle);
|
2007-07-18 08:52:39 +07:00
|
|
|
extern int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
|
2006-06-09 20:34:23 +07:00
|
|
|
struct nfs4_fs_locations *fs_locations, struct page *page);
|
2010-07-01 23:49:01 +07:00
|
|
|
extern void nfs4_release_lockowner(const struct nfs4_lock_state *);
|
2010-12-09 18:35:25 +07:00
|
|
|
extern const struct xattr_handler *nfs4_xattr_handlers[];
|
2005-06-23 00:16:21 +07:00
|
|
|
|
2009-04-01 20:21:53 +07:00
|
|
|
#if defined(CONFIG_NFS_V4_1)
/* Return the session associated with this server's nfs_client. */
static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *server)
{
	struct nfs_client *clp = server->nfs_client;

	return clp->cl_session;
}

extern int nfs4_setup_sequence(const struct nfs_server *server,
		struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
		int cache_reply, struct rpc_task *task);
extern int nfs41_setup_sequence(struct nfs4_session *session,
		struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
		int cache_reply, struct rpc_task *task);
extern void nfs4_destroy_session(struct nfs4_session *session);
extern struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp);
extern int nfs4_proc_create_session(struct nfs_client *);
extern int nfs4_proc_destroy_session(struct nfs4_session *);
extern int nfs4_init_session(struct nfs_server *server);
extern int nfs4_proc_get_lease_time(struct nfs_client *clp,
		struct nfs_fsinfo *fsinfo);
extern int nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data,
		bool sync);

/* True when EXCHANGE_ID flags mark this client as a pNFS data server only. */
static inline bool
is_ds_only_client(struct nfs_client *clp)
{
	return (clp->cl_exchange_flags & EXCHGID4_FLAG_MASK_PNFS) ==
		EXCHGID4_FLAG_USE_PNFS_DS;
}

/* True when EXCHANGE_ID flags mark this client as a pNFS data server. */
static inline bool
is_ds_client(struct nfs_client *clp)
{
	return clp->cl_exchange_flags & EXCHGID4_FLAG_USE_PNFS_DS;
}
|
2009-04-01 20:22:15 +07:00
|
|
|
#else /* CONFIG_NFS_v4_1 */
|
2010-06-16 20:52:26 +07:00
|
|
|
/* Stubs used when NFSv4.1 support is compiled out. */
static inline struct nfs4_session *nfs4_get_session(const struct nfs_server *server)
{
	return NULL;
}

static inline int nfs4_setup_sequence(const struct nfs_server *server,
		struct nfs4_sequence_args *args, struct nfs4_sequence_res *res,
		int cache_reply, struct rpc_task *task)
{
	return 0;	/* no session => nothing to sequence */
}

static inline int nfs4_init_session(struct nfs_server *server)
{
	return 0;
}

static inline bool
is_ds_only_client(struct nfs_client *clp)
{
	return false;
}

static inline bool
is_ds_client(struct nfs_client *clp)
{
	return false;
}
|
2009-04-01 20:21:53 +07:00
|
|
|
#endif /* CONFIG_NFS_V4_1 */
|
2005-06-23 00:16:21 +07:00
|
|
|
|
2010-06-16 20:52:26 +07:00
|
|
|
extern const struct nfs4_minor_version_ops *nfs_v4_minor_ops[];
|
2009-04-01 20:22:44 +07:00
|
|
|
|
2005-06-23 00:16:21 +07:00
|
|
|
extern const u32 nfs4_fattr_bitmap[2];
|
|
|
|
extern const u32 nfs4_statfs_bitmap[2];
|
|
|
|
extern const u32 nfs4_pathconf_bitmap[2];
|
2011-07-31 07:52:37 +07:00
|
|
|
extern const u32 nfs4_fsinfo_bitmap[3];
|
2006-06-09 20:34:25 +07:00
|
|
|
extern const u32 nfs4_fs_locations_bitmap[2];
|
2005-06-23 00:16:21 +07:00
|
|
|
|
|
|
|
/* nfs4renewd.c */
|
2006-08-23 07:06:08 +07:00
|
|
|
extern void nfs4_schedule_state_renewal(struct nfs_client *);
|
2005-06-23 00:16:21 +07:00
|
|
|
extern void nfs4_renewd_prepare_shutdown(struct nfs_server *);
|
2006-08-23 07:06:08 +07:00
|
|
|
extern void nfs4_kill_renewd(struct nfs_client *);
|
2006-11-22 21:55:48 +07:00
|
|
|
extern void nfs4_renew_state(struct work_struct *);
|
2005-06-23 00:16:21 +07:00
|
|
|
|
|
|
|
/* nfs4state.c */
|
2009-04-01 20:22:46 +07:00
|
|
|
struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp);
|
2008-12-24 03:21:41 +07:00
|
|
|
struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp);
|
2009-04-01 20:22:46 +07:00
|
|
|
#if defined(CONFIG_NFS_V4_1)
|
|
|
|
struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp);
|
2009-04-01 20:22:49 +07:00
|
|
|
struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp);
|
2011-03-10 04:00:53 +07:00
|
|
|
extern void nfs4_schedule_session_recovery(struct nfs4_session *);
|
|
|
|
#else
|
|
|
|
static inline void nfs4_schedule_session_recovery(struct nfs4_session *session)
|
|
|
|
{
|
|
|
|
}
|
2009-04-01 20:22:46 +07:00
|
|
|
#endif /* CONFIG_NFS_V4_1 */
|
2005-06-23 00:16:21 +07:00
|
|
|
|
2012-01-18 10:04:24 +07:00
|
|
|
extern struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *, gfp_t);
|
2005-06-23 00:16:21 +07:00
|
|
|
extern void nfs4_put_state_owner(struct nfs4_state_owner *);
|
NFS: Cache state owners after files are closed
Servers have a finite amount of memory to store NFSv4 open and lock
owners. Moreover, servers may have a difficult time determining when
they can reap their state owner table, thanks to gray areas in the
NFSv4 protocol specification. Thus clients should be careful to reuse
state owners when possible.
Currently Linux is not too careful. When a user has closed all her
files on one mount point, the state owner's reference count goes to
zero, and it is released. The next OPEN allocates a new one. A
workload that serially opens and closes files can run through a large
number of open owners this way.
When a state owner's reference count goes to zero, slap it onto a free
list for that nfs_server, with an expiry time. Garbage collect before
looking for a state owner. This makes state owners for active users
available for re-use.
Now that there can be unused state owners remaining at umount time,
purge the state owner free list when a server is destroyed. Also be
sure not to reclaim unused state owners during state recovery.
This change has benefits for the client as well. For some workloads,
this approach drops the number of OPEN_CONFIRM calls from the same as
the number of OPEN calls, down to just one. This reduces wire traffic
and thus open(2) latency. Before this patch, untarring a kernel
source tarball shows the OPEN_CONFIRM call counter steadily increasing
through the test. With the patch, the OPEN_CONFIRM count remains at 1
throughout the entire untar.
As long as the expiry time is kept short, I don't think garbage
collection should be terribly expensive, although it does bounce the
clp->cl_lock around a bit.
[ At some point we should rationalize the use of the nfs_server
->destroy method. ]
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
[Trond: Fixed a garbage collection race and a few efficiency issues]
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2011-12-07 04:13:48 +07:00
|
|
|
extern void nfs4_purge_state_owners(struct nfs_server *);
|
2005-06-23 00:16:21 +07:00
|
|
|
extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
|
|
|
|
extern void nfs4_put_open_state(struct nfs4_state *);
|
2011-06-23 05:20:23 +07:00
|
|
|
extern void nfs4_close_state(struct nfs4_state *, fmode_t);
|
|
|
|
extern void nfs4_close_sync(struct nfs4_state *, fmode_t);
|
2008-12-24 03:21:56 +07:00
|
|
|
extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t);
|
2011-03-10 04:00:53 +07:00
|
|
|
extern void nfs4_schedule_lease_recovery(struct nfs_client *);
|
2008-12-24 03:21:50 +07:00
|
|
|
extern void nfs4_schedule_state_manager(struct nfs_client *);
|
2011-08-25 02:07:37 +07:00
|
|
|
extern void nfs4_schedule_path_down_recovery(struct nfs_client *clp);
|
2011-03-10 04:00:53 +07:00
|
|
|
extern void nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs4_state *);
|
2009-12-06 01:46:14 +07:00
|
|
|
extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
|
2010-01-21 04:06:27 +07:00
|
|
|
extern void nfs41_handle_recall_slot(struct nfs_client *clp);
|
2011-06-01 06:05:47 +07:00
|
|
|
extern void nfs41_handle_server_scope(struct nfs_client *,
|
|
|
|
struct server_scope **);
|
2005-10-19 04:20:15 +07:00
|
|
|
extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
|
2005-06-23 00:16:32 +07:00
|
|
|
extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
|
2010-07-01 23:49:11 +07:00
|
|
|
extern void nfs4_copy_stateid(nfs4_stateid *, struct nfs4_state *, fl_owner_t, pid_t);
|
2005-06-23 00:16:21 +07:00
|
|
|
|
2010-05-13 23:51:01 +07:00
|
|
|
extern struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask);
|
NFSv4: Add functions to order RPC calls
NFSv4 file state-changing functions such as OPEN, CLOSE, LOCK,... are all
labelled with "sequence identifiers" in order to prevent the server from
reordering RPC requests, as this could cause its file state to
become out of sync with the client.
Currently the NFS client code enforces this ordering locally using
semaphores to restrict access to structures until the RPC call is done.
This, of course, only works with synchronous RPC calls, since the
user process must first grab the semaphore.
By dropping semaphores, and instead teaching the RPC engine to hold
the RPC calls until they are ready to be sent, we can extend this
process to work nicely with asynchronous RPC calls too.
This patch adds a new list called "rpc_sequence" that defines the order
of the RPC calls to be sent. We add one such list for each state_owner.
When an RPC call is ready to be sent, it checks if it is top of the
rpc_sequence list. If so, it proceeds. If not, it goes back to sleep,
and loops until it hits top of the list.
Once the RPC call has completed, it can then bump the sequence id counter,
and remove itself from the rpc_sequence list, and then wake up the next
sleeper.
Note that the state_owner sequence ids and lock_owner sequence ids are
all indexed to the same rpc_sequence list, so OPEN, LOCK,... requests
are all ordered w.r.t. each other.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2005-10-19 04:20:12 +07:00
|
|
|
extern int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task);
|
|
|
|
extern void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid);
|
|
|
|
extern void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid);
|
2009-12-16 02:47:36 +07:00
|
|
|
extern void nfs_release_seqid(struct nfs_seqid *seqid);
|
NFSv4: Add functions to order RPC calls
NFSv4 file state-changing functions such as OPEN, CLOSE, LOCK,... are all
labelled with "sequence identifiers" in order to prevent the server from
reordering RPC requests, as this could cause its file state to
become out of sync with the client.
Currently the NFS client code enforces this ordering locally using
semaphores to restrict access to structures until the RPC call is done.
This, of course, only works with synchronous RPC calls, since the
user process must first grab the semaphore.
By dropping semaphores, and instead teaching the RPC engine to hold
the RPC calls until they are ready to be sent, we can extend this
process to work nicely with asynchronous RPC calls too.
This patch adds a new list called "rpc_sequence" that defines the order
of the RPC calls to be sent. We add one such list for each state_owner.
When an RPC call is ready to be sent, it checks if it is top of the
rpc_sequence list. If so, it proceeds. If not, it goes back to sleep,
and loops until it hits top of the list.
Once the RPC call has completed, it can then bump the sequence id counter,
and remove itself from the rpc_sequence list, and then wake up the next
sleeper.
Note that the state_owner sequence ids and lock_owner sequence ids are
all indexed to the same rpc_sequence list, so OPEN, LOCK,... requests
are all ordered w.r.t. each other.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
2005-10-19 04:20:12 +07:00
|
|
|
extern void nfs_free_seqid(struct nfs_seqid *seqid);
|
|
|
|
|
2005-06-23 00:16:21 +07:00
|
|
|
extern const nfs4_stateid zero_stateid;
|
|
|
|
|
|
|
|
/* nfs4xdr.c */
|
|
|
|
extern struct rpc_procinfo nfs4_procedures[];
|
|
|
|
|
|
|
|
struct nfs4_mount_data;
|
|
|
|
|
|
|
|
/* callback_xdr.c */
|
|
|
|
extern struct svc_version nfs4_callback_version1;
|
2009-12-06 01:19:01 +07:00
|
|
|
extern struct svc_version nfs4_callback_version4;
|
2005-06-23 00:16:21 +07:00
|
|
|
|
|
|
|
#else
|
|
|
|
|
2011-06-23 05:20:23 +07:00
|
|
|
/* NFSv4 disabled: closing NFSv4 state is a no-op. */
#define nfs4_close_state(a, b) do { } while (0)
#define nfs4_close_sync(a, b) do { } while (0)
|
2005-06-23 00:16:21 +07:00
|
|
|
|
|
|
|
#endif /* CONFIG_NFS_V4 */
|
|
|
|
#endif /* __LINUX_FS_NFS_NFS4_FS_H */
|