2005-04-17 05:20:36 +07:00
|
|
|
/*
|
|
|
|
* include/linux/sunrpc/cache.h
|
|
|
|
*
|
|
|
|
* Generic code for various authentication-related caches
|
|
|
|
* used by sunrpc clients and servers.
|
|
|
|
*
|
|
|
|
* Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
|
|
|
|
*
|
|
|
|
* Released under terms in GPL version 2. See COPYING.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
|
|
|
#ifndef _LINUX_SUNRPC_CACHE_H_
|
|
|
|
#define _LINUX_SUNRPC_CACHE_H_
|
|
|
|
|
2011-01-10 13:18:25 +07:00
|
|
|
#include <linux/kref.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
#include <linux/slab.h>
|
2011-07-27 06:09:06 +07:00
|
|
|
#include <linux/atomic.h>
|
2005-04-17 05:20:36 +07:00
|
|
|
#include <linux/proc_fs.h>
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Each cache requires:
|
|
|
|
* - A 'struct cache_detail' which contains information specific to the cache
|
|
|
|
* for common code to use.
|
|
|
|
* - An item structure that must contain a "struct cache_head"
|
|
|
|
* - A lookup function defined using DefineCacheLookup
|
|
|
|
 * - A 'put' function that can release a cache item. It will only
 *   be called after cache_put has succeeded, so there is a guarantee
 *   that there are no references.
|
|
|
|
* - A function to calculate a hash of an item's key.
|
|
|
|
*
|
|
|
|
* as well as assorted code fragments (e.g. compare keys) and numbers
|
|
|
|
* (e.g. hash size, goal_age, etc).
|
|
|
|
*
|
|
|
|
* Each cache must be registered so that it can be cleaned regularly.
|
|
|
|
* When the cache is unregistered, it is flushed completely.
|
|
|
|
*
|
2011-03-31 08:57:33 +07:00
|
|
|
* Entries have a ref count and a 'hashed' flag which counts the existence
|
2005-04-17 05:20:36 +07:00
|
|
|
* in the hash table.
|
|
|
|
* We only expire entries when refcount is zero.
|
2011-03-31 08:57:33 +07:00
|
|
|
 * Existence in the cache is counted in the refcount.
|
2005-04-17 05:20:36 +07:00
|
|
|
*/
|
|
|
|
|
|
|
|
/* Every cache item has a common header that is used
|
|
|
|
* for expiring and refreshing entries.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
struct cache_head {
|
2015-07-27 10:10:15 +07:00
|
|
|
struct hlist_node cache_list;
|
2005-04-17 05:20:36 +07:00
|
|
|
time_t expiry_time; /* After time time, don't use the data */
|
2015-10-16 04:59:08 +07:00
|
|
|
time_t last_refresh; /* If CACHE_PENDING, this is when upcall was
|
|
|
|
* sent, else this is when update was
|
|
|
|
* received, though it is alway set to
|
|
|
|
* be *after* ->flush_time.
|
2005-04-17 05:20:36 +07:00
|
|
|
*/
|
2006-03-27 16:15:09 +07:00
|
|
|
struct kref ref;
|
2005-04-17 05:20:36 +07:00
|
|
|
unsigned long flags;
|
|
|
|
};
|
|
|
|
#define CACHE_VALID 0 /* Entry contains valid data */
|
|
|
|
#define CACHE_NEGATIVE 1 /* Negative entry - there is no match for the key */
|
|
|
|
#define CACHE_PENDING 2 /* An upcall has been sent but no reply received yet*/
|
2013-06-13 09:53:42 +07:00
|
|
|
#define CACHE_CLEANED 3 /* Entry has been cleaned from cache */
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
#define CACHE_NEW_EXPIRY 120 /* keep new things pending confirmation for 120 seconds */
|
|
|
|
|
|
|
|
struct cache_detail {
|
2005-09-07 05:17:08 +07:00
|
|
|
struct module * owner;
|
2005-04-17 05:20:36 +07:00
|
|
|
int hash_size;
|
2015-07-27 10:10:15 +07:00
|
|
|
struct hlist_head * hash_table;
|
2018-10-01 21:41:52 +07:00
|
|
|
spinlock_t hash_lock;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
char *name;
|
2006-03-27 16:15:09 +07:00
|
|
|
void (*cache_put)(struct kref *);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2009-08-10 02:14:29 +07:00
|
|
|
int (*cache_upcall)(struct cache_detail *,
|
|
|
|
struct cache_head *);
|
|
|
|
|
2013-02-04 18:02:45 +07:00
|
|
|
void (*cache_request)(struct cache_detail *cd,
|
|
|
|
struct cache_head *ch,
|
|
|
|
char **bpp, int *blen);
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
int (*cache_parse)(struct cache_detail *,
|
|
|
|
char *buf, int len);
|
|
|
|
|
|
|
|
int (*cache_show)(struct seq_file *m,
|
|
|
|
struct cache_detail *cd,
|
|
|
|
struct cache_head *h);
|
2009-08-10 02:14:26 +07:00
|
|
|
void (*warn_no_listener)(struct cache_detail *cd,
|
|
|
|
int has_died);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2006-03-27 16:15:02 +07:00
|
|
|
struct cache_head * (*alloc)(void);
|
|
|
|
int (*match)(struct cache_head *orig, struct cache_head *new);
|
|
|
|
void (*init)(struct cache_head *orig, struct cache_head *new);
|
|
|
|
void (*update)(struct cache_head *orig, struct cache_head *new);
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/* fields below this comment are for internal use
|
|
|
|
* and should not be touched by cache owners
|
|
|
|
*/
|
2015-10-16 04:59:08 +07:00
|
|
|
time_t flush_time; /* flush all cache items with
|
|
|
|
* last_refresh at or earlier
|
|
|
|
* than this. last_refresh
|
|
|
|
* is never set at or earlier
|
|
|
|
* than this.
|
|
|
|
*/
|
2005-04-17 05:20:36 +07:00
|
|
|
struct list_head others;
|
|
|
|
time_t nextcheck;
|
|
|
|
int entries;
|
|
|
|
|
|
|
|
/* fields for communication over channel */
|
|
|
|
struct list_head queue;
|
|
|
|
|
|
|
|
atomic_t readers; /* how many time is /chennel open */
|
|
|
|
time_t last_close; /* if no readers, when did last close */
|
|
|
|
time_t last_warn; /* when we last warned about no readers */
|
2009-08-10 02:14:29 +07:00
|
|
|
|
|
|
|
union {
|
2017-02-07 20:47:16 +07:00
|
|
|
struct proc_dir_entry *procfs;
|
|
|
|
struct dentry *pipefs;
|
|
|
|
};
|
2012-01-20 00:42:21 +07:00
|
|
|
struct net *net;
|
2005-04-17 05:20:36 +07:00
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
/* this must be embedded in any request structure that
|
|
|
|
* identifies an object that will want a callback on
|
|
|
|
* a cache fill
|
|
|
|
*/
|
|
|
|
struct cache_req {
|
|
|
|
struct cache_deferred_req *(*defer)(struct cache_req *req);
|
sunrpc/cache: allow threads to block while waiting for cache update.
The current practice of waiting for cache updates by queueing the
whole request to be retried has (at least) two problems.
1/ With NFSv4, requests can be quite complex and re-trying a whole
request when a later part fails should only be a last-resort, not a
normal practice.
2/ Large requests, and in particular any 'write' request, will not be
queued by the current code and doing so would be undesirable.
In many cases only a very sort wait is needed before the cache gets
valid data.
So, providing the underlying transport permits it by setting
->thread_wait,
arrange to wait briefly for an upcall to be completed (as reflected in
the clearing of CACHE_PENDING).
If the short wait was not long enough and CACHE_PENDING is still set,
fall back on the old approach.
The 'thread_wait' value is set to 5 seconds when there are spare
threads, and 1 second when there are no spare threads.
These values are probably much higher than needed, but will ensure
some forward progress.
Note that as we only request an update for a non-valid item, and as
non-valid items are updated in place it is extremely unlikely that
cache_check will return -ETIMEDOUT. Normally cache_defer_req will
sleep for a short while and then find that the item is_valid.
Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
2010-08-12 14:04:06 +07:00
|
|
|
int thread_wait; /* How long (jiffies) we can block the
|
|
|
|
* current thread to wait for updates.
|
|
|
|
*/
|
2005-04-17 05:20:36 +07:00
|
|
|
};
|
|
|
|
/* this must be embedded in a deferred_request that is being
|
|
|
|
* delayed awaiting cache-fill
|
|
|
|
*/
|
|
|
|
struct cache_deferred_req {
|
2010-08-12 14:04:08 +07:00
|
|
|
struct hlist_node hash; /* on hash chain */
|
2005-04-17 05:20:36 +07:00
|
|
|
struct list_head recent; /* on fifo */
|
|
|
|
struct cache_head *item; /* cache item we wait on */
|
|
|
|
void *owner; /* we might need to discard all defered requests
|
|
|
|
* owned by someone */
|
|
|
|
void (*revisit)(struct cache_deferred_req *req,
|
|
|
|
int too_many);
|
|
|
|
};
|
|
|
|
|
2013-06-13 09:53:42 +07:00
|
|
|
/*
|
|
|
|
* timestamps kept in the cache are expressed in seconds
|
|
|
|
* since boot. This is the best for measuring differences in
|
|
|
|
* real time.
|
|
|
|
*/
|
|
|
|
static inline time_t seconds_since_boot(void)
|
|
|
|
{
|
|
|
|
struct timespec boot;
|
|
|
|
getboottime(&boot);
|
|
|
|
return get_seconds() - boot.tv_sec;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline time_t convert_to_wallclock(time_t sinceboot)
|
|
|
|
{
|
|
|
|
struct timespec boot;
|
|
|
|
getboottime(&boot);
|
|
|
|
return boot.tv_sec + sinceboot;
|
|
|
|
}
|
2006-03-27 16:15:01 +07:00
|
|
|
|
2009-08-10 02:14:30 +07:00
|
|
|
extern const struct file_operations cache_file_operations_pipefs;
|
|
|
|
extern const struct file_operations content_file_operations_pipefs;
|
|
|
|
extern const struct file_operations cache_flush_operations_pipefs;
|
|
|
|
|
2018-10-03 23:01:22 +07:00
|
|
|
extern struct cache_head *
|
|
|
|
sunrpc_cache_lookup_rcu(struct cache_detail *detail,
|
|
|
|
struct cache_head *key, int hash);
|
2006-03-27 16:15:02 +07:00
|
|
|
extern struct cache_head *
|
|
|
|
sunrpc_cache_update(struct cache_detail *detail,
|
|
|
|
struct cache_head *new, struct cache_head *old, int hash);
|
|
|
|
|
2009-08-10 02:14:29 +07:00
|
|
|
extern int
|
2013-02-04 18:02:55 +07:00
|
|
|
sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h);
|
2009-08-10 02:14:29 +07:00
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
extern void cache_clean_deferred(void *owner);
|
|
|
|
|
|
|
|
static inline struct cache_head *cache_get(struct cache_head *h)
|
|
|
|
{
|
2006-03-27 16:15:09 +07:00
|
|
|
kref_get(&h->ref);
|
2005-04-17 05:20:36 +07:00
|
|
|
return h;
|
|
|
|
}
|
|
|
|
|
2018-10-03 23:01:22 +07:00
|
|
|
static inline struct cache_head *cache_get_rcu(struct cache_head *h)
|
|
|
|
{
|
|
|
|
if (kref_get_unless_zero(&h->ref))
|
|
|
|
return h;
|
|
|
|
return NULL;
|
|
|
|
}
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2006-03-27 16:15:09 +07:00
|
|
|
static inline void cache_put(struct cache_head *h, struct cache_detail *cd)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
2016-11-14 23:29:48 +07:00
|
|
|
if (kref_read(&h->ref) <= 2 &&
|
2005-04-17 05:20:36 +07:00
|
|
|
h->expiry_time < cd->nextcheck)
|
|
|
|
cd->nextcheck = h->expiry_time;
|
2006-03-27 16:15:09 +07:00
|
|
|
kref_put(&h->ref, cd->cache_put);
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
SUNRPC/Cache: Always treat the invalid cache as unexpired
When the first time pynfs runs after rpc/nfsd startup, always get the warning,
"Got error: Connection closed"
I found the problem is caused by,
1. A new startup of nfsd, rpc.mountd, etc,
2. A rpc request from client (pynfs test, or normal mounting),
3. An ip_map cache is created but invalid, so upcall to rpc.mountd,
4. rpc.mountd process the ip_map upcall, before write the valid data to nfsd,
do auth_reload(), and check_useipaddr(),
5. For the first time, old_use_ipaddr = -1, it causes rpc.mountd do write_flush that doing cache_clean,
6. The ip_map cache will be treat as expired and clean,
7. When rpc.mountd write the valid data to nfsd, a new ip_map is created
and updated, the cache_check of old ip_map(doing the upcall) will
return -ETIMEDOUT.
8. RPC layer return SVC_CLOSE and close the xprt after commit 4d712ef1db05
"svcauth_gss: Close connection when dropping an incoming message"
NeilBrown suggest in another email,
"If CACHE_VALID is not set, then there is no data in the cache item,
so there is nothing to expire. So it would be nice if cache items that
don't have CACHE_VALID are never treated as expired."
v3, change the order of the two patches
v2, change the checking of CACHE_PENDING to CACHE_VALID
Reviewed-by: NeilBrown <neilb@suse.com>
Signed-off-by: Kinglong Mee <kinglongmee@gmail.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
2017-02-08 08:54:42 +07:00
|
|
|
static inline bool cache_is_expired(struct cache_detail *detail, struct cache_head *h)
|
2006-10-04 16:15:50 +07:00
|
|
|
{
|
SUNRPC/Cache: Always treat the invalid cache as unexpired
When the first time pynfs runs after rpc/nfsd startup, always get the warning,
"Got error: Connection closed"
I found the problem is caused by,
1. A new startup of nfsd, rpc.mountd, etc,
2. A rpc request from client (pynfs test, or normal mounting),
3. An ip_map cache is created but invalid, so upcall to rpc.mountd,
4. rpc.mountd process the ip_map upcall, before write the valid data to nfsd,
do auth_reload(), and check_useipaddr(),
5. For the first time, old_use_ipaddr = -1, it causes rpc.mountd do write_flush that doing cache_clean,
6. The ip_map cache will be treat as expired and clean,
7. When rpc.mountd write the valid data to nfsd, a new ip_map is created
and updated, the cache_check of old ip_map(doing the upcall) will
return -ETIMEDOUT.
8. RPC layer return SVC_CLOSE and close the xprt after commit 4d712ef1db05
"svcauth_gss: Close connection when dropping an incoming message"
NeilBrown suggest in another email,
"If CACHE_VALID is not set, then there is no data in the cache item,
so there is nothing to expire. So it would be nice if cache items that
don't have CACHE_VALID are never treated as expired."
v3, change the order of the two patches
v2, change the checking of CACHE_PENDING to CACHE_VALID
Reviewed-by: NeilBrown <neilb@suse.com>
Signed-off-by: Kinglong Mee <kinglongmee@gmail.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
2017-02-08 08:54:42 +07:00
|
|
|
if (!test_bit(CACHE_VALID, &h->flags))
|
|
|
|
return false;
|
|
|
|
|
2013-06-13 09:53:42 +07:00
|
|
|
return (h->expiry_time < seconds_since_boot()) ||
|
2015-10-16 04:59:08 +07:00
|
|
|
(detail->flush_time >= h->last_refresh);
|
2006-10-04 16:15:50 +07:00
|
|
|
}
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
extern int cache_check(struct cache_detail *detail,
|
|
|
|
struct cache_head *h, struct cache_req *rqstp);
|
|
|
|
extern void cache_flush(void);
|
|
|
|
extern void cache_purge(struct cache_detail *detail);
|
|
|
|
#define NEVER (0x7FFFFFFF)
|
sunrpc: make the cache cleaner workqueue deferrable
This patch makes the cache_cleaner workqueue deferrable, to prevent
unnecessary system wake-ups, which is very important for embedded
battery-powered devices.
do_cache_clean() is called every 30 seconds at the moment, and often
makes the system wake up from its power-save sleep state. With this
change, when the workqueue uses a deferrable timer, the
do_cache_clean() invocation will be delayed and combined with the
closest "real" wake-up. This improves the power consumption situation.
Note, I tried to create a DECLARE_DELAYED_WORK_DEFERRABLE() helper
macro, similar to DECLARE_DELAYED_WORK(), but failed because of the
way the timer wheel core stores the deferrable flag (it is the
LSBit in the time->base pointer). My attempt to define a static
variable with this bit set ended up with the "initializer element is
not constant" error.
Thus, I have to use run-time initialization, so I created a new
cache_initialize() function which is called once when sunrpc is
being initialized.
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
2010-07-01 22:05:56 +07:00
|
|
|
extern void __init cache_initialize(void);
|
2010-09-27 17:00:15 +07:00
|
|
|
extern int cache_register_net(struct cache_detail *cd, struct net *net);
|
|
|
|
extern void cache_unregister_net(struct cache_detail *cd, struct net *net);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2017-10-17 23:14:23 +07:00
|
|
|
extern struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net);
|
2012-01-20 00:42:21 +07:00
|
|
|
extern void cache_destroy_net(struct cache_detail *cd, struct net *net);
|
|
|
|
|
2011-11-25 21:12:40 +07:00
|
|
|
extern void sunrpc_init_cache_detail(struct cache_detail *cd);
|
|
|
|
extern void sunrpc_destroy_cache_detail(struct cache_detail *cd);
|
2009-08-10 02:14:30 +07:00
|
|
|
extern int sunrpc_cache_register_pipefs(struct dentry *parent, const char *,
|
2011-07-25 11:35:13 +07:00
|
|
|
umode_t, struct cache_detail *);
|
2009-08-10 02:14:30 +07:00
|
|
|
extern void sunrpc_cache_unregister_pipefs(struct cache_detail *);
|
2016-12-23 00:38:06 +07:00
|
|
|
extern void sunrpc_cache_unhash(struct cache_detail *, struct cache_head *);
|
2009-08-10 02:14:30 +07:00
|
|
|
|
2015-07-27 10:09:42 +07:00
|
|
|
/* Must store cache_detail in seq_file->private if using next three functions */
|
2018-10-03 23:01:22 +07:00
|
|
|
extern void *cache_seq_start_rcu(struct seq_file *file, loff_t *pos);
|
|
|
|
extern void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos);
|
|
|
|
extern void cache_seq_stop_rcu(struct seq_file *file, void *p);
|
2015-07-27 10:09:42 +07:00
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
extern void qword_add(char **bpp, int *lp, char *str);
|
|
|
|
extern void qword_addhex(char **bpp, int *lp, char *buf, int blen);
|
|
|
|
extern int qword_get(char **bpp, char *dest, int bufsize);
|
|
|
|
|
|
|
|
static inline int get_int(char **bpp, int *anint)
|
|
|
|
{
|
|
|
|
char buf[50];
|
2012-11-14 22:48:05 +07:00
|
|
|
char *ep;
|
|
|
|
int rv;
|
2012-07-07 02:31:56 +07:00
|
|
|
int len = qword_get(bpp, buf, sizeof(buf));
|
|
|
|
|
|
|
|
if (len < 0)
|
|
|
|
return -EINVAL;
|
|
|
|
if (len == 0)
|
|
|
|
return -ENOENT;
|
|
|
|
|
2012-11-14 22:48:05 +07:00
|
|
|
rv = simple_strtol(buf, &ep, 0);
|
|
|
|
if (*ep)
|
2012-07-07 02:31:56 +07:00
|
|
|
return -EINVAL;
|
|
|
|
|
2012-11-14 22:48:05 +07:00
|
|
|
*anint = rv;
|
2005-04-17 05:20:36 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-06-13 03:54:16 +07:00
|
|
|
static inline int get_uint(char **bpp, unsigned int *anint)
|
|
|
|
{
|
|
|
|
char buf[50];
|
|
|
|
int len = qword_get(bpp, buf, sizeof(buf));
|
|
|
|
|
|
|
|
if (len < 0)
|
|
|
|
return -EINVAL;
|
|
|
|
if (len == 0)
|
|
|
|
return -ENOENT;
|
|
|
|
|
|
|
|
if (kstrtouint(buf, 0, anint))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2013-08-16 00:46:40 +07:00
|
|
|
static inline int get_time(char **bpp, time_t *time)
|
|
|
|
{
|
|
|
|
char buf[50];
|
|
|
|
long long ll;
|
|
|
|
int len = qword_get(bpp, buf, sizeof(buf));
|
|
|
|
|
|
|
|
if (len < 0)
|
|
|
|
return -EINVAL;
|
|
|
|
if (len == 0)
|
|
|
|
return -ENOENT;
|
|
|
|
|
|
|
|
if (kstrtoll(buf, 0, &ll))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
*time = (time_t)ll;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
static inline time_t get_expiry(char **bpp)
|
|
|
|
{
|
2013-08-16 00:46:40 +07:00
|
|
|
time_t rv;
|
2010-08-12 13:55:22 +07:00
|
|
|
struct timespec boot;
|
|
|
|
|
2013-08-16 00:46:40 +07:00
|
|
|
if (get_time(bpp, &rv))
|
2005-04-17 05:20:36 +07:00
|
|
|
return 0;
|
|
|
|
if (rv < 0)
|
|
|
|
return 0;
|
2010-08-12 13:55:22 +07:00
|
|
|
getboottime(&boot);
|
|
|
|
return rv - boot.tv_sec;
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
#endif /* _LINUX_SUNRPC_CACHE_H_ */
|