dcache: convert to use new lru list infrastructure

[glommer@openvz.org: don't reintroduce double decrement of nr_unused_dentries, adapted for new LRU return codes]
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Glauber Costa <glommer@openvz.org>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Author: Dave Chinner <dchinner@redhat.com>
Date: 2013-08-28 10:18:00 +10:00
Committed by: Al Viro <viro@zeniv.linux.org.uk>
Commit: f604156751
Parent: d38fa6986e
3 changed files with 90 additions and 106 deletions
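
For orientation, the list_lru surface this patch leans on looks roughly as follows. This is a sketch inferred from the call sites in the hunks below, not quoted from <linux/list_lru.h>:

struct list_lru;

/*
 * Both return true only when the item's list membership actually
 * changed; callers use that to keep their counters in step with the
 * list state.
 */
bool list_lru_add(struct list_lru *lru, struct list_head *item);
bool list_lru_del(struct list_lru *lru, struct list_head *item);

unsigned long list_lru_count(struct list_lru *lru);

/* Verdicts an isolate callback hands back to the generic walker. */
enum lru_status {
	LRU_REMOVED,	/* callback removed the item from the list */
	LRU_ROTATE,	/* item was referenced; rotate it to the tail */
	LRU_SKIP,	/* item could not be locked; leave it alone */
};

typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
					    spinlock_t *lru_lock, void *cb_arg);

long list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
		   void *cb_arg, unsigned long nr_to_walk);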

fs/dcache.c

@@ -37,6 +37,7 @@
 #include <linux/rculist_bl.h>
 #include <linux/prefetch.h>
 #include <linux/ratelimit.h>
+#include <linux/list_lru.h>
 #include "internal.h"
 #include "mount.h"
@@ -356,28 +357,17 @@ static void dentry_unlink_inode(struct dentry * dentry)
 }

 /*
- * dentry_lru_(add|del|move_list) must be called with d_lock held.
+ * dentry_lru_(add|del)_list) must be called with d_lock held.
  */
 static void dentry_lru_add(struct dentry *dentry)
 {
 	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST))) {
-		spin_lock(&dentry->d_sb->s_dentry_lru_lock);
+		if (list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru))
+			this_cpu_inc(nr_dentry_unused);
 		dentry->d_flags |= DCACHE_LRU_LIST;
-		list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
-		dentry->d_sb->s_nr_dentry_unused++;
-		this_cpu_inc(nr_dentry_unused);
-		spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
 	}
 }

-static void __dentry_lru_del(struct dentry *dentry)
-{
-	list_del_init(&dentry->d_lru);
-	dentry->d_flags &= ~DCACHE_LRU_LIST;
-	dentry->d_sb->s_nr_dentry_unused--;
-	this_cpu_dec(nr_dentry_unused);
-}
-
 /*
  * Remove a dentry with references from the LRU.
  *
@@ -393,27 +383,9 @@ static void dentry_lru_del(struct dentry *dentry)
 		return;
 	}

-	if (!list_empty(&dentry->d_lru)) {
-		spin_lock(&dentry->d_sb->s_dentry_lru_lock);
-		__dentry_lru_del(dentry);
-		spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
-	}
-}
-
-static void dentry_lru_move_list(struct dentry *dentry, struct list_head *list)
-{
-	BUG_ON(dentry->d_flags & DCACHE_SHRINK_LIST);
-
-	spin_lock(&dentry->d_sb->s_dentry_lru_lock);
-	if (list_empty(&dentry->d_lru)) {
-		dentry->d_flags |= DCACHE_LRU_LIST;
-		list_add_tail(&dentry->d_lru, list);
-	} else {
-		list_move_tail(&dentry->d_lru, list);
-		dentry->d_sb->s_nr_dentry_unused--;
+	if (list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru))
 		this_cpu_dec(nr_dentry_unused);
-	}
-	spin_unlock(&dentry->d_sb->s_dentry_lru_lock);
+	dentry->d_flags &= ~DCACHE_LRU_LIST;
 }

 /**
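
Note how both dentry_lru_add() and dentry_lru_del() now condition the percpu accounting on the boolean result; this is what the bracketed note about the double decrement of nr_unused_dentries refers to. A minimal sketch of the hazard, with a hypothetical repeated removal:

/*
 * Hypothetical back-to-back removal: only the call that actually
 * takes the dentry off the LRU touches nr_dentry_unused, so the
 * counter cannot be decremented twice for one dentry.
 */
if (list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru))
	this_cpu_dec(nr_dentry_unused);		/* removed: counter moves */
if (list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru))
	this_cpu_dec(nr_dentry_unused);		/* not on the list: no-op */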
@@ -901,12 +873,72 @@ static void shrink_dentry_list(struct list_head *list)
 	rcu_read_unlock();
 }

+static enum lru_status
+dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
+{
+	struct list_head *freeable = arg;
+	struct dentry	*dentry = container_of(item, struct dentry, d_lru);
+
+	/*
+	 * we are inverting the lru lock/dentry->d_lock here,
+	 * so use a trylock. If we fail to get the lock, just skip
+	 * it
+	 */
+	if (!spin_trylock(&dentry->d_lock))
+		return LRU_SKIP;
+
+	/*
+	 * Referenced dentries are still in use. If they have active
+	 * counts, just remove them from the LRU. Otherwise give them
+	 * another pass through the LRU.
+	 */
+	if (dentry->d_lockref.count) {
+		list_del_init(&dentry->d_lru);
+		spin_unlock(&dentry->d_lock);
+		return LRU_REMOVED;
+	}
+
+	if (dentry->d_flags & DCACHE_REFERENCED) {
+		dentry->d_flags &= ~DCACHE_REFERENCED;
+		spin_unlock(&dentry->d_lock);
+
+		/*
+		 * The list move itself will be made by the common LRU code. At
+		 * this point, we've dropped the dentry->d_lock but keep the
+		 * lru lock. This is safe to do, since every list movement is
+		 * protected by the lru lock even if both locks are held.
+		 *
+		 * This is guaranteed by the fact that all LRU management
+		 * functions are intermediated by the LRU API calls like
+		 * list_lru_add and list_lru_del. List movement in this file
+		 * only ever occur through this functions or through callbacks
+		 * like this one, that are called from the LRU API.
+		 *
+		 * The only exceptions to this are functions like
+		 * shrink_dentry_list, and code that first checks for the
+		 * DCACHE_SHRINK_LIST flag. Those are guaranteed to be
+		 * operating only with stack provided lists after they are
+		 * properly isolated from the main list. It is thus, always a
+		 * local access.
+		 */
+		return LRU_ROTATE;
+	}
+
+	dentry->d_flags |= DCACHE_SHRINK_LIST;
+	list_move_tail(&dentry->d_lru, freeable);
+	this_cpu_dec(nr_dentry_unused);
+	spin_unlock(&dentry->d_lock);
+
+	return LRU_REMOVED;
+}
+
 /**
  * prune_dcache_sb - shrink the dcache
  * @sb: superblock
- * @count: number of entries to try to free
+ * @nr_to_scan : number of entries to try to free
  *
- * Attempt to shrink the superblock dcache LRU by @count entries. This is
+ * Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is
  * done when we need more memory an called from the superblock shrinker
  * function.
  *
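
dentry_lru_isolate() runs with the LRU lock already held, which inverts the usual d_lock ordering; hence the trylock and the LRU_SKIP verdict. What the generic walker does with each verdict is roughly the following. This is a sketch that assumes struct list_lru carries its own lock, list head and item count; the real loop lives in mm/list_lru.c and is more careful:

static unsigned long
list_lru_walk_sketch(struct list_lru *lru, list_lru_walk_cb isolate,
		     void *cb_arg, unsigned long nr_to_walk)
{
	struct list_head *item, *n;
	unsigned long isolated = 0;

	spin_lock(&lru->lock);
	list_for_each_safe(item, n, &lru->list) {
		switch (isolate(item, &lru->lock, cb_arg)) {
		case LRU_REMOVED:
			/* the callback already unhooked the item */
			lru->nr_items--;
			isolated++;
			break;
		case LRU_ROTATE:
			/* referenced: give it another trip around */
			list_move_tail(item, &lru->list);
			break;
		case LRU_SKIP:
			/* object lock unavailable: leave it in place */
			break;
		}
		if (!--nr_to_walk)
			break;
	}
	spin_unlock(&lru->lock);
	return isolated;
}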
@@ -915,45 +947,12 @@ static void shrink_dentry_list(struct list_head *list)
  */
 long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan)
 {
-	struct dentry *dentry;
-	LIST_HEAD(referenced);
-	LIST_HEAD(tmp);
-	long freed = 0;
+	LIST_HEAD(dispose);
+	long freed;

-relock:
-	spin_lock(&sb->s_dentry_lru_lock);
-	while (!list_empty(&sb->s_dentry_lru)) {
-		dentry = list_entry(sb->s_dentry_lru.prev,
-				struct dentry, d_lru);
-		BUG_ON(dentry->d_sb != sb);
-
-		if (!spin_trylock(&dentry->d_lock)) {
-			spin_unlock(&sb->s_dentry_lru_lock);
-			cpu_relax();
-			goto relock;
-		}
-
-		if (dentry->d_flags & DCACHE_REFERENCED) {
-			dentry->d_flags &= ~DCACHE_REFERENCED;
-			list_move(&dentry->d_lru, &referenced);
-			spin_unlock(&dentry->d_lock);
-		} else {
-			list_move(&dentry->d_lru, &tmp);
-			dentry->d_flags |= DCACHE_SHRINK_LIST;
-			this_cpu_dec(nr_dentry_unused);
-			sb->s_nr_dentry_unused--;
-			spin_unlock(&dentry->d_lock);
-			freed++;
-			if (!--nr_to_scan)
-				break;
-		}
-		cond_resched_lock(&sb->s_dentry_lru_lock);
-	}
-	if (!list_empty(&referenced))
-		list_splice(&referenced, &sb->s_dentry_lru);
-	spin_unlock(&sb->s_dentry_lru_lock);
+	freed = list_lru_walk(&sb->s_dentry_lru, dentry_lru_isolate,
+			      &dispose, nr_to_scan);
+	shrink_dentry_list(&dispose);

-	shrink_dentry_list(&tmp);
 	return freed;
 }
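
prune_dcache_sb() thus becomes a two-phase operation: list_lru_walk() isolates up to nr_to_scan dentries onto the stack-private dispose list under the LRU lock, and shrink_dentry_list() then frees them with no LRU lock held. The disposal phase has roughly this shape (a sketch; the real shrink_dentry_list() above also handles parents and dentries that regained a reference in the meantime):

static void shrink_dentry_list_sketch(struct list_head *list)
{
	while (!list_empty(list)) {
		struct dentry *dentry;

		/* consume entries from the private list */
		dentry = list_entry(list->prev, struct dentry, d_lru);
		list_del_init(&dentry->d_lru);
		/* ... take d_lock, re-check d_lockref.count, then kill ... */
	}
}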
@@ -987,24 +986,10 @@ shrink_dcache_list(
  */
 void shrink_dcache_sb(struct super_block *sb)
 {
-	LIST_HEAD(tmp);
+	long disposed;

-	spin_lock(&sb->s_dentry_lru_lock);
-	while (!list_empty(&sb->s_dentry_lru)) {
-		/*
-		 * account for removal here so we don't need to handle it later
-		 * even though the dentry is no longer on the lru list.
-		 */
-		list_splice_init(&sb->s_dentry_lru, &tmp);
-		this_cpu_sub(nr_dentry_unused, sb->s_nr_dentry_unused);
-		sb->s_nr_dentry_unused = 0;
-		spin_unlock(&sb->s_dentry_lru_lock);
-
-		shrink_dcache_list(&tmp);
-
-		spin_lock(&sb->s_dentry_lru_lock);
-	}
-	spin_unlock(&sb->s_dentry_lru_lock);
+	disposed = list_lru_dispose_all(&sb->s_dentry_lru, shrink_dcache_list);
+	this_cpu_sub(nr_dentry_unused, disposed);
 }

 EXPORT_SYMBOL(shrink_dcache_sb);
@@ -1366,7 +1351,8 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
 	if (dentry->d_lockref.count) {
 		dentry_lru_del(dentry);
 	} else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
-		dentry_lru_move_list(dentry, &data->dispose);
+		dentry_lru_del(dentry);
+		list_add_tail(&dentry->d_lru, &data->dispose);
 		dentry->d_flags |= DCACHE_SHRINK_LIST;
 		data->found++;
 		ret = D_WALK_NORETRY;

fs/super.c

@@ -79,11 +79,11 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
 		fs_objects = sb->s_op->nr_cached_objects(sb);

 	inodes = list_lru_count(&sb->s_inode_lru);
-	total_objects = sb->s_nr_dentry_unused + inodes + fs_objects + 1;
+	dentries = list_lru_count(&sb->s_dentry_lru);
+	total_objects = dentries + inodes + fs_objects + 1;

 	/* proportion the scan between the caches */
-	dentries = mult_frac(sc->nr_to_scan, sb->s_nr_dentry_unused,
-							total_objects);
+	dentries = mult_frac(sc->nr_to_scan, dentries, total_objects);
 	inodes = mult_frac(sc->nr_to_scan, inodes, total_objects);

 	/*
@@ -117,7 +117,7 @@ static unsigned long super_cache_count(struct shrinker *shrink,
 	if (sb->s_op && sb->s_op->nr_cached_objects)
 		total_objects = sb->s_op->nr_cached_objects(sb);

-	total_objects += sb->s_nr_dentry_unused;
+	total_objects += list_lru_count(&sb->s_dentry_lru);
 	total_objects += list_lru_count(&sb->s_inode_lru);

 	total_objects = vfs_pressure_ratio(total_objects);
@@ -191,8 +191,7 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
 	INIT_HLIST_NODE(&s->s_instances);
 	INIT_HLIST_BL_HEAD(&s->s_anon);
 	INIT_LIST_HEAD(&s->s_inodes);
-	INIT_LIST_HEAD(&s->s_dentry_lru);
-	spin_lock_init(&s->s_dentry_lru_lock);
+	list_lru_init(&s->s_dentry_lru);
 	list_lru_init(&s->s_inode_lru);
 	INIT_LIST_HEAD(&s->s_mounts);
 	init_rwsem(&s->s_umount);
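
The proportioning in super_cache_scan() splits sc->nr_to_scan across the caches by their relative size; mult_frac(x, n, d) evaluates x * n / d without overflowing the intermediate product. With hypothetical counts, for illustration:

/* Hypothetical counts, to show how the scan target is split. */
unsigned long nr_to_scan = 128;
unsigned long dentries = 600, inodes = 300, fs_objects = 99;
unsigned long total_objects = dentries + inodes + fs_objects + 1;	/* 1000; the +1 avoids a zero divisor */

dentries = mult_frac(nr_to_scan, dentries, total_objects);	/* 128 * 600 / 1000 = 76 */
inodes   = mult_frac(nr_to_scan, inodes, total_objects);	/* 128 * 300 / 1000 = 38 */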

include/linux/fs.h

@@ -1270,14 +1270,6 @@ struct super_block {
 	struct list_head	s_files;
 #endif
 	struct list_head	s_mounts;	/* list of mounts; _not_ for fs use */
-	/* s_dentry_lru_lock protects s_dentry_lru and s_nr_dentry_unused */
-	spinlock_t		s_dentry_lru_lock ____cacheline_aligned_in_smp;
-	struct list_head	s_dentry_lru;	/* unused dentry lru */
-	long			s_nr_dentry_unused;	/* # of dentry on lru */
-
-	struct list_lru		s_inode_lru ____cacheline_aligned_in_smp;
-
 	struct block_device	*s_bdev;
 	struct backing_dev_info *s_bdi;
 	struct mtd_info		*s_mtd;
@@ -1331,6 +1323,13 @@ struct super_block {

 	/* AIO completions deferred from interrupt context */
 	struct workqueue_struct *s_dio_done_wq;
+
+	/*
+	 * Keep the lru lists last in the structure so they always sit on their
+	 * own individual cachelines.
+	 */
+	struct list_lru		s_dentry_lru ____cacheline_aligned_in_smp;
+	struct list_lru		s_inode_lru ____cacheline_aligned_in_smp;
 };

 extern struct timespec current_fs_time(struct super_block *sb);
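
The layout change in struct super_block is about false sharing: ____cacheline_aligned_in_smp aligns a member to the SMP cacheline size, so each LRU (and the lock buried inside it) gets a cacheline to itself, and placing both lists last keeps the alignment padding out of the middle of the structure. Roughly what the annotation amounts to, as a sketch with an illustrative 64-byte line (SMP_CACHE_BYTES is arch-dependent):

/* Sketch: effect of ____cacheline_aligned_in_smp on layout. */
#define CACHELINE_SZ 64		/* stand-in for SMP_CACHE_BYTES */

struct tail_sketch {
	long read_mostly_field;	/* packs normally with its neighbours */
	struct list_lru s_dentry_lru
		__attribute__((__aligned__(CACHELINE_SZ)));	/* own line */
	struct list_lru s_inode_lru
		__attribute__((__aligned__(CACHELINE_SZ)));	/* own line */
};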