Btrfs: use slabs for auto defrag allocation

The auto-defrag allocation sits on the IO fast path, so use a dedicated slab cache to speed up the allocation.

Besides that, the slab cache can check for leaked objects when the module is removed.
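For context, the kmem_cache lifecycle this patch adopts looks roughly like the sketch below. It is only an illustration, not code from this commit: the struct and cache names (example_defrag, example_defrag_cachep) are made up, while the kmem_cache_* calls and flags mirror what the patch uses.

    /* Minimal sketch of the per-type slab cache pattern (illustrative names only). */
    #include <linux/module.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    struct example_defrag {
            u64 ino;
            u64 transid;
    };

    static struct kmem_cache *example_defrag_cachep;

    static int __init example_init(void)
    {
            /* One dedicated cache per object type instead of generic kmalloc buckets. */
            example_defrag_cachep = kmem_cache_create("example_defrag",
                                            sizeof(struct example_defrag), 0,
                                            SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
                                            NULL);
            if (!example_defrag_cachep)
                    return -ENOMEM;
            return 0;
    }

    static void __exit example_exit(void)
    {
            /* With slab debugging enabled, destroying a cache that still holds
             * live objects reports them, which is how leaks show up at unload. */
            kmem_cache_destroy(example_defrag_cachep);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");

Allocation and free then become kmem_cache_zalloc(cachep, GFP_NOFS) and kmem_cache_free(cachep, obj), which is exactly the substitution the file.c hunks below make for kzalloc()/kfree().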

Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
Signed-off-by: Chris Mason <chris.mason@fusionio.com>
commit 9247f3170b (parent 905b0dda06)
Authored by Miao Xie on 2012-11-26 09:24:43 +00:00; committed by Chris Mason
3 changed files with 34 additions and 5 deletions

@@ -3505,6 +3505,8 @@ void btrfs_get_block_group_info(struct list_head *groups_list,
 			       struct btrfs_ioctl_space_info *space);
 
 /* file.c */
+int btrfs_auto_defrag_init(void);
+void btrfs_auto_defrag_exit(void);
 int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
 			   struct inode *inode);
 int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info);

@@ -41,6 +41,7 @@
 #include "compat.h"
 #include "volumes.h"
 
+static struct kmem_cache *btrfs_inode_defrag_cachep;
 /*
  * when auto defrag is enabled we
  * queue up these defrag structs to remember which
@@ -127,7 +128,7 @@ static void __btrfs_add_inode_defrag(struct inode *inode,
 	return;
 
 exists:
-	kfree(defrag);
+	kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 	return;
 
 }
@@ -157,7 +158,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
 	else
 		transid = BTRFS_I(inode)->root->last_trans;
 
-	defrag = kzalloc(sizeof(*defrag), GFP_NOFS);
+	defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS);
 	if (!defrag)
 		return -ENOMEM;
@@ -169,7 +170,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
 	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
 		__btrfs_add_inode_defrag(inode, defrag);
 	else
-		kfree(defrag);
+		kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 	spin_unlock(&root->fs_info->defrag_inodes_lock);
 	return 0;
 }
@@ -315,7 +316,8 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
 next:
 		spin_lock(&fs_info->defrag_inodes_lock);
 next_free:
-		kfree(defrag);
+		if (defrag)
+			kmem_cache_free(btrfs_inode_defrag_cachep, defrag);
 	}
 	spin_unlock(&fs_info->defrag_inodes_lock);
@@ -2293,3 +2295,21 @@ const struct file_operations btrfs_file_operations = {
 	.compat_ioctl	= btrfs_ioctl,
 #endif
 };
+
+void btrfs_auto_defrag_exit(void)
+{
+	if (btrfs_inode_defrag_cachep)
+		kmem_cache_destroy(btrfs_inode_defrag_cachep);
+}
+
+int btrfs_auto_defrag_init(void)
+{
+	btrfs_inode_defrag_cachep = kmem_cache_create("btrfs_inode_defrag",
+					sizeof(struct inode_defrag), 0,
+					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
+					NULL);
+	if (!btrfs_inode_defrag_cachep)
+		return -ENOMEM;
+
+	return 0;
+}

@@ -1680,10 +1680,14 @@ static int __init init_btrfs_fs(void)
 	if (err)
 		goto free_ordered_data;
 
-	err = btrfs_interface_init();
+	err = btrfs_auto_defrag_init();
 	if (err)
 		goto free_delayed_inode;
 
+	err = btrfs_interface_init();
+	if (err)
+		goto free_auto_defrag;
+
 	err = register_filesystem(&btrfs_fs_type);
 	if (err)
 		goto unregister_ioctl;
@@ -1695,6 +1699,8 @@ static int __init init_btrfs_fs(void)
 unregister_ioctl:
 	btrfs_interface_exit();
+free_auto_defrag:
+	btrfs_auto_defrag_exit();
 free_delayed_inode:
 	btrfs_delayed_inode_exit();
 free_ordered_data:
@@ -1714,6 +1720,7 @@ static int __init init_btrfs_fs(void)
 static void __exit exit_btrfs_fs(void)
 {
 	btrfs_destroy_cachep();
+	btrfs_auto_defrag_exit();
 	btrfs_delayed_inode_exit();
 	ordered_data_exit();
 	extent_map_exit();