btrfs: move fs_info init work into its own helper function

open_ctree mixes mount-time filesystem setup with initialization of the
fs_info structure itself, which makes it confusing when doing things
like adding the root leak detection.  Add a separate helper that
initializes all the static structures inside fs_info that the fs needs
to operate, and call it before we start setting up the fs_info to be
mounted.

Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
Josef Bacik 2020-01-24 09:32:58 -05:00 committed by David Sterba
parent 141386e1a5
commit ae18c37ad5
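
For orientation, here is a minimal, self-contained C sketch of the pattern the
diff below applies. The names (toy_info, init_toy_info, open_toy) are invented
for illustration and are not the btrfs functions; the point is only the shape:
collect the static-state setup behind one helper with a single unwind path,
and have the open path call it before any mount-time work.

	#include <stdio.h>
	#include <stdlib.h>

	/* Stand-ins for the pieces init_fs_info() sets up (purely illustrative). */
	struct toy_info {
		int *counter;		/* think: a percpu counter */
		char *delayed_root;	/* think: fs_info->delayed_root */
	};

	/* Init the static state the "fs" needs; unwind everything on failure. */
	static int init_toy_info(struct toy_info *info)
	{
		info->counter = malloc(sizeof(*info->counter));
		if (!info->counter)
			return -1;

		info->delayed_root = malloc(64);
		if (!info->delayed_root)
			goto fail_counter;

		return 0;

	fail_counter:
		free(info->counter);
		info->counter = NULL;
		return -1;
	}

	/* Stand-in for open_ctree(): static init first, mount-time work after. */
	static int open_toy(struct toy_info *info)
	{
		int ret = init_toy_info(info);

		if (ret)
			return ret;

		printf("static state ready, mount-time setup would follow here\n");
		return 0;
	}

	int main(void)
	{
		struct toy_info info = { NULL, NULL };

		if (open_toy(&info))
			return EXIT_FAILURE;

		free(info.delayed_root);
		free(info.counter);
		return EXIT_SUCCESS;
	}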

@@ -2649,70 +2649,9 @@ static int __cold init_tree_roots(struct btrfs_fs_info *fs_info)
return ret;
}
int __cold open_ctree(struct super_block *sb,
struct btrfs_fs_devices *fs_devices,
char *options)
static int init_fs_info(struct btrfs_fs_info *fs_info, struct super_block *sb)
{
u32 sectorsize;
u32 nodesize;
u32 stripesize;
u64 generation;
u64 features;
u16 csum_type;
struct btrfs_key location;
struct buffer_head *bh;
struct btrfs_super_block *disk_super;
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
struct btrfs_root *tree_root;
struct btrfs_root *chunk_root;
int ret;
int err = -EINVAL;
int clear_free_space_tree = 0;
int level;
tree_root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID,
GFP_KERNEL);
fs_info->tree_root = tree_root;
chunk_root = btrfs_alloc_root(fs_info, BTRFS_CHUNK_TREE_OBJECTID,
GFP_KERNEL);
fs_info->chunk_root = chunk_root;
if (!tree_root || !chunk_root) {
err = -ENOMEM;
goto fail;
}
ret = init_srcu_struct(&fs_info->subvol_srcu);
if (ret) {
err = ret;
goto fail;
}
ret = percpu_counter_init(&fs_info->dio_bytes, 0, GFP_KERNEL);
if (ret) {
err = ret;
goto fail_srcu;
}
ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
if (ret) {
err = ret;
goto fail_srcu;
}
fs_info->dirty_metadata_batch = PAGE_SIZE *
(1 + ilog2(nr_cpu_ids));
ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
if (ret) {
err = ret;
goto fail_srcu;
}
ret = percpu_counter_init(&fs_info->dev_replace.bio_counter, 0,
GFP_KERNEL);
if (ret) {
err = ret;
goto fail_srcu;
}
INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC);
INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC);
@@ -2775,21 +2714,6 @@ int __cold open_ctree(struct super_block *sb,
INIT_LIST_HEAD(&fs_info->ordered_roots);
spin_lock_init(&fs_info->ordered_root_lock);
fs_info->btree_inode = new_inode(sb);
if (!fs_info->btree_inode) {
err = -ENOMEM;
goto fail_srcu;
}
mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
GFP_KERNEL);
if (!fs_info->delayed_root) {
err = -ENOMEM;
goto fail_iput;
}
btrfs_init_delayed_root(fs_info->delayed_root);
btrfs_init_scrub(fs_info);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
fs_info->check_integrity_print_mask = 0;
@@ -2800,8 +2724,6 @@ int __cold open_ctree(struct super_block *sb,
sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE;
sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE);
btrfs_init_btree_inode(fs_info);
spin_lock_init(&fs_info->block_group_cache_lock);
fs_info->block_group_cache_tree = RB_ROOT;
fs_info->first_logical_byte = (u64)-1;
@@ -2847,12 +2769,94 @@ int __cold open_ctree(struct super_block *sb,
fs_info->send_in_progress = 0;
ret = init_srcu_struct(&fs_info->subvol_srcu);
if (ret)
return ret;
ret = percpu_counter_init(&fs_info->dio_bytes, 0, GFP_KERNEL);
if (ret)
goto fail;
ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL);
if (ret)
goto fail;
fs_info->dirty_metadata_batch = PAGE_SIZE *
(1 + ilog2(nr_cpu_ids));
ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL);
if (ret)
goto fail;
ret = percpu_counter_init(&fs_info->dev_replace.bio_counter, 0,
GFP_KERNEL);
if (ret)
goto fail;
fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root),
GFP_KERNEL);
if (!fs_info->delayed_root) {
ret = -ENOMEM;
goto fail;
}
btrfs_init_delayed_root(fs_info->delayed_root);
ret = btrfs_alloc_stripe_hash_table(fs_info);
if (ret)
goto fail;
return 0;
fail:
cleanup_srcu_struct(&fs_info->subvol_srcu);
return ret;
}
int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices,
char *options)
{
u32 sectorsize;
u32 nodesize;
u32 stripesize;
u64 generation;
u64 features;
u16 csum_type;
struct btrfs_key location;
struct buffer_head *bh;
struct btrfs_super_block *disk_super;
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
struct btrfs_root *tree_root;
struct btrfs_root *chunk_root;
int ret;
int err = -EINVAL;
int clear_free_space_tree = 0;
int level;
ret = init_fs_info(fs_info, sb);
if (ret) {
err = ret;
goto fail_alloc;
goto fail;
}
/* These need to be init'ed before we start creating inodes and such. */
tree_root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID,
GFP_KERNEL);
fs_info->tree_root = tree_root;
chunk_root = btrfs_alloc_root(fs_info, BTRFS_CHUNK_TREE_OBJECTID,
GFP_KERNEL);
fs_info->chunk_root = chunk_root;
if (!tree_root || !chunk_root) {
err = -ENOMEM;
goto fail_srcu;
}
fs_info->btree_inode = new_inode(sb);
if (!fs_info->btree_inode) {
err = -ENOMEM;
goto fail_srcu;
}
mapping_set_gfp_mask(fs_info->btree_inode->i_mapping, GFP_NOFS);
btrfs_init_btree_inode(fs_info);
invalidate_bdev(fs_devices->latest_bdev);
/*
@@ -3355,7 +3359,6 @@ int __cold open_ctree(struct super_block *sb,
btrfs_stop_all_workers(fs_info);
btrfs_free_block_groups(fs_info);
fail_alloc:
fail_iput:
btrfs_mapping_tree_free(&fs_info->mapping_tree);
iput(fs_info->btree_inode);