mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-11-24 18:10:52 +07:00
Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4
* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
  ext4: remove write-only variables from ext4_ordered_write_end
  ext4: unexport jbd2_journal_update_superblock
  ext4: Cleanup whitespace and other miscellaneous style issues
  ext4: improve ext4_fill_flex_info() a bit
  ext4: Cleanup the block reservation code path
  ext4: don't assume extents can't cross block groups when truncating
  ext4: Fix lack of credits BUG() when deleting a badly fragmented inode
  ext4: Fix ext4_ext_journal_restart()
  ext4: fix ext4_da_write_begin error path
  jbd2: don't abort if flushing file data failed
  ext4: don't read inode block if the buffer has a write error
  ext4: Don't allow lg prealloc list to be grow large.
  ext4: Convert the usage of NR_CPUS to nr_cpu_ids.
  ext4: Improve error handling in mballoc
  ext4: lock block groups when initializing
  ext4: sync up block and inode bitmap reading functions
  ext4: Allow read/only mounts with corrupted block group checksums
  ext4: Fix data corruption when writing to prealloc area
commit 8f616cd524
fs/ext4/acl.c | 172
@@ -40,34 +40,35 @@ ext4_acl_from_disk(const void *value, size_t size)
 	acl = posix_acl_alloc(count, GFP_NOFS);
 	if (!acl)
 		return ERR_PTR(-ENOMEM);
-	for (n=0; n < count; n++) {
+	for (n = 0; n < count; n++) {
 		ext4_acl_entry *entry =
 			(ext4_acl_entry *)value;
 		if ((char *)value + sizeof(ext4_acl_entry_short) > end)
 			goto fail;
 		acl->a_entries[n].e_tag  = le16_to_cpu(entry->e_tag);
 		acl->a_entries[n].e_perm = le16_to_cpu(entry->e_perm);
-		switch(acl->a_entries[n].e_tag) {
-			case ACL_USER_OBJ:
-			case ACL_GROUP_OBJ:
-			case ACL_MASK:
-			case ACL_OTHER:
-				value = (char *)value +
-					sizeof(ext4_acl_entry_short);
-				acl->a_entries[n].e_id = ACL_UNDEFINED_ID;
-				break;
-
-			case ACL_USER:
-			case ACL_GROUP:
-				value = (char *)value + sizeof(ext4_acl_entry);
-				if ((char *)value > end)
-					goto fail;
-				acl->a_entries[n].e_id =
-					le32_to_cpu(entry->e_id);
-				break;
-
-			default:
-				goto fail;
+		switch (acl->a_entries[n].e_tag) {
+		case ACL_USER_OBJ:
+		case ACL_GROUP_OBJ:
+		case ACL_MASK:
+		case ACL_OTHER:
+			value = (char *)value +
+				sizeof(ext4_acl_entry_short);
+			acl->a_entries[n].e_id = ACL_UNDEFINED_ID;
+			break;
+
+		case ACL_USER:
+		case ACL_GROUP:
+			value = (char *)value + sizeof(ext4_acl_entry);
+			if ((char *)value > end)
+				goto fail;
+			acl->a_entries[n].e_id =
+				le32_to_cpu(entry->e_id);
+			break;
+
+		default:
+			goto fail;
 		}
 	}
 	if (value != end)

@@ -96,27 +97,26 @@ ext4_acl_to_disk(const struct posix_acl *acl, size_t *size)
 		return ERR_PTR(-ENOMEM);
 	ext_acl->a_version = cpu_to_le32(EXT4_ACL_VERSION);
 	e = (char *)ext_acl + sizeof(ext4_acl_header);
-	for (n=0; n < acl->a_count; n++) {
+	for (n = 0; n < acl->a_count; n++) {
 		ext4_acl_entry *entry = (ext4_acl_entry *)e;
 		entry->e_tag  = cpu_to_le16(acl->a_entries[n].e_tag);
 		entry->e_perm = cpu_to_le16(acl->a_entries[n].e_perm);
-		switch(acl->a_entries[n].e_tag) {
-			case ACL_USER:
-			case ACL_GROUP:
-				entry->e_id =
-					cpu_to_le32(acl->a_entries[n].e_id);
-				e += sizeof(ext4_acl_entry);
-				break;
-
-			case ACL_USER_OBJ:
-			case ACL_GROUP_OBJ:
-			case ACL_MASK:
-			case ACL_OTHER:
-				e += sizeof(ext4_acl_entry_short);
-				break;
-
-			default:
-				goto fail;
+		switch (acl->a_entries[n].e_tag) {
+		case ACL_USER:
+		case ACL_GROUP:
+			entry->e_id = cpu_to_le32(acl->a_entries[n].e_id);
+			e += sizeof(ext4_acl_entry);
+			break;
+
+		case ACL_USER_OBJ:
+		case ACL_GROUP_OBJ:
+		case ACL_MASK:
+		case ACL_OTHER:
+			e += sizeof(ext4_acl_entry_short);
+			break;
+
+		default:
+			goto fail;
 		}
 	}
 	return (char *)ext_acl;

@@ -167,23 +167,23 @@ ext4_get_acl(struct inode *inode, int type)
 	if (!test_opt(inode->i_sb, POSIX_ACL))
 		return NULL;

-	switch(type) {
-		case ACL_TYPE_ACCESS:
-			acl = ext4_iget_acl(inode, &ei->i_acl);
-			if (acl != EXT4_ACL_NOT_CACHED)
-				return acl;
-			name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS;
-			break;
-
-		case ACL_TYPE_DEFAULT:
-			acl = ext4_iget_acl(inode, &ei->i_default_acl);
-			if (acl != EXT4_ACL_NOT_CACHED)
-				return acl;
-			name_index = EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT;
-			break;
-
-		default:
-			return ERR_PTR(-EINVAL);
+	switch (type) {
+	case ACL_TYPE_ACCESS:
+		acl = ext4_iget_acl(inode, &ei->i_acl);
+		if (acl != EXT4_ACL_NOT_CACHED)
+			return acl;
+		name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS;
+		break;
+
+	case ACL_TYPE_DEFAULT:
+		acl = ext4_iget_acl(inode, &ei->i_default_acl);
+		if (acl != EXT4_ACL_NOT_CACHED)
+			return acl;
+		name_index = EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT;
+		break;
+
+	default:
+		return ERR_PTR(-EINVAL);
 	}
 	retval = ext4_xattr_get(inode, name_index, "", NULL, 0);
 	if (retval > 0) {

@@ -201,14 +201,14 @@ ext4_get_acl(struct inode *inode, int type)
 	kfree(value);

 	if (!IS_ERR(acl)) {
-		switch(type) {
-			case ACL_TYPE_ACCESS:
-				ext4_iset_acl(inode, &ei->i_acl, acl);
-				break;
-
-			case ACL_TYPE_DEFAULT:
-				ext4_iset_acl(inode, &ei->i_default_acl, acl);
-				break;
+		switch (type) {
+		case ACL_TYPE_ACCESS:
+			ext4_iset_acl(inode, &ei->i_acl, acl);
+			break;
+
+		case ACL_TYPE_DEFAULT:
+			ext4_iset_acl(inode, &ei->i_default_acl, acl);
+			break;
 		}
 	}
 	return acl;

@@ -232,31 +232,31 @@ ext4_set_acl(handle_t *handle, struct inode *inode, int type,
 	if (S_ISLNK(inode->i_mode))
 		return -EOPNOTSUPP;

-	switch(type) {
-		case ACL_TYPE_ACCESS:
-			name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS;
-			if (acl) {
-				mode_t mode = inode->i_mode;
-				error = posix_acl_equiv_mode(acl, &mode);
-				if (error < 0)
-					return error;
-				else {
-					inode->i_mode = mode;
-					ext4_mark_inode_dirty(handle, inode);
-					if (error == 0)
-						acl = NULL;
-				}
-			}
-			break;
-
-		case ACL_TYPE_DEFAULT:
-			name_index = EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT;
-			if (!S_ISDIR(inode->i_mode))
-				return acl ? -EACCES : 0;
-			break;
-
-		default:
-			return -EINVAL;
+	switch (type) {
+	case ACL_TYPE_ACCESS:
+		name_index = EXT4_XATTR_INDEX_POSIX_ACL_ACCESS;
+		if (acl) {
+			mode_t mode = inode->i_mode;
+			error = posix_acl_equiv_mode(acl, &mode);
+			if (error < 0)
+				return error;
+			else {
+				inode->i_mode = mode;
+				ext4_mark_inode_dirty(handle, inode);
+				if (error == 0)
+					acl = NULL;
+			}
+		}
+		break;
+
+	case ACL_TYPE_DEFAULT:
+		name_index = EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT;
+		if (!S_ISDIR(inode->i_mode))
+			return acl ? -EACCES : 0;
+		break;
+
+	default:
+		return -EINVAL;
 	}
 	if (acl) {
 		value = ext4_acl_to_disk(acl, &size);

@@ -269,14 +269,14 @@ ext4_set_acl(handle_t *handle, struct inode *inode, int type,

 	kfree(value);
 	if (!error) {
-		switch(type) {
-			case ACL_TYPE_ACCESS:
-				ext4_iset_acl(inode, &ei->i_acl, acl);
-				break;
-
-			case ACL_TYPE_DEFAULT:
-				ext4_iset_acl(inode, &ei->i_default_acl, acl);
-				break;
+		switch (type) {
+		case ACL_TYPE_ACCESS:
+			ext4_iset_acl(inode, &ei->i_acl, acl);
+			break;
+
+		case ACL_TYPE_DEFAULT:
+			ext4_iset_acl(inode, &ei->i_default_acl, acl);
+			break;
 		}
 	}
 	return error;
@@ -314,25 +314,28 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
 	if (unlikely(!bh)) {
 		ext4_error(sb, __func__,
 			   "Cannot read block bitmap - "
-			   "block_group = %d, block_bitmap = %llu",
-			   (int)block_group, (unsigned long long)bitmap_blk);
+			   "block_group = %lu, block_bitmap = %llu",
+			   block_group, bitmap_blk);
 		return NULL;
 	}
 	if (bh_uptodate_or_lock(bh))
 		return bh;

+	spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
 	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
 		ext4_init_block_bitmap(sb, bh, block_group, desc);
 		set_buffer_uptodate(bh);
 		unlock_buffer(bh);
+		spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
 		return bh;
 	}
+	spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
 	if (bh_submit_read(bh) < 0) {
 		put_bh(bh);
 		ext4_error(sb, __func__,
 			   "Cannot read block bitmap - "
-			   "block_group = %d, block_bitmap = %llu",
-			   (int)block_group, (unsigned long long)bitmap_blk);
+			   "block_group = %lu, block_bitmap = %llu",
+			   block_group, bitmap_blk);
 		return NULL;
 	}
 	ext4_valid_block_bitmap(sb, desc, block_group, bh);
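A side note on the hunk above (illustrative, not part of the commit): the on-demand bitmap initialization now happens with the per-group lock held, so two tasks cannot both initialize, or observe a half-initialized, bitmap for the same group. A minimal userspace sketch of the same pattern, with made-up names standing in for sb_bgl_lock(), buffer_uptodate() and friends:

#include <pthread.h>
#include <stdio.h>

struct group {
	pthread_mutex_t lock;	/* stands in for sb_bgl_lock() */
	int uninit;		/* stands in for EXT4_BG_BLOCK_UNINIT */
	int uptodate;		/* stands in for buffer_uptodate(bh) */
};

static void read_bitmap(struct group *g)
{
	if (g->uptodate)	/* bh_uptodate_or_lock() fast path */
		return;
	pthread_mutex_lock(&g->lock);
	if (g->uninit) {
		/* ext4_init_block_bitmap() + set_buffer_uptodate() */
		g->uninit = 0;
		g->uptodate = 1;
	}
	pthread_mutex_unlock(&g->lock);
	if (!g->uptodate)
		puts("submit read I/O");	/* bh_submit_read() path */
}

int main(void)
{
	struct group g = { PTHREAD_MUTEX_INITIALIZER, 1, 0 };
	read_bitmap(&g);	/* first caller initializes under the lock */
	read_bitmap(&g);	/* second caller takes the fast path */
	printf("uptodate=%d uninit=%d\n", g.uptodate, g.uninit);
	return 0;
}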
@@ -1044,7 +1044,6 @@ extern void ext4_mb_update_group_info(struct ext4_group_info *grp,


 /* inode.c */
-void ext4_da_release_space(struct inode *inode, int used, int to_free);
 int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
 		struct buffer_head *bh, ext4_fsblk_t blocknr);
 struct buffer_head *ext4_getblk(handle_t *, struct inode *,
@@ -99,7 +99,7 @@ static int ext4_ext_journal_restart(handle_t *handle, int needed)
 	if (handle->h_buffer_credits > needed)
 		return 0;
 	err = ext4_journal_extend(handle, needed);
-	if (err)
+	if (err <= 0)
 		return err;
 	return ext4_journal_restart(handle, needed);
 }

@@ -1441,7 +1441,7 @@ unsigned int ext4_ext_check_overlap(struct inode *inode,

 	/*
 	 * get the next allocated block if the extent in the path
-	 * is before the requested block(s) 
+	 * is before the requested block(s)
 	 */
 	if (b2 < b1) {
 		b2 = ext4_ext_next_allocated_block(path);

@@ -1910,9 +1910,13 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
 		BUG_ON(b != ex_ee_block + ex_ee_len - 1);
 	}

-	/* at present, extent can't cross block group: */
-	/* leaf + bitmap + group desc + sb + inode */
-	credits = 5;
+	/*
+	 * 3 for leaf, sb, and inode plus 2 (bmap and group
+	 * descriptor) for each block group; assume two block
+	 * groups plus ex_ee_len/blocks_per_block_group for
+	 * the worst case
+	 */
+	credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
 	if (ex == EXT_FIRST_EXTENT(eh)) {
 		correct_index = 1;
 		credits += (ext_depth(inode)) + 1;

@@ -2323,7 +2327,10 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 	unsigned int newdepth;
 	/* If extent has less than EXT4_EXT_ZERO_LEN zerout directly */
 	if (allocated <= EXT4_EXT_ZERO_LEN) {
-		/* Mark first half uninitialized.
+		/*
+		 * iblock == ee_block is handled by the zerouout
+		 * at the beginning.
+		 * Mark first half uninitialized.
 		 * Mark second half initialized and zero out the
 		 * initialized extent
 		 */

@@ -2346,7 +2353,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 			ex->ee_len = orig_ex.ee_len;
 			ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
 			ext4_ext_dirty(handle, inode, path + depth);
-			/* zeroed the full extent */
+			/* blocks available from iblock */
 			return allocated;

 		} else if (err)

@@ -2374,6 +2381,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 			err = PTR_ERR(path);
 			return err;
 		}
+		/* get the second half extent details */
 		ex = path[depth].p_ext;
 		err = ext4_ext_get_access(handle, inode,
 						path + depth);

@@ -2403,6 +2411,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 			ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
 			ext4_ext_dirty(handle, inode, path + depth);
 			/* zeroed the full extent */
+			/* blocks available from iblock */
 			return allocated;

 		} else if (err)

@@ -2418,23 +2427,22 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 	 */
 	orig_ex.ee_len = cpu_to_le16(ee_len -
 				ext4_ext_get_actual_len(ex3));
-	if (newdepth != depth) {
-		depth = newdepth;
-		ext4_ext_drop_refs(path);
-		path = ext4_ext_find_extent(inode, iblock, path);
-		if (IS_ERR(path)) {
-			err = PTR_ERR(path);
-			goto out;
-		}
-		eh = path[depth].p_hdr;
-		ex = path[depth].p_ext;
-		if (ex2 != &newex)
-			ex2 = ex;
-
-		err = ext4_ext_get_access(handle, inode, path + depth);
-		if (err)
-			goto out;
+	depth = newdepth;
+	ext4_ext_drop_refs(path);
+	path = ext4_ext_find_extent(inode, iblock, path);
+	if (IS_ERR(path)) {
+		err = PTR_ERR(path);
+		goto out;
 	}
+	eh = path[depth].p_hdr;
+	ex = path[depth].p_ext;
+	if (ex2 != &newex)
+		ex2 = ex;
+
+	err = ext4_ext_get_access(handle, inode, path + depth);
+	if (err)
+		goto out;
+
 	allocated = max_blocks;

 	/* If extent has less than EXT4_EXT_ZERO_LEN and we are trying

@@ -2452,6 +2460,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
 		ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
 		ext4_ext_dirty(handle, inode, path + depth);
 		/* zero out the first half */
+		/* blocks available from iblock */
 		return allocated;
 	}
 }
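As a quick sanity check of the new worst-case credit estimate in ext4_ext_rm_leaf() above, the sketch below replays the arithmetic in plain C. It is illustrative only; BLOCKS_PER_GROUP is a made-up constant standing in for EXT4_BLOCKS_PER_GROUP(), chosen to match a 4K-block filesystem.

#include <stdio.h>

/* Stand-in for EXT4_BLOCKS_PER_GROUP(sb) on a 4K-block filesystem. */
#define BLOCKS_PER_GROUP 32768

/* Worst case per the hunk above: 3 (leaf + sb + inode) plus 2 (bitmap +
 * group descriptor) for each of the two groups a small extent may straddle,
 * plus 2 more per additional group a long extent fully covers. */
static int rm_leaf_credits(unsigned int ex_ee_len)
{
	return 7 + 2 * (ex_ee_len / BLOCKS_PER_GROUP);
}

int main(void)
{
	printf("%d\n", rm_leaf_credits(100));	/* short extent: 7  */
	printf("%d\n", rm_leaf_credits(70000));	/* spans 3 groups: 11 */
	return 0;
}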
@@ -97,34 +97,44 @@ unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh,
 * Return buffer_head of bitmap on success or NULL.
 */
 static struct buffer_head *
-read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
+ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
 {
 	struct ext4_group_desc *desc;
 	struct buffer_head *bh = NULL;
+	ext4_fsblk_t bitmap_blk;

 	desc = ext4_get_group_desc(sb, block_group, NULL);
 	if (!desc)
-		goto error_out;
-	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
-		bh = sb_getblk(sb, ext4_inode_bitmap(sb, desc));
-		if (!buffer_uptodate(bh)) {
-			lock_buffer(bh);
-			if (!buffer_uptodate(bh)) {
-				ext4_init_inode_bitmap(sb, bh, block_group,
-						       desc);
-				set_buffer_uptodate(bh);
-			}
-			unlock_buffer(bh);
-		}
-	} else {
-		bh = sb_bread(sb, ext4_inode_bitmap(sb, desc));
-	}
-	if (!bh)
-		ext4_error(sb, "read_inode_bitmap",
+		return NULL;
+	bitmap_blk = ext4_inode_bitmap(sb, desc);
+	bh = sb_getblk(sb, bitmap_blk);
+	if (unlikely(!bh)) {
+		ext4_error(sb, __func__,
 			   "Cannot read inode bitmap - "
 			   "block_group = %lu, inode_bitmap = %llu",
-			   block_group, ext4_inode_bitmap(sb, desc));
-error_out:
+			   block_group, bitmap_blk);
+		return NULL;
+	}
+	if (bh_uptodate_or_lock(bh))
+		return bh;
+
+	spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
+	if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
+		ext4_init_inode_bitmap(sb, bh, block_group, desc);
+		set_buffer_uptodate(bh);
+		unlock_buffer(bh);
+		spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
+		return bh;
+	}
+	spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
+	if (bh_submit_read(bh) < 0) {
+		put_bh(bh);
+		ext4_error(sb, __func__,
+			   "Cannot read inode bitmap - "
+			   "block_group = %lu, inode_bitmap = %llu",
+			   block_group, bitmap_blk);
+		return NULL;
+	}
 	return bh;
 }

@@ -200,7 +210,7 @@ void ext4_free_inode (handle_t *handle, struct inode * inode)
 	}
 	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
 	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
-	bitmap_bh = read_inode_bitmap(sb, block_group);
+	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
 	if (!bitmap_bh)
 		goto error_return;

@@ -623,7 +633,7 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode * dir, int mode)
 		goto fail;

 	brelse(bitmap_bh);
-	bitmap_bh = read_inode_bitmap(sb, group);
+	bitmap_bh = ext4_read_inode_bitmap(sb, group);
 	if (!bitmap_bh)
 		goto fail;

@@ -728,7 +738,7 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode * dir, int mode)

 	/* When marking the block group with
 	 * ~EXT4_BG_INODE_UNINIT we don't want to depend
-	 * on the value of bg_itable_unsed even though
+	 * on the value of bg_itable_unused even though
 	 * mke2fs could have initialized the same for us.
 	 * Instead we calculated the value below
 	 */

@@ -891,7 +901,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)

 	block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
 	bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
-	bitmap_bh = read_inode_bitmap(sb, block_group);
+	bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
 	if (!bitmap_bh) {
 		ext4_warning(sb, __func__,
 			     "inode bitmap error for orphan %lu", ino);

@@ -969,7 +979,7 @@ unsigned long ext4_count_free_inodes (struct super_block * sb)
 			continue;
 		desc_count += le16_to_cpu(gdp->bg_free_inodes_count);
 		brelse(bitmap_bh);
-		bitmap_bh = read_inode_bitmap(sb, i);
+		bitmap_bh = ext4_read_inode_bitmap(sb, i);
 		if (!bitmap_bh)
 			continue;
fs/ext4/inode.c | 164
@@ -191,6 +191,7 @@ static int ext4_journal_test_restart(handle_t *handle, struct inode *inode)
 void ext4_delete_inode (struct inode * inode)
 {
 	handle_t *handle;
+	int err;

 	if (ext4_should_order_data(inode))
 		ext4_begin_ordered_truncate(inode, 0);

@@ -199,8 +200,9 @@ void ext4_delete_inode (struct inode * inode)
 	if (is_bad_inode(inode))
 		goto no_delete;

-	handle = start_transaction(inode);
+	handle = ext4_journal_start(inode, blocks_for_truncate(inode)+3);
 	if (IS_ERR(handle)) {
 		ext4_std_error(inode->i_sb, PTR_ERR(handle));
 		/*
 		 * If we're going to skip the normal cleanup, we still need to
 		 * make sure that the in-core orphan linked list is properly

@@ -213,8 +215,34 @@ void ext4_delete_inode (struct inode * inode)
 	if (IS_SYNC(inode))
 		handle->h_sync = 1;
 	inode->i_size = 0;
+	err = ext4_mark_inode_dirty(handle, inode);
+	if (err) {
+		ext4_warning(inode->i_sb, __func__,
+			     "couldn't mark inode dirty (err %d)", err);
+		goto stop_handle;
+	}
 	if (inode->i_blocks)
 		ext4_truncate(inode);
+
+	/*
+	 * ext4_ext_truncate() doesn't reserve any slop when it
+	 * restarts journal transactions; therefore there may not be
+	 * enough credits left in the handle to remove the inode from
+	 * the orphan list and set the dtime field.
+	 */
+	if (handle->h_buffer_credits < 3) {
+		err = ext4_journal_extend(handle, 3);
+		if (err > 0)
+			err = ext4_journal_restart(handle, 3);
+		if (err != 0) {
+			ext4_warning(inode->i_sb, __func__,
+				     "couldn't extend journal (err %d)", err);
+		stop_handle:
+			ext4_journal_stop(handle);
+			goto no_delete;
+		}
+	}
+
 	/*
 	 * Kill off the orphan record which ext4_truncate created.
 	 * AKPM: I think this can be inside the above `if'.

@@ -952,6 +980,67 @@ int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
 	return err;
 }

+/*
+ * Calculate the number of metadata blocks need to reserve
+ * to allocate @blocks for non extent file based file
+ */
+static int ext4_indirect_calc_metadata_amount(struct inode *inode, int blocks)
+{
+	int icap = EXT4_ADDR_PER_BLOCK(inode->i_sb);
+	int ind_blks, dind_blks, tind_blks;
+
+	/* number of new indirect blocks needed */
+	ind_blks = (blocks + icap - 1) / icap;
+
+	dind_blks = (ind_blks + icap - 1) / icap;
+
+	tind_blks = 1;
+
+	return ind_blks + dind_blks + tind_blks;
+}
+
+/*
+ * Calculate the number of metadata blocks need to reserve
+ * to allocate given number of blocks
+ */
+static int ext4_calc_metadata_amount(struct inode *inode, int blocks)
+{
+	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
+		return ext4_ext_calc_metadata_amount(inode, blocks);
+
+	return ext4_indirect_calc_metadata_amount(inode, blocks);
+}
+
+static void ext4_da_update_reserve_space(struct inode *inode, int used)
+{
+	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+	int total, mdb, mdb_free;
+
+	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
+	/* recalculate the number of metablocks still need to be reserved */
+	total = EXT4_I(inode)->i_reserved_data_blocks - used;
+	mdb = ext4_calc_metadata_amount(inode, total);
+
+	/* figure out how many metablocks to release */
+	BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
+	mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;
+
+	/* Account for allocated meta_blocks */
+	mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks;
+
+	/* update fs free blocks counter for truncate case */
+	percpu_counter_add(&sbi->s_freeblocks_counter, mdb_free);
+
+	/* update per-inode reservations */
+	BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks);
+	EXT4_I(inode)->i_reserved_data_blocks -= used;
+
+	BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
+	EXT4_I(inode)->i_reserved_meta_blocks = mdb;
+	EXT4_I(inode)->i_allocated_meta_blocks = 0;
+	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+}
+
 /* Maximum number of blocks we map for direct IO at once. */
 #define DIO_MAX_BLOCKS 4096
 /*

@@ -965,10 +1054,9 @@ int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,

 /*
- * The ext4_get_blocks_wrap() function try to look up the requested blocks,
- * and returns if the blocks are already mapped.
- *
- *
+ *
+ * ext4_ext4 get_block() wrapper function
+ * It will do a look up first, and returns if the blocks already mapped.
  * Otherwise it takes the write lock of the i_data_sem and allocate blocks
  * and store the allocated blocks in the result buffer head and mark it
  * mapped.

@@ -1069,7 +1157,7 @@ int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
 		 * which were deferred till now
 		 */
 		if ((retval > 0) && buffer_delay(bh))
-			ext4_da_release_space(inode, retval, 0);
+			ext4_da_update_reserve_space(inode, retval);
 	}

 	up_write((&EXT4_I(inode)->i_data_sem));

@@ -1336,12 +1424,8 @@ static int ext4_ordered_write_end(struct file *file,
 {
 	handle_t *handle = ext4_journal_current_handle();
 	struct inode *inode = mapping->host;
-	unsigned from, to;
 	int ret = 0, ret2;

-	from = pos & (PAGE_CACHE_SIZE - 1);
-	to = from + len;
-
 	ret = ext4_jbd2_file_inode(handle, inode);

 	if (ret == 0) {

@@ -1437,36 +1521,6 @@ static int ext4_journalled_write_end(struct file *file,

 	return ret ? ret : copied;
 }
-
-/*
- * Calculate the number of metadata blocks need to reserve
- * to allocate @blocks for non extent file based file
- */
-static int ext4_indirect_calc_metadata_amount(struct inode *inode, int blocks)
-{
-	int icap = EXT4_ADDR_PER_BLOCK(inode->i_sb);
-	int ind_blks, dind_blks, tind_blks;
-
-	/* number of new indirect blocks needed */
-	ind_blks = (blocks + icap - 1) / icap;
-
-	dind_blks = (ind_blks + icap - 1) / icap;
-
-	tind_blks = 1;
-
-	return ind_blks + dind_blks + tind_blks;
-}
-
-/*
- * Calculate the number of metadata blocks need to reserve
- * to allocate given number of blocks
- */
-static int ext4_calc_metadata_amount(struct inode *inode, int blocks)
-{
-	if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
-		return ext4_ext_calc_metadata_amount(inode, blocks);
-
-	return ext4_indirect_calc_metadata_amount(inode, blocks);
-}
-
 static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
 {

@@ -1490,7 +1544,6 @@ static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
 		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 		return -ENOSPC;
 	}
-
 	/* reduce fs free blocks counter */
 	percpu_counter_sub(&sbi->s_freeblocks_counter, total);

@@ -1501,35 +1554,31 @@ static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
 	return 0;       /* success */
 }

-void ext4_da_release_space(struct inode *inode, int used, int to_free)
+static void ext4_da_release_space(struct inode *inode, int to_free)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
 	int total, mdb, mdb_free, release;

 	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
 	/* recalculate the number of metablocks still need to be reserved */
-	total = EXT4_I(inode)->i_reserved_data_blocks - used - to_free;
+	total = EXT4_I(inode)->i_reserved_data_blocks - to_free;
 	mdb = ext4_calc_metadata_amount(inode, total);

 	/* figure out how many metablocks to release */
 	BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
 	mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;

 	/* Account for allocated meta_blocks */
 	mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks;

 	release = to_free + mdb_free;

 	/* update fs free blocks counter for truncate case */
 	percpu_counter_add(&sbi->s_freeblocks_counter, release);

 	/* update per-inode reservations */
-	BUG_ON(used + to_free > EXT4_I(inode)->i_reserved_data_blocks);
-	EXT4_I(inode)->i_reserved_data_blocks -= (used + to_free);
+	BUG_ON(to_free > EXT4_I(inode)->i_reserved_data_blocks);
+	EXT4_I(inode)->i_reserved_data_blocks -= to_free;

 	BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
 	EXT4_I(inode)->i_reserved_meta_blocks = mdb;
 	EXT4_I(inode)->i_allocated_meta_blocks = 0;
 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 }

@@ -1551,7 +1600,7 @@ static void ext4_da_page_release_reservation(struct page *page,
 		}
 		curr_off = next_off;
 	} while ((bh = bh->b_this_page) != head);
-	ext4_da_release_space(page->mapping->host, 0, to_release);
+	ext4_da_release_space(page->mapping->host, to_release);
 }

@@ -2280,8 +2329,11 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
 	}

 	page = __grab_cache_page(mapping, index);
-	if (!page)
-		return -ENOMEM;
+	if (!page) {
+		ext4_journal_stop(handle);
+		ret = -ENOMEM;
+		goto out;
+	}
 	*pagep = page;

 	ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,

@@ -3590,6 +3642,16 @@ static int __ext4_get_inode_loc(struct inode *inode,
 	}
 	if (!buffer_uptodate(bh)) {
 		lock_buffer(bh);
+
+		/*
+		 * If the buffer has the write error flag, we have failed
+		 * to write out another inode in the same block. In this
+		 * case, we don't have to read the block because we may
+		 * read the old inode data successfully.
+		 */
+		if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
+			set_buffer_uptodate(bh);
+
 		if (buffer_uptodate(bh)) {
 			/* someone brought it uptodate while we waited */
 			unlock_buffer(bh);
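The reservation helpers moved above are just ceiling divisions per level of indirection: one indirect block per icap data blocks, one double-indirect per icap indirect blocks, and at most one triple-indirect. A standalone sketch of the same arithmetic (ADDR_PER_BLOCK is an assumed value matching a 4K-block filesystem with 32-bit block pointers, not a kernel constant):

#include <stdio.h>

#define ADDR_PER_BLOCK 1024	/* stand-in for EXT4_ADDR_PER_BLOCK(sb) */

static int indirect_meta_blocks(int blocks)
{
	/* same ceil-divisions as ext4_indirect_calc_metadata_amount() */
	int ind = (blocks + ADDR_PER_BLOCK - 1) / ADDR_PER_BLOCK;
	int dind = (ind + ADDR_PER_BLOCK - 1) / ADDR_PER_BLOCK;
	int tind = 1;	/* at most one new triple-indirect block */

	return ind + dind + tind;
}

int main(void)
{
	/* Reserving for 5000 data blocks: 5 ind + 1 dind + 1 tind = 7 */
	printf("%d\n", indirect_meta_blocks(5000));
	return 0;
}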
@@ -787,13 +787,16 @@ static int ext4_mb_init_cache(struct page *page, char *incore)
 		if (bh_uptodate_or_lock(bh[i]))
 			continue;

+		spin_lock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
 		if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
 			ext4_init_block_bitmap(sb, bh[i],
 						first_group + i, desc);
 			set_buffer_uptodate(bh[i]);
 			unlock_buffer(bh[i]);
+			spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
 			continue;
 		}
+		spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
 		get_bh(bh[i]);
 		bh[i]->b_end_io = end_buffer_read_sync;
 		submit_bh(READ, bh[i]);

@@ -2477,7 +2480,7 @@ static int ext4_mb_init_backend(struct super_block *sb)
 int ext4_mb_init(struct super_block *sb, int needs_recovery)
 {
 	struct ext4_sb_info *sbi = EXT4_SB(sb);
-	unsigned i;
+	unsigned i, j;
 	unsigned offset;
 	unsigned max;
 	int ret;

@@ -2537,7 +2540,7 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
 	sbi->s_mb_history_filter = EXT4_MB_HISTORY_DEFAULT;
 	sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC;

-	i = sizeof(struct ext4_locality_group) * NR_CPUS;
+	i = sizeof(struct ext4_locality_group) * nr_cpu_ids;
 	sbi->s_locality_groups = kmalloc(i, GFP_KERNEL);
 	if (sbi->s_locality_groups == NULL) {
 		clear_opt(sbi->s_mount_opt, MBALLOC);

@@ -2545,11 +2548,12 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
 		kfree(sbi->s_mb_maxs);
 		return -ENOMEM;
 	}
-	for (i = 0; i < NR_CPUS; i++) {
+	for (i = 0; i < nr_cpu_ids; i++) {
 		struct ext4_locality_group *lg;
 		lg = &sbi->s_locality_groups[i];
 		mutex_init(&lg->lg_mutex);
-		INIT_LIST_HEAD(&lg->lg_prealloc_list);
+		for (j = 0; j < PREALLOC_TB_SIZE; j++)
+			INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
 		spin_lock_init(&lg->lg_prealloc_lock);
 	}

@@ -3260,6 +3264,7 @@ static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
 				struct ext4_prealloc_space *pa)
 {
 	unsigned int len = ac->ac_o_ex.fe_len;
+
 	ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
 					&ac->ac_b_ex.fe_group,
 					&ac->ac_b_ex.fe_start);

@@ -3282,6 +3287,7 @@ static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
 static noinline_for_stack int
 ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
 {
+	int order, i;
 	struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
 	struct ext4_locality_group *lg;
 	struct ext4_prealloc_space *pa;

@@ -3322,22 +3328,29 @@ ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
 	lg = ac->ac_lg;
 	if (lg == NULL)
 		return 0;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(pa, &lg->lg_prealloc_list, pa_inode_list) {
-		spin_lock(&pa->pa_lock);
-		if (pa->pa_deleted == 0 && pa->pa_free >= ac->ac_o_ex.fe_len) {
-			atomic_inc(&pa->pa_count);
-			ext4_mb_use_group_pa(ac, pa);
-			spin_unlock(&pa->pa_lock);
-			ac->ac_criteria = 20;
-			rcu_read_unlock();
-			return 1;
-		}
-		spin_unlock(&pa->pa_lock);
+
+	order = fls(ac->ac_o_ex.fe_len) - 1;
+	if (order > PREALLOC_TB_SIZE - 1)
+		/* The max size of hash table is PREALLOC_TB_SIZE */
+		order = PREALLOC_TB_SIZE - 1;
+
+	for (i = order; i < PREALLOC_TB_SIZE; i++) {
+		rcu_read_lock();
+		list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
+					pa_inode_list) {
+			spin_lock(&pa->pa_lock);
+			if (pa->pa_deleted == 0 &&
+					pa->pa_free >= ac->ac_o_ex.fe_len) {
+				atomic_inc(&pa->pa_count);
+				ext4_mb_use_group_pa(ac, pa);
+				spin_unlock(&pa->pa_lock);
+				ac->ac_criteria = 20;
+				rcu_read_unlock();
+				return 1;
+			}
+			spin_unlock(&pa->pa_lock);
+		}
 		rcu_read_unlock();
 	}
-	rcu_read_unlock();
-
 	return 0;
 }

@@ -3560,6 +3573,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
 	pa->pa_free = pa->pa_len;
 	atomic_set(&pa->pa_count, 1);
 	spin_lock_init(&pa->pa_lock);
+	INIT_LIST_HEAD(&pa->pa_inode_list);
 	pa->pa_deleted = 0;
 	pa->pa_linear = 1;

@@ -3580,10 +3594,10 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
 	list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
 	ext4_unlock_group(sb, ac->ac_b_ex.fe_group);

-	spin_lock(pa->pa_obj_lock);
-	list_add_tail_rcu(&pa->pa_inode_list, &lg->lg_prealloc_list);
-	spin_unlock(pa->pa_obj_lock);
+	/*
+	 * We will later add the new pa to the right bucket
+	 * after updating the pa_free in ext4_mb_release_context
+	 */
 	return 0;
 }

@@ -3733,20 +3747,23 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,

 	bitmap_bh = ext4_read_block_bitmap(sb, group);
 	if (bitmap_bh == NULL) {
-		/* error handling here */
-		ext4_mb_release_desc(&e4b);
-		BUG_ON(bitmap_bh == NULL);
+		ext4_error(sb, __func__, "Error in reading block "
+				"bitmap for %lu\n", group);
+		return 0;
 	}

 	err = ext4_mb_load_buddy(sb, group, &e4b);
-	BUG_ON(err != 0); /* error handling here */
+	if (err) {
+		ext4_error(sb, __func__, "Error in loading buddy "
+				"information for %lu\n", group);
+		put_bh(bitmap_bh);
+		return 0;
+	}

 	if (needed == 0)
 		needed = EXT4_BLOCKS_PER_GROUP(sb) + 1;

 	grp = ext4_get_group_info(sb, group);
 	INIT_LIST_HEAD(&list);

 	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
 repeat:
 	ext4_lock_group(sb, group);

@@ -3903,13 +3920,18 @@ void ext4_mb_discard_inode_preallocations(struct inode *inode)
 		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);

 		err = ext4_mb_load_buddy(sb, group, &e4b);
-		BUG_ON(err != 0); /* error handling here */
+		if (err) {
+			ext4_error(sb, __func__, "Error in loading buddy "
+					"information for %lu\n", group);
+			continue;
+		}

 		bitmap_bh = ext4_read_block_bitmap(sb, group);
 		if (bitmap_bh == NULL) {
-			/* error handling here */
+			ext4_error(sb, __func__, "Error in reading block "
+					"bitmap for %lu\n", group);
 			ext4_mb_release_desc(&e4b);
-			BUG_ON(bitmap_bh == NULL);
+			continue;
 		}

 		ext4_lock_group(sb, group);

@@ -4112,22 +4134,168 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,

 }

+static noinline_for_stack void
+ext4_mb_discard_lg_preallocations(struct super_block *sb,
+					struct ext4_locality_group *lg,
+					int order, int total_entries)
+{
+	ext4_group_t group = 0;
+	struct ext4_buddy e4b;
+	struct list_head discard_list;
+	struct ext4_prealloc_space *pa, *tmp;
+	struct ext4_allocation_context *ac;
+
+	mb_debug("discard locality group preallocation\n");
+
+	INIT_LIST_HEAD(&discard_list);
+	ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
+
+	spin_lock(&lg->lg_prealloc_lock);
+	list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
+						pa_inode_list) {
+		spin_lock(&pa->pa_lock);
+		if (atomic_read(&pa->pa_count)) {
+			/*
+			 * This is the pa that we just used
+			 * for block allocation. So don't
+			 * free that
+			 */
+			spin_unlock(&pa->pa_lock);
+			continue;
+		}
+		if (pa->pa_deleted) {
+			spin_unlock(&pa->pa_lock);
+			continue;
+		}
+		/* only lg prealloc space */
+		BUG_ON(!pa->pa_linear);
+
+		/* seems this one can be freed ... */
+		pa->pa_deleted = 1;
+		spin_unlock(&pa->pa_lock);
+
+		list_del_rcu(&pa->pa_inode_list);
+		list_add(&pa->u.pa_tmp_list, &discard_list);
+
+		total_entries--;
+		if (total_entries <= 5) {
+			/*
+			 * we want to keep only 5 entries
+			 * allowing it to grow to 8. This
+			 * mak sure we don't call discard
+			 * soon for this list.
+			 */
+			break;
+		}
+	}
+	spin_unlock(&lg->lg_prealloc_lock);
+
+	list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
+
+		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
+		if (ext4_mb_load_buddy(sb, group, &e4b)) {
+			ext4_error(sb, __func__, "Error in loading buddy "
+					"information for %lu\n", group);
+			continue;
+		}
+		ext4_lock_group(sb, group);
+		list_del(&pa->pa_group_list);
+		ext4_mb_release_group_pa(&e4b, pa, ac);
+		ext4_unlock_group(sb, group);
+
+		ext4_mb_release_desc(&e4b);
+		list_del(&pa->u.pa_tmp_list);
+		call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
+	}
+	if (ac)
+		kmem_cache_free(ext4_ac_cachep, ac);
+}
+
+/*
+ * We have incremented pa_count. So it cannot be freed at this
+ * point. Also we hold lg_mutex. So no parallel allocation is
+ * possible from this lg. That means pa_free cannot be updated.
+ *
+ * A parallel ext4_mb_discard_group_preallocations is possible.
+ * which can cause the lg_prealloc_list to be updated.
+ */
+
+static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
+{
+	int order, added = 0, lg_prealloc_count = 1;
+	struct super_block *sb = ac->ac_sb;
+	struct ext4_locality_group *lg = ac->ac_lg;
+	struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
+
+	order = fls(pa->pa_free) - 1;
+	if (order > PREALLOC_TB_SIZE - 1)
+		/* The max size of hash table is PREALLOC_TB_SIZE */
+		order = PREALLOC_TB_SIZE - 1;
+	/* Add the prealloc space to lg */
+	rcu_read_lock();
+	list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
+						pa_inode_list) {
+		spin_lock(&tmp_pa->pa_lock);
+		if (tmp_pa->pa_deleted) {
+			spin_unlock(&pa->pa_lock);
+			continue;
+		}
+		if (!added && pa->pa_free < tmp_pa->pa_free) {
+			/* Add to the tail of the previous entry */
+			list_add_tail_rcu(&pa->pa_inode_list,
+						&tmp_pa->pa_inode_list);
+			added = 1;
+			/*
+			 * we want to count the total
+			 * number of entries in the list
+			 */
+		}
+		spin_unlock(&tmp_pa->pa_lock);
+		lg_prealloc_count++;
+	}
+	if (!added)
+		list_add_tail_rcu(&pa->pa_inode_list,
+					&lg->lg_prealloc_list[order]);
+	rcu_read_unlock();
+
+	/* Now trim the list to be not more than 8 elements */
+	if (lg_prealloc_count > 8) {
+		ext4_mb_discard_lg_preallocations(sb, lg,
+						order, lg_prealloc_count);
+		return;
+	}
+	return ;
+}
+
 /*
  * release all resource we used in allocation
  */
 static int ext4_mb_release_context(struct ext4_allocation_context *ac)
 {
-	if (ac->ac_pa) {
-		if (ac->ac_pa->pa_linear) {
+	struct ext4_prealloc_space *pa = ac->ac_pa;
+	if (pa) {
+		if (pa->pa_linear) {
 			/* see comment in ext4_mb_use_group_pa() */
-			spin_lock(&ac->ac_pa->pa_lock);
-			ac->ac_pa->pa_pstart += ac->ac_b_ex.fe_len;
-			ac->ac_pa->pa_lstart += ac->ac_b_ex.fe_len;
-			ac->ac_pa->pa_free -= ac->ac_b_ex.fe_len;
-			ac->ac_pa->pa_len -= ac->ac_b_ex.fe_len;
-			spin_unlock(&ac->ac_pa->pa_lock);
+			spin_lock(&pa->pa_lock);
+			pa->pa_pstart += ac->ac_b_ex.fe_len;
+			pa->pa_lstart += ac->ac_b_ex.fe_len;
+			pa->pa_free -= ac->ac_b_ex.fe_len;
+			pa->pa_len -= ac->ac_b_ex.fe_len;
+			spin_unlock(&pa->pa_lock);
+			/*
+			 * We want to add the pa to the right bucket.
+			 * Remove it from the list and while adding
+			 * make sure the list to which we are adding
+			 * doesn't grow big.
+			 */
+			if (likely(pa->pa_free)) {
+				spin_lock(pa->pa_obj_lock);
+				list_del_rcu(&pa->pa_inode_list);
+				spin_unlock(pa->pa_obj_lock);
+				ext4_mb_add_n_trim(ac);
+			}
 		}
-		ext4_mb_put_pa(ac, ac->ac_sb, ac->ac_pa);
+		ext4_mb_put_pa(ac, ac->ac_sb, pa);
 	}
 	if (ac->ac_bitmap_page)
 		page_cache_release(ac->ac_bitmap_page);

@@ -4420,11 +4588,15 @@ void ext4_mb_free_blocks(handle_t *handle, struct inode *inode,
 		count -= overflow;
 	}
 	bitmap_bh = ext4_read_block_bitmap(sb, block_group);
-	if (!bitmap_bh)
+	if (!bitmap_bh) {
+		err = -EIO;
 		goto error_return;
+	}
 	gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
-	if (!gdp)
+	if (!gdp) {
+		err = -EIO;
 		goto error_return;
+	}

 	if (in_range(ext4_block_bitmap(sb, gdp), block, count) ||
 	    in_range(ext4_inode_bitmap(sb, gdp), block, count) ||
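On the NR_CPUS to nr_cpu_ids hunks above: sizing the locality-group array by the number of possible CPUs on the booted machine, rather than the compile-time maximum, shrinks the allocation considerably on small systems. A sketch with made-up numbers, illustrative only:

#include <stdio.h>

#define NR_CPUS 4096		/* compile-time ceiling from the kernel config */

int main(void)
{
	unsigned int nr_cpu_ids = 8;	/* what the booted machine actually has */
	size_t entry = 256;		/* assumed sizeof(struct ext4_locality_group) */

	printf("NR_CPUS sizing:    %zu bytes\n", NR_CPUS * entry);
	printf("nr_cpu_ids sizing: %zu bytes\n", nr_cpu_ids * entry);
	return 0;
}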
@@ -164,11 +164,17 @@ struct ext4_free_extent {
 * Locality group:
 * we try to group all related changes together
 * so that writeback can flush/allocate them together as well
+ * Size of lg_prealloc_list hash is determined by MB_DEFAULT_GROUP_PREALLOC
+ * (512). We store prealloc space into the hash based on the pa_free blocks
+ * order value.ie, fls(pa_free)-1;
 */
+#define PREALLOC_TB_SIZE 10
 struct ext4_locality_group {
 	/* for allocator */
-	struct mutex		lg_mutex;	/* to serialize allocates */
-	struct list_head	lg_prealloc_list;/* list of preallocations */
+	/* to serialize allocates */
+	struct mutex		lg_mutex;
+	/* list of preallocations */
+	struct list_head	lg_prealloc_list[PREALLOC_TB_SIZE];
 	spinlock_t		lg_prealloc_lock;
 };
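The hash comment above can be made concrete: the bucket index is the log2 order of the preallocation's free-block count, clamped to the last slot of the table. A hedged userspace sketch; fls32() is a stand-in reimplementation of the kernel's fls(), which is not available in userspace:

#include <stdio.h>

#define PREALLOC_TB_SIZE 10

/* Userspace stand-in for the kernel's fls(): 1-based index of the
 * highest set bit, 0 for x == 0. */
static int fls32(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

/* Bucket selection per the comment above: order value, i.e. fls(pa_free)-1,
 * clamped to the PREALLOC_TB_SIZE-entry table. */
static int lg_bucket(unsigned int pa_free)
{
	int order = fls32(pa_free) - 1;

	if (order > PREALLOC_TB_SIZE - 1)
		order = PREALLOC_TB_SIZE - 1;
	return order;
}

int main(void)
{
	/* 1 -> 0, 300 -> 8, 100000 -> clamped to the last bucket, 9 */
	printf("%d %d %d\n", lg_bucket(1), lg_bucket(300), lg_bucket(100000));
	return 0;
}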
@@ -73,7 +73,7 @@ static int verify_group_input(struct super_block *sb,
 			"Inode bitmap not in group (block %llu)",
 			(unsigned long long)input->inode_bitmap);
 	else if (outside(input->inode_table, start, end) ||
-	         outside(itend - 1, start, end))
+		 outside(itend - 1, start, end))
 		ext4_warning(sb, __func__,
 			"Inode table not in group (blocks %llu-%llu)",
 			(unsigned long long)input->inode_table, itend - 1);

@@ -104,7 +104,7 @@ static int verify_group_input(struct super_block *sb,
 			(unsigned long long)input->inode_bitmap,
 			start, metaend - 1);
 	else if (inside(input->inode_table, start, metaend) ||
-	         inside(itend - 1, start, metaend))
+		 inside(itend - 1, start, metaend))
 		ext4_warning(sb, __func__,
 			"Inode table (%llu-%llu) overlaps"
 			"GDT table (%llu-%llu)",

@@ -158,9 +158,9 @@ static int extend_or_restart_transaction(handle_t *handle, int thresh,
 	if (err) {
 		if ((err = ext4_journal_restart(handle, EXT4_MAX_TRANS_DATA)))
 			return err;
-	        if ((err = ext4_journal_get_write_access(handle, bh)))
-	                return err;
+		if ((err = ext4_journal_get_write_access(handle, bh)))
+			return err;
 	}

 	return 0;
 }

@@ -416,11 +416,11 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
 	       "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
 	       gdb_num);

-        /*
-         * If we are not using the primary superblock/GDT copy don't resize,
-         * because the user tools have no way of handling this. Probably a
-         * bad time to do it anyways.
-         */
+	/*
+	 * If we are not using the primary superblock/GDT copy don't resize,
+	 * because the user tools have no way of handling this. Probably a
+	 * bad time to do it anyways.
+	 */
 	if (EXT4_SB(sb)->s_sbh->b_blocknr !=
 	    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
 		ext4_warning(sb, __func__,

@@ -507,14 +507,14 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
 	return 0;

 exit_inode:
-	//ext4_journal_release_buffer(handle, iloc.bh);
+	/* ext4_journal_release_buffer(handle, iloc.bh); */
 	brelse(iloc.bh);
 exit_dindj:
-	//ext4_journal_release_buffer(handle, dind);
+	/* ext4_journal_release_buffer(handle, dind); */
 exit_primary:
-	//ext4_journal_release_buffer(handle, *primary);
+	/* ext4_journal_release_buffer(handle, *primary); */
 exit_sbh:
-	//ext4_journal_release_buffer(handle, *primary);
+	/* ext4_journal_release_buffer(handle, *primary); */
 exit_dind:
 	brelse(dind);
 exit_bh:

@@ -818,12 +818,12 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
 	if ((err = ext4_journal_get_write_access(handle, sbi->s_sbh)))
 		goto exit_journal;

-        /*
-         * We will only either add reserved group blocks to a backup group
-         * or remove reserved blocks for the first group in a new group block.
-         * Doing both would be mean more complex code, and sane people don't
-         * use non-sparse filesystems anymore. This is already checked above.
-         */
+	/*
+	 * We will only either add reserved group blocks to a backup group
+	 * or remove reserved blocks for the first group in a new group block.
+	 * Doing both would be mean more complex code, and sane people don't
+	 * use non-sparse filesystems anymore. This is already checked above.
+	 */
 	if (gdb_off) {
 		primary = sbi->s_group_desc[gdb_num];
 		if ((err = ext4_journal_get_write_access(handle, primary)))

@@ -835,24 +835,24 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
 	} else if ((err = add_new_gdb(handle, inode, input, &primary)))
 		goto exit_journal;

-        /*
-         * OK, now we've set up the new group. Time to make it active.
-         *
-         * Current kernels don't lock all allocations via lock_super(),
-         * so we have to be safe wrt. concurrent accesses the group
-         * data. So we need to be careful to set all of the relevant
-         * group descriptor data etc. *before* we enable the group.
-         *
-         * The key field here is sbi->s_groups_count: as long as
-         * that retains its old value, nobody is going to access the new
-         * group.
-         *
-         * So first we update all the descriptor metadata for the new
-         * group; then we update the total disk blocks count; then we
-         * update the groups count to enable the group; then finally we
-         * update the free space counts so that the system can start
-         * using the new disk blocks.
-         */
+	/*
+	 * OK, now we've set up the new group. Time to make it active.
+	 *
+	 * Current kernels don't lock all allocations via lock_super(),
+	 * so we have to be safe wrt. concurrent accesses the group
+	 * data. So we need to be careful to set all of the relevant
+	 * group descriptor data etc. *before* we enable the group.
+	 *
+	 * The key field here is sbi->s_groups_count: as long as
+	 * that retains its old value, nobody is going to access the new
+	 * group.
+	 *
+	 * So first we update all the descriptor metadata for the new
+	 * group; then we update the total disk blocks count; then we
+	 * update the groups count to enable the group; then finally we
+	 * update the free space counts so that the system can start
+	 * using the new disk blocks.
+	 */

 	/* Update group descriptor block for new group */
 	gdp = (struct ext4_group_desc *)((char *)primary->b_data +

@@ -946,7 +946,8 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
 	return err;
 } /* ext4_group_add */

-/* Extend the filesystem to the new number of blocks specified. This entry
+/*
+ * Extend the filesystem to the new number of blocks specified. This entry
 * point is only used to extend the current filesystem to the end of the last
 * existing group. It can be accessed via ioctl, or by "remount,resize=<size>"
 * for emergencies (because it has no dependencies on reserved blocks).

@@ -1024,7 +1025,7 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
 		       o_blocks_count + add, add);

 	/* See if the device is actually as big as what was requested */
-	bh = sb_bread(sb, o_blocks_count + add -1);
+	bh = sb_bread(sb, o_blocks_count + add - 1);
 	if (!bh) {
 		ext4_warning(sb, __func__,
 			     "can't read last block, resize aborted");
fs/ext4/super.c | 316
@ -49,20 +49,19 @@ static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
|
||||
unsigned long journal_devnum);
|
||||
static int ext4_create_journal(struct super_block *, struct ext4_super_block *,
|
||||
unsigned int);
|
||||
static void ext4_commit_super (struct super_block * sb,
|
||||
struct ext4_super_block * es,
|
||||
int sync);
|
||||
static void ext4_mark_recovery_complete(struct super_block * sb,
|
||||
struct ext4_super_block * es);
|
||||
static void ext4_clear_journal_err(struct super_block * sb,
|
||||
struct ext4_super_block * es);
|
||||
static void ext4_commit_super(struct super_block *sb,
|
||||
struct ext4_super_block *es, int sync);
|
||||
static void ext4_mark_recovery_complete(struct super_block *sb,
|
||||
struct ext4_super_block *es);
|
||||
static void ext4_clear_journal_err(struct super_block *sb,
|
||||
struct ext4_super_block *es);
|
||||
static int ext4_sync_fs(struct super_block *sb, int wait);
|
||||
static const char *ext4_decode_error(struct super_block * sb, int errno,
|
||||
static const char *ext4_decode_error(struct super_block *sb, int errno,
|
||||
char nbuf[16]);
|
||||
static int ext4_remount (struct super_block * sb, int * flags, char * data);
|
||||
static int ext4_statfs (struct dentry * dentry, struct kstatfs * buf);
|
||||
static int ext4_remount(struct super_block *sb, int *flags, char *data);
|
||||
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
|
||||
static void ext4_unlockfs(struct super_block *sb);
|
||||
static void ext4_write_super (struct super_block * sb);
|
||||
static void ext4_write_super(struct super_block *sb);
|
||||
static void ext4_write_super_lockfs(struct super_block *sb);
|
||||
|
||||
|
||||
@ -211,15 +210,15 @@ static void ext4_handle_error(struct super_block *sb)
|
||||
if (sb->s_flags & MS_RDONLY)
|
||||
return;
|
||||
|
||||
if (!test_opt (sb, ERRORS_CONT)) {
|
||||
if (!test_opt(sb, ERRORS_CONT)) {
|
||||
journal_t *journal = EXT4_SB(sb)->s_journal;
|
||||
|
||||
EXT4_SB(sb)->s_mount_opt |= EXT4_MOUNT_ABORT;
|
||||
if (journal)
|
||||
jbd2_journal_abort(journal, -EIO);
|
||||
}
|
||||
if (test_opt (sb, ERRORS_RO)) {
|
||||
printk (KERN_CRIT "Remounting filesystem read-only\n");
|
||||
if (test_opt(sb, ERRORS_RO)) {
|
||||
printk(KERN_CRIT "Remounting filesystem read-only\n");
|
||||
sb->s_flags |= MS_RDONLY;
|
||||
}
|
||||
ext4_commit_super(sb, es, 1);
|
||||
@ -228,13 +227,13 @@ static void ext4_handle_error(struct super_block *sb)
|
||||
sb->s_id);
|
||||
}
|
||||
|
||||
void ext4_error (struct super_block * sb, const char * function,
|
||||
const char * fmt, ...)
|
||||
void ext4_error(struct super_block *sb, const char *function,
|
||||
const char *fmt, ...)
|
||||
{
|
||||
va_list args;
|
||||
|
||||
va_start(args, fmt);
|
||||
printk(KERN_CRIT "EXT4-fs error (device %s): %s: ",sb->s_id, function);
|
||||
printk(KERN_CRIT "EXT4-fs error (device %s): %s: ", sb->s_id, function);
|
||||
vprintk(fmt, args);
|
||||
printk("\n");
|
||||
va_end(args);
|
||||
@ -242,7 +241,7 @@ void ext4_error (struct super_block * sb, const char * function,
|
||||
ext4_handle_error(sb);
|
||||
}
|
||||
|
||||
static const char *ext4_decode_error(struct super_block * sb, int errno,
|
||||
static const char *ext4_decode_error(struct super_block *sb, int errno,
|
||||
char nbuf[16])
|
||||
{
|
||||
char *errstr = NULL;
|
||||
@ -278,8 +277,7 @@ static const char *ext4_decode_error(struct super_block * sb, int errno,
|
||||
/* __ext4_std_error decodes expected errors from journaling functions
|
||||
* automatically and invokes the appropriate error response. */
|
||||
|
||||
void __ext4_std_error (struct super_block * sb, const char * function,
|
||||
int errno)
|
||||
void __ext4_std_error(struct super_block *sb, const char *function, int errno)
|
||||
{
|
||||
char nbuf[16];
|
||||
const char *errstr;
|
||||
@ -292,8 +290,8 @@ void __ext4_std_error (struct super_block * sb, const char * function,
|
||||
return;
|
||||
|
||||
errstr = ext4_decode_error(sb, errno, nbuf);
|
||||
printk (KERN_CRIT "EXT4-fs error (device %s) in %s: %s\n",
|
||||
sb->s_id, function, errstr);
|
||||
printk(KERN_CRIT "EXT4-fs error (device %s) in %s: %s\n",
|
||||
sb->s_id, function, errstr);
|
||||
|
||||
ext4_handle_error(sb);
|
||||
}
|
||||
@ -308,15 +306,15 @@ void __ext4_std_error (struct super_block * sb, const char * function,
|
||||
* case we take the easy way out and panic immediately.
|
||||
*/
|
||||
|
||||
void ext4_abort (struct super_block * sb, const char * function,
|
||||
const char * fmt, ...)
|
||||
void ext4_abort(struct super_block *sb, const char *function,
|
||||
const char *fmt, ...)
|
||||
{
|
||||
va_list args;
|
||||
|
||||
printk (KERN_CRIT "ext4_abort called.\n");
|
||||
printk(KERN_CRIT "ext4_abort called.\n");
|
||||
|
||||
va_start(args, fmt);
|
||||
printk(KERN_CRIT "EXT4-fs error (device %s): %s: ",sb->s_id, function);
|
||||
printk(KERN_CRIT "EXT4-fs error (device %s): %s: ", sb->s_id, function);
|
||||
vprintk(fmt, args);
|
||||
printk("\n");
|
||||
va_end(args);
|
||||
@ -334,8 +332,8 @@ void ext4_abort (struct super_block * sb, const char * function,
|
||||
jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
|
||||
}
|
||||
|
||||
void ext4_warning (struct super_block * sb, const char * function,
|
||||
const char * fmt, ...)
|
||||
void ext4_warning(struct super_block *sb, const char *function,
|
||||
const char *fmt, ...)
|
||||
{
|
||||
va_list args;
|
||||
|
||||
@ -496,7 +494,7 @@ static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
|
||||
}
|
||||
}
|
||||
|
||||
static void ext4_put_super (struct super_block * sb)
|
||||
static void ext4_put_super(struct super_block *sb)
|
||||
{
|
||||
struct ext4_sb_info *sbi = EXT4_SB(sb);
|
||||
struct ext4_super_block *es = sbi->s_es;
|
||||
@ -647,7 +645,8 @@ static void ext4_clear_inode(struct inode *inode)
|
||||
&EXT4_I(inode)->jinode);
|
||||
}
|
||||
|
||||
static inline void ext4_show_quota_options(struct seq_file *seq, struct super_block *sb)
|
||||
static inline void ext4_show_quota_options(struct seq_file *seq,
|
||||
struct super_block *sb)
|
||||
{
|
||||
#if defined(CONFIG_QUOTA)
|
||||
struct ext4_sb_info *sbi = EXT4_SB(sb);
|
||||
@ -822,8 +821,8 @@ static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
|
||||
}
|
||||
|
||||
#ifdef CONFIG_QUOTA
|
||||
#define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group")
|
||||
#define QTYPE2MOPT(on, t) ((t)==USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))
|
||||
#define QTYPE2NAME(t) ((t) == USRQUOTA?"user":"group")
|
||||
#define QTYPE2MOPT(on, t) ((t) == USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))
|
||||
|
||||
static int ext4_dquot_initialize(struct inode *inode, int type);
|
||||
static int ext4_dquot_drop(struct inode *inode);
|
||||
@ -991,12 +990,12 @@ static ext4_fsblk_t get_sb_block(void **data)
	return sb_block;
}

static int parse_options (char *options, struct super_block *sb,
			  unsigned int *inum, unsigned long *journal_devnum,
			  ext4_fsblk_t *n_blocks_count, int is_remount)
static int parse_options(char *options, struct super_block *sb,
			 unsigned int *inum, unsigned long *journal_devnum,
			 ext4_fsblk_t *n_blocks_count, int is_remount)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	char * p;
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int data_opt = 0;
	int option;
@ -1009,7 +1008,7 @@ static int parse_options (char *options, struct super_block *sb,
	if (!options)
		return 1;

	while ((p = strsep (&options, ",")) != NULL) {
	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;
@ -1017,16 +1016,16 @@ static int parse_options (char *options, struct super_block *sb,
		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_bsd_df:
			clear_opt (sbi->s_mount_opt, MINIX_DF);
			clear_opt(sbi->s_mount_opt, MINIX_DF);
			break;
		case Opt_minix_df:
			set_opt (sbi->s_mount_opt, MINIX_DF);
			set_opt(sbi->s_mount_opt, MINIX_DF);
			break;
		case Opt_grpid:
			set_opt (sbi->s_mount_opt, GRPID);
			set_opt(sbi->s_mount_opt, GRPID);
			break;
		case Opt_nogrpid:
			clear_opt (sbi->s_mount_opt, GRPID);
			clear_opt(sbi->s_mount_opt, GRPID);
			break;
		case Opt_resuid:
			if (match_int(&args[0], &option))
@ -1043,41 +1042,41 @@ static int parse_options (char *options, struct super_block *sb,
			/* *sb_block = match_int(&args[0]); */
			break;
		case Opt_err_panic:
			clear_opt (sbi->s_mount_opt, ERRORS_CONT);
			clear_opt (sbi->s_mount_opt, ERRORS_RO);
			set_opt (sbi->s_mount_opt, ERRORS_PANIC);
			clear_opt(sbi->s_mount_opt, ERRORS_CONT);
			clear_opt(sbi->s_mount_opt, ERRORS_RO);
			set_opt(sbi->s_mount_opt, ERRORS_PANIC);
			break;
		case Opt_err_ro:
			clear_opt (sbi->s_mount_opt, ERRORS_CONT);
			clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
			set_opt (sbi->s_mount_opt, ERRORS_RO);
			clear_opt(sbi->s_mount_opt, ERRORS_CONT);
			clear_opt(sbi->s_mount_opt, ERRORS_PANIC);
			set_opt(sbi->s_mount_opt, ERRORS_RO);
			break;
		case Opt_err_cont:
			clear_opt (sbi->s_mount_opt, ERRORS_RO);
			clear_opt (sbi->s_mount_opt, ERRORS_PANIC);
			set_opt (sbi->s_mount_opt, ERRORS_CONT);
			clear_opt(sbi->s_mount_opt, ERRORS_RO);
			clear_opt(sbi->s_mount_opt, ERRORS_PANIC);
			set_opt(sbi->s_mount_opt, ERRORS_CONT);
			break;
		case Opt_nouid32:
			set_opt (sbi->s_mount_opt, NO_UID32);
			set_opt(sbi->s_mount_opt, NO_UID32);
			break;
		case Opt_nocheck:
			clear_opt (sbi->s_mount_opt, CHECK);
			clear_opt(sbi->s_mount_opt, CHECK);
			break;
		case Opt_debug:
			set_opt (sbi->s_mount_opt, DEBUG);
			set_opt(sbi->s_mount_opt, DEBUG);
			break;
		case Opt_oldalloc:
			set_opt (sbi->s_mount_opt, OLDALLOC);
			set_opt(sbi->s_mount_opt, OLDALLOC);
			break;
		case Opt_orlov:
			clear_opt (sbi->s_mount_opt, OLDALLOC);
			clear_opt(sbi->s_mount_opt, OLDALLOC);
			break;
#ifdef CONFIG_EXT4DEV_FS_XATTR
		case Opt_user_xattr:
			set_opt (sbi->s_mount_opt, XATTR_USER);
			set_opt(sbi->s_mount_opt, XATTR_USER);
			break;
		case Opt_nouser_xattr:
			clear_opt (sbi->s_mount_opt, XATTR_USER);
			clear_opt(sbi->s_mount_opt, XATTR_USER);
			break;
#else
		case Opt_user_xattr:
@ -1115,7 +1114,7 @@ static int parse_options (char *options, struct super_block *sb,
				       "journal on remount\n");
				return 0;
			}
			set_opt (sbi->s_mount_opt, UPDATE_JOURNAL);
			set_opt(sbi->s_mount_opt, UPDATE_JOURNAL);
			break;
		case Opt_journal_inum:
			if (is_remount) {
@ -1145,7 +1144,7 @@ static int parse_options (char *options, struct super_block *sb,
			set_opt(sbi->s_mount_opt, JOURNAL_CHECKSUM);
			break;
		case Opt_noload:
			set_opt (sbi->s_mount_opt, NOLOAD);
			set_opt(sbi->s_mount_opt, NOLOAD);
			break;
		case Opt_commit:
			if (match_int(&args[0], &option))
@ -1331,7 +1330,7 @@ static int parse_options (char *options, struct super_block *sb,
				       "on this filesystem, use tune2fs\n");
				return 0;
			}
			set_opt (sbi->s_mount_opt, EXTENTS);
			set_opt(sbi->s_mount_opt, EXTENTS);
			break;
		case Opt_noextents:
			/*
@ -1348,7 +1347,7 @@ static int parse_options (char *options, struct super_block *sb,
			       "-o noextents options\n");
				return 0;
			}
			clear_opt (sbi->s_mount_opt, EXTENTS);
			clear_opt(sbi->s_mount_opt, EXTENTS);
			break;
		case Opt_i_version:
			set_opt(sbi->s_mount_opt, I_VERSION);
@ -1374,9 +1373,9 @@ static int parse_options (char *options, struct super_block *sb,
			set_opt(sbi->s_mount_opt, DELALLOC);
			break;
		default:
			printk (KERN_ERR
				"EXT4-fs: Unrecognized mount option \"%s\" "
				"or missing value\n", p);
			printk(KERN_ERR
			       "EXT4-fs: Unrecognized mount option \"%s\" "
			       "or missing value\n", p);
			return 0;
		}
	}
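The parse_options() hunks above are mechanical, but the loop's shape is the standard kernel option parser: strsep() peels one comma-separated token per iteration, the token is classified, and each case just sets or clears a bit in sbi->s_mount_opt. A self-contained sketch of that shape, with invented option names and flag values, and strcmp() standing in for the kernel's match_token() table:

#define _DEFAULT_SOURCE		/* for strsep() on glibc */
#include <stdio.h>
#include <string.h>

#define OPT_MINIX_DF	0x0001
#define OPT_GRPID	0x0002
#define OPT_DEBUG	0x0004

#define set_opt(w, f)	((w) |= (f))
#define clear_opt(w, f)	((w) &= ~(f))
#define test_opt(w, f)	((w) & (f))

static int parse_options(char *options, unsigned long *mount_opt)
{
	char *p;

	while ((p = strsep(&options, ",")) != NULL) {
		if (!*p)
			continue;	/* skip empty tokens, as ext4 does */
		if (!strcmp(p, "minixdf"))
			set_opt(*mount_opt, OPT_MINIX_DF);
		else if (!strcmp(p, "bsddf"))
			clear_opt(*mount_opt, OPT_MINIX_DF);
		else if (!strcmp(p, "grpid"))
			set_opt(*mount_opt, OPT_GRPID);
		else if (!strcmp(p, "debug"))
			set_opt(*mount_opt, OPT_DEBUG);
		else {
			fprintf(stderr, "Unrecognized mount option \"%s\"\n", p);
			return 0;	/* fail the whole parse, like ext4 */
		}
	}
	return 1;
}

int main(void)
{
	char opts[] = "minixdf,grpid,debug";
	unsigned long mount_opt = 0;

	if (parse_options(opts, &mount_opt))
		printf("grpid: %s\n", test_opt(mount_opt, OPT_GRPID) ? "on" : "off");
	return 0;
}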
@ -1423,31 +1422,31 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
	int res = 0;

	if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
		printk (KERN_ERR "EXT4-fs warning: revision level too high, "
			"forcing read-only mode\n");
		printk(KERN_ERR "EXT4-fs warning: revision level too high, "
		       "forcing read-only mode\n");
		res = MS_RDONLY;
	}
	if (read_only)
		return res;
	if (!(sbi->s_mount_state & EXT4_VALID_FS))
		printk (KERN_WARNING "EXT4-fs warning: mounting unchecked fs, "
			"running e2fsck is recommended\n");
		printk(KERN_WARNING "EXT4-fs warning: mounting unchecked fs, "
		       "running e2fsck is recommended\n");
	else if ((sbi->s_mount_state & EXT4_ERROR_FS))
		printk (KERN_WARNING
			"EXT4-fs warning: mounting fs with errors, "
			"running e2fsck is recommended\n");
		printk(KERN_WARNING
		       "EXT4-fs warning: mounting fs with errors, "
		       "running e2fsck is recommended\n");
	else if ((__s16) le16_to_cpu(es->s_max_mnt_count) >= 0 &&
		 le16_to_cpu(es->s_mnt_count) >=
		 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
		printk (KERN_WARNING
			"EXT4-fs warning: maximal mount count reached, "
			"running e2fsck is recommended\n");
		printk(KERN_WARNING
		       "EXT4-fs warning: maximal mount count reached, "
		       "running e2fsck is recommended\n");
	else if (le32_to_cpu(es->s_checkinterval) &&
		 (le32_to_cpu(es->s_lastcheck) +
		  le32_to_cpu(es->s_checkinterval) <= get_seconds()))
		printk (KERN_WARNING
			"EXT4-fs warning: checktime reached, "
			"running e2fsck is recommended\n");
		printk(KERN_WARNING
		       "EXT4-fs warning: checktime reached, "
		       "running e2fsck is recommended\n");
#if 0
	/* @@@ We _will_ want to clear the valid bit if we find
	 * inconsistencies, to force a fsck at reboot. But for
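One subtlety in the hunk above survives the restyling: s_max_mnt_count is cast to __s16 before the comparison, so a stored 0xFFFF reads as -1 and disables the maximal-mount-count warning entirely (this is what tune2fs -c -1 writes). A small sketch of that signedness trick, with plain C99 types standing in for __s16/le16_to_cpu:

#include <stdint.h>
#include <stdio.h>

static int check_due(uint16_t mnt_count, uint16_t max_mnt_count)
{
	/* Interpreted as signed: 0xFFFF == -1 means "never force a check". */
	return (int16_t)max_mnt_count >= 0 &&
	       mnt_count >= (uint16_t)(int16_t)max_mnt_count;
}

int main(void)
{
	printf("%d\n", check_due(25, 20));	/* 1: e2fsck recommended */
	printf("%d\n", check_due(25, 0xFFFF));	/* 0: checking disabled */
	return 0;
}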
@ -1506,14 +1505,13 @@ static int ext4_fill_flex_info(struct super_block *sb)

	flex_group_count = (sbi->s_groups_count + groups_per_flex - 1) /
		groups_per_flex;
	sbi->s_flex_groups = kmalloc(flex_group_count *
	sbi->s_flex_groups = kzalloc(flex_group_count *
				     sizeof(struct flex_groups), GFP_KERNEL);
	if (sbi->s_flex_groups == NULL) {
		printk(KERN_ERR "EXT4-fs: not enough memory\n");
		printk(KERN_ERR "EXT4-fs: not enough memory for "
		       "%lu flex groups\n", flex_group_count);
		goto failed;
	}
	memset(sbi->s_flex_groups, 0, flex_group_count *
	       sizeof(struct flex_groups));

	gdp = ext4_get_group_desc(sb, 1, &bh);
	block_bitmap = ext4_block_bitmap(sb, gdp) - 1;
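The kzalloc() switch above folds allocation and zeroing into one call, which is what lets the separate memset() go away. The same cleanup expressed in userspace terms, with calloc() playing kzalloc()'s role (flex_groups here is a stand-in struct):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct flex_groups { long free_blocks, free_inodes; };

int main(void)
{
	size_t count = 16;

	/* Before: allocate, then zero by hand. */
	struct flex_groups *a = malloc(count * sizeof(*a));
	if (a)
		memset(a, 0, count * sizeof(*a));

	/* After: one call that allocates zeroed memory (kzalloc/calloc). */
	struct flex_groups *b = calloc(count, sizeof(*b));

	if (a && b)
		printf("%ld %ld\n", a[0].free_blocks, b[0].free_inodes);
	free(a);
	free(b);
	return 0;
}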
@ -1597,16 +1595,14 @@ static int ext4_check_descriptors(struct super_block *sb)
			(EXT4_BLOCKS_PER_GROUP(sb) - 1);

		block_bitmap = ext4_block_bitmap(sb, gdp);
		if (block_bitmap < first_block || block_bitmap > last_block)
		{
		if (block_bitmap < first_block || block_bitmap > last_block) {
			printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
			       "Block bitmap for group %lu not in group "
			       "(block %llu)!", i, block_bitmap);
			return 0;
		}
		inode_bitmap = ext4_inode_bitmap(sb, gdp);
		if (inode_bitmap < first_block || inode_bitmap > last_block)
		{
		if (inode_bitmap < first_block || inode_bitmap > last_block) {
			printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
			       "Inode bitmap for group %lu not in group "
			       "(block %llu)!", i, inode_bitmap);
@ -1614,26 +1610,28 @@ static int ext4_check_descriptors(struct super_block *sb)
		}
		inode_table = ext4_inode_table(sb, gdp);
		if (inode_table < first_block ||
		    inode_table + sbi->s_itb_per_group - 1 > last_block)
		{
		    inode_table + sbi->s_itb_per_group - 1 > last_block) {
			printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
			       "Inode table for group %lu not in group "
			       "(block %llu)!", i, inode_table);
			return 0;
		}
		spin_lock(sb_bgl_lock(sbi, i));
		if (!ext4_group_desc_csum_verify(sbi, i, gdp)) {
			printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
			       "Checksum for group %lu failed (%u!=%u)\n",
			       i, le16_to_cpu(ext4_group_desc_csum(sbi, i,
			       gdp)), le16_to_cpu(gdp->bg_checksum));
			return 0;
			if (!(sb->s_flags & MS_RDONLY))
				return 0;
		}
		spin_unlock(sb_bgl_lock(sbi, i));
		if (!flexbg_flag)
			first_block += EXT4_BLOCKS_PER_GROUP(sb);
	}

	ext4_free_blocks_count_set(sbi->s_es, ext4_count_free_blocks(sb));
	sbi->s_es->s_free_inodes_count=cpu_to_le32(ext4_count_free_inodes(sb));
	sbi->s_es->s_free_inodes_count = cpu_to_le32(ext4_count_free_inodes(sb));
	return 1;
}

@ -1654,8 +1652,8 @@ static int ext4_check_descriptors(struct super_block *sb)
 * e2fsck was run on this filesystem, and it must have already done the orphan
 * inode cleanup for us, so we can safely abort without any further action.
 */
static void ext4_orphan_cleanup (struct super_block * sb,
				 struct ext4_super_block * es)
static void ext4_orphan_cleanup(struct super_block *sb,
				struct ext4_super_block *es)
{
	unsigned int s_flags = sb->s_flags;
	int nr_orphans = 0, nr_truncates = 0;
@ -1732,7 +1730,7 @@ static void ext4_orphan_cleanup (struct super_block * sb,
		iput(inode);  /* The delete magic happens here! */
	}

#define PLURAL(x) (x), ((x)==1) ? "" : "s"
#define PLURAL(x) (x), ((x) == 1) ? "" : "s"

	if (nr_orphans)
		printk(KERN_INFO "EXT4-fs: %s: %d orphan inode%s deleted\n",
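PLURAL(x) above is a small but handy trick: the macro expands to two printf arguments, the count and a suffix string, so one format string covers both singular and plural. For example:

#include <stdio.h>

#define PLURAL(x) (x), ((x) == 1) ? "" : "s"

int main(void)
{
	printf("%d orphan inode%s deleted\n", PLURAL(1));	/* ...inode... */
	printf("%d orphan inode%s deleted\n", PLURAL(3));	/* ...inodes... */
	return 0;
}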
@ -1899,12 +1897,12 @@ static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
	return 0;
}

static int ext4_fill_super (struct super_block *sb, void *data, int silent)
static int ext4_fill_super(struct super_block *sb, void *data, int silent)
				__releases(kernel_lock)
				__acquires(kernel_lock)

{
	struct buffer_head * bh;
	struct buffer_head *bh;
	struct ext4_super_block *es = NULL;
	struct ext4_sb_info *sbi;
	ext4_fsblk_t block;
@ -1953,7 +1951,7 @@ static int ext4_fill_super (struct super_block *sb, void *data, int silent)
	}

	if (!(bh = sb_bread(sb, logical_sb_block))) {
		printk (KERN_ERR "EXT4-fs: unable to read superblock\n");
		printk(KERN_ERR "EXT4-fs: unable to read superblock\n");
		goto out_fail;
	}
	/*
@ -2026,8 +2024,8 @@ static int ext4_fill_super (struct super_block *sb, void *data, int silent)
		set_opt(sbi->s_mount_opt, DELALLOC);


	if (!parse_options ((char *) data, sb, &journal_inum, &journal_devnum,
			    NULL, 0))
	if (!parse_options((char *) data, sb, &journal_inum, &journal_devnum,
			   NULL, 0))
		goto failed_mount;

	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
@ -2102,7 +2100,7 @@ static int ext4_fill_super (struct super_block *sb, void *data, int silent)
		goto failed_mount;
	}

	brelse (bh);
	brelse(bh);
	logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
	offset = do_div(logical_sb_block, blocksize);
	bh = sb_bread(sb, logical_sb_block);
@ -2114,8 +2112,8 @@ static int ext4_fill_super (struct super_block *sb, void *data, int silent)
		es = (struct ext4_super_block *)(((char *)bh->b_data) + offset);
		sbi->s_es = es;
		if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
			printk (KERN_ERR
				"EXT4-fs: Magic mismatch, very weird !\n");
			printk(KERN_ERR
			       "EXT4-fs: Magic mismatch, very weird !\n");
			goto failed_mount;
		}
	}
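The magic check above compares the raw on-disk value against cpu_to_le16(EXT4_SUPER_MAGIC): the constant is converted once rather than byte-swapping every field read from disk. A portable userspace sketch of checking a little-endian 16-bit magic (EXT4_SUPER_MAGIC is 0xEF53):

#include <stdint.h>
#include <stdio.h>

#define EXT4_SUPER_MAGIC 0xEF53

/* Assemble a 16-bit little-endian value regardless of host endianness. */
static uint16_t le16_load(const unsigned char *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	unsigned char disk[2] = { 0x53, 0xEF };	/* 0xEF53, low byte first */

	if (le16_load(disk) != EXT4_SUPER_MAGIC)
		fprintf(stderr, "Magic mismatch, very weird !\n");
	else
		printf("superblock magic OK\n");
	return 0;
}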
@ -2132,9 +2130,9 @@ static int ext4_fill_super (struct super_block *sb, void *data, int silent)
		if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
		    (!is_power_of_2(sbi->s_inode_size)) ||
		    (sbi->s_inode_size > blocksize)) {
			printk (KERN_ERR
				"EXT4-fs: unsupported inode size: %d\n",
				sbi->s_inode_size);
			printk(KERN_ERR
			       "EXT4-fs: unsupported inode size: %d\n",
			       sbi->s_inode_size);
			goto failed_mount;
		}
		if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE)
@ -2166,20 +2164,20 @@ static int ext4_fill_super (struct super_block *sb, void *data, int silent)
	sbi->s_mount_state = le16_to_cpu(es->s_state);
	sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb));
	sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb));
	for (i=0; i < 4; i++)
	for (i = 0; i < 4; i++)
		sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
	sbi->s_def_hash_version = es->s_def_hash_version;

	if (sbi->s_blocks_per_group > blocksize * 8) {
		printk (KERN_ERR
			"EXT4-fs: #blocks per group too big: %lu\n",
			sbi->s_blocks_per_group);
		printk(KERN_ERR
		       "EXT4-fs: #blocks per group too big: %lu\n",
		       sbi->s_blocks_per_group);
		goto failed_mount;
	}
	if (sbi->s_inodes_per_group > blocksize * 8) {
		printk (KERN_ERR
			"EXT4-fs: #inodes per group too big: %lu\n",
			sbi->s_inodes_per_group);
		printk(KERN_ERR
		       "EXT4-fs: #inodes per group too big: %lu\n",
		       sbi->s_inodes_per_group);
		goto failed_mount;
	}

@ -2213,10 +2211,10 @@ static int ext4_fill_super (struct super_block *sb, void *data, int silent)
	sbi->s_groups_count = blocks_count;
	db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
		   EXT4_DESC_PER_BLOCK(sb);
	sbi->s_group_desc = kmalloc(db_count * sizeof (struct buffer_head *),
	sbi->s_group_desc = kmalloc(db_count * sizeof(struct buffer_head *),
				    GFP_KERNEL);
	if (sbi->s_group_desc == NULL) {
		printk (KERN_ERR "EXT4-fs: not enough memory\n");
		printk(KERN_ERR "EXT4-fs: not enough memory\n");
		goto failed_mount;
	}

@ -2226,13 +2224,13 @@ static int ext4_fill_super (struct super_block *sb, void *data, int silent)
		block = descriptor_loc(sb, logical_sb_block, i);
		sbi->s_group_desc[i] = sb_bread(sb, block);
		if (!sbi->s_group_desc[i]) {
			printk (KERN_ERR "EXT4-fs: "
				"can't read group descriptor %d\n", i);
			printk(KERN_ERR "EXT4-fs: "
			       "can't read group descriptor %d\n", i);
			db_count = i;
			goto failed_mount2;
		}
	}
	if (!ext4_check_descriptors (sb)) {
	if (!ext4_check_descriptors(sb)) {
		printk(KERN_ERR "EXT4-fs: group descriptors corrupted!\n");
		goto failed_mount2;
	}
@ -2308,11 +2306,11 @@ static int ext4_fill_super (struct super_block *sb, void *data, int silent)
		    EXT4_SB(sb)->s_journal->j_failed_commit) {
			printk(KERN_CRIT "EXT4-fs error (device %s): "
			       "ext4_fill_super: Journal transaction "
			       "%u is corrupt\n", sb->s_id,
			       "%u is corrupt\n", sb->s_id,
			       EXT4_SB(sb)->s_journal->j_failed_commit);
			if (test_opt (sb, ERRORS_RO)) {
				printk (KERN_CRIT
					"Mounting filesystem read-only\n");
			if (test_opt(sb, ERRORS_RO)) {
				printk(KERN_CRIT
				       "Mounting filesystem read-only\n");
				sb->s_flags |= MS_RDONLY;
				EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
				es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
@ -2332,9 +2330,9 @@ static int ext4_fill_super (struct super_block *sb, void *data, int silent)
			goto failed_mount3;
	} else {
		if (!silent)
			printk (KERN_ERR
				"ext4: No journal on filesystem on %s\n",
				sb->s_id);
			printk(KERN_ERR
			       "ext4: No journal on filesystem on %s\n",
			       sb->s_id);
		goto failed_mount3;
	}

@ -2418,7 +2416,7 @@ static int ext4_fill_super (struct super_block *sb, void *data, int silent)
		goto failed_mount4;
	}

	ext4_setup_super (sb, es, sb->s_flags & MS_RDONLY);
	ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY);

	/* determine the minimum size of new large inodes, if present */
	if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
@ -2457,12 +2455,12 @@ static int ext4_fill_super (struct super_block *sb, void *data, int silent)
		ext4_orphan_cleanup(sb, es);
		EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
	if (needs_recovery)
		printk (KERN_INFO "EXT4-fs: recovery complete.\n");
		printk(KERN_INFO "EXT4-fs: recovery complete.\n");
	ext4_mark_recovery_complete(sb, es);
	printk (KERN_INFO "EXT4-fs: mounted filesystem with %s data mode.\n",
		test_opt(sb,DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ? "journal":
		test_opt(sb,DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA ? "ordered":
		"writeback");
	printk(KERN_INFO "EXT4-fs: mounted filesystem with %s data mode.\n",
	       test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ? "journal":
	       test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA ? "ordered":
	       "writeback");

	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
		printk(KERN_WARNING "EXT4-fs: Ignoring delalloc option - "
@ -2575,14 +2573,14 @@ static journal_t *ext4_get_journal(struct super_block *sb,
static journal_t *ext4_get_dev_journal(struct super_block *sb,
				       dev_t j_dev)
{
	struct buffer_head * bh;
	struct buffer_head *bh;
	journal_t *journal;
	ext4_fsblk_t start;
	ext4_fsblk_t len;
	int hblock, blocksize;
	ext4_fsblk_t sb_block;
	unsigned long offset;
	struct ext4_super_block * es;
	struct ext4_super_block *es;
	struct block_device *bdev;

	bdev = ext4_blkdev_get(j_dev);
@ -2697,8 +2695,8 @@ static int ext4_load_journal(struct super_block *sb,
			       "unavailable, cannot proceed.\n");
			return -EROFS;
		}
		printk (KERN_INFO "EXT4-fs: write access will "
			"be enabled during recovery.\n");
		printk(KERN_INFO "EXT4-fs: write access will "
		       "be enabled during recovery.\n");
	}
}

@ -2751,8 +2749,8 @@ static int ext4_load_journal(struct super_block *sb,
	return 0;
}

static int ext4_create_journal(struct super_block * sb,
			       struct ext4_super_block * es,
static int ext4_create_journal(struct super_block *sb,
			       struct ext4_super_block *es,
			       unsigned int journal_inum)
{
	journal_t *journal;
@ -2793,9 +2791,8 @@ static int ext4_create_journal(struct super_block * sb,
	return 0;
}

static void ext4_commit_super (struct super_block * sb,
			       struct ext4_super_block * es,
			       int sync)
static void ext4_commit_super(struct super_block *sb,
			      struct ext4_super_block *es, int sync)
{
	struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;

@ -2816,8 +2813,8 @@ static void ext4_commit_super (struct super_block * sb,
 * remounting) the filesystem readonly, then we will end up with a
 * consistent fs on disk. Record that fact.
 */
static void ext4_mark_recovery_complete(struct super_block * sb,
					struct ext4_super_block * es)
static void ext4_mark_recovery_complete(struct super_block *sb,
					struct ext4_super_block *es)
{
	journal_t *journal = EXT4_SB(sb)->s_journal;

@ -2839,8 +2836,8 @@ static void ext4_mark_recovery_complete(struct super_block * sb,
 * has recorded an error from a previous lifetime, move that error to the
 * main filesystem now.
 */
static void ext4_clear_journal_err(struct super_block * sb,
				   struct ext4_super_block * es)
static void ext4_clear_journal_err(struct super_block *sb,
				   struct ext4_super_block *es)
{
	journal_t *journal;
	int j_errno;
@ -2865,7 +2862,7 @@ static void ext4_clear_journal_err(struct super_block * sb,

		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
		es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
		ext4_commit_super (sb, es, 1);
		ext4_commit_super(sb, es, 1);

		jbd2_journal_clear_err(journal);
	}
@ -2898,7 +2895,7 @@ int ext4_force_commit(struct super_block *sb)
 * This implicitly triggers the writebehind on sync().
 */

static void ext4_write_super (struct super_block * sb)
static void ext4_write_super(struct super_block *sb)
{
	if (mutex_trylock(&sb->s_lock) != 0)
		BUG();
@ -2954,13 +2951,14 @@ static void ext4_unlockfs(struct super_block *sb)
	}
}

static int ext4_remount (struct super_block * sb, int * flags, char * data)
static int ext4_remount(struct super_block *sb, int *flags, char *data)
{
	struct ext4_super_block * es;
	struct ext4_super_block *es;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t n_blocks_count = 0;
	unsigned long old_sb_flags;
	struct ext4_mount_options old_opts;
	ext4_group_t g;
	int err;
#ifdef CONFIG_QUOTA
	int i;
@ -3038,6 +3036,26 @@ static int ext4_remount (struct super_block * sb, int * flags, char * data)
				goto restore_opts;
			}

			/*
			 * Make sure the group descriptor checksums
			 * are sane. If they aren't, refuse to
			 * remount r/w.
			 */
			for (g = 0; g < sbi->s_groups_count; g++) {
				struct ext4_group_desc *gdp =
					ext4_get_group_desc(sb, g, NULL);

				if (!ext4_group_desc_csum_verify(sbi, g, gdp)) {
					printk(KERN_ERR
					       "EXT4-fs: ext4_remount: "
					       "Checksum for group %lu failed (%u!=%u)\n",
					       g, le16_to_cpu(ext4_group_desc_csum(sbi, g, gdp)),
					       le16_to_cpu(gdp->bg_checksum));
					err = -EINVAL;
					goto restore_opts;
				}
			}

			/*
			 * If we have an unprocessed orphan list hanging
			 * around from a previously readonly bdev mount,
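The new remount loop above re-verifies every group descriptor checksum before letting a read-only mount go read-write. ext4's ext4_group_desc_csum() is a CRC-16 over the filesystem UUID, the group number, and the descriptor contents; the sketch below shows that idea with a toy descriptor layout (the struct, seed, and field coverage are simplified, not ext4's exact on-disk rules):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t crc16(uint16_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)	/* CRC-16-ANSI, reflected */
			crc = (crc >> 1) ^ ((crc & 1) ? 0xA001 : 0);
	}
	return crc;
}

struct group_desc {			/* toy layout, not ext4's */
	uint32_t block_bitmap;
	uint32_t inode_bitmap;
	uint16_t checksum;		/* excluded from the CRC */
};

static uint16_t desc_csum(const uint8_t uuid[16], uint32_t group,
			  const struct group_desc *gdp)
{
	uint16_t crc = crc16(0xFFFF, uuid, 16);

	crc = crc16(crc, &group, sizeof(group));
	return crc16(crc, gdp, offsetof(struct group_desc, checksum));
}

int main(void)
{
	uint8_t uuid[16] = { 0 };
	struct group_desc gd = { .block_bitmap = 261, .inode_bitmap = 262 };

	gd.checksum = desc_csum(uuid, 0, &gd);
	printf("verify: %s\n",
	       desc_csum(uuid, 0, &gd) == gd.checksum ? "ok" : "bad");
	return 0;
}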
@ -3063,7 +3081,7 @@ static int ext4_remount (struct super_block * sb, int * flags, char * data)
			sbi->s_mount_state = le16_to_cpu(es->s_state);
			if ((err = ext4_group_extend(sb, es, n_blocks_count)))
				goto restore_opts;
			if (!ext4_setup_super (sb, es, 0))
			if (!ext4_setup_super(sb, es, 0))
				sb->s_flags &= ~MS_RDONLY;
		}
	}
@ -3093,7 +3111,7 @@ static int ext4_remount (struct super_block * sb, int * flags, char * data)
	return err;
}

static int ext4_statfs (struct dentry * dentry, struct kstatfs * buf)
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct super_block *sb = dentry->d_sb;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
@ -3331,12 +3349,12 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
		}
		/* Journaling quota? */
		if (EXT4_SB(sb)->s_qf_names[type]) {
			/* Quotafile not of fs root? */
			/* Quotafile not in fs root? */
			if (nd.path.dentry->d_parent->d_inode != sb->s_root->d_inode)
				printk(KERN_WARNING
				       "EXT4-fs: Quota file not on filesystem root. "
				       "Journaled quota will not work.\n");
		}
	}

	/*
	 * When we journal data on quota file, we have to flush journal to see

fs/ext4/xattr.c

@ -1512,7 +1512,7 @@ static inline void ext4_xattr_hash_entry(struct ext4_xattr_header *header,
	char *name = entry->e_name;
	int n;

	for (n=0; n < entry->e_name_len; n++) {
	for (n = 0; n < entry->e_name_len; n++) {
		hash = (hash << NAME_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
		       *name++;
|
||||
jinode->i_flags |= JI_COMMIT_RUNNING;
|
||||
spin_unlock(&journal->j_list_lock);
|
||||
err = filemap_fdatawait(jinode->i_vfs_inode->i_mapping);
|
||||
if (!ret)
|
||||
ret = err;
|
||||
if (err) {
|
||||
/*
|
||||
* Because AS_EIO is cleared by
|
||||
* wait_on_page_writeback_range(), set it again so
|
||||
* that user process can get -EIO from fsync().
|
||||
*/
|
||||
set_bit(AS_EIO,
|
||||
&jinode->i_vfs_inode->i_mapping->flags);
|
||||
|
||||
if (!ret)
|
||||
ret = err;
|
||||
}
|
||||
spin_lock(&journal->j_list_lock);
|
||||
jinode->i_flags &= ~JI_COMMIT_RUNNING;
|
||||
wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
|
||||
@ -670,8 +680,14 @@ void jbd2_journal_commit_transaction(journal_t *journal)
|
||||
* commit block, which happens below in such setting.
|
||||
*/
|
||||
err = journal_finish_inode_data_buffers(journal, commit_transaction);
|
||||
if (err)
|
||||
jbd2_journal_abort(journal, err);
|
||||
if (err) {
|
||||
char b[BDEVNAME_SIZE];
|
||||
|
||||
printk(KERN_WARNING
|
||||
"JBD2: Detected IO errors while flushing file data "
|
||||
"on %s\n", bdevname(journal->j_fs_dev, b));
|
||||
err = 0;
|
||||
}
|
||||
|
||||
/* Lo and behold: we have just managed to send a transaction to
|
||||
the log. Before we can commit it, wait for the IO so far to
|
||||
|
@ -68,7 +68,6 @@ EXPORT_SYMBOL(jbd2_journal_set_features);
|
||||
EXPORT_SYMBOL(jbd2_journal_create);
|
||||
EXPORT_SYMBOL(jbd2_journal_load);
|
||||
EXPORT_SYMBOL(jbd2_journal_destroy);
|
||||
EXPORT_SYMBOL(jbd2_journal_update_superblock);
|
||||
EXPORT_SYMBOL(jbd2_journal_abort);
|
||||
EXPORT_SYMBOL(jbd2_journal_errno);
|
||||
EXPORT_SYMBOL(jbd2_journal_ack_err);
|
||||
|