Bug fixes and clean ups for the 3.17 merge window

-----BEGIN PGP SIGNATURE-----
 Version: GnuPG v2
 
 iQIcBAABCAAGBQJT4Eh0AAoJENNvdpvBGATwmaAP/0W9B/VPY4rSnanpXvsYlone
 wjIjh11NZdU3daW/c/VhgEIO81rFaoFoQ/dN+pIo7uHG8JRqZjyHXZY2eVF5SFFp
 FQyGEGMRhE+u78IDg3U99OlAUTo8SAHjHVlYUBVpT9lazMLWRPqP7uHJbXow5ijK
 /bXSVY+6fOWY1/yruCZv1nRtg9JNZgCc1LOPDqn6K16jItBKfYBvVbpw6hise2v2
 rPfcSlKJ5Wzo/PgNX+IR9nnUOXzpbdM2CZbxy0qB2jZYirzE6VNeHhc1JWxQWNB1
 Dg3j/ynEWPs03+9ywLcQ0kEQvUXhQQlMGmfPkgzWeAUQDUv4QAeYmFiRhc/EgJWY
 othDmKVqy0Pn9rmGCOMg/TJFH0Mz/c7PTxFVF1onxs2Sqvl3yCdZANT+ie5UoC9m
 zkUHdY3HkARiK/I6d5CCJzvHMxWNyf6bAJmoR6L/SaOPXebc4cfFtjgV01kbTWv+
 rW9MCj3TiIC9MpWdVJADcmc2w3cY0L/NUbFWHZhMSDFiuJvcLUw5afaJTvKEPuKp
 WnnYICPj6wMP7Gy/isTxBGGi0UjPm67DHGLpG+syPDi1RxU6Vw3p9PjbdvCRj7so
 UD3xzntCHOzVcgAxE92V4pMZAajtv0sfIBVzMm7k8iUXzb7Er1c5dV6ROkOzguq3
 Ogj/c2JHSSB4TSYdVxsG
 =hf9n
 -----END PGP SIGNATURE-----

Merge tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

Pull ext4 updates from Ted Ts'o:
 "Bug fixes and clean ups for the 3.17 merge window"

* tag 'ext4_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4:
  ext4: fix ext4_discard_allocated_blocks() if we can't allocate the pa struct
  ext4: fix COLLAPSE RANGE test for bigalloc file systems
  ext4: check inline directory before converting
  ext4: fix incorrect locking in move_extent_per_page
  ext4: use correct depth value
  ext4: add i_data_sem sanity check
  ext4: fix wrong size computation in ext4_mb_normalize_request()
  ext4: make ext4_has_inline_data() as a inline function
  ext4: remove readpage() check in ext4_mmap_file()
  ext4: fix punch hole on files with indirect mapping
  ext4: remove metadata reservation checks
  ext4: rearrange initialization to fix EXT4FS_DEBUG
Linus Torvalds 2014-08-04 20:46:54 -07:00
commit 8e099d1e8b
12 changed files with 333 additions and 293 deletions

fs/ext4/balloc.c

@@ -639,7 +639,6 @@ ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
 	if (!(*errp) &&
 	    ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
 		spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
-		EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
 		spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 		dquot_alloc_block_nofail(inode,
 				EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));

fs/ext4/dir.c

@@ -571,6 +571,31 @@ static int ext4_release_dir(struct inode *inode, struct file *filp)
 	return 0;
 }
 
+int ext4_check_all_de(struct inode *dir, struct buffer_head *bh, void *buf,
+		      int buf_size)
+{
+	struct ext4_dir_entry_2 *de;
+	int nlen, rlen;
+	unsigned int offset = 0;
+	char *top;
+
+	de = (struct ext4_dir_entry_2 *)buf;
+	top = buf + buf_size;
+	while ((char *) de < top) {
+		if (ext4_check_dir_entry(dir, NULL, de, bh,
+					 buf, buf_size, offset))
+			return -EIO;
+		nlen = EXT4_DIR_REC_LEN(de->name_len);
+		rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
+		de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
+		offset += rlen;
+	}
+	if ((char *) de > top)
+		return -EIO;
+	return 0;
+}
+
 const struct file_operations ext4_dir_operations = {
 	.llseek		= ext4_dir_llseek,
 	.read		= generic_read_dir,
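
A note on the pattern ext4_check_all_de() guards against: each ext4_dir_entry_2 carries a rec_len that says how far to jump to the next entry, so a single corrupted rec_len can walk the cursor past the end of the buffer. The sketch below is a userspace toy model (hypothetical record layout and names, not the on-disk ext4 format) showing why the final "de > top" style of check matters: a sane walk ends exactly at the buffer end.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy record header, loosely modeled on ext4_dir_entry_2 (not on-disk format). */
struct toy_de {
    uint16_t rec_len;   /* distance to the next record */
    uint8_t  name_len;
    char     name[];
};

/* Return 0 if every record is sane and the walk ends exactly at buf_size. */
static int check_all_de(const unsigned char *buf, size_t buf_size)
{
    size_t offset = 0;

    while (offset < buf_size) {
        const struct toy_de *de = (const void *)(buf + offset);

        /* Minimal sanity checks: the header must fit, and rec_len must
         * cover the name and keep moving forward. */
        if (offset + sizeof(*de) > buf_size ||
            de->rec_len < sizeof(*de) + de->name_len)
            return -1;
        offset += de->rec_len;
    }
    /* A corrupt rec_len that jumps past the end lands here. */
    return offset == buf_size ? 0 : -1;
}

int main(void)
{
    unsigned char buf[32];
    struct toy_de *de = (void *)buf;

    memset(buf, 0, sizeof(buf));
    de->rec_len = sizeof(buf);      /* one record spanning the whole buffer */
    de->name_len = 1;
    printf("valid:   %d\n", check_all_de(buf, sizeof(buf)));   /* 0 */

    de->rec_len = sizeof(buf) + 8;  /* overshoots the buffer */
    printf("corrupt: %d\n", check_all_de(buf, sizeof(buf)));   /* -1 */
    return 0;
}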

fs/ext4/ext4.h

@@ -591,7 +591,6 @@ enum {
 #define EXT4_FREE_BLOCKS_NO_QUOT_UPDATE	0x0008
 #define EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER	0x0010
 #define EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER	0x0020
-#define EXT4_FREE_BLOCKS_RESERVE		0x0040
 
 /*
  * ioctl commands
@@ -2029,6 +2028,8 @@ static inline unsigned char get_dtype(struct super_block *sb, int filetype)
 	return ext4_filetype_table[filetype];
 }
+extern int ext4_check_all_de(struct inode *dir, struct buffer_head *bh,
+			     void *buf, int buf_size);
 
 /* fsync.c */
 extern int ext4_sync_file(struct file *, loff_t, loff_t, int);
@@ -2144,8 +2145,8 @@ extern ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
 extern int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock);
 extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks);
 extern void ext4_ind_truncate(handle_t *, struct inode *inode);
-extern int ext4_free_hole_blocks(handle_t *handle, struct inode *inode,
-				 ext4_lblk_t first, ext4_lblk_t stop);
+extern int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
+				 ext4_lblk_t start, ext4_lblk_t end);
 
 /* ioctl.c */
 extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
@@ -2560,7 +2561,6 @@ extern const struct file_operations ext4_file_operations;
 extern loff_t ext4_llseek(struct file *file, loff_t offset, int origin);
 
 /* inline.c */
-extern int ext4_has_inline_data(struct inode *inode);
 extern int ext4_get_max_inline_size(struct inode *inode);
 extern int ext4_find_inline_data_nolock(struct inode *inode);
 extern int ext4_init_inline_data(handle_t *handle, struct inode *inode,
@@ -2626,6 +2626,12 @@ extern void ext4_inline_data_truncate(struct inode *inode, int *has_inline);
 extern int ext4_convert_inline_data(struct inode *inode);
+
+static inline int ext4_has_inline_data(struct inode *inode)
+{
+	return ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA) &&
+	       EXT4_I(inode)->i_inline_off;
+}
 
 /* namei.c */
 extern const struct inode_operations ext4_dir_inode_operations;
 extern const struct inode_operations ext4_special_inode_operations;

fs/ext4/extents.c

@@ -161,6 +161,8 @@ int __ext4_ext_dirty(const char *where, unsigned int line, handle_t *handle,
 		     struct inode *inode, struct ext4_ext_path *path)
 {
 	int err;
+
+	WARN_ON(!rwsem_is_locked(&EXT4_I(inode)->i_data_sem));
 	if (path->p_bh) {
 		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
 		/* path points to block */
@@ -1808,8 +1810,7 @@ static void ext4_ext_try_to_merge_up(handle_t *handle,
 	brelse(path[1].p_bh);
 	ext4_free_blocks(handle, inode, NULL, blk, 1,
-			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET |
-			 EXT4_FREE_BLOCKS_RESERVE);
+			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
 }
 
 /*
@@ -3253,7 +3254,7 @@ static int ext4_split_extent_at(handle_t *handle,
 fix_extent_len:
 	ex->ee_len = orig_ex.ee_len;
-	ext4_ext_dirty(handle, inode, path + depth);
+	ext4_ext_dirty(handle, inode, path + path->p_depth);
 	return err;
 }
 
@@ -5403,16 +5404,13 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
 	int ret;
 
 	/* Collapse range works only on fs block size aligned offsets. */
-	if (offset & (EXT4_BLOCK_SIZE(sb) - 1) ||
-	    len & (EXT4_BLOCK_SIZE(sb) - 1))
+	if (offset & (EXT4_CLUSTER_SIZE(sb) - 1) ||
+	    len & (EXT4_CLUSTER_SIZE(sb) - 1))
 		return -EINVAL;
 
 	if (!S_ISREG(inode->i_mode))
 		return -EINVAL;
 
-	if (EXT4_SB(inode->i_sb)->s_cluster_ratio > 1)
-		return -EOPNOTSUPP;
-
 	trace_ext4_collapse_range(inode, offset, len);
 
 	punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb);
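
The collapse-range fix above swaps EXT4_BLOCK_SIZE for EXT4_CLUSTER_SIZE and drops the blanket -EOPNOTSUPP for bigalloc: the operation is now permitted as long as offset and len are cluster-aligned. The power-of-two mask test it relies on is easy to check in a standalone sketch (the sizes below are illustrative):

#include <stdio.h>

/* Reject an offset/length pair unless both are multiples of `unit`
 * (a power of two) -- the same `x & (unit - 1)` test the fix applies
 * with the cluster size instead of the block size. */
static int range_aligned(long long offset, long long len, long long unit)
{
    return !(offset & (unit - 1)) && !(len & (unit - 1));
}

int main(void)
{
    long long block = 4096;              /* illustrative block size */
    long long cluster = 16 * block;      /* bigalloc: 16 blocks per cluster */
    long long off = 8 * block, len = 4 * block;

    /* Block-aligned but not cluster-aligned: the old check accepted
     * this range on a bigalloc fs; the fixed check rejects it. */
    printf("block-aligned:   %d\n", range_aligned(off, len, block));   /* 1 */
    printf("cluster-aligned: %d\n", range_aligned(off, len, cluster)); /* 0 */
    return 0;
}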

fs/ext4/file.c

@@ -200,10 +200,6 @@ static const struct vm_operations_struct ext4_file_vm_ops = {
 static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
-	struct address_space *mapping = file->f_mapping;
-
-	if (!mapping->a_ops->readpage)
-		return -ENOEXEC;
 	file_accessed(file);
 	vma->vm_ops = &ext4_file_vm_ops;
 	return 0;

fs/ext4/indirect.c

@@ -1295,97 +1295,220 @@ void ext4_ind_truncate(handle_t *handle, struct inode *inode)
 	}
 }
 
-static int free_hole_blocks(handle_t *handle, struct inode *inode,
-			    struct buffer_head *parent_bh, __le32 *i_data,
-			    int level, ext4_lblk_t first,
-			    ext4_lblk_t count, int max)
+/**
+ *	ext4_ind_remove_space - remove space from the range
+ *	@handle: JBD handle for this transaction
+ *	@inode:	inode we are dealing with
+ *	@start:	First block to remove
+ *	@end:	One block after the last block to remove (exclusive)
+ *
+ *	Free the blocks in the defined range (end is exclusive endpoint of
+ *	range). This is used by ext4_punch_hole().
+ */
+int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
+			  ext4_lblk_t start, ext4_lblk_t end)
 {
-	struct buffer_head *bh = NULL;
+	struct ext4_inode_info *ei = EXT4_I(inode);
+	__le32 *i_data = ei->i_data;
 	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
-	int ret = 0;
-	int i, inc;
-	ext4_lblk_t offset;
-	__le32 blk;
+	ext4_lblk_t offsets[4], offsets2[4];
+	Indirect chain[4], chain2[4];
+	Indirect *partial, *partial2;
+	ext4_lblk_t max_block;
+	__le32 nr = 0, nr2 = 0;
+	int n = 0, n2 = 0;
+	unsigned blocksize = inode->i_sb->s_blocksize;
 
-	inc = 1 << ((EXT4_BLOCK_SIZE_BITS(inode->i_sb) - 2) * level);
-	for (i = 0, offset = 0; i < max; i++, i_data++, offset += inc) {
-		if (offset >= count + first)
-			break;
-		if (*i_data == 0 || (offset + inc) <= first)
-			continue;
-		blk = *i_data;
-		if (level > 0) {
-			ext4_lblk_t first2;
-			ext4_lblk_t count2;
+	max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1)
+					>> EXT4_BLOCK_SIZE_BITS(inode->i_sb);
+	if (end >= max_block)
+		end = max_block;
+	if ((start >= end) || (start > max_block))
+		return 0;
 
-			bh = sb_bread(inode->i_sb, le32_to_cpu(blk));
-			if (!bh) {
-				EXT4_ERROR_INODE_BLOCK(inode, le32_to_cpu(blk),
-						       "Read failure");
-				return -EIO;
-			}
-			if (first > offset) {
-				first2 = first - offset;
-				count2 = count;
+	n = ext4_block_to_path(inode, start, offsets, NULL);
+	n2 = ext4_block_to_path(inode, end, offsets2, NULL);
+
+	BUG_ON(n > n2);
+
+	if ((n == 1) && (n == n2)) {
+		/* We're punching only within direct block range */
+		ext4_free_data(handle, inode, NULL, i_data + offsets[0],
+			       i_data + offsets2[0]);
+		return 0;
+	} else if (n2 > n) {
+		/*
+		 * Start and end are on a different levels so we're going to
+		 * free partial block at start, and partial block at end of
+		 * the range. If there are some levels in between then
+		 * do_indirects label will take care of that.
+		 */
+		if (n == 1) {
+			/*
+			 * Start is at the direct block level, free
+			 * everything to the end of the level.
+			 */
+			ext4_free_data(handle, inode, NULL, i_data + offsets[0],
+				       i_data + EXT4_NDIR_BLOCKS);
+			goto end_range;
+		}
+
+		partial = ext4_find_shared(inode, n, offsets, chain, &nr);
+		if (nr) {
+			if (partial == chain) {
+				/* Shared branch grows from the inode */
+				ext4_free_branches(handle, inode, NULL,
+					   &nr, &nr+1, (chain+n-1) - partial);
+				*partial->p = 0;
 			} else {
-				first2 = 0;
-				count2 = count - (offset - first);
-			}
-			ret = free_hole_blocks(handle, inode, bh,
-					       (__le32 *)bh->b_data, level - 1,
-					       first2, count2,
-					       inode->i_sb->s_blocksize >> 2);
-			if (ret) {
-				brelse(bh);
-				goto err;
+				/* Shared branch grows from an indirect block */
+				BUFFER_TRACE(partial->bh, "get_write_access");
+				ext4_free_branches(handle, inode, partial->bh,
+					partial->p,
+					partial->p+1, (chain+n-1) - partial);
 			}
 		}
-		if (level == 0 ||
-		    (bh && all_zeroes((__le32 *)bh->b_data,
-				      (__le32 *)bh->b_data + addr_per_block))) {
-			ext4_free_data(handle, inode, parent_bh,
-				       i_data, i_data + 1);
+
+		/*
+		 * Clear the ends of indirect blocks on the shared branch
+		 * at the start of the range
+		 */
+		while (partial > chain) {
+			ext4_free_branches(handle, inode, partial->bh,
+				partial->p + 1,
+				(__le32 *)partial->bh->b_data+addr_per_block,
+				(chain+n-1) - partial);
+			BUFFER_TRACE(partial->bh, "call brelse");
+			brelse(partial->bh);
+			partial--;
 		}
-		brelse(bh);
-		bh = NULL;
-	}
-
-err:
-	return ret;
-}
-
-int ext4_free_hole_blocks(handle_t *handle, struct inode *inode,
-			  ext4_lblk_t first, ext4_lblk_t stop)
-{
-	int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb);
-	int level, ret = 0;
-	int num = EXT4_NDIR_BLOCKS;
-	ext4_lblk_t count, max = EXT4_NDIR_BLOCKS;
-	__le32 *i_data = EXT4_I(inode)->i_data;
-
-	count = stop - first;
-	for (level = 0; level < 4; level++, max *= addr_per_block) {
-		if (first < max) {
-			ret = free_hole_blocks(handle, inode, NULL, i_data,
-					       level, first, count, num);
-			if (ret)
-				goto err;
-			if (count > max - first)
-				count -= max - first;
-			else
-				break;
-			first = 0;
+
+end_range:
+		partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
+		if (nr2) {
+			if (partial2 == chain2) {
+				/*
+				 * Remember, end is exclusive so here we're at
+				 * the start of the next level we're not going
+				 * to free. Everything was covered by the start
+				 * of the range.
+				 */
+				return 0;
+			} else {
+				/* Shared branch grows from an indirect block */
+				partial2--;
+			}
 		} else {
-			first -= max;
+			/*
+			 * ext4_find_shared returns Indirect structure which
+			 * points to the last element which should not be
+			 * removed by truncate. But this is end of the range
+			 * in punch_hole so we need to point to the next element
+			 */
+			partial2->p++;
 		}
-		i_data += num;
-		if (level == 0) {
-			num = 1;
-			max = 1;
+
+		/*
+		 * Clear the ends of indirect blocks on the shared branch
+		 * at the end of the range
+		 */
+		while (partial2 > chain2) {
+			ext4_free_branches(handle, inode, partial2->bh,
+					   (__le32 *)partial2->bh->b_data,
+					   partial2->p,
+					   (chain2+n2-1) - partial2);
+			BUFFER_TRACE(partial2->bh, "call brelse");
+			brelse(partial2->bh);
+			partial2--;
 		}
+		goto do_indirects;
 	}
 
-err:
-	return ret;
+	/* Punch happened within the same level (n == n2) */
+	partial = ext4_find_shared(inode, n, offsets, chain, &nr);
+	partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
+	/*
+	 * ext4_find_shared returns Indirect structure which
+	 * points to the last element which should not be
+	 * removed by truncate. But this is end of the range
+	 * in punch_hole so we need to point to the next element
+	 */
+	partial2->p++;
+	while ((partial > chain) || (partial2 > chain2)) {
+		/* We're at the same block, so we're almost finished */
+		if ((partial->bh && partial2->bh) &&
+		    (partial->bh->b_blocknr == partial2->bh->b_blocknr)) {
+			if ((partial > chain) && (partial2 > chain2)) {
+				ext4_free_branches(handle, inode, partial->bh,
+						   partial->p + 1,
+						   partial2->p,
+						   (chain+n-1) - partial);
+				BUFFER_TRACE(partial->bh, "call brelse");
+				brelse(partial->bh);
+				BUFFER_TRACE(partial2->bh, "call brelse");
+				brelse(partial2->bh);
+			}
+			return 0;
+		}
+		/*
+		 * Clear the ends of indirect blocks on the shared branch
+		 * at the start of the range
+		 */
+		if (partial > chain) {
+			ext4_free_branches(handle, inode, partial->bh,
+				   partial->p + 1,
+				   (__le32 *)partial->bh->b_data+addr_per_block,
+				   (chain+n-1) - partial);
+			BUFFER_TRACE(partial->bh, "call brelse");
+			brelse(partial->bh);
+			partial--;
+		}
+		/*
+		 * Clear the ends of indirect blocks on the shared branch
+		 * at the end of the range
+		 */
+		if (partial2 > chain2) {
+			ext4_free_branches(handle, inode, partial2->bh,
+					   (__le32 *)partial2->bh->b_data,
+					   partial2->p,
+					   (chain2+n-1) - partial2);
+			BUFFER_TRACE(partial2->bh, "call brelse");
+			brelse(partial2->bh);
+			partial2--;
+		}
+	}
+
-err:
-	return ret;
+do_indirects:
+	/* Kill the remaining (whole) subtrees */
+	switch (offsets[0]) {
+	default:
+		if (++n >= n2)
+			return 0;
+		nr = i_data[EXT4_IND_BLOCK];
+		if (nr) {
+			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
+			i_data[EXT4_IND_BLOCK] = 0;
+		}
+	case EXT4_IND_BLOCK:
+		if (++n >= n2)
+			return 0;
+		nr = i_data[EXT4_DIND_BLOCK];
+		if (nr) {
+			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
+			i_data[EXT4_DIND_BLOCK] = 0;
+		}
+	case EXT4_DIND_BLOCK:
+		if (++n >= n2)
+			return 0;
+		nr = i_data[EXT4_TIND_BLOCK];
+		if (nr) {
+			ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
+			i_data[EXT4_TIND_BLOCK] = 0;
+		}
+	case EXT4_TIND_BLOCK:
+		;
+	}
+	return 0;
 }
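
ext4_ind_remove_space() above leans on ext4_block_to_path() to split each endpoint into per-level offsets and on the resulting depths n and n2 to decide which branches to trim. A userspace sketch of that decomposition, assuming ext4's classic layout of 12 direct slots plus single-, double- and triple-indirect pointers (the helper name is ours):

#include <stdio.h>

#define NDIR 12 /* direct slots in the inode, as in ext4 */

/* Split a logical block into per-level offsets, in the spirit of
 * ext4_block_to_path(); returns the depth (1..4) or 0 if out of range. */
static int block_to_path(unsigned long block, unsigned long addr_per_block,
                         unsigned long offsets[4])
{
    unsigned long ind = addr_per_block;
    unsigned long dind = ind * addr_per_block;
    unsigned long tind = dind * addr_per_block;
    int n = 0;

    if (block < NDIR) {
        offsets[n++] = block;
    } else if ((block -= NDIR) < ind) {
        offsets[n++] = NDIR;          /* EXT4_IND_BLOCK */
        offsets[n++] = block;
    } else if ((block -= ind) < dind) {
        offsets[n++] = NDIR + 1;      /* EXT4_DIND_BLOCK */
        offsets[n++] = block / ind;
        offsets[n++] = block % ind;
    } else if ((block -= dind) < tind) {
        offsets[n++] = NDIR + 2;      /* EXT4_TIND_BLOCK */
        offsets[n++] = block / dind;
        offsets[n++] = (block / ind) % addr_per_block;
        offsets[n++] = block % ind;
    }
    return n;
}

int main(void)
{
    unsigned long offs[4];
    /* 4K blocks => 1024 four-byte pointers per indirect block */
    int n = block_to_path(5000, 1024, offs);

    /* 5000 = 12 direct + 1024 single-indirect + 3964 into the
     * double-indirect tree: depth 3, path {13, 3, 892}. */
    printf("depth %d: {%lu, %lu, %lu}\n", n, offs[0], offs[1], offs[2]);
    return 0;
}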

fs/ext4/inline.c

@@ -120,12 +120,6 @@ int ext4_get_max_inline_size(struct inode *inode)
 	return max_inline_size + EXT4_MIN_INLINE_DATA_SIZE;
 }
 
-int ext4_has_inline_data(struct inode *inode)
-{
-	return ext4_test_inode_flag(inode, EXT4_INODE_INLINE_DATA) &&
-	       EXT4_I(inode)->i_inline_off;
-}
-
 /*
  * this function does not take xattr_sem, which is OK because it is
  * currently only used in a code path coming form ext4_iget, before
@@ -1178,6 +1172,18 @@ static int ext4_convert_inline_data_nolock(handle_t *handle,
 	if (error < 0)
 		goto out;
 
+	/*
+	 * Make sure the inline directory entries pass checks before we try to
+	 * convert them, so that we avoid touching stuff that needs fsck.
+	 */
+	if (S_ISDIR(inode->i_mode)) {
+		error = ext4_check_all_de(inode, iloc->bh,
+					buf + EXT4_INLINE_DOTDOT_SIZE,
+					inline_size - EXT4_INLINE_DOTDOT_SIZE);
+		if (error)
+			goto out;
+	}
+
 	error = ext4_destroy_inline_data_nolock(handle, inode);
 	if (error)
 		goto out;

fs/ext4/inode.c

@@ -324,18 +324,6 @@ qsize_t *ext4_get_reserved_space(struct inode *inode)
 }
 #endif
 
-/*
- * Calculate the number of metadata blocks need to reserve
- * to allocate a block located at @lblock
- */
-static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
-{
-	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
-		return ext4_ext_calc_metadata_amount(inode, lblock);
-	return ext4_ind_calc_metadata_amount(inode, lblock);
-}
-
 /*
  * Called with i_data_sem down, which is important since we can call
  * ext4_discard_preallocations() from here.
@@ -357,35 +345,10 @@ void ext4_da_update_reserve_space(struct inode *inode,
 		used = ei->i_reserved_data_blocks;
 	}
 
-	if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
-		ext4_warning(inode->i_sb, "ino %lu, allocated %d "
-			"with only %d reserved metadata blocks "
-			"(releasing %d blocks with reserved %d data blocks)",
-			inode->i_ino, ei->i_allocated_meta_blocks,
-			     ei->i_reserved_meta_blocks, used,
-			     ei->i_reserved_data_blocks);
-		WARN_ON(1);
-		ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
-	}
-
 	/* Update per-inode reservations */
 	ei->i_reserved_data_blocks -= used;
-	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
-	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
-			   used + ei->i_allocated_meta_blocks);
-	ei->i_allocated_meta_blocks = 0;
+	percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);
 
-	if (ei->i_reserved_data_blocks == 0) {
-		/*
-		 * We can release all of the reserved metadata blocks
-		 * only when we have written all of the delayed
-		 * allocation blocks.
-		 */
-		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
-				   ei->i_reserved_meta_blocks);
-		ei->i_reserved_meta_blocks = 0;
-		ei->i_da_metadata_calc_len = 0;
-	}
 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 
 	/* Update quota subsystem for data blocks */
@@ -1221,49 +1184,6 @@ static int ext4_journalled_write_end(struct file *file,
 	return ret ? ret : copied;
 }
 
-/*
- * Reserve a metadata for a single block located at lblock
- */
-static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
-{
-	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-	struct ext4_inode_info *ei = EXT4_I(inode);
-	unsigned int md_needed;
-	ext4_lblk_t save_last_lblock;
-	int save_len;
-
-	/*
-	 * recalculate the amount of metadata blocks to reserve
-	 * in order to allocate nrblocks
-	 * worse case is one extent per block
-	 */
-	spin_lock(&ei->i_block_reservation_lock);
-	/*
-	 * ext4_calc_metadata_amount() has side effects, which we have
-	 * to be prepared undo if we fail to claim space.
-	 */
-	save_len = ei->i_da_metadata_calc_len;
-	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
-	md_needed = EXT4_NUM_B2C(sbi,
-				 ext4_calc_metadata_amount(inode, lblock));
-	trace_ext4_da_reserve_space(inode, md_needed);
-
-	/*
-	 * We do still charge estimated metadata to the sb though;
-	 * we cannot afford to run out of free blocks.
-	 */
-	if (ext4_claim_free_clusters(sbi, md_needed, 0)) {
-		ei->i_da_metadata_calc_len = save_len;
-		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
-		spin_unlock(&ei->i_block_reservation_lock);
-		return -ENOSPC;
-	}
-	ei->i_reserved_meta_blocks += md_needed;
-	spin_unlock(&ei->i_block_reservation_lock);
-
-	return 0;       /* success */
-}
-
 /*
  * Reserve a single cluster located at lblock
  */
@@ -1273,8 +1193,6 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
 	struct ext4_inode_info *ei = EXT4_I(inode);
 	unsigned int md_needed;
 	int ret;
-	ext4_lblk_t save_last_lblock;
-	int save_len;
 
 	/*
 	 * We will charge metadata quota at writeout time; this saves
@@ -1295,25 +1213,15 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
 	 * ext4_calc_metadata_amount() has side effects, which we have
 	 * to be prepared undo if we fail to claim space.
 	 */
-	save_len = ei->i_da_metadata_calc_len;
-	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
-	md_needed = EXT4_NUM_B2C(sbi,
-				 ext4_calc_metadata_amount(inode, lblock));
-	trace_ext4_da_reserve_space(inode, md_needed);
+	md_needed = 0;
+	trace_ext4_da_reserve_space(inode, 0);
 
-	/*
-	 * We do still charge estimated metadata to the sb though;
-	 * we cannot afford to run out of free blocks.
-	 */
-	if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
-		ei->i_da_metadata_calc_len = save_len;
-		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
+	if (ext4_claim_free_clusters(sbi, 1, 0)) {
 		spin_unlock(&ei->i_block_reservation_lock);
 		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
 		return -ENOSPC;
 	}
 	ei->i_reserved_data_blocks++;
-	ei->i_reserved_meta_blocks += md_needed;
 	spin_unlock(&ei->i_block_reservation_lock);
 
 	return 0;       /* success */
@@ -1346,20 +1254,6 @@ static void ext4_da_release_space(struct inode *inode, int to_free)
 	}
 	ei->i_reserved_data_blocks -= to_free;
 
-	if (ei->i_reserved_data_blocks == 0) {
-		/*
-		 * We can release all of the reserved metadata blocks
-		 * only when we have written all of the delayed
-		 * allocation blocks.
-		 * Note that in case of bigalloc, i_reserved_meta_blocks,
-		 * i_reserved_data_blocks, etc. refer to number of clusters.
-		 */
-		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
-				   ei->i_reserved_meta_blocks);
-		ei->i_reserved_meta_blocks = 0;
-		ei->i_da_metadata_calc_len = 0;
-	}
-
 	/* update fs dirty data blocks counter */
 	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);
@@ -1500,10 +1394,6 @@ static void ext4_print_free_blocks(struct inode *inode)
 	ext4_msg(sb, KERN_CRIT, "Block reservation details");
 	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
 		 ei->i_reserved_data_blocks);
-	ext4_msg(sb, KERN_CRIT, "i_reserved_meta_blocks=%u",
-		 ei->i_reserved_meta_blocks);
-	ext4_msg(sb, KERN_CRIT, "i_allocated_meta_blocks=%u",
-		 ei->i_allocated_meta_blocks);
 	return;
 }
@@ -1620,13 +1510,6 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
 			retval = ret;
 			goto out_unlock;
 		}
-	} else {
-		ret = ext4_da_reserve_metadata(inode, iblock);
-		if (ret) {
-			/* not enough space to reserve */
-			retval = ret;
-			goto out_unlock;
-		}
 	}
 
 	ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
@@ -2843,8 +2726,7 @@ int ext4_alloc_da_blocks(struct inode *inode)
 {
 	trace_ext4_alloc_da_blocks(inode);
-	if (!EXT4_I(inode)->i_reserved_data_blocks &&
-	    !EXT4_I(inode)->i_reserved_meta_blocks)
+	if (!EXT4_I(inode)->i_reserved_data_blocks)
 		return 0;
 
 	/*
@@ -3624,7 +3506,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
 		ret = ext4_ext_remove_space(inode, first_block,
 					    stop_block - 1);
 	else
-		ret = ext4_free_hole_blocks(handle, inode, first_block,
+		ret = ext4_ind_remove_space(handle, inode, first_block,
 					    stop_block);
 
 	up_write(&EXT4_I(inode)->i_data_sem);
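
The net effect of the inode.c changes above: delayed allocation now reserves exactly one data cluster per write and predicts nothing for metadata, so ext4_da_update_reserve_space() only has to move clusters from reserved to used. A loose userspace toy model of that bookkeeping (plain integers standing in for the percpu counters and the reservation lock; this is a sketch of the idea, not the kernel's exact semantics):

#include <stdio.h>

/* Toy stand-ins for the superblock-wide and per-inode counters. */
static long free_clusters = 100;   /* ~ sbi->s_freeclusters_counter */
static long dirty_clusters;        /* ~ sbi->s_dirtyclusters_counter */
static long reserved_data_blocks;  /* ~ ei->i_reserved_data_blocks */

/* Mirror of the simplified ext4_da_reserve_space(): claim exactly one
 * cluster for data, nothing for metadata (md_needed is now always 0). */
static int da_reserve_space(void)
{
    if (free_clusters - dirty_clusters < 1)
        return -1; /* -ENOSPC */
    dirty_clusters++;
    reserved_data_blocks++;
    return 0;
}

/* Mirror of the simplified ext4_da_update_reserve_space(): writeback
 * allocated `used` clusters, so move them from reserved to used. */
static void da_update_reserve_space(long used)
{
    reserved_data_blocks -= used;
    dirty_clusters -= used;
    free_clusters -= used;
}

int main(void)
{
    for (int i = 0; i < 3; i++)
        da_reserve_space();               /* three delalloc writes */
    printf("reserved=%ld dirty=%ld\n",
           reserved_data_blocks, dirty_clusters);             /* 3 3 */

    da_update_reserve_space(3);           /* writeback allocates them */
    printf("reserved=%ld dirty=%ld free=%ld\n",
           reserved_data_blocks, dirty_clusters, free_clusters); /* 0 0 97 */
    return 0;
}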

fs/ext4/mballoc.c

@@ -3075,8 +3075,9 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
 					(23 - bsbits)) << 23;
 		size = 8 * 1024 * 1024;
 	} else {
-		start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits;
-		size	  = ac->ac_o_ex.fe_len << bsbits;
+		start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits;
+		size	  = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb),
+					      ac->ac_o_ex.fe_len) << bsbits;
 	}
 	size = size >> bsbits;
 	start = start_off >> bsbits;
@@ -3216,8 +3217,27 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
 static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
 {
 	struct ext4_prealloc_space *pa = ac->ac_pa;
+	struct ext4_buddy e4b;
+	int err;
 
-	if (pa && pa->pa_type == MB_INODE_PA)
+	if (pa == NULL) {
+		err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b);
+		if (err) {
+			/*
+			 * This should never happen since we pin the
+			 * pages in the ext4_allocation_context so
+			 * ext4_mb_load_buddy() should never fail.
+			 */
+			WARN(1, "mb_load_buddy failed (%d)", err);
+			return;
+		}
+		ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
+		mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start,
+			       ac->ac_f_ex.fe_len);
+		ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group);
+		return;
+	}
+	if (pa->pa_type == MB_INODE_PA)
 		pa->pa_free += ac->ac_b_ex.fe_len;
 }
@@ -4627,7 +4647,6 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
 	struct buffer_head *gd_bh;
 	ext4_group_t block_group;
 	struct ext4_sb_info *sbi;
-	struct ext4_inode_info *ei = EXT4_I(inode);
 	struct ext4_buddy e4b;
 	unsigned int count_clusters;
 	int err = 0;
@@ -4838,19 +4857,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
 			     &sbi->s_flex_groups[flex_group].free_clusters);
 	}
 
-	if (flags & EXT4_FREE_BLOCKS_RESERVE && ei->i_reserved_data_blocks) {
-		percpu_counter_add(&sbi->s_dirtyclusters_counter,
-				   count_clusters);
-		spin_lock(&ei->i_block_reservation_lock);
-		if (flags & EXT4_FREE_BLOCKS_METADATA)
-			ei->i_reserved_meta_blocks += count_clusters;
-		else
-			ei->i_reserved_data_blocks += count_clusters;
-		spin_unlock(&ei->i_block_reservation_lock);
-		if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
-			dquot_reclaim_block(inode,
-					EXT4_C2B(sbi, count_clusters));
-	} else if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
+	if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
 		dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
 
 	percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
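
The ext4_mb_normalize_request() change fixes two things at once: fe_len counts clusters and must go through EXT4_C2B() before the shift, and shifting a 32-bit value by bsbits can wrap before it ever reaches the 64-bit size. The second point in a minimal demonstration (illustrative values; unsigned is used here so the truncation is well-defined, where the kernel's original int shift would have been undefined):

#include <stdio.h>

int main(void)
{
    unsigned int fe_len = 1u << 20;  /* ~1M clusters, a plausibly huge file */
    int bsbits = 12;                 /* 4K block size */

    /* Old code's shape: the shift is computed in 32 bits and wraps to 0
     * before being widened into the 64-bit variable. */
    long long bad = (long long)(fe_len << bsbits);

    /* Fixed shape: widen first, then shift. */
    long long good = (long long)fe_len << bsbits;

    printf("bad=%lld good=%lld\n", bad, good);  /* bad=0 good=4294967296 */
    return 0;
}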

fs/ext4/migrate.c

@@ -39,6 +39,8 @@ static int finish_range(handle_t *handle, struct inode *inode,
 	newext.ee_block = cpu_to_le32(lb->first_block);
 	newext.ee_len = cpu_to_le16(lb->last_block - lb->first_block + 1);
 	ext4_ext_store_pblock(&newext, lb->first_pblock);
+	/* Locking only for convinience since we are operating on temp inode */
+	down_write(&EXT4_I(inode)->i_data_sem);
 	path = ext4_ext_find_extent(inode, lb->first_block, NULL, 0);
 
 	if (IS_ERR(path)) {
@@ -61,7 +63,9 @@ static int finish_range(handle_t *handle, struct inode *inode,
 		 */
 		if (needed && ext4_handle_has_enough_credits(handle,
 						EXT4_RESERVE_TRANS_BLOCKS)) {
+			up_write((&EXT4_I(inode)->i_data_sem));
 			retval = ext4_journal_restart(handle, needed);
+			down_write((&EXT4_I(inode)->i_data_sem));
 			if (retval)
 				goto err_out;
 		} else if (needed) {
@@ -70,13 +74,16 @@ static int finish_range(handle_t *handle, struct inode *inode,
 			/*
 			 * IF not able to extend the journal restart the journal
 			 */
+			up_write((&EXT4_I(inode)->i_data_sem));
 			retval = ext4_journal_restart(handle, needed);
+			down_write((&EXT4_I(inode)->i_data_sem));
 			if (retval)
 				goto err_out;
 		}
 	}
 	retval = ext4_ext_insert_extent(handle, inode, path, &newext, 0);
 err_out:
+	up_write((&EXT4_I(inode)->i_data_sem));
 	if (path) {
 		ext4_ext_drop_refs(path);
 		kfree(path);
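
The migrate.c hunks take i_data_sem for the extent-tree edits but drop it across ext4_journal_restart(), which can block waiting for journal space. Dropping a lock around a blocking call and retaking it afterwards is a general pattern; a pthread sketch of the shape (names are ours, and as the commit's comment notes, this is safe in migrate.c only because the inode is a private temporary one):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t data_sem = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-in for ext4_journal_restart(): may block for a long time,
 * so it must not be called with data_sem held. */
static int journal_restart(void)
{
    usleep(1000);
    return 0;
}

static int finish_range(void)
{
    int ret;

    pthread_rwlock_wrlock(&data_sem);
    /* ... edit the extent tree under the lock ... */

    /* Drop the lock across the blocking call, then retake it,
     * mirroring the up_write()/down_write() pair in the fix. */
    pthread_rwlock_unlock(&data_sem);
    ret = journal_restart();
    pthread_rwlock_wrlock(&data_sem);
    if (ret == 0) {
        /* ... insert the new extent ... */
    }
    pthread_rwlock_unlock(&data_sem);
    return ret;
}

int main(void)
{
    printf("finish_range: %d\n", finish_range());
    return 0;
}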

fs/ext4/move_extent.c

@@ -1013,10 +1013,11 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
 		*err = -EBUSY;
 		goto unlock_pages;
 	}
+	ext4_double_down_write_data_sem(orig_inode, donor_inode);
 	replaced_count = mext_replace_branches(handle, orig_inode, donor_inode,
 					       orig_blk_offset,
 					       block_len_in_page, err);
+	ext4_double_up_write_data_sem(orig_inode, donor_inode);
 	if (*err) {
 		if (replaced_count) {
 			block_len_in_page = replaced_count;
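
ext4_double_down_write_data_sem() takes both inodes' i_data_sem in a fixed order, so two tasks swapping extents between the same pair of inodes cannot deadlock. The ordering trick in a standalone pthread sketch (toy inode type; the kernel orders by inode number):

#include <pthread.h>
#include <stdio.h>

struct toy_inode {
    unsigned long ino;
    pthread_mutex_t data_sem;
};

/* Always lock the lower-numbered inode first, in the spirit of
 * ext4_double_down_write_data_sem(): concurrent movers of the same
 * pair then agree on the order and cannot deadlock. */
static void double_down(struct toy_inode *a, struct toy_inode *b)
{
    struct toy_inode *first = (a->ino < b->ino) ? a : b;
    struct toy_inode *second = (first == a) ? b : a;

    pthread_mutex_lock(&first->data_sem);
    pthread_mutex_lock(&second->data_sem);
}

static void double_up(struct toy_inode *a, struct toy_inode *b)
{
    pthread_mutex_unlock(&a->data_sem);
    pthread_mutex_unlock(&b->data_sem);
}

int main(void)
{
    struct toy_inode orig = { 12, PTHREAD_MUTEX_INITIALIZER };
    struct toy_inode donor = { 34, PTHREAD_MUTEX_INITIALIZER };

    double_down(&orig, &donor);
    /* ... mext_replace_branches() would swap block mappings here ... */
    double_up(&orig, &donor);
    printf("swapped under both locks\n");
    return 0;
}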

fs/ext4/super.c

@@ -2142,10 +2142,6 @@ static int ext4_check_descriptors(struct super_block *sb,
 	}
 	if (NULL != first_not_zeroed)
 		*first_not_zeroed = grp;
-
-	ext4_free_blocks_count_set(sbi->s_es,
-				   EXT4_C2B(sbi, ext4_count_free_clusters(sb)));
-	sbi->s_es->s_free_inodes_count =cpu_to_le32(ext4_count_free_inodes(sb));
 	return 1;
 }
@@ -3883,13 +3879,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 		ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
 		goto failed_mount2;
 	}
-	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
-		if (!ext4_fill_flex_info(sb)) {
-			ext4_msg(sb, KERN_ERR,
-			       "unable to initialize "
-			       "flex_bg meta info!");
-			goto failed_mount2;
-		}
-
 	sbi->s_gdb_count = db_count;
 	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
@@ -3902,23 +3891,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	/* Register extent status tree shrinker */
 	ext4_es_register_shrinker(sbi);
 
-	err = percpu_counter_init(&sbi->s_freeclusters_counter,
-			ext4_count_free_clusters(sb));
-	if (!err) {
-		err = percpu_counter_init(&sbi->s_freeinodes_counter,
-				ext4_count_free_inodes(sb));
-	}
-	if (!err) {
-		err = percpu_counter_init(&sbi->s_dirs_counter,
-				ext4_count_dirs(sb));
-	}
-	if (!err) {
-		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0);
-	}
-	if (!err) {
-		err = percpu_counter_init(&sbi->s_extent_cache_cnt, 0);
-	}
-	if (err) {
+	if ((err = percpu_counter_init(&sbi->s_extent_cache_cnt, 0)) != 0) {
 		ext4_msg(sb, KERN_ERR, "insufficient memory");
 		goto failed_mount3;
 	}
@@ -4022,18 +3995,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;
 
-	/*
-	 * The journal may have updated the bg summary counts, so we
-	 * need to update the global counters.
-	 */
-	percpu_counter_set(&sbi->s_freeclusters_counter,
-			   ext4_count_free_clusters(sb));
-	percpu_counter_set(&sbi->s_freeinodes_counter,
-			   ext4_count_free_inodes(sb));
-	percpu_counter_set(&sbi->s_dirs_counter,
-			   ext4_count_dirs(sb));
-	percpu_counter_set(&sbi->s_dirtyclusters_counter, 0);
-
 no_journal:
 	if (ext4_mballoc_ready) {
 		sbi->s_mb_cache = ext4_xattr_create_cache(sb->s_id);
@@ -4141,6 +4102,33 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 		goto failed_mount5;
 	}
 
+	block = ext4_count_free_clusters(sb);
+	ext4_free_blocks_count_set(sbi->s_es,
+				   EXT4_C2B(sbi, block));
+	err = percpu_counter_init(&sbi->s_freeclusters_counter, block);
+	if (!err) {
+		unsigned long freei = ext4_count_free_inodes(sb);
+		sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
+		err = percpu_counter_init(&sbi->s_freeinodes_counter, freei);
+	}
+	if (!err)
+		err = percpu_counter_init(&sbi->s_dirs_counter,
+					  ext4_count_dirs(sb));
+	if (!err)
+		err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0);
+	if (err) {
+		ext4_msg(sb, KERN_ERR, "insufficient memory");
+		goto failed_mount6;
+	}
+
+	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
+		if (!ext4_fill_flex_info(sb)) {
+			ext4_msg(sb, KERN_ERR,
+			       "unable to initialize "
+			       "flex_bg meta info!");
+			goto failed_mount6;
+		}
+
 	err = ext4_register_li_request(sb, first_not_zeroed);
 	if (err)
 		goto failed_mount6;
@@ -4215,6 +4203,12 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	ext4_unregister_li_request(sb);
 failed_mount6:
 	ext4_mb_release(sb);
+	if (sbi->s_flex_groups)
+		ext4_kvfree(sbi->s_flex_groups);
+	percpu_counter_destroy(&sbi->s_freeclusters_counter);
+	percpu_counter_destroy(&sbi->s_freeinodes_counter);
+	percpu_counter_destroy(&sbi->s_dirs_counter);
+	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
 failed_mount5:
 	ext4_ext_release(sb);
 	ext4_release_system_zone(sb);
@@ -4233,12 +4227,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 failed_mount3:
 	ext4_es_unregister_shrinker(sbi);
 	del_timer_sync(&sbi->s_err_report);
-	if (sbi->s_flex_groups)
-		ext4_kvfree(sbi->s_flex_groups);
-	percpu_counter_destroy(&sbi->s_freeclusters_counter);
-	percpu_counter_destroy(&sbi->s_freeinodes_counter);
-	percpu_counter_destroy(&sbi->s_dirs_counter);
-	percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
 	percpu_counter_destroy(&sbi->s_extent_cache_cnt);
 	if (sbi->s_mmp_tsk)
 		kthread_stop(sbi->s_mmp_tsk);
@@ -4556,11 +4544,13 @@ static int ext4_commit_super(struct super_block *sb, int sync)
 	else
 		es->s_kbytes_written =
 			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
-	ext4_free_blocks_count_set(es,
+	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeclusters_counter))
+		ext4_free_blocks_count_set(es,
 			EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive(
 				&EXT4_SB(sb)->s_freeclusters_counter)));
-	es->s_free_inodes_count =
-		cpu_to_le32(percpu_counter_sum_positive(
+	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
+		es->s_free_inodes_count =
+			cpu_to_le32(percpu_counter_sum_positive(
 				&EXT4_SB(sb)->s_freeinodes_counter));
 	BUFFER_TRACE(sbh, "marking dirty");
 	ext4_superblock_csum_set(sb);
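
Because the counters are now created late in ext4_fill_super() (after the journal has been replayed, so the freshly recomputed values are the ones that stick), ext4_commit_super() can run before they exist and must check percpu_counter_initialized() first. The shape of that guard with a toy counter type (the kernel's percpu_counter is more involved):

#include <stdio.h>

/* Toy counter: `initialized` plays the role the kernel infers from
 * percpu_counter_initialized() (a non-NULL per-CPU pointer). */
struct toy_counter {
    int initialized;
    long long sum;
};

static long long counter_sum_positive(const struct toy_counter *c)
{
    return c->sum > 0 ? c->sum : 0;
}

/* Mirror of the guarded writes in ext4_commit_super(): skip the
 * on-disk update when the counter has not been set up yet. */
static void commit_super(const struct toy_counter *free_clusters,
                         long long *es_free_blocks)
{
    if (free_clusters->initialized)
        *es_free_blocks = counter_sum_positive(free_clusters);
    /* else: leave the superblock field as-is */
}

int main(void)
{
    struct toy_counter fc = { 0, 0 };
    long long es_free = 42;  /* value already in the superblock */

    commit_super(&fc, &es_free);        /* early mount: untouched */
    printf("early: %lld\n", es_free);   /* 42 */

    fc.initialized = 1;
    fc.sum = 1000;
    commit_super(&fc, &es_free);
    printf("later: %lld\n", es_free);   /* 1000 */
    return 0;
}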