Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2025-02-28 19:20:32 +07:00)
Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4
* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4: (48 commits)
  ext4: fix hot spins in mballoc after err_freebuddy and err_freemeta
  ext4: fix test ext_generic_write_end() copied return value
  ext3: fix test ext_generic_write_end() copied return value
  ext4: Move mballoc headers/structures to a seperate header file mballoc.h
  ext4: cleanup for compiling mballoc with verification and debugging #defines
  ext4: don't use ext4_error in ext4_check_descriptors
  ext4: mark inode dirty after initializing the extent tree
  ext4: update ctime and mtime for truncate with extents.
  ext4: Don't do GFP_NOFS allocations after taking ext4_lock_group
  ext4: move headers out of include/linux
  ext4: fix wrong gfp type under transaction
  ext4: Fix hang on umount with quotas when journal is aborted
  ext4: Fix update of mtime and ctime on rename
  jdb2: replace remaining __FUNCTION__ occurrences
  ext4: replace remaining __FUNCTION__ occurrences
  jbd2: only create debugfs and stats entries if init is successful
  jbd2: fix kernel-doc notation
  jbd2: replace potentially false assertion with if block
  jbd2: eliminate duplicated code in revocation table init/destroy functions
  jbd2: tidy up revoke cache initialisation and destruction
  ...
This commit is contained in: commit c4755d16fc
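Many of the ext4/jbd2 hunks below apply the same two mechanical cleanups: open-coded little-endian updates of the form `x = cpu_to_le16(le16_to_cpu(x) + n)` become calls to the `le16_add_cpu()` helper, and allocations made while a journal transaction is held switch from GFP_KERNEL to GFP_NOFS. As a rough, standalone illustration of what `le16_add_cpu()` does — a userspace sketch with stand-in types, not the kernel's implementation (the real helper lives in the byteorder headers and operates on `__le16`) — the idiom looks like this:

```c
/* Userspace sketch (assumption: illustrative only, not kernel code) of the
 * le16_add_cpu() idiom used in the ext4 hunks below.  It replaces the
 * open-coded pattern x = cpu_to_le16(le16_to_cpu(x) + n). */
#include <stdint.h>
#include <stdio.h>

typedef uint16_t le16;                  /* stand-in for the kernel's __le16 */

static uint16_t le16_to_cpu(le16 v)     /* read a little-endian field */
{
    const uint8_t *b = (const uint8_t *)&v;
    return (uint16_t)(b[0] | (b[1] << 8));
}

static le16 cpu_to_le16(uint16_t v)     /* store a value little-endian */
{
    le16 out;
    uint8_t *b = (uint8_t *)&out;
    b[0] = (uint8_t)(v & 0xff);
    b[1] = (uint8_t)(v >> 8);
    return out;
}

/* The helper: add a CPU-endian delta to a little-endian field in place. */
static void le16_add_cpu(le16 *var, int16_t val)
{
    *var = cpu_to_le16((uint16_t)(le16_to_cpu(*var) + val));
}

int main(void)
{
    le16 free_blocks = cpu_to_le16(100);

    /* old style: free_blocks = cpu_to_le16(le16_to_cpu(free_blocks) - 8); */
    le16_add_cpu(&free_blocks, -8);      /* new style, as in the hunks below */

    printf("free_blocks = %u\n", le16_to_cpu(free_blocks));
    return 0;
}
```

The point of the helper is simply to keep the endian conversion in one place; on-disk fields such as the group-descriptor counters stay little-endian regardless of host byte order.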
@ -35,6 +35,7 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
|
|||||||
* zero-initialized data and COW.
|
* zero-initialized data and COW.
|
||||||
*/
|
*/
|
||||||
struct page *empty_zero_page;
|
struct page *empty_zero_page;
|
||||||
|
EXPORT_SYMBOL(empty_zero_page);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The pmd table for the upper-most set of pages.
|
* The pmd table for the upper-most set of pages.
|
||||||
|
@ -69,6 +69,7 @@ void __init m68k_setup_node(int node)
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
void *empty_zero_page;
|
void *empty_zero_page;
|
||||||
|
EXPORT_SYMBOL(empty_zero_page);
|
||||||
|
|
||||||
void show_mem(void)
|
void show_mem(void)
|
||||||
{
|
{
|
||||||
|
@ -282,3 +282,5 @@ EXPORT_SYMBOL(do_BUG);
|
|||||||
|
|
||||||
/* Sun Power Management Idle Handler */
|
/* Sun Power Management Idle Handler */
|
||||||
EXPORT_SYMBOL(pm_idle);
|
EXPORT_SYMBOL(pm_idle);
|
||||||
|
|
||||||
|
EXPORT_SYMBOL(empty_zero_page);
|
||||||
|
@ -160,6 +160,7 @@ extern unsigned int sparc_ramdisk_image;
|
|||||||
extern unsigned int sparc_ramdisk_size;
|
extern unsigned int sparc_ramdisk_size;
|
||||||
|
|
||||||
struct page *mem_map_zero __read_mostly;
|
struct page *mem_map_zero __read_mostly;
|
||||||
|
EXPORT_SYMBOL(mem_map_zero);
|
||||||
|
|
||||||
unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;
|
unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;
|
||||||
|
|
||||||
|
@ -1261,10 +1261,11 @@ static int ext3_ordered_write_end(struct file *file,
|
|||||||
new_i_size = pos + copied;
|
new_i_size = pos + copied;
|
||||||
if (new_i_size > EXT3_I(inode)->i_disksize)
|
if (new_i_size > EXT3_I(inode)->i_disksize)
|
||||||
EXT3_I(inode)->i_disksize = new_i_size;
|
EXT3_I(inode)->i_disksize = new_i_size;
|
||||||
copied = ext3_generic_write_end(file, mapping, pos, len, copied,
|
ret2 = ext3_generic_write_end(file, mapping, pos, len, copied,
|
||||||
page, fsdata);
|
page, fsdata);
|
||||||
if (copied < 0)
|
copied = ret2;
|
||||||
ret = copied;
|
if (ret2 < 0)
|
||||||
|
ret = ret2;
|
||||||
}
|
}
|
||||||
ret2 = ext3_journal_stop(handle);
|
ret2 = ext3_journal_stop(handle);
|
||||||
if (!ret)
|
if (!ret)
|
||||||
@ -1289,10 +1290,11 @@ static int ext3_writeback_write_end(struct file *file,
|
|||||||
if (new_i_size > EXT3_I(inode)->i_disksize)
|
if (new_i_size > EXT3_I(inode)->i_disksize)
|
||||||
EXT3_I(inode)->i_disksize = new_i_size;
|
EXT3_I(inode)->i_disksize = new_i_size;
|
||||||
|
|
||||||
copied = ext3_generic_write_end(file, mapping, pos, len, copied,
|
ret2 = ext3_generic_write_end(file, mapping, pos, len, copied,
|
||||||
page, fsdata);
|
page, fsdata);
|
||||||
if (copied < 0)
|
copied = ret2;
|
||||||
ret = copied;
|
if (ret2 < 0)
|
||||||
|
ret = ret2;
|
||||||
|
|
||||||
ret2 = ext3_journal_stop(handle);
|
ret2 = ext3_journal_stop(handle);
|
||||||
if (!ret)
|
if (!ret)
|
||||||
|
@ -9,8 +9,8 @@
|
|||||||
#include <linux/slab.h>
|
#include <linux/slab.h>
|
||||||
#include <linux/capability.h>
|
#include <linux/capability.h>
|
||||||
#include <linux/fs.h>
|
#include <linux/fs.h>
|
||||||
#include <linux/ext4_jbd2.h>
|
#include "ext4_jbd2.h"
|
||||||
#include <linux/ext4_fs.h>
|
#include "ext4.h"
|
||||||
#include "xattr.h"
|
#include "xattr.h"
|
||||||
#include "acl.h"
|
#include "acl.h"
|
||||||
|
|
||||||
@ -37,7 +37,7 @@ ext4_acl_from_disk(const void *value, size_t size)
|
|||||||
return ERR_PTR(-EINVAL);
|
return ERR_PTR(-EINVAL);
|
||||||
if (count == 0)
|
if (count == 0)
|
||||||
return NULL;
|
return NULL;
|
||||||
acl = posix_acl_alloc(count, GFP_KERNEL);
|
acl = posix_acl_alloc(count, GFP_NOFS);
|
||||||
if (!acl)
|
if (!acl)
|
||||||
return ERR_PTR(-ENOMEM);
|
return ERR_PTR(-ENOMEM);
|
||||||
for (n=0; n < count; n++) {
|
for (n=0; n < count; n++) {
|
||||||
@ -91,7 +91,7 @@ ext4_acl_to_disk(const struct posix_acl *acl, size_t *size)
|
|||||||
|
|
||||||
*size = ext4_acl_size(acl->a_count);
|
*size = ext4_acl_size(acl->a_count);
|
||||||
ext_acl = kmalloc(sizeof(ext4_acl_header) + acl->a_count *
|
ext_acl = kmalloc(sizeof(ext4_acl_header) + acl->a_count *
|
||||||
sizeof(ext4_acl_entry), GFP_KERNEL);
|
sizeof(ext4_acl_entry), GFP_NOFS);
|
||||||
if (!ext_acl)
|
if (!ext_acl)
|
||||||
return ERR_PTR(-ENOMEM);
|
return ERR_PTR(-ENOMEM);
|
||||||
ext_acl->a_version = cpu_to_le32(EXT4_ACL_VERSION);
|
ext_acl->a_version = cpu_to_le32(EXT4_ACL_VERSION);
|
||||||
@ -187,7 +187,7 @@ ext4_get_acl(struct inode *inode, int type)
|
|||||||
}
|
}
|
||||||
retval = ext4_xattr_get(inode, name_index, "", NULL, 0);
|
retval = ext4_xattr_get(inode, name_index, "", NULL, 0);
|
||||||
if (retval > 0) {
|
if (retval > 0) {
|
||||||
value = kmalloc(retval, GFP_KERNEL);
|
value = kmalloc(retval, GFP_NOFS);
|
||||||
if (!value)
|
if (!value)
|
||||||
return ERR_PTR(-ENOMEM);
|
return ERR_PTR(-ENOMEM);
|
||||||
retval = ext4_xattr_get(inode, name_index, "", value, retval);
|
retval = ext4_xattr_get(inode, name_index, "", value, retval);
|
||||||
@ -335,7 +335,7 @@ ext4_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
|
|||||||
if (error)
|
if (error)
|
||||||
goto cleanup;
|
goto cleanup;
|
||||||
}
|
}
|
||||||
clone = posix_acl_clone(acl, GFP_KERNEL);
|
clone = posix_acl_clone(acl, GFP_NOFS);
|
||||||
error = -ENOMEM;
|
error = -ENOMEM;
|
||||||
if (!clone)
|
if (!clone)
|
||||||
goto cleanup;
|
goto cleanup;
|
||||||
|
@ -15,12 +15,12 @@
|
|||||||
#include <linux/capability.h>
|
#include <linux/capability.h>
|
||||||
#include <linux/fs.h>
|
#include <linux/fs.h>
|
||||||
#include <linux/jbd2.h>
|
#include <linux/jbd2.h>
|
||||||
#include <linux/ext4_fs.h>
|
|
||||||
#include <linux/ext4_jbd2.h>
|
|
||||||
#include <linux/quotaops.h>
|
#include <linux/quotaops.h>
|
||||||
#include <linux/buffer_head.h>
|
#include <linux/buffer_head.h>
|
||||||
|
#include "ext4.h"
|
||||||
|
#include "ext4_jbd2.h"
|
||||||
#include "group.h"
|
#include "group.h"
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* balloc.c contains the blocks allocation and deallocation routines
|
* balloc.c contains the blocks allocation and deallocation routines
|
||||||
*/
|
*/
|
||||||
@ -48,7 +48,6 @@ void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
|
|||||||
unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
|
unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
|
||||||
ext4_group_t block_group, struct ext4_group_desc *gdp)
|
ext4_group_t block_group, struct ext4_group_desc *gdp)
|
||||||
{
|
{
|
||||||
unsigned long start;
|
|
||||||
int bit, bit_max;
|
int bit, bit_max;
|
||||||
unsigned free_blocks, group_blocks;
|
unsigned free_blocks, group_blocks;
|
||||||
struct ext4_sb_info *sbi = EXT4_SB(sb);
|
struct ext4_sb_info *sbi = EXT4_SB(sb);
|
||||||
@ -59,7 +58,7 @@ unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
|
|||||||
/* If checksum is bad mark all blocks used to prevent allocation
|
/* If checksum is bad mark all blocks used to prevent allocation
|
||||||
* essentially implementing a per-group read-only flag. */
|
* essentially implementing a per-group read-only flag. */
|
||||||
if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
|
if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
|
||||||
ext4_error(sb, __FUNCTION__,
|
ext4_error(sb, __func__,
|
||||||
"Checksum bad for group %lu\n", block_group);
|
"Checksum bad for group %lu\n", block_group);
|
||||||
gdp->bg_free_blocks_count = 0;
|
gdp->bg_free_blocks_count = 0;
|
||||||
gdp->bg_free_inodes_count = 0;
|
gdp->bg_free_inodes_count = 0;
|
||||||
@ -106,11 +105,12 @@ unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
|
|||||||
free_blocks = group_blocks - bit_max;
|
free_blocks = group_blocks - bit_max;
|
||||||
|
|
||||||
if (bh) {
|
if (bh) {
|
||||||
|
ext4_fsblk_t start;
|
||||||
|
|
||||||
for (bit = 0; bit < bit_max; bit++)
|
for (bit = 0; bit < bit_max; bit++)
|
||||||
ext4_set_bit(bit, bh->b_data);
|
ext4_set_bit(bit, bh->b_data);
|
||||||
|
|
||||||
start = block_group * EXT4_BLOCKS_PER_GROUP(sb) +
|
start = ext4_group_first_block_no(sb, block_group);
|
||||||
le32_to_cpu(sbi->s_es->s_first_data_block);
|
|
||||||
|
|
||||||
/* Set bits for block and inode bitmaps, and inode table */
|
/* Set bits for block and inode bitmaps, and inode table */
|
||||||
ext4_set_bit(ext4_block_bitmap(sb, gdp) - start, bh->b_data);
|
ext4_set_bit(ext4_block_bitmap(sb, gdp) - start, bh->b_data);
|
||||||
@ -235,7 +235,7 @@ static int ext4_valid_block_bitmap(struct super_block *sb,
|
|||||||
return 1;
|
return 1;
|
||||||
|
|
||||||
err_out:
|
err_out:
|
||||||
ext4_error(sb, __FUNCTION__,
|
ext4_error(sb, __func__,
|
||||||
"Invalid block bitmap - "
|
"Invalid block bitmap - "
|
||||||
"block_group = %d, block = %llu",
|
"block_group = %d, block = %llu",
|
||||||
block_group, bitmap_blk);
|
block_group, bitmap_blk);
|
||||||
@ -264,7 +264,7 @@ read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
|
|||||||
bitmap_blk = ext4_block_bitmap(sb, desc);
|
bitmap_blk = ext4_block_bitmap(sb, desc);
|
||||||
bh = sb_getblk(sb, bitmap_blk);
|
bh = sb_getblk(sb, bitmap_blk);
|
||||||
if (unlikely(!bh)) {
|
if (unlikely(!bh)) {
|
||||||
ext4_error(sb, __FUNCTION__,
|
ext4_error(sb, __func__,
|
||||||
"Cannot read block bitmap - "
|
"Cannot read block bitmap - "
|
||||||
"block_group = %d, block_bitmap = %llu",
|
"block_group = %d, block_bitmap = %llu",
|
||||||
(int)block_group, (unsigned long long)bitmap_blk);
|
(int)block_group, (unsigned long long)bitmap_blk);
|
||||||
@ -281,7 +281,7 @@ read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
|
|||||||
}
|
}
|
||||||
if (bh_submit_read(bh) < 0) {
|
if (bh_submit_read(bh) < 0) {
|
||||||
put_bh(bh);
|
put_bh(bh);
|
||||||
ext4_error(sb, __FUNCTION__,
|
ext4_error(sb, __func__,
|
||||||
"Cannot read block bitmap - "
|
"Cannot read block bitmap - "
|
||||||
"block_group = %d, block_bitmap = %llu",
|
"block_group = %d, block_bitmap = %llu",
|
||||||
(int)block_group, (unsigned long long)bitmap_blk);
|
(int)block_group, (unsigned long long)bitmap_blk);
|
||||||
@ -360,7 +360,7 @@ static void __rsv_window_dump(struct rb_root *root, int verbose,
|
|||||||
BUG();
|
BUG();
|
||||||
}
|
}
|
||||||
#define rsv_window_dump(root, verbose) \
|
#define rsv_window_dump(root, verbose) \
|
||||||
__rsv_window_dump((root), (verbose), __FUNCTION__)
|
__rsv_window_dump((root), (verbose), __func__)
|
||||||
#else
|
#else
|
||||||
#define rsv_window_dump(root, verbose) do {} while (0)
|
#define rsv_window_dump(root, verbose) do {} while (0)
|
||||||
#endif
|
#endif
|
||||||
@ -740,7 +740,7 @@ void ext4_free_blocks_sb(handle_t *handle, struct super_block *sb,
|
|||||||
if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
|
if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
|
||||||
bit + i, bitmap_bh->b_data)) {
|
bit + i, bitmap_bh->b_data)) {
|
||||||
jbd_unlock_bh_state(bitmap_bh);
|
jbd_unlock_bh_state(bitmap_bh);
|
||||||
ext4_error(sb, __FUNCTION__,
|
ext4_error(sb, __func__,
|
||||||
"bit already cleared for block %llu",
|
"bit already cleared for block %llu",
|
||||||
(ext4_fsblk_t)(block + i));
|
(ext4_fsblk_t)(block + i));
|
||||||
jbd_lock_bh_state(bitmap_bh);
|
jbd_lock_bh_state(bitmap_bh);
|
||||||
@ -752,9 +752,7 @@ void ext4_free_blocks_sb(handle_t *handle, struct super_block *sb,
|
|||||||
jbd_unlock_bh_state(bitmap_bh);
|
jbd_unlock_bh_state(bitmap_bh);
|
||||||
|
|
||||||
spin_lock(sb_bgl_lock(sbi, block_group));
|
spin_lock(sb_bgl_lock(sbi, block_group));
|
||||||
desc->bg_free_blocks_count =
|
le16_add_cpu(&desc->bg_free_blocks_count, group_freed);
|
||||||
cpu_to_le16(le16_to_cpu(desc->bg_free_blocks_count) +
|
|
||||||
group_freed);
|
|
||||||
desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
|
desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
|
||||||
spin_unlock(sb_bgl_lock(sbi, block_group));
|
spin_unlock(sb_bgl_lock(sbi, block_group));
|
||||||
percpu_counter_add(&sbi->s_freeblocks_counter, count);
|
percpu_counter_add(&sbi->s_freeblocks_counter, count);
|
||||||
@ -1798,7 +1796,7 @@ ext4_fsblk_t ext4_new_blocks_old(handle_t *handle, struct inode *inode,
|
|||||||
if (ext4_test_bit(grp_alloc_blk+i,
|
if (ext4_test_bit(grp_alloc_blk+i,
|
||||||
bh2jh(bitmap_bh)->b_committed_data)) {
|
bh2jh(bitmap_bh)->b_committed_data)) {
|
||||||
printk("%s: block was unexpectedly set in "
|
printk("%s: block was unexpectedly set in "
|
||||||
"b_committed_data\n", __FUNCTION__);
|
"b_committed_data\n", __func__);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1823,8 +1821,7 @@ ext4_fsblk_t ext4_new_blocks_old(handle_t *handle, struct inode *inode,
|
|||||||
spin_lock(sb_bgl_lock(sbi, group_no));
|
spin_lock(sb_bgl_lock(sbi, group_no));
|
||||||
if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))
|
if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))
|
||||||
gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
|
gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
|
||||||
gdp->bg_free_blocks_count =
|
le16_add_cpu(&gdp->bg_free_blocks_count, -num);
|
||||||
cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)-num);
|
|
||||||
gdp->bg_checksum = ext4_group_desc_csum(sbi, group_no, gdp);
|
gdp->bg_checksum = ext4_group_desc_csum(sbi, group_no, gdp);
|
||||||
spin_unlock(sb_bgl_lock(sbi, group_no));
|
spin_unlock(sb_bgl_lock(sbi, group_no));
|
||||||
percpu_counter_sub(&sbi->s_freeblocks_counter, num);
|
percpu_counter_sub(&sbi->s_freeblocks_counter, num);
|
||||||
|
@ -9,7 +9,7 @@
|
|||||||
|
|
||||||
#include <linux/buffer_head.h>
|
#include <linux/buffer_head.h>
|
||||||
#include <linux/jbd2.h>
|
#include <linux/jbd2.h>
|
||||||
#include <linux/ext4_fs.h>
|
#include "ext4.h"
|
||||||
|
|
||||||
#ifdef EXT4FS_DEBUG
|
#ifdef EXT4FS_DEBUG
|
||||||
|
|
||||||
|
@ -23,10 +23,10 @@
|
|||||||
|
|
||||||
#include <linux/fs.h>
|
#include <linux/fs.h>
|
||||||
#include <linux/jbd2.h>
|
#include <linux/jbd2.h>
|
||||||
#include <linux/ext4_fs.h>
|
|
||||||
#include <linux/buffer_head.h>
|
#include <linux/buffer_head.h>
|
||||||
#include <linux/slab.h>
|
#include <linux/slab.h>
|
||||||
#include <linux/rbtree.h>
|
#include <linux/rbtree.h>
|
||||||
|
#include "ext4.h"
|
||||||
|
|
||||||
static unsigned char ext4_filetype_table[] = {
|
static unsigned char ext4_filetype_table[] = {
|
||||||
DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
|
DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
|
||||||
@ -42,7 +42,7 @@ const struct file_operations ext4_dir_operations = {
|
|||||||
.llseek = generic_file_llseek,
|
.llseek = generic_file_llseek,
|
||||||
.read = generic_read_dir,
|
.read = generic_read_dir,
|
||||||
.readdir = ext4_readdir, /* we take BKL. needed?*/
|
.readdir = ext4_readdir, /* we take BKL. needed?*/
|
||||||
.ioctl = ext4_ioctl, /* BKL held */
|
.unlocked_ioctl = ext4_ioctl,
|
||||||
#ifdef CONFIG_COMPAT
|
#ifdef CONFIG_COMPAT
|
||||||
.compat_ioctl = ext4_compat_ioctl,
|
.compat_ioctl = ext4_compat_ioctl,
|
||||||
#endif
|
#endif
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* linux/include/linux/ext4_fs.h
|
* ext4.h
|
||||||
*
|
*
|
||||||
* Copyright (C) 1992, 1993, 1994, 1995
|
* Copyright (C) 1992, 1993, 1994, 1995
|
||||||
* Remy Card (card@masi.ibp.fr)
|
* Remy Card (card@masi.ibp.fr)
|
||||||
@ -13,14 +13,13 @@
|
|||||||
* Copyright (C) 1991, 1992 Linus Torvalds
|
* Copyright (C) 1991, 1992 Linus Torvalds
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#ifndef _LINUX_EXT4_FS_H
|
#ifndef _EXT4_H
|
||||||
#define _LINUX_EXT4_FS_H
|
#define _EXT4_H
|
||||||
|
|
||||||
#include <linux/types.h>
|
#include <linux/types.h>
|
||||||
#include <linux/blkdev.h>
|
#include <linux/blkdev.h>
|
||||||
#include <linux/magic.h>
|
#include <linux/magic.h>
|
||||||
|
#include "ext4_i.h"
|
||||||
#include <linux/ext4_fs_i.h>
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The second extended filesystem constants/structures
|
* The second extended filesystem constants/structures
|
||||||
@ -176,8 +175,7 @@ struct ext4_group_desc
|
|||||||
#define EXT4_BG_INODE_ZEROED 0x0004 /* On-disk itable initialized to zero */
|
#define EXT4_BG_INODE_ZEROED 0x0004 /* On-disk itable initialized to zero */
|
||||||
|
|
||||||
#ifdef __KERNEL__
|
#ifdef __KERNEL__
|
||||||
#include <linux/ext4_fs_i.h>
|
#include "ext4_sb.h"
|
||||||
#include <linux/ext4_fs_sb.h>
|
|
||||||
#endif
|
#endif
|
||||||
/*
|
/*
|
||||||
* Macro-instructions used to manage group descriptors
|
* Macro-instructions used to manage group descriptors
|
||||||
@ -231,6 +229,7 @@ struct ext4_group_desc
|
|||||||
#define EXT4_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
|
#define EXT4_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
|
||||||
#define EXT4_HUGE_FILE_FL 0x00040000 /* Set to each huge file */
|
#define EXT4_HUGE_FILE_FL 0x00040000 /* Set to each huge file */
|
||||||
#define EXT4_EXTENTS_FL 0x00080000 /* Inode uses extents */
|
#define EXT4_EXTENTS_FL 0x00080000 /* Inode uses extents */
|
||||||
|
#define EXT4_EXT_MIGRATE 0x00100000 /* Inode is migrating */
|
||||||
#define EXT4_RESERVED_FL 0x80000000 /* reserved for ext4 lib */
|
#define EXT4_RESERVED_FL 0x80000000 /* reserved for ext4 lib */
|
||||||
|
|
||||||
#define EXT4_FL_USER_VISIBLE 0x000BDFFF /* User visible flags */
|
#define EXT4_FL_USER_VISIBLE 0x000BDFFF /* User visible flags */
|
||||||
@ -1049,8 +1048,7 @@ extern int ext4_block_truncate_page(handle_t *handle, struct page *page,
|
|||||||
struct address_space *mapping, loff_t from);
|
struct address_space *mapping, loff_t from);
|
||||||
|
|
||||||
/* ioctl.c */
|
/* ioctl.c */
|
||||||
extern int ext4_ioctl (struct inode *, struct file *, unsigned int,
|
extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
|
||||||
unsigned long);
|
|
||||||
extern long ext4_compat_ioctl (struct file *, unsigned int, unsigned long);
|
extern long ext4_compat_ioctl (struct file *, unsigned int, unsigned long);
|
||||||
|
|
||||||
/* migrate.c */
|
/* migrate.c */
|
||||||
@ -1204,4 +1202,4 @@ extern int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode,
|
|||||||
int extend_disksize);
|
int extend_disksize);
|
||||||
#endif /* __KERNEL__ */
|
#endif /* __KERNEL__ */
|
||||||
|
|
||||||
#endif /* _LINUX_EXT4_FS_H */
|
#endif /* _EXT4_H */
|
@ -16,10 +16,10 @@
|
|||||||
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
|
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#ifndef _LINUX_EXT4_EXTENTS
|
#ifndef _EXT4_EXTENTS
|
||||||
#define _LINUX_EXT4_EXTENTS
|
#define _EXT4_EXTENTS
|
||||||
|
|
||||||
#include <linux/ext4_fs.h>
|
#include "ext4.h"
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* With AGGRESSIVE_TEST defined, the capacity of index/leaf blocks
|
* With AGGRESSIVE_TEST defined, the capacity of index/leaf blocks
|
||||||
@ -228,5 +228,5 @@ extern int ext4_ext_search_left(struct inode *, struct ext4_ext_path *,
|
|||||||
extern int ext4_ext_search_right(struct inode *, struct ext4_ext_path *,
|
extern int ext4_ext_search_right(struct inode *, struct ext4_ext_path *,
|
||||||
ext4_lblk_t *, ext4_fsblk_t *);
|
ext4_lblk_t *, ext4_fsblk_t *);
|
||||||
extern void ext4_ext_drop_refs(struct ext4_ext_path *);
|
extern void ext4_ext_drop_refs(struct ext4_ext_path *);
|
||||||
#endif /* _LINUX_EXT4_EXTENTS */
|
#endif /* _EXT4_EXTENTS */
|
||||||
|
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* linux/include/linux/ext4_fs_i.h
|
* ext4_i.h
|
||||||
*
|
*
|
||||||
* Copyright (C) 1992, 1993, 1994, 1995
|
* Copyright (C) 1992, 1993, 1994, 1995
|
||||||
* Remy Card (card@masi.ibp.fr)
|
* Remy Card (card@masi.ibp.fr)
|
||||||
@ -13,8 +13,8 @@
|
|||||||
* Copyright (C) 1991, 1992 Linus Torvalds
|
* Copyright (C) 1991, 1992 Linus Torvalds
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#ifndef _LINUX_EXT4_FS_I
|
#ifndef _EXT4_I
|
||||||
#define _LINUX_EXT4_FS_I
|
#define _EXT4_I
|
||||||
|
|
||||||
#include <linux/rwsem.h>
|
#include <linux/rwsem.h>
|
||||||
#include <linux/rbtree.h>
|
#include <linux/rbtree.h>
|
||||||
@ -164,4 +164,4 @@ struct ext4_inode_info {
|
|||||||
spinlock_t i_prealloc_lock;
|
spinlock_t i_prealloc_lock;
|
||||||
};
|
};
|
||||||
|
|
||||||
#endif /* _LINUX_EXT4_FS_I */
|
#endif /* _EXT4_I */
|
@ -2,14 +2,14 @@
|
|||||||
* Interface between ext4 and JBD
|
* Interface between ext4 and JBD
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#include <linux/ext4_jbd2.h>
|
#include "ext4_jbd2.h"
|
||||||
|
|
||||||
int __ext4_journal_get_undo_access(const char *where, handle_t *handle,
|
int __ext4_journal_get_undo_access(const char *where, handle_t *handle,
|
||||||
struct buffer_head *bh)
|
struct buffer_head *bh)
|
||||||
{
|
{
|
||||||
int err = jbd2_journal_get_undo_access(handle, bh);
|
int err = jbd2_journal_get_undo_access(handle, bh);
|
||||||
if (err)
|
if (err)
|
||||||
ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
|
ext4_journal_abort_handle(where, __func__, bh, handle, err);
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -18,7 +18,7 @@ int __ext4_journal_get_write_access(const char *where, handle_t *handle,
|
|||||||
{
|
{
|
||||||
int err = jbd2_journal_get_write_access(handle, bh);
|
int err = jbd2_journal_get_write_access(handle, bh);
|
||||||
if (err)
|
if (err)
|
||||||
ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
|
ext4_journal_abort_handle(where, __func__, bh, handle, err);
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -27,7 +27,7 @@ int __ext4_journal_forget(const char *where, handle_t *handle,
|
|||||||
{
|
{
|
||||||
int err = jbd2_journal_forget(handle, bh);
|
int err = jbd2_journal_forget(handle, bh);
|
||||||
if (err)
|
if (err)
|
||||||
ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
|
ext4_journal_abort_handle(where, __func__, bh, handle, err);
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -36,7 +36,7 @@ int __ext4_journal_revoke(const char *where, handle_t *handle,
|
|||||||
{
|
{
|
||||||
int err = jbd2_journal_revoke(handle, blocknr, bh);
|
int err = jbd2_journal_revoke(handle, blocknr, bh);
|
||||||
if (err)
|
if (err)
|
||||||
ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
|
ext4_journal_abort_handle(where, __func__, bh, handle, err);
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -45,7 +45,7 @@ int __ext4_journal_get_create_access(const char *where,
|
|||||||
{
|
{
|
||||||
int err = jbd2_journal_get_create_access(handle, bh);
|
int err = jbd2_journal_get_create_access(handle, bh);
|
||||||
if (err)
|
if (err)
|
||||||
ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
|
ext4_journal_abort_handle(where, __func__, bh, handle, err);
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -54,6 +54,6 @@ int __ext4_journal_dirty_metadata(const char *where,
|
|||||||
{
|
{
|
||||||
int err = jbd2_journal_dirty_metadata(handle, bh);
|
int err = jbd2_journal_dirty_metadata(handle, bh);
|
||||||
if (err)
|
if (err)
|
||||||
ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
|
ext4_journal_abort_handle(where, __func__, bh, handle, err);
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* linux/include/linux/ext4_jbd2.h
|
* ext4_jbd2.h
|
||||||
*
|
*
|
||||||
* Written by Stephen C. Tweedie <sct@redhat.com>, 1999
|
* Written by Stephen C. Tweedie <sct@redhat.com>, 1999
|
||||||
*
|
*
|
||||||
@ -12,12 +12,12 @@
|
|||||||
* Ext4-specific journaling extensions.
|
* Ext4-specific journaling extensions.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#ifndef _LINUX_EXT4_JBD2_H
|
#ifndef _EXT4_JBD2_H
|
||||||
#define _LINUX_EXT4_JBD2_H
|
#define _EXT4_JBD2_H
|
||||||
|
|
||||||
#include <linux/fs.h>
|
#include <linux/fs.h>
|
||||||
#include <linux/jbd2.h>
|
#include <linux/jbd2.h>
|
||||||
#include <linux/ext4_fs.h>
|
#include "ext4.h"
|
||||||
|
|
||||||
#define EXT4_JOURNAL(inode) (EXT4_SB((inode)->i_sb)->s_journal)
|
#define EXT4_JOURNAL(inode) (EXT4_SB((inode)->i_sb)->s_journal)
|
||||||
|
|
||||||
@ -228,4 +228,4 @@ static inline int ext4_should_writeback_data(struct inode *inode)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
#endif /* _LINUX_EXT4_JBD2_H */
|
#endif /* _EXT4_JBD2_H */
|
@ -1,5 +1,5 @@
|
|||||||
/*
|
/*
|
||||||
* linux/include/linux/ext4_fs_sb.h
|
* ext4_sb.h
|
||||||
*
|
*
|
||||||
* Copyright (C) 1992, 1993, 1994, 1995
|
* Copyright (C) 1992, 1993, 1994, 1995
|
||||||
* Remy Card (card@masi.ibp.fr)
|
* Remy Card (card@masi.ibp.fr)
|
||||||
@ -13,8 +13,8 @@
|
|||||||
* Copyright (C) 1991, 1992 Linus Torvalds
|
* Copyright (C) 1991, 1992 Linus Torvalds
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#ifndef _LINUX_EXT4_FS_SB
|
#ifndef _EXT4_SB
|
||||||
#define _LINUX_EXT4_FS_SB
|
#define _EXT4_SB
|
||||||
|
|
||||||
#ifdef __KERNEL__
|
#ifdef __KERNEL__
|
||||||
#include <linux/timer.h>
|
#include <linux/timer.h>
|
||||||
@ -145,4 +145,4 @@ struct ext4_sb_info {
|
|||||||
struct ext4_locality_group *s_locality_groups;
|
struct ext4_locality_group *s_locality_groups;
|
||||||
};
|
};
|
||||||
|
|
||||||
#endif /* _LINUX_EXT4_FS_SB */
|
#endif /* _EXT4_SB */
|
@ -32,7 +32,6 @@
|
|||||||
#include <linux/module.h>
|
#include <linux/module.h>
|
||||||
#include <linux/fs.h>
|
#include <linux/fs.h>
|
||||||
#include <linux/time.h>
|
#include <linux/time.h>
|
||||||
#include <linux/ext4_jbd2.h>
|
|
||||||
#include <linux/jbd2.h>
|
#include <linux/jbd2.h>
|
||||||
#include <linux/highuid.h>
|
#include <linux/highuid.h>
|
||||||
#include <linux/pagemap.h>
|
#include <linux/pagemap.h>
|
||||||
@ -40,8 +39,9 @@
|
|||||||
#include <linux/string.h>
|
#include <linux/string.h>
|
||||||
#include <linux/slab.h>
|
#include <linux/slab.h>
|
||||||
#include <linux/falloc.h>
|
#include <linux/falloc.h>
|
||||||
#include <linux/ext4_fs_extents.h>
|
|
||||||
#include <asm/uaccess.h>
|
#include <asm/uaccess.h>
|
||||||
|
#include "ext4_jbd2.h"
|
||||||
|
#include "ext4_extents.h"
|
||||||
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -308,7 +308,7 @@ static int __ext4_ext_check_header(const char *function, struct inode *inode,
|
|||||||
}
|
}
|
||||||
|
|
||||||
#define ext4_ext_check_header(inode, eh, depth) \
|
#define ext4_ext_check_header(inode, eh, depth) \
|
||||||
__ext4_ext_check_header(__FUNCTION__, inode, eh, depth)
|
__ext4_ext_check_header(__func__, inode, eh, depth)
|
||||||
|
|
||||||
#ifdef EXT_DEBUG
|
#ifdef EXT_DEBUG
|
||||||
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
|
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
|
||||||
@ -614,7 +614,7 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
|
|||||||
|
|
||||||
ix->ei_block = cpu_to_le32(logical);
|
ix->ei_block = cpu_to_le32(logical);
|
||||||
ext4_idx_store_pblock(ix, ptr);
|
ext4_idx_store_pblock(ix, ptr);
|
||||||
curp->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(curp->p_hdr->eh_entries)+1);
|
le16_add_cpu(&curp->p_hdr->eh_entries, 1);
|
||||||
|
|
||||||
BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
|
BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
|
||||||
> le16_to_cpu(curp->p_hdr->eh_max));
|
> le16_to_cpu(curp->p_hdr->eh_max));
|
||||||
@ -736,7 +736,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
|
|||||||
}
|
}
|
||||||
if (m) {
|
if (m) {
|
||||||
memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m);
|
memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m);
|
||||||
neh->eh_entries = cpu_to_le16(le16_to_cpu(neh->eh_entries)+m);
|
le16_add_cpu(&neh->eh_entries, m);
|
||||||
}
|
}
|
||||||
|
|
||||||
set_buffer_uptodate(bh);
|
set_buffer_uptodate(bh);
|
||||||
@ -753,8 +753,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
|
|||||||
err = ext4_ext_get_access(handle, inode, path + depth);
|
err = ext4_ext_get_access(handle, inode, path + depth);
|
||||||
if (err)
|
if (err)
|
||||||
goto cleanup;
|
goto cleanup;
|
||||||
path[depth].p_hdr->eh_entries =
|
le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
|
||||||
cpu_to_le16(le16_to_cpu(path[depth].p_hdr->eh_entries)-m);
|
|
||||||
err = ext4_ext_dirty(handle, inode, path + depth);
|
err = ext4_ext_dirty(handle, inode, path + depth);
|
||||||
if (err)
|
if (err)
|
||||||
goto cleanup;
|
goto cleanup;
|
||||||
@ -817,8 +816,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
|
|||||||
if (m) {
|
if (m) {
|
||||||
memmove(++fidx, path[i].p_idx - m,
|
memmove(++fidx, path[i].p_idx - m,
|
||||||
sizeof(struct ext4_extent_idx) * m);
|
sizeof(struct ext4_extent_idx) * m);
|
||||||
neh->eh_entries =
|
le16_add_cpu(&neh->eh_entries, m);
|
||||||
cpu_to_le16(le16_to_cpu(neh->eh_entries) + m);
|
|
||||||
}
|
}
|
||||||
set_buffer_uptodate(bh);
|
set_buffer_uptodate(bh);
|
||||||
unlock_buffer(bh);
|
unlock_buffer(bh);
|
||||||
@ -834,7 +832,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
|
|||||||
err = ext4_ext_get_access(handle, inode, path + i);
|
err = ext4_ext_get_access(handle, inode, path + i);
|
||||||
if (err)
|
if (err)
|
||||||
goto cleanup;
|
goto cleanup;
|
||||||
path[i].p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path[i].p_hdr->eh_entries)-m);
|
le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
|
||||||
err = ext4_ext_dirty(handle, inode, path + i);
|
err = ext4_ext_dirty(handle, inode, path + i);
|
||||||
if (err)
|
if (err)
|
||||||
goto cleanup;
|
goto cleanup;
|
||||||
@ -1369,7 +1367,7 @@ int ext4_ext_try_to_merge(struct inode *inode,
|
|||||||
* sizeof(struct ext4_extent);
|
* sizeof(struct ext4_extent);
|
||||||
memmove(ex + 1, ex + 2, len);
|
memmove(ex + 1, ex + 2, len);
|
||||||
}
|
}
|
||||||
eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries) - 1);
|
le16_add_cpu(&eh->eh_entries, -1);
|
||||||
merge_done = 1;
|
merge_done = 1;
|
||||||
WARN_ON(eh->eh_entries == 0);
|
WARN_ON(eh->eh_entries == 0);
|
||||||
if (!eh->eh_entries)
|
if (!eh->eh_entries)
|
||||||
@ -1560,7 +1558,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
|
|||||||
path[depth].p_ext = nearex;
|
path[depth].p_ext = nearex;
|
||||||
}
|
}
|
||||||
|
|
||||||
eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)+1);
|
le16_add_cpu(&eh->eh_entries, 1);
|
||||||
nearex = path[depth].p_ext;
|
nearex = path[depth].p_ext;
|
||||||
nearex->ee_block = newext->ee_block;
|
nearex->ee_block = newext->ee_block;
|
||||||
ext4_ext_store_pblock(nearex, ext_pblock(newext));
|
ext4_ext_store_pblock(nearex, ext_pblock(newext));
|
||||||
@ -1699,7 +1697,7 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
|
|||||||
err = ext4_ext_get_access(handle, inode, path);
|
err = ext4_ext_get_access(handle, inode, path);
|
||||||
if (err)
|
if (err)
|
||||||
return err;
|
return err;
|
||||||
path->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path->p_hdr->eh_entries)-1);
|
le16_add_cpu(&path->p_hdr->eh_entries, -1);
|
||||||
err = ext4_ext_dirty(handle, inode, path);
|
err = ext4_ext_dirty(handle, inode, path);
|
||||||
if (err)
|
if (err)
|
||||||
return err;
|
return err;
|
||||||
@ -1902,7 +1900,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
|
|||||||
if (num == 0) {
|
if (num == 0) {
|
||||||
/* this extent is removed; mark slot entirely unused */
|
/* this extent is removed; mark slot entirely unused */
|
||||||
ext4_ext_store_pblock(ex, 0);
|
ext4_ext_store_pblock(ex, 0);
|
||||||
eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)-1);
|
le16_add_cpu(&eh->eh_entries, -1);
|
||||||
}
|
}
|
||||||
|
|
||||||
ex->ee_block = cpu_to_le32(block);
|
ex->ee_block = cpu_to_le32(block);
|
||||||
@ -1979,7 +1977,7 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
|
|||||||
* We start scanning from right side, freeing all the blocks
|
* We start scanning from right side, freeing all the blocks
|
||||||
* after i_size and walking into the tree depth-wise.
|
* after i_size and walking into the tree depth-wise.
|
||||||
*/
|
*/
|
||||||
path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_KERNEL);
|
path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
|
||||||
if (path == NULL) {
|
if (path == NULL) {
|
||||||
ext4_journal_stop(handle);
|
ext4_journal_stop(handle);
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
@ -2138,6 +2136,82 @@ void ext4_ext_release(struct super_block *sb)
|
|||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void bi_complete(struct bio *bio, int error)
|
||||||
|
{
|
||||||
|
complete((struct completion *)bio->bi_private);
|
||||||
|
}
|
||||||
|
|
||||||
|
/* FIXME!! we need to try to merge to left or right after zero-out */
|
||||||
|
static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
|
||||||
|
{
|
||||||
|
int ret = -EIO;
|
||||||
|
struct bio *bio;
|
||||||
|
int blkbits, blocksize;
|
||||||
|
sector_t ee_pblock;
|
||||||
|
struct completion event;
|
||||||
|
unsigned int ee_len, len, done, offset;
|
||||||
|
|
||||||
|
|
||||||
|
blkbits = inode->i_blkbits;
|
||||||
|
blocksize = inode->i_sb->s_blocksize;
|
||||||
|
ee_len = ext4_ext_get_actual_len(ex);
|
||||||
|
ee_pblock = ext_pblock(ex);
|
||||||
|
|
||||||
|
/* convert ee_pblock to 512 byte sectors */
|
||||||
|
ee_pblock = ee_pblock << (blkbits - 9);
|
||||||
|
|
||||||
|
while (ee_len > 0) {
|
||||||
|
|
||||||
|
if (ee_len > BIO_MAX_PAGES)
|
||||||
|
len = BIO_MAX_PAGES;
|
||||||
|
else
|
||||||
|
len = ee_len;
|
||||||
|
|
||||||
|
bio = bio_alloc(GFP_NOIO, len);
|
||||||
|
if (!bio)
|
||||||
|
return -ENOMEM;
|
||||||
|
bio->bi_sector = ee_pblock;
|
||||||
|
bio->bi_bdev = inode->i_sb->s_bdev;
|
||||||
|
|
||||||
|
done = 0;
|
||||||
|
offset = 0;
|
||||||
|
while (done < len) {
|
||||||
|
ret = bio_add_page(bio, ZERO_PAGE(0),
|
||||||
|
blocksize, offset);
|
||||||
|
if (ret != blocksize) {
|
||||||
|
/*
|
||||||
|
* We can't add any more pages because of
|
||||||
|
* hardware limitations. Start a new bio.
|
||||||
|
*/
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
done++;
|
||||||
|
offset += blocksize;
|
||||||
|
if (offset >= PAGE_CACHE_SIZE)
|
||||||
|
offset = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
init_completion(&event);
|
||||||
|
bio->bi_private = &event;
|
||||||
|
bio->bi_end_io = bi_complete;
|
||||||
|
submit_bio(WRITE, bio);
|
||||||
|
wait_for_completion(&event);
|
||||||
|
|
||||||
|
if (test_bit(BIO_UPTODATE, &bio->bi_flags))
|
||||||
|
ret = 0;
|
||||||
|
else {
|
||||||
|
ret = -EIO;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
bio_put(bio);
|
||||||
|
ee_len -= done;
|
||||||
|
ee_pblock += done << (blkbits - 9);
|
||||||
|
}
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
#define EXT4_EXT_ZERO_LEN 7
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* This function is called by ext4_ext_get_blocks() if someone tries to write
|
* This function is called by ext4_ext_get_blocks() if someone tries to write
|
||||||
* to an uninitialized extent. It may result in splitting the uninitialized
|
* to an uninitialized extent. It may result in splitting the uninitialized
|
||||||
@ -2154,7 +2228,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
|
|||||||
ext4_lblk_t iblock,
|
ext4_lblk_t iblock,
|
||||||
unsigned long max_blocks)
|
unsigned long max_blocks)
|
||||||
{
|
{
|
||||||
struct ext4_extent *ex, newex;
|
struct ext4_extent *ex, newex, orig_ex;
|
||||||
struct ext4_extent *ex1 = NULL;
|
struct ext4_extent *ex1 = NULL;
|
||||||
struct ext4_extent *ex2 = NULL;
|
struct ext4_extent *ex2 = NULL;
|
||||||
struct ext4_extent *ex3 = NULL;
|
struct ext4_extent *ex3 = NULL;
|
||||||
@ -2173,10 +2247,26 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
|
|||||||
allocated = ee_len - (iblock - ee_block);
|
allocated = ee_len - (iblock - ee_block);
|
||||||
newblock = iblock - ee_block + ext_pblock(ex);
|
newblock = iblock - ee_block + ext_pblock(ex);
|
||||||
ex2 = ex;
|
ex2 = ex;
|
||||||
|
orig_ex.ee_block = ex->ee_block;
|
||||||
|
orig_ex.ee_len = cpu_to_le16(ee_len);
|
||||||
|
ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));
|
||||||
|
|
||||||
err = ext4_ext_get_access(handle, inode, path + depth);
|
err = ext4_ext_get_access(handle, inode, path + depth);
|
||||||
if (err)
|
if (err)
|
||||||
goto out;
|
goto out;
|
||||||
|
/* If extent has less than 2*EXT4_EXT_ZERO_LEN zerout directly */
|
||||||
|
if (ee_len <= 2*EXT4_EXT_ZERO_LEN) {
|
||||||
|
err = ext4_ext_zeroout(inode, &orig_ex);
|
||||||
|
if (err)
|
||||||
|
goto fix_extent_len;
|
||||||
|
/* update the extent length and mark as initialized */
|
||||||
|
ex->ee_block = orig_ex.ee_block;
|
||||||
|
ex->ee_len = orig_ex.ee_len;
|
||||||
|
ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
|
||||||
|
ext4_ext_dirty(handle, inode, path + depth);
|
||||||
|
/* zeroed the full extent */
|
||||||
|
return allocated;
|
||||||
|
}
|
||||||
|
|
||||||
/* ex1: ee_block to iblock - 1 : uninitialized */
|
/* ex1: ee_block to iblock - 1 : uninitialized */
|
||||||
if (iblock > ee_block) {
|
if (iblock > ee_block) {
|
||||||
@ -2195,19 +2285,103 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
|
|||||||
/* ex3: to ee_block + ee_len : uninitialised */
|
/* ex3: to ee_block + ee_len : uninitialised */
|
||||||
if (allocated > max_blocks) {
|
if (allocated > max_blocks) {
|
||||||
unsigned int newdepth;
|
unsigned int newdepth;
|
||||||
|
/* If extent has less than EXT4_EXT_ZERO_LEN zerout directly */
|
||||||
|
if (allocated <= EXT4_EXT_ZERO_LEN) {
|
||||||
|
/* Mark first half uninitialized.
|
||||||
|
* Mark second half initialized and zero out the
|
||||||
|
* initialized extent
|
||||||
|
*/
|
||||||
|
ex->ee_block = orig_ex.ee_block;
|
||||||
|
ex->ee_len = cpu_to_le16(ee_len - allocated);
|
||||||
|
ext4_ext_mark_uninitialized(ex);
|
||||||
|
ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
|
||||||
|
ext4_ext_dirty(handle, inode, path + depth);
|
||||||
|
|
||||||
|
ex3 = &newex;
|
||||||
|
ex3->ee_block = cpu_to_le32(iblock);
|
||||||
|
ext4_ext_store_pblock(ex3, newblock);
|
||||||
|
ex3->ee_len = cpu_to_le16(allocated);
|
||||||
|
err = ext4_ext_insert_extent(handle, inode, path, ex3);
|
||||||
|
if (err == -ENOSPC) {
|
||||||
|
err = ext4_ext_zeroout(inode, &orig_ex);
|
||||||
|
if (err)
|
||||||
|
goto fix_extent_len;
|
||||||
|
ex->ee_block = orig_ex.ee_block;
|
||||||
|
ex->ee_len = orig_ex.ee_len;
|
||||||
|
ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
|
||||||
|
ext4_ext_dirty(handle, inode, path + depth);
|
||||||
|
/* zeroed the full extent */
|
||||||
|
return allocated;
|
||||||
|
|
||||||
|
} else if (err)
|
||||||
|
goto fix_extent_len;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* We need to zero out the second half because
|
||||||
|
* an fallocate request can update file size and
|
||||||
|
* converting the second half to initialized extent
|
||||||
|
* implies that we can leak some junk data to user
|
||||||
|
* space.
|
||||||
|
*/
|
||||||
|
err = ext4_ext_zeroout(inode, ex3);
|
||||||
|
if (err) {
|
||||||
|
/*
|
||||||
|
* We should actually mark the
|
||||||
|
* second half as uninit and return error
|
||||||
|
* Insert would have changed the extent
|
||||||
|
*/
|
||||||
|
depth = ext_depth(inode);
|
||||||
|
ext4_ext_drop_refs(path);
|
||||||
|
path = ext4_ext_find_extent(inode,
|
||||||
|
iblock, path);
|
||||||
|
if (IS_ERR(path)) {
|
||||||
|
err = PTR_ERR(path);
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
ex = path[depth].p_ext;
|
||||||
|
err = ext4_ext_get_access(handle, inode,
|
||||||
|
path + depth);
|
||||||
|
if (err)
|
||||||
|
return err;
|
||||||
|
ext4_ext_mark_uninitialized(ex);
|
||||||
|
ext4_ext_dirty(handle, inode, path + depth);
|
||||||
|
return err;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* zeroed the second half */
|
||||||
|
return allocated;
|
||||||
|
}
|
||||||
ex3 = &newex;
|
ex3 = &newex;
|
||||||
ex3->ee_block = cpu_to_le32(iblock + max_blocks);
|
ex3->ee_block = cpu_to_le32(iblock + max_blocks);
|
||||||
ext4_ext_store_pblock(ex3, newblock + max_blocks);
|
ext4_ext_store_pblock(ex3, newblock + max_blocks);
|
||||||
ex3->ee_len = cpu_to_le16(allocated - max_blocks);
|
ex3->ee_len = cpu_to_le16(allocated - max_blocks);
|
||||||
ext4_ext_mark_uninitialized(ex3);
|
ext4_ext_mark_uninitialized(ex3);
|
||||||
err = ext4_ext_insert_extent(handle, inode, path, ex3);
|
err = ext4_ext_insert_extent(handle, inode, path, ex3);
|
||||||
if (err)
|
if (err == -ENOSPC) {
|
||||||
goto out;
|
err = ext4_ext_zeroout(inode, &orig_ex);
|
||||||
|
if (err)
|
||||||
|
goto fix_extent_len;
|
||||||
|
/* update the extent length and mark as initialized */
|
||||||
|
ex->ee_block = orig_ex.ee_block;
|
||||||
|
ex->ee_len = orig_ex.ee_len;
|
||||||
|
ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
|
||||||
|
ext4_ext_dirty(handle, inode, path + depth);
|
||||||
|
/* zeroed the full extent */
|
||||||
|
return allocated;
|
||||||
|
|
||||||
|
} else if (err)
|
||||||
|
goto fix_extent_len;
|
||||||
/*
|
/*
|
||||||
* The depth, and hence eh & ex might change
|
* The depth, and hence eh & ex might change
|
||||||
* as part of the insert above.
|
* as part of the insert above.
|
||||||
*/
|
*/
|
||||||
newdepth = ext_depth(inode);
|
newdepth = ext_depth(inode);
|
||||||
|
/*
|
||||||
|
* update the extent length after successfull insert of the
|
||||||
|
* split extent
|
||||||
|
*/
|
||||||
|
orig_ex.ee_len = cpu_to_le16(ee_len -
|
||||||
|
ext4_ext_get_actual_len(ex3));
|
||||||
if (newdepth != depth) {
|
if (newdepth != depth) {
|
||||||
depth = newdepth;
|
depth = newdepth;
|
||||||
ext4_ext_drop_refs(path);
|
ext4_ext_drop_refs(path);
|
||||||
@ -2226,6 +2400,24 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
|
|||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
allocated = max_blocks;
|
allocated = max_blocks;
|
||||||
|
|
||||||
|
/* If extent has less than EXT4_EXT_ZERO_LEN and we are trying
|
||||||
|
* to insert a extent in the middle zerout directly
|
||||||
|
* otherwise give the extent a chance to merge to left
|
||||||
|
*/
|
||||||
|
if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN &&
|
||||||
|
iblock != ee_block) {
|
||||||
|
err = ext4_ext_zeroout(inode, &orig_ex);
|
||||||
|
if (err)
|
||||||
|
goto fix_extent_len;
|
||||||
|
/* update the extent length and mark as initialized */
|
||||||
|
ex->ee_block = orig_ex.ee_block;
|
||||||
|
ex->ee_len = orig_ex.ee_len;
|
||||||
|
ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
|
||||||
|
ext4_ext_dirty(handle, inode, path + depth);
|
||||||
|
/* zero out the first half */
|
||||||
|
return allocated;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
/*
|
/*
|
||||||
* If there was a change of depth as part of the
|
* If there was a change of depth as part of the
|
||||||
@ -2282,8 +2474,29 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
|
|||||||
goto out;
|
goto out;
|
||||||
insert:
|
insert:
|
||||||
err = ext4_ext_insert_extent(handle, inode, path, &newex);
|
err = ext4_ext_insert_extent(handle, inode, path, &newex);
|
||||||
|
if (err == -ENOSPC) {
|
||||||
|
err = ext4_ext_zeroout(inode, &orig_ex);
|
||||||
|
if (err)
|
||||||
|
goto fix_extent_len;
|
||||||
|
/* update the extent length and mark as initialized */
|
||||||
|
ex->ee_block = orig_ex.ee_block;
|
||||||
|
ex->ee_len = orig_ex.ee_len;
|
||||||
|
ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
|
||||||
|
ext4_ext_dirty(handle, inode, path + depth);
|
||||||
|
/* zero out the first half */
|
||||||
|
return allocated;
|
||||||
|
} else if (err)
|
||||||
|
goto fix_extent_len;
|
||||||
out:
|
out:
|
||||||
return err ? err : allocated;
|
return err ? err : allocated;
|
||||||
|
|
||||||
|
fix_extent_len:
|
||||||
|
ex->ee_block = orig_ex.ee_block;
|
||||||
|
ex->ee_len = orig_ex.ee_len;
|
||||||
|
ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
|
||||||
|
ext4_ext_mark_uninitialized(ex);
|
||||||
|
ext4_ext_dirty(handle, inode, path + depth);
|
||||||
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -2393,8 +2606,20 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
|
|||||||
}
|
}
|
||||||
if (create == EXT4_CREATE_UNINITIALIZED_EXT)
|
if (create == EXT4_CREATE_UNINITIALIZED_EXT)
|
||||||
goto out;
|
goto out;
|
||||||
if (!create)
|
if (!create) {
|
||||||
|
/*
|
||||||
|
* We have blocks reserved already. We
|
||||||
|
* return allocated blocks so that delalloc
|
||||||
|
* won't do block reservation for us. But
|
||||||
|
* the buffer head will be unmapped so that
|
||||||
|
* a read from the block returns 0s.
|
||||||
|
*/
|
||||||
|
if (allocated > max_blocks)
|
||||||
|
allocated = max_blocks;
|
||||||
|
/* mark the buffer unwritten */
|
||||||
|
__set_bit(BH_Unwritten, &bh_result->b_state);
|
||||||
goto out2;
|
goto out2;
|
||||||
|
}
|
||||||
|
|
||||||
ret = ext4_ext_convert_to_initialized(handle, inode,
|
ret = ext4_ext_convert_to_initialized(handle, inode,
|
||||||
path, iblock,
|
path, iblock,
|
||||||
@ -2584,6 +2809,8 @@ void ext4_ext_truncate(struct inode * inode, struct page *page)
|
|||||||
ext4_orphan_del(handle, inode);
|
ext4_orphan_del(handle, inode);
|
||||||
|
|
||||||
up_write(&EXT4_I(inode)->i_data_sem);
|
up_write(&EXT4_I(inode)->i_data_sem);
|
||||||
|
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
|
||||||
|
ext4_mark_inode_dirty(handle, inode);
|
||||||
ext4_journal_stop(handle);
|
ext4_journal_stop(handle);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2608,6 +2835,28 @@ int ext4_ext_writepage_trans_blocks(struct inode *inode, int num)
|
|||||||
return needed;
|
return needed;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void ext4_falloc_update_inode(struct inode *inode,
|
||||||
|
int mode, loff_t new_size, int update_ctime)
|
||||||
|
{
|
||||||
|
struct timespec now;
|
||||||
|
|
||||||
|
if (update_ctime) {
|
||||||
|
now = current_fs_time(inode->i_sb);
|
||||||
|
if (!timespec_equal(&inode->i_ctime, &now))
|
||||||
|
inode->i_ctime = now;
|
||||||
|
}
|
||||||
|
/*
|
||||||
|
* Update only when preallocation was requested beyond
|
||||||
|
* the file size.
|
||||||
|
*/
|
||||||
|
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
|
||||||
|
new_size > i_size_read(inode)) {
|
||||||
|
i_size_write(inode, new_size);
|
||||||
|
EXT4_I(inode)->i_disksize = new_size;
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* preallocate space for a file. This implements ext4's fallocate inode
|
* preallocate space for a file. This implements ext4's fallocate inode
|
||||||
* operation, which gets called from sys_fallocate system call.
|
* operation, which gets called from sys_fallocate system call.
|
||||||
@ -2619,8 +2868,8 @@ long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
|
|||||||
{
|
{
|
||||||
handle_t *handle;
|
handle_t *handle;
|
||||||
ext4_lblk_t block;
|
ext4_lblk_t block;
|
||||||
|
loff_t new_size;
|
||||||
unsigned long max_blocks;
|
unsigned long max_blocks;
|
||||||
ext4_fsblk_t nblocks = 0;
|
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
int ret2 = 0;
|
int ret2 = 0;
|
||||||
int retries = 0;
|
int retries = 0;
|
||||||
@ -2639,9 +2888,12 @@ long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
|
|||||||
return -ENODEV;
|
return -ENODEV;
|
||||||
|
|
||||||
block = offset >> blkbits;
|
block = offset >> blkbits;
|
||||||
|
/*
|
||||||
|
* We can't just convert len to max_blocks because
|
||||||
|
* If blocksize = 4096 offset = 3072 and len = 2048
|
||||||
|
*/
|
||||||
max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
|
max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
|
||||||
- block;
|
- block;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* credits to insert 1 extent into extent tree + buffers to be able to
|
* credits to insert 1 extent into extent tree + buffers to be able to
|
||||||
* modify 1 super block, 1 block bitmap and 1 group descriptor.
|
* modify 1 super block, 1 block bitmap and 1 group descriptor.
|
||||||
@ -2657,7 +2909,6 @@ long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
|
|||||||
ret = PTR_ERR(handle);
|
ret = PTR_ERR(handle);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = ext4_get_blocks_wrap(handle, inode, block,
|
ret = ext4_get_blocks_wrap(handle, inode, block,
|
||||||
max_blocks, &map_bh,
|
max_blocks, &map_bh,
|
||||||
EXT4_CREATE_UNINITIALIZED_EXT, 0);
|
EXT4_CREATE_UNINITIALIZED_EXT, 0);
|
||||||
@ -2673,61 +2924,24 @@ long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
|
|||||||
ret2 = ext4_journal_stop(handle);
|
ret2 = ext4_journal_stop(handle);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
if (ret > 0) {
|
if ((block + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
|
||||||
/* check wrap through sign-bit/zero here */
|
blkbits) >> blkbits))
|
||||||
if ((block + ret) < 0 || (block + ret) < block) {
|
new_size = offset + len;
|
||||||
ret = -EIO;
|
else
|
||||||
ext4_mark_inode_dirty(handle, inode);
|
new_size = (block + ret) << blkbits;
|
||||||
ret2 = ext4_journal_stop(handle);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
if (buffer_new(&map_bh) && ((block + ret) >
|
|
||||||
(EXT4_BLOCK_ALIGN(i_size_read(inode), blkbits)
|
|
||||||
>> blkbits)))
|
|
||||||
nblocks = nblocks + ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Update ctime if new blocks get allocated */
|
|
||||||
if (nblocks) {
|
|
||||||
struct timespec now;
|
|
||||||
|
|
||||||
now = current_fs_time(inode->i_sb);
|
|
||||||
if (!timespec_equal(&inode->i_ctime, &now))
|
|
||||||
inode->i_ctime = now;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
ext4_falloc_update_inode(inode, mode, new_size,
|
||||||
|
buffer_new(&map_bh));
|
||||||
ext4_mark_inode_dirty(handle, inode);
|
ext4_mark_inode_dirty(handle, inode);
|
||||||
ret2 = ext4_journal_stop(handle);
|
ret2 = ext4_journal_stop(handle);
|
||||||
if (ret2)
|
if (ret2)
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
if (ret == -ENOSPC &&
|
||||||
if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
|
ext4_should_retry_alloc(inode->i_sb, &retries)) {
|
||||||
|
ret = 0;
|
||||||
goto retry;
|
goto retry;
|
||||||
|
|
||||||
/*
|
|
||||||
* Time to update the file size.
|
|
||||||
* Update only when preallocation was requested beyond the file size.
|
|
||||||
*/
|
|
||||||
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
|
|
||||||
(offset + len) > i_size_read(inode)) {
|
|
||||||
if (ret > 0) {
|
|
||||||
/*
|
|
||||||
* if no error, we assume preallocation succeeded
|
|
||||||
* completely
|
|
||||||
*/
|
|
||||||
i_size_write(inode, offset + len);
|
|
||||||
EXT4_I(inode)->i_disksize = i_size_read(inode);
|
|
||||||
} else if (ret < 0 && nblocks) {
|
|
||||||
/* Handle partial allocation scenario */
|
|
||||||
loff_t newsize;
|
|
||||||
|
|
||||||
newsize = (nblocks << blkbits) + i_size_read(inode);
|
|
||||||
i_size_write(inode, EXT4_BLOCK_ALIGN(newsize, blkbits));
|
|
||||||
EXT4_I(inode)->i_disksize = i_size_read(inode);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
mutex_unlock(&inode->i_mutex);
|
mutex_unlock(&inode->i_mutex);
|
||||||
return ret > 0 ? ret2 : ret;
|
return ret > 0 ? ret2 : ret;
|
||||||
}
|
}
|
||||||
|
@ -21,8 +21,8 @@
|
|||||||
#include <linux/time.h>
|
#include <linux/time.h>
|
||||||
#include <linux/fs.h>
|
#include <linux/fs.h>
|
||||||
#include <linux/jbd2.h>
|
#include <linux/jbd2.h>
|
||||||
#include <linux/ext4_fs.h>
|
#include "ext4.h"
|
||||||
#include <linux/ext4_jbd2.h>
|
#include "ext4_jbd2.h"
|
||||||
#include "xattr.h"
|
#include "xattr.h"
|
||||||
#include "acl.h"
|
#include "acl.h"
|
||||||
|
|
||||||
@ -129,7 +129,7 @@ const struct file_operations ext4_file_operations = {
|
|||||||
.write = do_sync_write,
|
.write = do_sync_write,
|
||||||
.aio_read = generic_file_aio_read,
|
.aio_read = generic_file_aio_read,
|
||||||
.aio_write = ext4_file_write,
|
.aio_write = ext4_file_write,
|
||||||
.ioctl = ext4_ioctl,
|
.unlocked_ioctl = ext4_ioctl,
|
||||||
#ifdef CONFIG_COMPAT
|
#ifdef CONFIG_COMPAT
|
||||||
.compat_ioctl = ext4_compat_ioctl,
|
.compat_ioctl = ext4_compat_ioctl,
|
||||||
#endif
|
#endif
|
||||||
|
@ -27,8 +27,8 @@
|
|||||||
#include <linux/sched.h>
|
#include <linux/sched.h>
|
||||||
#include <linux/writeback.h>
|
#include <linux/writeback.h>
|
||||||
#include <linux/jbd2.h>
|
#include <linux/jbd2.h>
|
||||||
#include <linux/ext4_fs.h>
|
#include "ext4.h"
|
||||||
#include <linux/ext4_jbd2.h>
|
#include "ext4_jbd2.h"
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* akpm: A new design for ext4_sync_file().
|
* akpm: A new design for ext4_sync_file().
|
||||||
@ -72,6 +72,9 @@ int ext4_sync_file(struct file * file, struct dentry *dentry, int datasync)
|
|||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
|
||||||
|
goto out;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The VFS has written the file data. If the inode is unaltered
|
* The VFS has written the file data. If the inode is unaltered
|
||||||
* then we need not start a commit.
|
* then we need not start a commit.
|
||||||
|
@ -11,8 +11,8 @@
|
|||||||
|
|
||||||
#include <linux/fs.h>
|
#include <linux/fs.h>
|
||||||
#include <linux/jbd2.h>
|
#include <linux/jbd2.h>
|
||||||
#include <linux/ext4_fs.h>
|
|
||||||
#include <linux/cryptohash.h>
|
#include <linux/cryptohash.h>
|
||||||
|
#include "ext4.h"
|
||||||
|
|
||||||
#define DELTA 0x9E3779B9
|
#define DELTA 0x9E3779B9
|
||||||
|
|
||||||
|
@ -15,8 +15,6 @@
|
|||||||
#include <linux/time.h>
|
#include <linux/time.h>
|
||||||
#include <linux/fs.h>
|
#include <linux/fs.h>
|
||||||
#include <linux/jbd2.h>
|
#include <linux/jbd2.h>
|
||||||
#include <linux/ext4_fs.h>
|
|
||||||
#include <linux/ext4_jbd2.h>
|
|
||||||
#include <linux/stat.h>
|
#include <linux/stat.h>
|
||||||
#include <linux/string.h>
|
#include <linux/string.h>
|
||||||
#include <linux/quotaops.h>
|
#include <linux/quotaops.h>
|
||||||
@ -25,7 +23,8 @@
|
|||||||
#include <linux/bitops.h>
|
#include <linux/bitops.h>
|
||||||
#include <linux/blkdev.h>
|
#include <linux/blkdev.h>
|
||||||
#include <asm/byteorder.h>
|
#include <asm/byteorder.h>
|
||||||
|
#include "ext4.h"
|
||||||
|
#include "ext4_jbd2.h"
|
||||||
#include "xattr.h"
|
#include "xattr.h"
|
||||||
#include "acl.h"
|
#include "acl.h"
|
||||||
#include "group.h"
|
#include "group.h"
|
||||||
@ -75,7 +74,7 @@ unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh,
|
|||||||
/* If checksum is bad mark all blocks and inodes use to prevent
|
/* If checksum is bad mark all blocks and inodes use to prevent
|
||||||
* allocation, essentially implementing a per-group read-only flag. */
|
* allocation, essentially implementing a per-group read-only flag. */
|
||||||
if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
|
if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
|
||||||
ext4_error(sb, __FUNCTION__, "Checksum bad for group %lu\n",
|
ext4_error(sb, __func__, "Checksum bad for group %lu\n",
|
||||||
block_group);
|
block_group);
|
||||||
gdp->bg_free_blocks_count = 0;
|
gdp->bg_free_blocks_count = 0;
|
||||||
gdp->bg_free_inodes_count = 0;
|
gdp->bg_free_inodes_count = 0;
|
||||||
@ -223,11 +222,9 @@ void ext4_free_inode (handle_t *handle, struct inode * inode)
|
|||||||
|
|
||||||
if (gdp) {
|
if (gdp) {
|
||||||
spin_lock(sb_bgl_lock(sbi, block_group));
|
spin_lock(sb_bgl_lock(sbi, block_group));
|
||||||
gdp->bg_free_inodes_count = cpu_to_le16(
|
le16_add_cpu(&gdp->bg_free_inodes_count, 1);
|
||||||
le16_to_cpu(gdp->bg_free_inodes_count) + 1);
|
|
||||||
if (is_directory)
|
if (is_directory)
|
||||||
gdp->bg_used_dirs_count = cpu_to_le16(
|
le16_add_cpu(&gdp->bg_used_dirs_count, -1);
|
||||||
le16_to_cpu(gdp->bg_used_dirs_count) - 1);
|
|
||||||
gdp->bg_checksum = ext4_group_desc_csum(sbi,
|
gdp->bg_checksum = ext4_group_desc_csum(sbi,
|
||||||
block_group, gdp);
|
block_group, gdp);
|
||||||
spin_unlock(sb_bgl_lock(sbi, block_group));
|
spin_unlock(sb_bgl_lock(sbi, block_group));
|
||||||
@ -588,7 +585,7 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode * dir, int mode)
|
|||||||
ino++;
|
ino++;
|
||||||
if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
|
if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
|
||||||
ino > EXT4_INODES_PER_GROUP(sb)) {
|
ino > EXT4_INODES_PER_GROUP(sb)) {
|
||||||
ext4_error(sb, __FUNCTION__,
|
ext4_error(sb, __func__,
|
||||||
"reserved inode or inode > inodes count - "
|
"reserved inode or inode > inodes count - "
|
||||||
"block_group = %lu, inode=%lu", group,
|
"block_group = %lu, inode=%lu", group,
|
||||||
ino + group * EXT4_INODES_PER_GROUP(sb));
|
ino + group * EXT4_INODES_PER_GROUP(sb));
|
||||||
@ -664,11 +661,9 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode * dir, int mode)
|
|||||||
cpu_to_le16(EXT4_INODES_PER_GROUP(sb) - ino);
|
cpu_to_le16(EXT4_INODES_PER_GROUP(sb) - ino);
|
||||||
}
|
}
|
||||||
|
|
||||||
gdp->bg_free_inodes_count =
|
le16_add_cpu(&gdp->bg_free_inodes_count, -1);
|
||||||
cpu_to_le16(le16_to_cpu(gdp->bg_free_inodes_count) - 1);
|
|
||||||
if (S_ISDIR(mode)) {
|
if (S_ISDIR(mode)) {
|
||||||
gdp->bg_used_dirs_count =
|
le16_add_cpu(&gdp->bg_used_dirs_count, 1);
|
||||||
cpu_to_le16(le16_to_cpu(gdp->bg_used_dirs_count) + 1);
|
|
||||||
}
|
}
|
||||||
gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
|
gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
|
||||||
spin_unlock(sb_bgl_lock(sbi, group));
|
spin_unlock(sb_bgl_lock(sbi, group));
|
||||||
@ -744,23 +739,24 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode * dir, int mode)
|
|||||||
if (err)
|
if (err)
|
||||||
goto fail_free_drop;
|
goto fail_free_drop;
|
||||||
|
|
||||||
err = ext4_mark_inode_dirty(handle, inode);
|
|
||||||
if (err) {
|
|
||||||
ext4_std_error(sb, err);
|
|
||||||
goto fail_free_drop;
|
|
||||||
}
|
|
||||||
if (test_opt(sb, EXTENTS)) {
|
if (test_opt(sb, EXTENTS)) {
|
||||||
/* set extent flag only for directory and file */
|
/* set extent flag only for diretory, file and normal symlink*/
|
||||||
if (S_ISDIR(mode) || S_ISREG(mode)) {
|
if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
|
||||||
EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL;
|
EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL;
|
||||||
ext4_ext_tree_init(handle, inode);
|
ext4_ext_tree_init(handle, inode);
|
||||||
err = ext4_update_incompat_feature(handle, sb,
|
err = ext4_update_incompat_feature(handle, sb,
|
||||||
EXT4_FEATURE_INCOMPAT_EXTENTS);
|
EXT4_FEATURE_INCOMPAT_EXTENTS);
|
||||||
if (err)
|
if (err)
|
||||||
goto fail;
|
goto fail_free_drop;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
err = ext4_mark_inode_dirty(handle, inode);
|
||||||
|
if (err) {
|
||||||
|
ext4_std_error(sb, err);
|
||||||
|
goto fail_free_drop;
|
||||||
|
}
|
||||||
|
|
||||||
ext4_debug("allocating inode %lu\n", inode->i_ino);
|
ext4_debug("allocating inode %lu\n", inode->i_ino);
|
||||||
goto really_out;
|
goto really_out;
|
||||||
fail:
|
fail:
|
||||||
@ -796,7 +792,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
|
|||||||
|
|
||||||
/* Error cases - e2fsck has already cleaned up for us */
|
/* Error cases - e2fsck has already cleaned up for us */
|
||||||
if (ino > max_ino) {
|
if (ino > max_ino) {
|
||||||
ext4_warning(sb, __FUNCTION__,
|
ext4_warning(sb, __func__,
|
||||||
"bad orphan ino %lu! e2fsck was run?", ino);
|
"bad orphan ino %lu! e2fsck was run?", ino);
|
||||||
goto error;
|
goto error;
|
||||||
}
|
}
|
||||||
@ -805,7 +801,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
|
|||||||
bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
|
bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
|
||||||
bitmap_bh = read_inode_bitmap(sb, block_group);
|
bitmap_bh = read_inode_bitmap(sb, block_group);
|
||||||
if (!bitmap_bh) {
|
if (!bitmap_bh) {
|
||||||
ext4_warning(sb, __FUNCTION__,
|
ext4_warning(sb, __func__,
|
||||||
"inode bitmap error for orphan %lu", ino);
|
"inode bitmap error for orphan %lu", ino);
|
||||||
goto error;
|
goto error;
|
||||||
}
|
}
|
||||||
@ -830,7 +826,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
|
|||||||
err = PTR_ERR(inode);
|
err = PTR_ERR(inode);
|
||||||
inode = NULL;
|
inode = NULL;
|
||||||
bad_orphan:
|
bad_orphan:
|
||||||
ext4_warning(sb, __FUNCTION__,
|
ext4_warning(sb, __func__,
|
||||||
"bad orphan inode %lu! e2fsck was run?", ino);
|
"bad orphan inode %lu! e2fsck was run?", ino);
|
||||||
printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
|
printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
|
||||||
bit, (unsigned long long)bitmap_bh->b_blocknr,
|
bit, (unsigned long long)bitmap_bh->b_blocknr,
|
||||||
|
@ -25,7 +25,6 @@
|
|||||||
#include <linux/module.h>
|
#include <linux/module.h>
|
||||||
#include <linux/fs.h>
|
#include <linux/fs.h>
|
||||||
#include <linux/time.h>
|
#include <linux/time.h>
|
||||||
#include <linux/ext4_jbd2.h>
|
|
||||||
#include <linux/jbd2.h>
|
#include <linux/jbd2.h>
|
||||||
#include <linux/highuid.h>
|
#include <linux/highuid.h>
|
||||||
#include <linux/pagemap.h>
|
#include <linux/pagemap.h>
|
||||||
@ -36,6 +35,7 @@
|
|||||||
#include <linux/mpage.h>
|
#include <linux/mpage.h>
|
||||||
#include <linux/uio.h>
|
#include <linux/uio.h>
|
||||||
#include <linux/bio.h>
|
#include <linux/bio.h>
|
||||||
|
#include "ext4_jbd2.h"
|
||||||
#include "xattr.h"
|
#include "xattr.h"
|
||||||
#include "acl.h"
|
#include "acl.h"
|
||||||
|
|
||||||
@ -93,7 +93,7 @@ int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
|
|||||||
BUFFER_TRACE(bh, "call ext4_journal_revoke");
|
BUFFER_TRACE(bh, "call ext4_journal_revoke");
|
||||||
err = ext4_journal_revoke(handle, blocknr, bh);
|
err = ext4_journal_revoke(handle, blocknr, bh);
|
||||||
if (err)
|
if (err)
|
||||||
ext4_abort(inode->i_sb, __FUNCTION__,
|
ext4_abort(inode->i_sb, __func__,
|
||||||
"error %d when attempting revoke", err);
|
"error %d when attempting revoke", err);
|
||||||
BUFFER_TRACE(bh, "exit");
|
BUFFER_TRACE(bh, "exit");
|
||||||
return err;
|
return err;
|
||||||
@ -985,6 +985,16 @@ int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
|
|||||||
} else {
|
} else {
|
||||||
retval = ext4_get_blocks_handle(handle, inode, block,
|
retval = ext4_get_blocks_handle(handle, inode, block,
|
||||||
max_blocks, bh, create, extend_disksize);
|
max_blocks, bh, create, extend_disksize);
|
||||||
|
|
||||||
|
if (retval > 0 && buffer_new(bh)) {
|
||||||
|
/*
|
||||||
|
* We allocated new blocks which will result in
|
||||||
|
* i_data's format changing. Force the migrate
|
||||||
|
* to fail by clearing migrate flags
|
||||||
|
*/
|
||||||
|
EXT4_I(inode)->i_flags = EXT4_I(inode)->i_flags &
|
||||||
|
~EXT4_EXT_MIGRATE;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
up_write((&EXT4_I(inode)->i_data_sem));
|
up_write((&EXT4_I(inode)->i_data_sem));
|
||||||
return retval;
|
return retval;
|
||||||
@ -1230,7 +1240,7 @@ int ext4_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
|
|||||||
{
|
{
|
||||||
int err = jbd2_journal_dirty_data(handle, bh);
|
int err = jbd2_journal_dirty_data(handle, bh);
|
||||||
if (err)
|
if (err)
|
||||||
ext4_journal_abort_handle(__FUNCTION__, __FUNCTION__,
|
ext4_journal_abort_handle(__func__, __func__,
|
||||||
bh, handle, err);
|
bh, handle, err);
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
@ -1301,10 +1311,11 @@ static int ext4_ordered_write_end(struct file *file,
|
|||||||
new_i_size = pos + copied;
|
new_i_size = pos + copied;
|
||||||
if (new_i_size > EXT4_I(inode)->i_disksize)
|
if (new_i_size > EXT4_I(inode)->i_disksize)
|
||||||
EXT4_I(inode)->i_disksize = new_i_size;
|
EXT4_I(inode)->i_disksize = new_i_size;
|
||||||
copied = ext4_generic_write_end(file, mapping, pos, len, copied,
|
ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
|
||||||
page, fsdata);
|
page, fsdata);
|
||||||
if (copied < 0)
|
copied = ret2;
|
||||||
ret = copied;
|
if (ret2 < 0)
|
||||||
|
ret = ret2;
|
||||||
}
|
}
|
||||||
ret2 = ext4_journal_stop(handle);
|
ret2 = ext4_journal_stop(handle);
|
||||||
if (!ret)
|
if (!ret)
|
||||||
@ -1329,10 +1340,11 @@ static int ext4_writeback_write_end(struct file *file,
|
|||||||
if (new_i_size > EXT4_I(inode)->i_disksize)
|
if (new_i_size > EXT4_I(inode)->i_disksize)
|
||||||
EXT4_I(inode)->i_disksize = new_i_size;
|
EXT4_I(inode)->i_disksize = new_i_size;
|
||||||
|
|
||||||
copied = ext4_generic_write_end(file, mapping, pos, len, copied,
|
ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
|
||||||
page, fsdata);
|
page, fsdata);
|
||||||
if (copied < 0)
|
copied = ret2;
|
||||||
ret = copied;
|
if (ret2 < 0)
|
||||||
|
ret = ret2;
|
||||||
|
|
||||||
ret2 = ext4_journal_stop(handle);
|
ret2 = ext4_journal_stop(handle);
|
||||||
if (!ret)
|
if (!ret)
|
||||||
@ -2501,12 +2513,10 @@ void ext4_truncate(struct inode *inode)
|
|||||||
static ext4_fsblk_t ext4_get_inode_block(struct super_block *sb,
|
static ext4_fsblk_t ext4_get_inode_block(struct super_block *sb,
|
||||||
unsigned long ino, struct ext4_iloc *iloc)
|
unsigned long ino, struct ext4_iloc *iloc)
|
||||||
{
|
{
|
||||||
unsigned long desc, group_desc;
|
|
||||||
ext4_group_t block_group;
|
ext4_group_t block_group;
|
||||||
unsigned long offset;
|
unsigned long offset;
|
||||||
ext4_fsblk_t block;
|
ext4_fsblk_t block;
|
||||||
struct buffer_head *bh;
|
struct ext4_group_desc *gdp;
|
||||||
struct ext4_group_desc * gdp;
|
|
||||||
|
|
||||||
if (!ext4_valid_inum(sb, ino)) {
|
if (!ext4_valid_inum(sb, ino)) {
|
||||||
/*
|
/*
|
||||||
@ -2518,22 +2528,10 @@ static ext4_fsblk_t ext4_get_inode_block(struct super_block *sb,
|
|||||||
}
|
}
|
||||||
|
|
||||||
block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
|
block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
|
||||||
if (block_group >= EXT4_SB(sb)->s_groups_count) {
|
gdp = ext4_get_group_desc(sb, block_group, NULL);
|
||||||
ext4_error(sb,"ext4_get_inode_block","group >= groups count");
|
if (!gdp)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
|
||||||
smp_rmb();
|
|
||||||
group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
|
|
||||||
desc = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
|
|
||||||
bh = EXT4_SB(sb)->s_group_desc[group_desc];
|
|
||||||
if (!bh) {
|
|
||||||
ext4_error (sb, "ext4_get_inode_block",
|
|
||||||
"Descriptor not loaded");
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
gdp = (struct ext4_group_desc *)((__u8 *)bh->b_data +
|
|
||||||
desc * EXT4_DESC_SIZE(sb));
|
|
||||||
/*
|
/*
|
||||||
* Figure out the offset within the block group inode table
|
* Figure out the offset within the block group inode table
|
||||||
*/
|
*/
|
||||||
@ -2976,7 +2974,8 @@ static int ext4_do_update_inode(handle_t *handle,
|
|||||||
if (ext4_inode_blocks_set(handle, raw_inode, ei))
|
if (ext4_inode_blocks_set(handle, raw_inode, ei))
|
||||||
goto out_brelse;
|
goto out_brelse;
|
||||||
raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
|
raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
|
||||||
raw_inode->i_flags = cpu_to_le32(ei->i_flags);
|
/* clear the migrate flag in the raw_inode */
|
||||||
|
raw_inode->i_flags = cpu_to_le32(ei->i_flags & ~EXT4_EXT_MIGRATE);
|
||||||
if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
|
if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
|
||||||
cpu_to_le32(EXT4_OS_HURD))
|
cpu_to_le32(EXT4_OS_HURD))
|
||||||
raw_inode->i_file_acl_high =
|
raw_inode->i_file_acl_high =
|
||||||
@ -3374,7 +3373,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
|
|||||||
EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
|
EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
|
||||||
if (mnt_count !=
|
if (mnt_count !=
|
||||||
le16_to_cpu(sbi->s_es->s_mnt_count)) {
|
le16_to_cpu(sbi->s_es->s_mnt_count)) {
|
||||||
ext4_warning(inode->i_sb, __FUNCTION__,
|
ext4_warning(inode->i_sb, __func__,
|
||||||
"Unable to expand inode %lu. Delete"
|
"Unable to expand inode %lu. Delete"
|
||||||
" some EAs or run e2fsck.",
|
" some EAs or run e2fsck.",
|
||||||
inode->i_ino);
|
inode->i_ino);
|
||||||
@ -3415,7 +3414,7 @@ void ext4_dirty_inode(struct inode *inode)
|
|||||||
current_handle->h_transaction != handle->h_transaction) {
|
current_handle->h_transaction != handle->h_transaction) {
|
||||||
/* This task has a transaction open against a different fs */
|
/* This task has a transaction open against a different fs */
|
||||||
printk(KERN_EMERG "%s: transactions do not match!\n",
|
printk(KERN_EMERG "%s: transactions do not match!\n",
|
||||||
__FUNCTION__);
|
__func__);
|
||||||
} else {
|
} else {
|
||||||
jbd_debug(5, "marking dirty. outer handle=%p\n",
|
jbd_debug(5, "marking dirty. outer handle=%p\n",
|
||||||
current_handle);
|
current_handle);
|
||||||
|
@ -10,17 +10,17 @@
|
|||||||
#include <linux/fs.h>
|
#include <linux/fs.h>
|
||||||
#include <linux/jbd2.h>
|
#include <linux/jbd2.h>
|
||||||
#include <linux/capability.h>
|
#include <linux/capability.h>
|
||||||
#include <linux/ext4_fs.h>
|
|
||||||
#include <linux/ext4_jbd2.h>
|
|
||||||
#include <linux/time.h>
|
#include <linux/time.h>
|
||||||
#include <linux/compat.h>
|
#include <linux/compat.h>
|
||||||
#include <linux/smp_lock.h>
|
#include <linux/smp_lock.h>
|
||||||
#include <linux/mount.h>
|
#include <linux/mount.h>
|
||||||
#include <asm/uaccess.h>
|
#include <asm/uaccess.h>
|
||||||
|
#include "ext4_jbd2.h"
|
||||||
|
#include "ext4.h"
|
||||||
|
|
||||||
int ext4_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
|
long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
||||||
unsigned long arg)
|
|
||||||
{
|
{
|
||||||
|
struct inode *inode = filp->f_dentry->d_inode;
|
||||||
struct ext4_inode_info *ei = EXT4_I(inode);
|
struct ext4_inode_info *ei = EXT4_I(inode);
|
||||||
unsigned int flags;
|
unsigned int flags;
|
||||||
unsigned short rsv_window_size;
|
unsigned short rsv_window_size;
|
||||||
@ -277,9 +277,6 @@ int ext4_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
|
|||||||
#ifdef CONFIG_COMPAT
|
#ifdef CONFIG_COMPAT
|
||||||
long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
||||||
{
|
{
|
||||||
struct inode *inode = file->f_path.dentry->d_inode;
|
|
||||||
int ret;
|
|
||||||
|
|
||||||
/* These are just misnamed, they actually get/put from/to user an int */
|
/* These are just misnamed, they actually get/put from/to user an int */
|
||||||
switch (cmd) {
|
switch (cmd) {
|
||||||
case EXT4_IOC32_GETFLAGS:
|
case EXT4_IOC32_GETFLAGS:
|
||||||
@ -319,9 +316,6 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
|
|||||||
default:
|
default:
|
||||||
return -ENOIOCTLCMD;
|
return -ENOIOCTLCMD;
|
||||||
}
|
}
|
||||||
lock_kernel();
|
return ext4_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
|
||||||
ret = ext4_ioctl(inode, file, cmd, (unsigned long) compat_ptr(arg));
|
|
||||||
unlock_kernel();
|
|
||||||
return ret;
|
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
@ -21,21 +21,7 @@
|
|||||||
* mballoc.c contains the multiblocks allocation routines
|
* mballoc.c contains the multiblocks allocation routines
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#include <linux/time.h>
|
#include "mballoc.h"
|
||||||
#include <linux/fs.h>
|
|
||||||
#include <linux/namei.h>
|
|
||||||
#include <linux/ext4_jbd2.h>
|
|
||||||
#include <linux/ext4_fs.h>
|
|
||||||
#include <linux/quotaops.h>
|
|
||||||
#include <linux/buffer_head.h>
|
|
||||||
#include <linux/module.h>
|
|
||||||
#include <linux/swap.h>
|
|
||||||
#include <linux/proc_fs.h>
|
|
||||||
#include <linux/pagemap.h>
|
|
||||||
#include <linux/seq_file.h>
|
|
||||||
#include <linux/version.h>
|
|
||||||
#include "group.h"
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* MUSTDO:
|
* MUSTDO:
|
||||||
* - test ext4_ext_search_left() and ext4_ext_search_right()
|
* - test ext4_ext_search_left() and ext4_ext_search_right()
|
||||||
@ -345,288 +331,6 @@
|
|||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
|
|
||||||
/*
|
|
||||||
* with AGGRESSIVE_CHECK allocator runs consistency checks over
|
|
||||||
* structures. these checks slow things down a lot
|
|
||||||
*/
|
|
||||||
#define AGGRESSIVE_CHECK__
|
|
||||||
|
|
||||||
/*
|
|
||||||
* with DOUBLE_CHECK defined mballoc creates persistent in-core
|
|
||||||
* bitmaps, maintains and uses them to check for double allocations
|
|
||||||
*/
|
|
||||||
#define DOUBLE_CHECK__
|
|
||||||
|
|
||||||
/*
|
|
||||||
*/
|
|
||||||
#define MB_DEBUG__
|
|
||||||
#ifdef MB_DEBUG
|
|
||||||
#define mb_debug(fmt, a...) printk(fmt, ##a)
|
|
||||||
#else
|
|
||||||
#define mb_debug(fmt, a...)
|
|
||||||
#endif
|
|
||||||
|
|
||||||
/*
|
|
||||||
* with EXT4_MB_HISTORY mballoc stores last N allocations in memory
|
|
||||||
* and you can monitor it in /proc/fs/ext4/<dev>/mb_history
|
|
||||||
*/
|
|
||||||
#define EXT4_MB_HISTORY
|
|
||||||
#define EXT4_MB_HISTORY_ALLOC 1 /* allocation */
|
|
||||||
#define EXT4_MB_HISTORY_PREALLOC 2 /* preallocated blocks used */
|
|
||||||
#define EXT4_MB_HISTORY_DISCARD 4 /* preallocation discarded */
|
|
||||||
#define EXT4_MB_HISTORY_FREE 8 /* free */
|
|
||||||
|
|
||||||
#define EXT4_MB_HISTORY_DEFAULT (EXT4_MB_HISTORY_ALLOC | \
|
|
||||||
EXT4_MB_HISTORY_PREALLOC)
|
|
||||||
|
|
||||||
/*
|
|
||||||
* How long mballoc can look for a best extent (in found extents)
|
|
||||||
*/
|
|
||||||
#define MB_DEFAULT_MAX_TO_SCAN 200
|
|
||||||
|
|
||||||
/*
|
|
||||||
* How long mballoc must look for a best extent
|
|
||||||
*/
|
|
||||||
#define MB_DEFAULT_MIN_TO_SCAN 10
|
|
||||||
|
|
||||||
/*
|
|
||||||
* How many groups mballoc will scan looking for the best chunk
|
|
||||||
*/
|
|
||||||
#define MB_DEFAULT_MAX_GROUPS_TO_SCAN 5
|
|
||||||
|
|
||||||
/*
|
|
||||||
* with 'ext4_mb_stats' allocator will collect stats that will be
|
|
||||||
* shown at umount. The collecting costs though!
|
|
||||||
*/
|
|
||||||
#define MB_DEFAULT_STATS 1
|
|
||||||
|
|
||||||
/*
|
|
||||||
* files smaller than MB_DEFAULT_STREAM_THRESHOLD are served
|
|
||||||
* by the stream allocator, which purpose is to pack requests
|
|
||||||
* as close each to other as possible to produce smooth I/O traffic
|
|
||||||
* We use locality group prealloc space for stream request.
|
|
||||||
* We can tune the same via /proc/fs/ext4/<parition>/stream_req
|
|
||||||
*/
|
|
||||||
#define MB_DEFAULT_STREAM_THRESHOLD 16 /* 64K */
|
|
||||||
|
|
||||||
/*
|
|
||||||
* for which requests use 2^N search using buddies
|
|
||||||
*/
|
|
||||||
#define MB_DEFAULT_ORDER2_REQS 2
|
|
||||||
|
|
||||||
/*
|
|
||||||
* default group prealloc size 512 blocks
|
|
||||||
*/
|
|
||||||
#define MB_DEFAULT_GROUP_PREALLOC 512
|
|
||||||
|
|
||||||
static struct kmem_cache *ext4_pspace_cachep;
|
|
||||||
static struct kmem_cache *ext4_ac_cachep;
|
|
||||||
|
|
||||||
#ifdef EXT4_BB_MAX_BLOCKS
|
|
||||||
#undef EXT4_BB_MAX_BLOCKS
|
|
||||||
#endif
|
|
||||||
#define EXT4_BB_MAX_BLOCKS 30
|
|
||||||
|
|
||||||
struct ext4_free_metadata {
|
|
||||||
ext4_group_t group;
|
|
||||||
unsigned short num;
|
|
||||||
ext4_grpblk_t blocks[EXT4_BB_MAX_BLOCKS];
|
|
||||||
struct list_head list;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct ext4_group_info {
|
|
||||||
unsigned long bb_state;
|
|
||||||
unsigned long bb_tid;
|
|
||||||
struct ext4_free_metadata *bb_md_cur;
|
|
||||||
unsigned short bb_first_free;
|
|
||||||
unsigned short bb_free;
|
|
||||||
unsigned short bb_fragments;
|
|
||||||
struct list_head bb_prealloc_list;
|
|
||||||
#ifdef DOUBLE_CHECK
|
|
||||||
void *bb_bitmap;
|
|
||||||
#endif
|
|
||||||
unsigned short bb_counters[];
|
|
||||||
};
|
|
||||||
|
|
||||||
#define EXT4_GROUP_INFO_NEED_INIT_BIT 0
|
|
||||||
#define EXT4_GROUP_INFO_LOCKED_BIT 1
|
|
||||||
|
|
||||||
#define EXT4_MB_GRP_NEED_INIT(grp) \
|
|
||||||
(test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))
|
|
||||||
|
|
||||||
|
|
||||||
struct ext4_prealloc_space {
|
|
||||||
struct list_head pa_inode_list;
|
|
||||||
struct list_head pa_group_list;
|
|
||||||
union {
|
|
||||||
struct list_head pa_tmp_list;
|
|
||||||
struct rcu_head pa_rcu;
|
|
||||||
} u;
|
|
||||||
spinlock_t pa_lock;
|
|
||||||
atomic_t pa_count;
|
|
||||||
unsigned pa_deleted;
|
|
||||||
ext4_fsblk_t pa_pstart; /* phys. block */
|
|
||||||
ext4_lblk_t pa_lstart; /* log. block */
|
|
||||||
unsigned short pa_len; /* len of preallocated chunk */
|
|
||||||
unsigned short pa_free; /* how many blocks are free */
|
|
||||||
unsigned short pa_linear; /* consumed in one direction
|
|
||||||
* strictly, for grp prealloc */
|
|
||||||
spinlock_t *pa_obj_lock;
|
|
||||||
struct inode *pa_inode; /* hack, for history only */
|
|
||||||
};
|
|
||||||
|
|
||||||
|
|
||||||
struct ext4_free_extent {
|
|
||||||
ext4_lblk_t fe_logical;
|
|
||||||
ext4_grpblk_t fe_start;
|
|
||||||
ext4_group_t fe_group;
|
|
||||||
int fe_len;
|
|
||||||
};
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Locality group:
|
|
||||||
* we try to group all related changes together
|
|
||||||
* so that writeback can flush/allocate them together as well
|
|
||||||
*/
|
|
||||||
struct ext4_locality_group {
|
|
||||||
/* for allocator */
|
|
||||||
struct mutex lg_mutex; /* to serialize allocates */
|
|
||||||
struct list_head lg_prealloc_list;/* list of preallocations */
|
|
||||||
spinlock_t lg_prealloc_lock;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct ext4_allocation_context {
|
|
||||||
struct inode *ac_inode;
|
|
||||||
struct super_block *ac_sb;
|
|
||||||
|
|
||||||
/* original request */
|
|
||||||
struct ext4_free_extent ac_o_ex;
|
|
||||||
|
|
||||||
/* goal request (after normalization) */
|
|
||||||
struct ext4_free_extent ac_g_ex;
|
|
||||||
|
|
||||||
/* the best found extent */
|
|
||||||
struct ext4_free_extent ac_b_ex;
|
|
||||||
|
|
||||||
/* copy of the bext found extent taken before preallocation efforts */
|
|
||||||
struct ext4_free_extent ac_f_ex;
|
|
||||||
|
|
||||||
/* number of iterations done. we have to track to limit searching */
|
|
||||||
unsigned long ac_ex_scanned;
|
|
||||||
__u16 ac_groups_scanned;
|
|
||||||
__u16 ac_found;
|
|
||||||
__u16 ac_tail;
|
|
||||||
__u16 ac_buddy;
|
|
||||||
__u16 ac_flags; /* allocation hints */
|
|
||||||
__u8 ac_status;
|
|
||||||
__u8 ac_criteria;
|
|
||||||
__u8 ac_repeats;
|
|
||||||
__u8 ac_2order; /* if request is to allocate 2^N blocks and
|
|
||||||
* N > 0, the field stores N, otherwise 0 */
|
|
||||||
__u8 ac_op; /* operation, for history only */
|
|
||||||
struct page *ac_bitmap_page;
|
|
||||||
struct page *ac_buddy_page;
|
|
||||||
struct ext4_prealloc_space *ac_pa;
|
|
||||||
struct ext4_locality_group *ac_lg;
|
|
||||||
};
|
|
||||||
|
|
||||||
#define AC_STATUS_CONTINUE 1
|
|
||||||
#define AC_STATUS_FOUND 2
|
|
||||||
#define AC_STATUS_BREAK 3
|
|
||||||
|
|
||||||
struct ext4_mb_history {
|
|
||||||
struct ext4_free_extent orig; /* orig allocation */
|
|
||||||
struct ext4_free_extent goal; /* goal allocation */
|
|
||||||
struct ext4_free_extent result; /* result allocation */
|
|
||||||
unsigned pid;
|
|
||||||
unsigned ino;
|
|
||||||
__u16 found; /* how many extents have been found */
|
|
||||||
__u16 groups; /* how many groups have been scanned */
|
|
||||||
__u16 tail; /* what tail broke some buddy */
|
|
||||||
__u16 buddy; /* buddy the tail ^^^ broke */
|
|
||||||
__u16 flags;
|
|
||||||
__u8 cr:3; /* which phase the result extent was found at */
|
|
||||||
__u8 op:4;
|
|
||||||
__u8 merged:1;
|
|
||||||
};
|
|
||||||
|
|
||||||
struct ext4_buddy {
|
|
||||||
struct page *bd_buddy_page;
|
|
||||||
void *bd_buddy;
|
|
||||||
struct page *bd_bitmap_page;
|
|
||||||
void *bd_bitmap;
|
|
||||||
struct ext4_group_info *bd_info;
|
|
||||||
struct super_block *bd_sb;
|
|
||||||
__u16 bd_blkbits;
|
|
||||||
ext4_group_t bd_group;
|
|
||||||
};
|
|
||||||
#define EXT4_MB_BITMAP(e4b) ((e4b)->bd_bitmap)
|
|
||||||
#define EXT4_MB_BUDDY(e4b) ((e4b)->bd_buddy)
|
|
||||||
|
|
||||||
#ifndef EXT4_MB_HISTORY
|
|
||||||
static inline void ext4_mb_store_history(struct ext4_allocation_context *ac)
|
|
||||||
{
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
#else
|
|
||||||
static void ext4_mb_store_history(struct ext4_allocation_context *ac);
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
|
|
||||||
|
|
||||||
static struct proc_dir_entry *proc_root_ext4;
|
|
||||||
struct buffer_head *read_block_bitmap(struct super_block *, ext4_group_t);
|
|
||||||
ext4_fsblk_t ext4_new_blocks_old(handle_t *handle, struct inode *inode,
|
|
||||||
ext4_fsblk_t goal, unsigned long *count, int *errp);
|
|
||||||
|
|
||||||
static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
|
|
||||||
ext4_group_t group);
|
|
||||||
static void ext4_mb_poll_new_transaction(struct super_block *, handle_t *);
|
|
||||||
static void ext4_mb_free_committed_blocks(struct super_block *);
|
|
||||||
static void ext4_mb_return_to_preallocation(struct inode *inode,
|
|
||||||
struct ext4_buddy *e4b, sector_t block,
|
|
||||||
int count);
|
|
||||||
static void ext4_mb_put_pa(struct ext4_allocation_context *,
|
|
||||||
struct super_block *, struct ext4_prealloc_space *pa);
|
|
||||||
static int ext4_mb_init_per_dev_proc(struct super_block *sb);
|
|
||||||
static int ext4_mb_destroy_per_dev_proc(struct super_block *sb);
|
|
||||||
|
|
||||||
|
|
||||||
static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
|
|
||||||
{
|
|
||||||
struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
|
|
||||||
|
|
||||||
bit_spin_lock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void ext4_unlock_group(struct super_block *sb,
|
|
||||||
ext4_group_t group)
|
|
||||||
{
|
|
||||||
struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
|
|
||||||
|
|
||||||
bit_spin_unlock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline int ext4_is_group_locked(struct super_block *sb,
|
|
||||||
ext4_group_t group)
|
|
||||||
{
|
|
||||||
struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
|
|
||||||
|
|
||||||
return bit_spin_is_locked(EXT4_GROUP_INFO_LOCKED_BIT,
|
|
||||||
&(grinfo->bb_state));
|
|
||||||
}
|
|
||||||
|
|
||||||
static ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
|
|
||||||
struct ext4_free_extent *fex)
|
|
||||||
{
|
|
||||||
ext4_fsblk_t block;
|
|
||||||
|
|
||||||
block = (ext4_fsblk_t) fex->fe_group * EXT4_BLOCKS_PER_GROUP(sb)
|
|
||||||
+ fex->fe_start
|
|
||||||
+ le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
|
|
||||||
return block;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
|
static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
|
||||||
{
|
{
|
||||||
#if BITS_PER_LONG == 64
|
#if BITS_PER_LONG == 64
|
||||||
@ -736,7 +440,7 @@ static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
|
|||||||
blocknr +=
|
blocknr +=
|
||||||
le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
|
le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
|
||||||
|
|
||||||
ext4_error(sb, __FUNCTION__, "double-free of inode"
|
ext4_error(sb, __func__, "double-free of inode"
|
||||||
" %lu's block %llu(bit %u in group %lu)\n",
|
" %lu's block %llu(bit %u in group %lu)\n",
|
||||||
inode ? inode->i_ino : 0, blocknr,
|
inode ? inode->i_ino : 0, blocknr,
|
||||||
first + i, e4b->bd_group);
|
first + i, e4b->bd_group);
|
||||||
@ -898,17 +602,17 @@ static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
|
|||||||
list_for_each(cur, &grp->bb_prealloc_list) {
|
list_for_each(cur, &grp->bb_prealloc_list) {
|
||||||
ext4_group_t groupnr;
|
ext4_group_t groupnr;
|
||||||
struct ext4_prealloc_space *pa;
|
struct ext4_prealloc_space *pa;
|
||||||
pa = list_entry(cur, struct ext4_prealloc_space, group_list);
|
pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
|
||||||
ext4_get_group_no_and_offset(sb, pa->pstart, &groupnr, &k);
|
ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
|
||||||
MB_CHECK_ASSERT(groupnr == e4b->bd_group);
|
MB_CHECK_ASSERT(groupnr == e4b->bd_group);
|
||||||
for (i = 0; i < pa->len; i++)
|
for (i = 0; i < pa->pa_len; i++)
|
||||||
MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
|
MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
|
||||||
}
|
}
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
#undef MB_CHECK_ASSERT
|
#undef MB_CHECK_ASSERT
|
||||||
#define mb_check_buddy(e4b) __mb_check_buddy(e4b, \
|
#define mb_check_buddy(e4b) __mb_check_buddy(e4b, \
|
||||||
__FILE__, __FUNCTION__, __LINE__)
|
__FILE__, __func__, __LINE__)
|
||||||
#else
|
#else
|
||||||
#define mb_check_buddy(e4b)
|
#define mb_check_buddy(e4b)
|
||||||
#endif
|
#endif
|
||||||
@ -982,7 +686,7 @@ static void ext4_mb_generate_buddy(struct super_block *sb,
|
|||||||
grp->bb_fragments = fragments;
|
grp->bb_fragments = fragments;
|
||||||
|
|
||||||
if (free != grp->bb_free) {
|
if (free != grp->bb_free) {
|
||||||
ext4_error(sb, __FUNCTION__,
|
ext4_error(sb, __func__,
|
||||||
"EXT4-fs: group %lu: %u blocks in bitmap, %u in gd\n",
|
"EXT4-fs: group %lu: %u blocks in bitmap, %u in gd\n",
|
||||||
group, free, grp->bb_free);
|
group, free, grp->bb_free);
|
||||||
/*
|
/*
|
||||||
@ -1168,8 +872,9 @@ static int ext4_mb_init_cache(struct page *page, char *incore)
|
|||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
|
static noinline_for_stack int
|
||||||
struct ext4_buddy *e4b)
|
ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
|
||||||
|
struct ext4_buddy *e4b)
|
||||||
{
|
{
|
||||||
struct ext4_sb_info *sbi = EXT4_SB(sb);
|
struct ext4_sb_info *sbi = EXT4_SB(sb);
|
||||||
struct inode *inode = sbi->s_buddy_cache;
|
struct inode *inode = sbi->s_buddy_cache;
|
||||||
@ -1367,7 +1072,7 @@ static int mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
|
|||||||
blocknr +=
|
blocknr +=
|
||||||
le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
|
le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
|
||||||
|
|
||||||
ext4_error(sb, __FUNCTION__, "double-free of inode"
|
ext4_error(sb, __func__, "double-free of inode"
|
||||||
" %lu's block %llu(bit %u in group %lu)\n",
|
" %lu's block %llu(bit %u in group %lu)\n",
|
||||||
inode ? inode->i_ino : 0, blocknr, block,
|
inode ? inode->i_ino : 0, blocknr, block,
|
||||||
e4b->bd_group);
|
e4b->bd_group);
|
||||||
@ -1848,7 +1553,7 @@ static void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
|
|||||||
* free blocks even though group info says we
|
* free blocks even though group info says we
|
||||||
* we have free blocks
|
* we have free blocks
|
||||||
*/
|
*/
|
||||||
ext4_error(sb, __FUNCTION__, "%d free blocks as per "
|
ext4_error(sb, __func__, "%d free blocks as per "
|
||||||
"group info. But bitmap says 0\n",
|
"group info. But bitmap says 0\n",
|
||||||
free);
|
free);
|
||||||
break;
|
break;
|
||||||
@ -1857,7 +1562,7 @@ static void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
|
|||||||
mb_find_extent(e4b, 0, i, ac->ac_g_ex.fe_len, &ex);
|
mb_find_extent(e4b, 0, i, ac->ac_g_ex.fe_len, &ex);
|
||||||
BUG_ON(ex.fe_len <= 0);
|
BUG_ON(ex.fe_len <= 0);
|
||||||
if (free < ex.fe_len) {
|
if (free < ex.fe_len) {
|
||||||
ext4_error(sb, __FUNCTION__, "%d free blocks as per "
|
ext4_error(sb, __func__, "%d free blocks as per "
|
||||||
"group info. But got %d blocks\n",
|
"group info. But got %d blocks\n",
|
||||||
free, ex.fe_len);
|
free, ex.fe_len);
|
||||||
/*
|
/*
|
||||||
@ -1965,7 +1670,8 @@ static int ext4_mb_good_group(struct ext4_allocation_context *ac,
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
|
static noinline_for_stack int
|
||||||
|
ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
|
||||||
{
|
{
|
||||||
ext4_group_t group;
|
ext4_group_t group;
|
||||||
ext4_group_t i;
|
ext4_group_t i;
|
||||||
@ -2465,7 +2171,8 @@ static void ext4_mb_history_init(struct super_block *sb)
|
|||||||
/* if we can't allocate history, then we simple won't use it */
|
/* if we can't allocate history, then we simple won't use it */
|
||||||
}
|
}
|
||||||
|
|
||||||
static void ext4_mb_store_history(struct ext4_allocation_context *ac)
|
static noinline_for_stack void
|
||||||
|
ext4_mb_store_history(struct ext4_allocation_context *ac)
|
||||||
{
|
{
|
||||||
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
|
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
|
||||||
struct ext4_mb_history h;
|
struct ext4_mb_history h;
|
||||||
@ -2565,13 +2272,13 @@ static int ext4_mb_init_backend(struct super_block *sb)
|
|||||||
meta_group_info[j] = kzalloc(len, GFP_KERNEL);
|
meta_group_info[j] = kzalloc(len, GFP_KERNEL);
|
||||||
if (meta_group_info[j] == NULL) {
|
if (meta_group_info[j] == NULL) {
|
||||||
printk(KERN_ERR "EXT4-fs: can't allocate buddy mem\n");
|
printk(KERN_ERR "EXT4-fs: can't allocate buddy mem\n");
|
||||||
i--;
|
|
||||||
goto err_freebuddy;
|
goto err_freebuddy;
|
||||||
}
|
}
|
||||||
desc = ext4_get_group_desc(sb, i, NULL);
|
desc = ext4_get_group_desc(sb, i, NULL);
|
||||||
if (desc == NULL) {
|
if (desc == NULL) {
|
||||||
printk(KERN_ERR
|
printk(KERN_ERR
|
||||||
"EXT4-fs: can't read descriptor %lu\n", i);
|
"EXT4-fs: can't read descriptor %lu\n", i);
|
||||||
|
i++;
|
||||||
goto err_freebuddy;
|
goto err_freebuddy;
|
||||||
}
|
}
|
||||||
memset(meta_group_info[j], 0, len);
|
memset(meta_group_info[j], 0, len);
|
||||||
@ -2611,13 +2318,11 @@ static int ext4_mb_init_backend(struct super_block *sb)
|
|||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
err_freebuddy:
|
err_freebuddy:
|
||||||
while (i >= 0) {
|
while (i-- > 0)
|
||||||
kfree(ext4_get_group_info(sb, i));
|
kfree(ext4_get_group_info(sb, i));
|
||||||
i--;
|
|
||||||
}
|
|
||||||
i = num_meta_group_infos;
|
i = num_meta_group_infos;
|
||||||
err_freemeta:
|
err_freemeta:
|
||||||
while (--i >= 0)
|
while (i-- > 0)
|
||||||
kfree(sbi->s_group_info[i]);
|
kfree(sbi->s_group_info[i]);
|
||||||
iput(sbi->s_buddy_cache);
|
iput(sbi->s_buddy_cache);
|
||||||
err_freesgi:
|
err_freesgi:
|
||||||
@ -2801,7 +2506,8 @@ int ext4_mb_release(struct super_block *sb)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void ext4_mb_free_committed_blocks(struct super_block *sb)
|
static noinline_for_stack void
|
||||||
|
ext4_mb_free_committed_blocks(struct super_block *sb)
|
||||||
{
|
{
|
||||||
struct ext4_sb_info *sbi = EXT4_SB(sb);
|
struct ext4_sb_info *sbi = EXT4_SB(sb);
|
||||||
int err;
|
int err;
|
||||||
@ -3021,7 +2727,8 @@ void exit_ext4_mballoc(void)
|
|||||||
* Check quota and mark choosed space (ac->ac_b_ex) non-free in bitmaps
|
* Check quota and mark choosed space (ac->ac_b_ex) non-free in bitmaps
|
||||||
* Returns 0 if success or error code
|
* Returns 0 if success or error code
|
||||||
*/
|
*/
|
||||||
static int ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
|
static noinline_for_stack int
|
||||||
|
ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
|
||||||
handle_t *handle)
|
handle_t *handle)
|
||||||
{
|
{
|
||||||
struct buffer_head *bitmap_bh = NULL;
|
struct buffer_head *bitmap_bh = NULL;
|
||||||
@ -3070,7 +2777,7 @@ static int ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
|
|||||||
in_range(block, ext4_inode_table(sb, gdp),
|
in_range(block, ext4_inode_table(sb, gdp),
|
||||||
EXT4_SB(sb)->s_itb_per_group)) {
|
EXT4_SB(sb)->s_itb_per_group)) {
|
||||||
|
|
||||||
ext4_error(sb, __FUNCTION__,
|
ext4_error(sb, __func__,
|
||||||
"Allocating block in system zone - block = %llu",
|
"Allocating block in system zone - block = %llu",
|
||||||
block);
|
block);
|
||||||
}
|
}
|
||||||
@ -3094,9 +2801,7 @@ static int ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
|
|||||||
ac->ac_b_ex.fe_group,
|
ac->ac_b_ex.fe_group,
|
||||||
gdp));
|
gdp));
|
||||||
}
|
}
|
||||||
gdp->bg_free_blocks_count =
|
le16_add_cpu(&gdp->bg_free_blocks_count, -ac->ac_b_ex.fe_len);
|
||||||
cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)
|
|
||||||
- ac->ac_b_ex.fe_len);
|
|
||||||
gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
|
gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
|
||||||
spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
|
spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
|
||||||
percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len);
|
percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len);
|
||||||
@ -3130,7 +2835,7 @@ static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
|
|||||||
ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_stripe;
|
ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_stripe;
|
||||||
else
|
else
|
||||||
ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
|
ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
|
||||||
mb_debug("#%u: goal %lu blocks for locality group\n",
|
mb_debug("#%u: goal %u blocks for locality group\n",
|
||||||
current->pid, ac->ac_g_ex.fe_len);
|
current->pid, ac->ac_g_ex.fe_len);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -3138,15 +2843,16 @@ static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
|
|||||||
* Normalization means making request better in terms of
|
* Normalization means making request better in terms of
|
||||||
* size and alignment
|
* size and alignment
|
||||||
*/
|
*/
|
||||||
static void ext4_mb_normalize_request(struct ext4_allocation_context *ac,
|
static noinline_for_stack void
|
||||||
|
ext4_mb_normalize_request(struct ext4_allocation_context *ac,
|
||||||
struct ext4_allocation_request *ar)
|
struct ext4_allocation_request *ar)
|
||||||
{
|
{
|
||||||
int bsbits, max;
|
int bsbits, max;
|
||||||
ext4_lblk_t end;
|
ext4_lblk_t end;
|
||||||
struct list_head *cur;
|
|
||||||
loff_t size, orig_size, start_off;
|
loff_t size, orig_size, start_off;
|
||||||
ext4_lblk_t start, orig_start;
|
ext4_lblk_t start, orig_start;
|
||||||
struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
|
struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
|
||||||
|
struct ext4_prealloc_space *pa;
|
||||||
|
|
||||||
/* do normalize only data requests, metadata requests
|
/* do normalize only data requests, metadata requests
|
||||||
do not need preallocation */
|
do not need preallocation */
|
||||||
@ -3232,12 +2938,9 @@ static void ext4_mb_normalize_request(struct ext4_allocation_context *ac,
|
|||||||
|
|
||||||
/* check we don't cross already preallocated blocks */
|
/* check we don't cross already preallocated blocks */
|
||||||
rcu_read_lock();
|
rcu_read_lock();
|
||||||
list_for_each_rcu(cur, &ei->i_prealloc_list) {
|
list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
|
||||||
struct ext4_prealloc_space *pa;
|
|
||||||
unsigned long pa_end;
|
unsigned long pa_end;
|
||||||
|
|
||||||
pa = list_entry(cur, struct ext4_prealloc_space, pa_inode_list);
|
|
||||||
|
|
||||||
if (pa->pa_deleted)
|
if (pa->pa_deleted)
|
||||||
continue;
|
continue;
|
||||||
spin_lock(&pa->pa_lock);
|
spin_lock(&pa->pa_lock);
|
||||||
@ -3279,10 +2982,8 @@ static void ext4_mb_normalize_request(struct ext4_allocation_context *ac,
|
|||||||
|
|
||||||
/* XXX: extra loop to check we really don't overlap preallocations */
|
/* XXX: extra loop to check we really don't overlap preallocations */
|
||||||
rcu_read_lock();
|
rcu_read_lock();
|
||||||
list_for_each_rcu(cur, &ei->i_prealloc_list) {
|
list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
|
||||||
struct ext4_prealloc_space *pa;
|
|
||||||
unsigned long pa_end;
|
unsigned long pa_end;
|
||||||
pa = list_entry(cur, struct ext4_prealloc_space, pa_inode_list);
|
|
||||||
spin_lock(&pa->pa_lock);
|
spin_lock(&pa->pa_lock);
|
||||||
if (pa->pa_deleted == 0) {
|
if (pa->pa_deleted == 0) {
|
||||||
pa_end = pa->pa_lstart + pa->pa_len;
|
pa_end = pa->pa_lstart + pa->pa_len;
|
||||||
@ -3374,7 +3075,7 @@ static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
|
|||||||
BUG_ON(pa->pa_free < len);
|
BUG_ON(pa->pa_free < len);
|
||||||
pa->pa_free -= len;
|
pa->pa_free -= len;
|
||||||
|
|
||||||
mb_debug("use %llu/%lu from inode pa %p\n", start, len, pa);
|
mb_debug("use %llu/%u from inode pa %p\n", start, len, pa);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -3404,12 +3105,12 @@ static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
|
|||||||
/*
|
/*
|
||||||
* search goal blocks in preallocated space
|
* search goal blocks in preallocated space
|
||||||
*/
|
*/
|
||||||
static int ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
|
static noinline_for_stack int
|
||||||
|
ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
|
||||||
{
|
{
|
||||||
struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
|
struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
|
||||||
struct ext4_locality_group *lg;
|
struct ext4_locality_group *lg;
|
||||||
struct ext4_prealloc_space *pa;
|
struct ext4_prealloc_space *pa;
|
||||||
struct list_head *cur;
|
|
||||||
|
|
||||||
/* only data can be preallocated */
|
/* only data can be preallocated */
|
||||||
if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
|
if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
|
||||||
@ -3417,8 +3118,7 @@ static int ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
|
|||||||
|
|
||||||
/* first, try per-file preallocation */
|
/* first, try per-file preallocation */
|
||||||
rcu_read_lock();
|
rcu_read_lock();
|
||||||
list_for_each_rcu(cur, &ei->i_prealloc_list) {
|
list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
|
||||||
pa = list_entry(cur, struct ext4_prealloc_space, pa_inode_list);
|
|
||||||
|
|
||||||
/* all fields in this condition don't change,
|
/* all fields in this condition don't change,
|
||||||
* so we can skip locking for them */
|
* so we can skip locking for them */
|
||||||
@ -3450,8 +3150,7 @@ static int ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
|
|||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
rcu_read_lock();
|
rcu_read_lock();
|
||||||
list_for_each_rcu(cur, &lg->lg_prealloc_list) {
|
list_for_each_entry_rcu(pa, &lg->lg_prealloc_list, pa_inode_list) {
|
||||||
pa = list_entry(cur, struct ext4_prealloc_space, pa_inode_list);
|
|
||||||
spin_lock(&pa->pa_lock);
|
spin_lock(&pa->pa_lock);
|
||||||
if (pa->pa_deleted == 0 && pa->pa_free >= ac->ac_o_ex.fe_len) {
|
if (pa->pa_deleted == 0 && pa->pa_free >= ac->ac_o_ex.fe_len) {
|
||||||
atomic_inc(&pa->pa_count);
|
atomic_inc(&pa->pa_count);
|
||||||
@ -3571,7 +3270,8 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
|
|||||||
/*
|
/*
|
||||||
* creates new preallocated space for given inode
|
* creates new preallocated space for given inode
|
||||||
*/
|
*/
|
||||||
static int ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
|
static noinline_for_stack int
|
||||||
|
ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
|
||||||
{
|
{
|
||||||
struct super_block *sb = ac->ac_sb;
|
struct super_block *sb = ac->ac_sb;
|
||||||
struct ext4_prealloc_space *pa;
|
struct ext4_prealloc_space *pa;
|
||||||
@ -3658,7 +3358,8 @@ static int ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
|
|||||||
/*
|
/*
|
||||||
* creates new preallocated space for locality group inodes belongs to
|
* creates new preallocated space for locality group inodes belongs to
|
||||||
*/
|
*/
|
||||||
static int ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
|
static noinline_for_stack int
|
||||||
|
ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
|
||||||
{
|
{
|
||||||
struct super_block *sb = ac->ac_sb;
|
struct super_block *sb = ac->ac_sb;
|
||||||
struct ext4_locality_group *lg;
|
struct ext4_locality_group *lg;
|
||||||
@ -3731,11 +3432,11 @@ static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
|
|||||||
* the caller MUST hold group/inode locks.
|
* the caller MUST hold group/inode locks.
|
||||||
* TODO: optimize the case when there are no in-core structures yet
|
* TODO: optimize the case when there are no in-core structures yet
|
||||||
*/
|
*/
|
||||||
static int ext4_mb_release_inode_pa(struct ext4_buddy *e4b,
|
static noinline_for_stack int
|
||||||
struct buffer_head *bitmap_bh,
|
ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
|
||||||
struct ext4_prealloc_space *pa)
|
struct ext4_prealloc_space *pa,
|
||||||
|
struct ext4_allocation_context *ac)
|
||||||
{
|
{
|
||||||
struct ext4_allocation_context *ac;
|
|
||||||
struct super_block *sb = e4b->bd_sb;
|
struct super_block *sb = e4b->bd_sb;
|
||||||
struct ext4_sb_info *sbi = EXT4_SB(sb);
|
struct ext4_sb_info *sbi = EXT4_SB(sb);
|
||||||
unsigned long end;
|
unsigned long end;
|
||||||
@ -3751,8 +3452,6 @@ static int ext4_mb_release_inode_pa(struct ext4_buddy *e4b,
|
|||||||
BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
|
BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
|
||||||
end = bit + pa->pa_len;
|
end = bit + pa->pa_len;
|
||||||
|
|
||||||
ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
|
|
||||||
|
|
||||||
if (ac) {
|
if (ac) {
|
||||||
ac->ac_sb = sb;
|
ac->ac_sb = sb;
|
||||||
ac->ac_inode = pa->pa_inode;
|
ac->ac_inode = pa->pa_inode;
|
||||||
@ -3789,7 +3488,7 @@ static int ext4_mb_release_inode_pa(struct ext4_buddy *e4b,
|
|||||||
pa, (unsigned long) pa->pa_lstart,
|
pa, (unsigned long) pa->pa_lstart,
|
||||||
(unsigned long) pa->pa_pstart,
|
(unsigned long) pa->pa_pstart,
|
||||||
(unsigned long) pa->pa_len);
|
(unsigned long) pa->pa_len);
|
||||||
ext4_error(sb, __FUNCTION__, "free %u, pa_free %u\n",
|
ext4_error(sb, __func__, "free %u, pa_free %u\n",
|
||||||
free, pa->pa_free);
|
free, pa->pa_free);
|
||||||
/*
|
/*
|
||||||
* pa is already deleted so we use the value obtained
|
* pa is already deleted so we use the value obtained
|
||||||
@ -3797,22 +3496,19 @@ static int ext4_mb_release_inode_pa(struct ext4_buddy *e4b,
|
|||||||
*/
|
*/
|
||||||
}
|
}
|
||||||
atomic_add(free, &sbi->s_mb_discarded);
|
atomic_add(free, &sbi->s_mb_discarded);
|
||||||
if (ac)
|
|
||||||
kmem_cache_free(ext4_ac_cachep, ac);
|
|
||||||
|
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int ext4_mb_release_group_pa(struct ext4_buddy *e4b,
|
static noinline_for_stack int
|
||||||
struct ext4_prealloc_space *pa)
|
ext4_mb_release_group_pa(struct ext4_buddy *e4b,
|
||||||
|
struct ext4_prealloc_space *pa,
|
||||||
|
struct ext4_allocation_context *ac)
|
||||||
{
|
{
|
||||||
struct ext4_allocation_context *ac;
|
|
||||||
struct super_block *sb = e4b->bd_sb;
|
struct super_block *sb = e4b->bd_sb;
|
||||||
ext4_group_t group;
|
ext4_group_t group;
|
||||||
ext4_grpblk_t bit;
|
ext4_grpblk_t bit;
|
||||||
|
|
||||||
ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
|
|
||||||
|
|
||||||
if (ac)
|
if (ac)
|
||||||
ac->ac_op = EXT4_MB_HISTORY_DISCARD;
|
ac->ac_op = EXT4_MB_HISTORY_DISCARD;
|
||||||
|
|
||||||
@ -3830,7 +3526,6 @@ static int ext4_mb_release_group_pa(struct ext4_buddy *e4b,
|
|||||||
ac->ac_b_ex.fe_len = pa->pa_len;
|
ac->ac_b_ex.fe_len = pa->pa_len;
|
||||||
ac->ac_b_ex.fe_logical = 0;
|
ac->ac_b_ex.fe_logical = 0;
|
||||||
ext4_mb_store_history(ac);
|
ext4_mb_store_history(ac);
|
||||||
kmem_cache_free(ext4_ac_cachep, ac);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
@ -3845,12 +3540,14 @@ static int ext4_mb_release_group_pa(struct ext4_buddy *e4b,
|
|||||||
* - how many do we discard
|
* - how many do we discard
|
||||||
* 1) how many requested
|
* 1) how many requested
|
||||||
*/
|
*/
|
||||||
static int ext4_mb_discard_group_preallocations(struct super_block *sb,
|
static noinline_for_stack int
|
||||||
|
ext4_mb_discard_group_preallocations(struct super_block *sb,
|
||||||
ext4_group_t group, int needed)
|
ext4_group_t group, int needed)
|
||||||
{
|
{
|
||||||
struct ext4_group_info *grp = ext4_get_group_info(sb, group);
|
struct ext4_group_info *grp = ext4_get_group_info(sb, group);
|
||||||
struct buffer_head *bitmap_bh = NULL;
|
struct buffer_head *bitmap_bh = NULL;
|
||||||
struct ext4_prealloc_space *pa, *tmp;
|
struct ext4_prealloc_space *pa, *tmp;
|
||||||
|
struct ext4_allocation_context *ac;
|
||||||
struct list_head list;
|
struct list_head list;
|
||||||
struct ext4_buddy e4b;
|
struct ext4_buddy e4b;
|
||||||
int err;
|
int err;
|
||||||
@ -3878,6 +3575,7 @@ static int ext4_mb_discard_group_preallocations(struct super_block *sb,
|
|||||||
grp = ext4_get_group_info(sb, group);
|
grp = ext4_get_group_info(sb, group);
|
||||||
INIT_LIST_HEAD(&list);
|
INIT_LIST_HEAD(&list);
|
||||||
|
|
||||||
|
ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
|
||||||
repeat:
|
repeat:
|
||||||
ext4_lock_group(sb, group);
|
ext4_lock_group(sb, group);
|
||||||
list_for_each_entry_safe(pa, tmp,
|
list_for_each_entry_safe(pa, tmp,
|
||||||
@ -3932,9 +3630,9 @@ static int ext4_mb_discard_group_preallocations(struct super_block *sb,
|
|||||||
spin_unlock(pa->pa_obj_lock);
|
spin_unlock(pa->pa_obj_lock);
|
||||||
|
|
||||||
if (pa->pa_linear)
|
if (pa->pa_linear)
|
||||||
ext4_mb_release_group_pa(&e4b, pa);
|
ext4_mb_release_group_pa(&e4b, pa, ac);
|
||||||
else
|
else
|
||||||
ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
|
ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac);
|
||||||
|
|
||||||
list_del(&pa->u.pa_tmp_list);
|
list_del(&pa->u.pa_tmp_list);
|
||||||
call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
|
call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
|
||||||
@ -3942,6 +3640,8 @@ static int ext4_mb_discard_group_preallocations(struct super_block *sb,
|
|||||||
|
|
||||||
out:
|
out:
|
||||||
ext4_unlock_group(sb, group);
|
ext4_unlock_group(sb, group);
|
||||||
|
if (ac)
|
||||||
|
kmem_cache_free(ext4_ac_cachep, ac);
|
||||||
ext4_mb_release_desc(&e4b);
|
ext4_mb_release_desc(&e4b);
|
||||||
put_bh(bitmap_bh);
|
put_bh(bitmap_bh);
|
||||||
return free;
|
return free;
|
||||||
@ -3962,6 +3662,7 @@ void ext4_mb_discard_inode_preallocations(struct inode *inode)
|
|||||||
struct super_block *sb = inode->i_sb;
|
struct super_block *sb = inode->i_sb;
|
||||||
struct buffer_head *bitmap_bh = NULL;
|
struct buffer_head *bitmap_bh = NULL;
|
||||||
struct ext4_prealloc_space *pa, *tmp;
|
struct ext4_prealloc_space *pa, *tmp;
|
||||||
|
struct ext4_allocation_context *ac;
|
||||||
ext4_group_t group = 0;
|
ext4_group_t group = 0;
|
||||||
struct list_head list;
|
struct list_head list;
|
||||||
struct ext4_buddy e4b;
|
struct ext4_buddy e4b;
|
||||||
@ -3976,6 +3677,7 @@ void ext4_mb_discard_inode_preallocations(struct inode *inode)
|
|||||||
|
|
||||||
INIT_LIST_HEAD(&list);
|
INIT_LIST_HEAD(&list);
|
||||||
|
|
||||||
|
ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
|
||||||
repeat:
|
repeat:
|
||||||
/* first, collect all pa's in the inode */
|
/* first, collect all pa's in the inode */
|
||||||
spin_lock(&ei->i_prealloc_lock);
|
spin_lock(&ei->i_prealloc_lock);
|
||||||
@ -4040,7 +3742,7 @@ void ext4_mb_discard_inode_preallocations(struct inode *inode)
|
|||||||
|
|
||||||
ext4_lock_group(sb, group);
|
ext4_lock_group(sb, group);
|
||||||
list_del(&pa->pa_group_list);
|
list_del(&pa->pa_group_list);
|
||||||
ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
|
ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac);
|
||||||
ext4_unlock_group(sb, group);
|
ext4_unlock_group(sb, group);
|
||||||
|
|
||||||
ext4_mb_release_desc(&e4b);
|
ext4_mb_release_desc(&e4b);
|
||||||
@ -4049,6 +3751,8 @@ void ext4_mb_discard_inode_preallocations(struct inode *inode)
|
|||||||
list_del(&pa->u.pa_tmp_list);
|
list_del(&pa->u.pa_tmp_list);
|
||||||
call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
|
call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
|
||||||
}
|
}
|
||||||
|
if (ac)
|
||||||
|
kmem_cache_free(ext4_ac_cachep, ac);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -4108,7 +3812,7 @@ static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
|
|||||||
printk(KERN_ERR "PA:%lu:%d:%u \n", i,
|
printk(KERN_ERR "PA:%lu:%d:%u \n", i,
|
||||||
start, pa->pa_len);
|
start, pa->pa_len);
|
||||||
}
|
}
|
||||||
ext4_lock_group(sb, i);
|
ext4_unlock_group(sb, i);
|
||||||
|
|
||||||
if (grp->bb_free == 0)
|
if (grp->bb_free == 0)
|
||||||
continue;
|
continue;
|
||||||
@ -4167,7 +3871,8 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
|
|||||||
mutex_lock(&ac->ac_lg->lg_mutex);
|
mutex_lock(&ac->ac_lg->lg_mutex);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int ext4_mb_initialize_context(struct ext4_allocation_context *ac,
|
static noinline_for_stack int
|
||||||
|
ext4_mb_initialize_context(struct ext4_allocation_context *ac,
|
||||||
struct ext4_allocation_request *ar)
|
struct ext4_allocation_request *ar)
|
||||||
{
|
{
|
||||||
struct super_block *sb = ar->inode->i_sb;
|
struct super_block *sb = ar->inode->i_sb;
|
||||||
@ -4398,7 +4103,8 @@ static void ext4_mb_poll_new_transaction(struct super_block *sb,
|
|||||||
ext4_mb_free_committed_blocks(sb);
|
ext4_mb_free_committed_blocks(sb);
|
||||||
}
|
}
|
||||||
|
|
||||||
static int ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
|
static noinline_for_stack int
|
||||||
|
ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
|
||||||
ext4_group_t group, ext4_grpblk_t block, int count)
|
ext4_group_t group, ext4_grpblk_t block, int count)
|
||||||
{
|
{
|
||||||
struct ext4_group_info *db = e4b->bd_info;
|
struct ext4_group_info *db = e4b->bd_info;
|
||||||
@ -4489,7 +4195,7 @@ void ext4_mb_free_blocks(handle_t *handle, struct inode *inode,
|
|||||||
if (block < le32_to_cpu(es->s_first_data_block) ||
|
if (block < le32_to_cpu(es->s_first_data_block) ||
|
||||||
block + count < block ||
|
block + count < block ||
|
||||||
block + count > ext4_blocks_count(es)) {
|
block + count > ext4_blocks_count(es)) {
|
||||||
ext4_error(sb, __FUNCTION__,
|
ext4_error(sb, __func__,
|
||||||
"Freeing blocks not in datazone - "
|
"Freeing blocks not in datazone - "
|
||||||
"block = %lu, count = %lu", block, count);
|
"block = %lu, count = %lu", block, count);
|
||||||
goto error_return;
|
goto error_return;
|
||||||
@ -4530,7 +4236,7 @@ void ext4_mb_free_blocks(handle_t *handle, struct inode *inode,
|
|||||||
in_range(block + count - 1, ext4_inode_table(sb, gdp),
|
in_range(block + count - 1, ext4_inode_table(sb, gdp),
|
||||||
EXT4_SB(sb)->s_itb_per_group)) {
|
EXT4_SB(sb)->s_itb_per_group)) {
|
||||||
|
|
||||||
ext4_error(sb, __FUNCTION__,
|
ext4_error(sb, __func__,
|
||||||
"Freeing blocks in system zone - "
|
"Freeing blocks in system zone - "
|
||||||
"Block = %lu, count = %lu", block, count);
|
"Block = %lu, count = %lu", block, count);
|
||||||
}
|
}
|
||||||
@ -4588,8 +4294,7 @@ void ext4_mb_free_blocks(handle_t *handle, struct inode *inode,
|
|||||||
}
|
}
|
||||||
|
|
||||||
spin_lock(sb_bgl_lock(sbi, block_group));
|
spin_lock(sb_bgl_lock(sbi, block_group));
|
||||||
gdp->bg_free_blocks_count =
|
le16_add_cpu(&gdp->bg_free_blocks_count, count);
|
||||||
cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) + count);
|
|
||||||
gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
|
gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
|
||||||
spin_unlock(sb_bgl_lock(sbi, block_group));
|
spin_unlock(sb_bgl_lock(sbi, block_group));
|
||||||
percpu_counter_add(&sbi->s_freeblocks_counter, count);
|
percpu_counter_add(&sbi->s_freeblocks_counter, count);
|
||||||
|
304
fs/ext4/mballoc.h
Normal file
304
fs/ext4/mballoc.h
Normal file
@ -0,0 +1,304 @@
+/*
+ *  fs/ext4/mballoc.h
+ *
+ *  Written by: Alex Tomas <alex@clusterfs.com>
+ *
+ */
+#ifndef _EXT4_MBALLOC_H
+#define _EXT4_MBALLOC_H
+
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/quotaops.h>
+#include <linux/buffer_head.h>
+#include <linux/module.h>
+#include <linux/swap.h>
+#include <linux/proc_fs.h>
+#include <linux/pagemap.h>
+#include <linux/seq_file.h>
+#include <linux/version.h>
+#include "ext4_jbd2.h"
+#include "ext4.h"
+#include "group.h"
+
+/*
+ * with AGGRESSIVE_CHECK allocator runs consistency checks over
+ * structures. these checks slow things down a lot
+ */
+#define AGGRESSIVE_CHECK__
+
+/*
+ * with DOUBLE_CHECK defined mballoc creates persistent in-core
+ * bitmaps, maintains and uses them to check for double allocations
+ */
+#define DOUBLE_CHECK__
+
+/*
+ */
+#define MB_DEBUG__
+#ifdef MB_DEBUG
+#define mb_debug(fmt, a...)	printk(fmt, ##a)
+#else
+#define mb_debug(fmt, a...)
+#endif
+
+/*
+ * with EXT4_MB_HISTORY mballoc stores last N allocations in memory
+ * and you can monitor it in /proc/fs/ext4/<dev>/mb_history
+ */
+#define EXT4_MB_HISTORY
+#define EXT4_MB_HISTORY_ALLOC		1	/* allocation */
+#define EXT4_MB_HISTORY_PREALLOC	2	/* preallocated blocks used */
+#define EXT4_MB_HISTORY_DISCARD		4	/* preallocation discarded */
+#define EXT4_MB_HISTORY_FREE		8	/* free */
+
+#define EXT4_MB_HISTORY_DEFAULT		(EXT4_MB_HISTORY_ALLOC | \
+					 EXT4_MB_HISTORY_PREALLOC)
+
+/*
+ * How long mballoc can look for a best extent (in found extents)
+ */
+#define MB_DEFAULT_MAX_TO_SCAN		200
+
+/*
+ * How long mballoc must look for a best extent
+ */
+#define MB_DEFAULT_MIN_TO_SCAN		10
+
+/*
+ * How many groups mballoc will scan looking for the best chunk
+ */
+#define MB_DEFAULT_MAX_GROUPS_TO_SCAN	5
+
+/*
+ * with 'ext4_mb_stats' allocator will collect stats that will be
+ * shown at umount. The collecting costs though!
+ */
+#define MB_DEFAULT_STATS		1
+
+/*
+ * files smaller than MB_DEFAULT_STREAM_THRESHOLD are served
+ * by the stream allocator, which purpose is to pack requests
+ * as close each to other as possible to produce smooth I/O traffic
+ * We use locality group prealloc space for stream request.
+ * We can tune the same via /proc/fs/ext4/<parition>/stream_req
+ */
+#define MB_DEFAULT_STREAM_THRESHOLD	16	/* 64K */
+
+/*
+ * for which requests use 2^N search using buddies
+ */
+#define MB_DEFAULT_ORDER2_REQS		2
+
+/*
+ * default group prealloc size 512 blocks
+ */
+#define MB_DEFAULT_GROUP_PREALLOC	512
+
+static struct kmem_cache *ext4_pspace_cachep;
+static struct kmem_cache *ext4_ac_cachep;
+
+#ifdef EXT4_BB_MAX_BLOCKS
+#undef EXT4_BB_MAX_BLOCKS
+#endif
+#define EXT4_BB_MAX_BLOCKS	30
+
+struct ext4_free_metadata {
+	ext4_group_t group;
+	unsigned short num;
+	ext4_grpblk_t  blocks[EXT4_BB_MAX_BLOCKS];
+	struct list_head list;
+};
+
+struct ext4_group_info {
+	unsigned long	bb_state;
+	unsigned long	bb_tid;
+	struct ext4_free_metadata *bb_md_cur;
+	unsigned short	bb_first_free;
+	unsigned short	bb_free;
+	unsigned short	bb_fragments;
+	struct		list_head bb_prealloc_list;
+#ifdef DOUBLE_CHECK
+	void		*bb_bitmap;
+#endif
+	unsigned short	bb_counters[];
+};
+
+#define EXT4_GROUP_INFO_NEED_INIT_BIT	0
+#define EXT4_GROUP_INFO_LOCKED_BIT	1
+
+#define EXT4_MB_GRP_NEED_INIT(grp)	\
+	(test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))
+
+
+struct ext4_prealloc_space {
+	struct list_head	pa_inode_list;
+	struct list_head	pa_group_list;
+	union {
+		struct list_head pa_tmp_list;
+		struct rcu_head	pa_rcu;
+	} u;
+	spinlock_t		pa_lock;
+	atomic_t		pa_count;
+	unsigned		pa_deleted;
+	ext4_fsblk_t		pa_pstart;	/* phys. block */
+	ext4_lblk_t		pa_lstart;	/* log. block */
+	unsigned short		pa_len;		/* len of preallocated chunk */
+	unsigned short		pa_free;	/* how many blocks are free */
+	unsigned short		pa_linear;	/* consumed in one direction
+						 * strictly, for grp prealloc */
+	spinlock_t		*pa_obj_lock;
+	struct inode		*pa_inode;	/* hack, for history only */
+};
+
+
+struct ext4_free_extent {
+	ext4_lblk_t fe_logical;
+	ext4_grpblk_t fe_start;
+	ext4_group_t fe_group;
+	int fe_len;
+};
+
+/*
+ * Locality group:
+ *   we try to group all related changes together
+ *   so that writeback can flush/allocate them together as well
+ */
+struct ext4_locality_group {
+	/* for allocator */
+	struct mutex		lg_mutex;	/* to serialize allocates */
+	struct list_head	lg_prealloc_list;/* list of preallocations */
+	spinlock_t		lg_prealloc_lock;
+};
+
+struct ext4_allocation_context {
+	struct inode *ac_inode;
+	struct super_block *ac_sb;
+
+	/* original request */
+	struct ext4_free_extent ac_o_ex;
+
+	/* goal request (after normalization) */
+	struct ext4_free_extent ac_g_ex;
+
+	/* the best found extent */
+	struct ext4_free_extent ac_b_ex;
+
+	/* copy of the bext found extent taken before preallocation efforts */
+	struct ext4_free_extent ac_f_ex;
+
+	/* number of iterations done. we have to track to limit searching */
+	unsigned long ac_ex_scanned;
+	__u16 ac_groups_scanned;
+	__u16 ac_found;
+	__u16 ac_tail;
+	__u16 ac_buddy;
+	__u16 ac_flags;		/* allocation hints */
+	__u8 ac_status;
+	__u8 ac_criteria;
+	__u8 ac_repeats;
+	__u8 ac_2order;		/* if request is to allocate 2^N blocks and
+				 * N > 0, the field stores N, otherwise 0 */
+	__u8 ac_op;		/* operation, for history only */
+	struct page *ac_bitmap_page;
+	struct page *ac_buddy_page;
+	struct ext4_prealloc_space *ac_pa;
+	struct ext4_locality_group *ac_lg;
+};
+
+#define AC_STATUS_CONTINUE	1
+#define AC_STATUS_FOUND		2
+#define AC_STATUS_BREAK		3
+
+struct ext4_mb_history {
+	struct ext4_free_extent orig;	/* orig allocation */
+	struct ext4_free_extent goal;	/* goal allocation */
+	struct ext4_free_extent result;	/* result allocation */
+	unsigned pid;
+	unsigned ino;
+	__u16 found;	/* how many extents have been found */
+	__u16 groups;	/* how many groups have been scanned */
+	__u16 tail;	/* what tail broke some buddy */
+	__u16 buddy;	/* buddy the tail ^^^ broke */
+	__u16 flags;
+	__u8 cr:3;	/* which phase the result extent was found at */
+	__u8 op:4;
+	__u8 merged:1;
+};
+
+struct ext4_buddy {
+	struct page *bd_buddy_page;
+	void *bd_buddy;
+	struct page *bd_bitmap_page;
+	void *bd_bitmap;
+	struct ext4_group_info *bd_info;
+	struct super_block *bd_sb;
+	__u16 bd_blkbits;
+	ext4_group_t bd_group;
+};
+#define EXT4_MB_BITMAP(e4b)	((e4b)->bd_bitmap)
+#define EXT4_MB_BUDDY(e4b)	((e4b)->bd_buddy)
+
+#ifndef EXT4_MB_HISTORY
+static inline void ext4_mb_store_history(struct ext4_allocation_context *ac)
+{
+	return;
+}
+#else
+static void ext4_mb_store_history(struct ext4_allocation_context *ac);
+#endif
+
+#define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)
+
+static struct proc_dir_entry *proc_root_ext4;
+struct buffer_head *read_block_bitmap(struct super_block *, ext4_group_t);
+
+static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
+					ext4_group_t group);
+static void ext4_mb_poll_new_transaction(struct super_block *, handle_t *);
+static void ext4_mb_free_committed_blocks(struct super_block *);
+static void ext4_mb_return_to_preallocation(struct inode *inode,
+					struct ext4_buddy *e4b, sector_t block,
+					int count);
+static void ext4_mb_put_pa(struct ext4_allocation_context *,
+			struct super_block *, struct ext4_prealloc_space *pa);
+static int ext4_mb_init_per_dev_proc(struct super_block *sb);
+static int ext4_mb_destroy_per_dev_proc(struct super_block *sb);
+
+
+static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
+{
+	struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
+
+	bit_spin_lock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
+}
+
+static inline void ext4_unlock_group(struct super_block *sb,
+					ext4_group_t group)
+{
+	struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
+
+	bit_spin_unlock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
+}
+
+static inline int ext4_is_group_locked(struct super_block *sb,
+					ext4_group_t group)
+{
+	struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
+
+	return bit_spin_is_locked(EXT4_GROUP_INFO_LOCKED_BIT,
+						&(grinfo->bb_state));
+}
+
+static ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
+					struct ext4_free_extent *fex)
+{
+	ext4_fsblk_t block;
+
+	block = (ext4_fsblk_t) fex->fe_group * EXT4_BLOCKS_PER_GROUP(sb)
+			+ fex->fe_start
+			+ le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
+	return block;
+}
+#endif
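The new mballoc.h above defines a per-group bit spinlock (EXT4_GROUP_INFO_LOCKED_BIT) and the ext4_lock_group()/ext4_unlock_group() wrappers around it. The sketch below is an assumed usage pattern, not a hunk from this series: a caller that adjusts per-group bookkeeping would bracket the update with those helpers so that updates to one group do not race.

	/* Hypothetical caller: adjust a group's free-block count under the
	 * per-group bit spinlock declared in mballoc.h. */
	static void example_account_free(struct super_block *sb,
					 ext4_group_t group, int count)
	{
		struct ext4_group_info *grp = ext4_get_group_info(sb, group);

		ext4_lock_group(sb, group);	/* bit_spin_lock on grp->bb_state */
		grp->bb_free += count;
		ext4_unlock_group(sb, group);	/* bit_spin_unlock */
	}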
@@ -13,8 +13,8 @@
  */

 #include <linux/module.h>
-#include <linux/ext4_jbd2.h>
-#include <linux/ext4_fs_extents.h>
+#include "ext4_jbd2.h"
+#include "ext4_extents.h"

 /*
  * The contiguous blocks details which can be
@@ -327,7 +327,7 @@ static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data)
 }

 static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
					struct inode *tmp_inode)
 {
	int retval;
	__le32 i_data[3];
@@ -339,7 +339,7 @@ static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
	 * i_data field of the original inode
	 */
	retval = ext4_journal_extend(handle, 1);
-	if (retval != 0) {
+	if (retval) {
		retval = ext4_journal_restart(handle, 1);
		if (retval)
			goto err_out;
@@ -350,6 +350,18 @@ static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
	i_data[2] = ei->i_data[EXT4_TIND_BLOCK];

	down_write(&EXT4_I(inode)->i_data_sem);
+	/*
+	 * if EXT4_EXT_MIGRATE is cleared a block allocation
+	 * happened after we started the migrate. We need to
+	 * fail the migrate
+	 */
+	if (!(EXT4_I(inode)->i_flags & EXT4_EXT_MIGRATE)) {
+		retval = -EAGAIN;
+		up_write(&EXT4_I(inode)->i_data_sem);
+		goto err_out;
+	} else
+		EXT4_I(inode)->i_flags = EXT4_I(inode)->i_flags &
+							~EXT4_EXT_MIGRATE;
	/*
	 * We have the extent map build with the tmp inode.
	 * Now copy the i_data across
@@ -508,6 +520,17 @@ int ext4_ext_migrate(struct inode *inode, struct file *filp,
	 * switch the inode format to prevent read.
	 */
	mutex_lock(&(inode->i_mutex));
+	/*
+	 * Even though we take i_mutex we can still cause block allocation
+	 * via mmap write to holes. If we have allocated new blocks we fail
+	 * migrate. New block allocation will clear EXT4_EXT_MIGRATE flag.
+	 * The flag is updated with i_data_sem held to prevent racing with
+	 * block allocation.
+	 */
+	down_read((&EXT4_I(inode)->i_data_sem));
+	EXT4_I(inode)->i_flags = EXT4_I(inode)->i_flags | EXT4_EXT_MIGRATE;
+	up_read((&EXT4_I(inode)->i_data_sem));
+
	handle = ext4_journal_start(inode, 1);

	ei = EXT4_I(inode);
@@ -559,9 +582,15 @@ int ext4_ext_migrate(struct inode *inode, struct file *filp,
		 * tmp_inode
		 */
		free_ext_block(handle, tmp_inode);
-	else
-		retval = ext4_ext_swap_inode_data(handle, inode,
-						tmp_inode);
+	else {
+		retval = ext4_ext_swap_inode_data(handle, inode, tmp_inode);
+		if (retval)
+			/*
+			 * if we fail to swap inode data free the extent
+			 * details of the tmp inode
+			 */
+			free_ext_block(handle, tmp_inode);
+	}

	/* We mark the tmp_inode dirty via ext4_ext_tree_init. */
	if (ext4_journal_extend(handle, 1) != 0)
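The two added blocks in the migrate hunks cooperate: ext4_ext_migrate() sets EXT4_EXT_MIGRATE under i_data_sem before copying the extent map, any block allocation that happens in the meantime clears the flag, and ext4_ext_swap_inode_data() checks it again before committing, failing with -EAGAIN if it raced. The stand-alone sketch below (user-space, with a plain flag word and a mutex standing in for the inode flags and i_data_sem, all names hypothetical) only illustrates that set-flag / clear-on-conflict / check-before-commit pattern.

	#include <pthread.h>
	#include <stdio.h>

	#define MIGRATE_FLAG 0x1

	static pthread_mutex_t data_sem = PTHREAD_MUTEX_INITIALIZER;
	static unsigned int flags;

	static void start_migrate(void)
	{
		pthread_mutex_lock(&data_sem);
		flags |= MIGRATE_FLAG;		/* like setting EXT4_EXT_MIGRATE */
		pthread_mutex_unlock(&data_sem);
	}

	static void allocate_block(void)
	{
		pthread_mutex_lock(&data_sem);
		flags &= ~MIGRATE_FLAG;		/* an allocation invalidates the migrate */
		pthread_mutex_unlock(&data_sem);
	}

	static int commit_migrate(void)
	{
		int ret = 0;

		pthread_mutex_lock(&data_sem);
		if (!(flags & MIGRATE_FLAG))
			ret = -1;		/* raced with an allocation, bail like -EAGAIN */
		else
			flags &= ~MIGRATE_FLAG;
		pthread_mutex_unlock(&data_sem);
		return ret;
	}

	int main(void)
	{
		start_migrate();
		allocate_block();	/* simulated concurrent writer */
		printf("commit: %d\n", commit_migrate());	/* prints -1 */
		return 0;
	}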
@@ -28,14 +28,14 @@
 #include <linux/pagemap.h>
 #include <linux/jbd2.h>
 #include <linux/time.h>
-#include <linux/ext4_fs.h>
-#include <linux/ext4_jbd2.h>
 #include <linux/fcntl.h>
 #include <linux/stat.h>
 #include <linux/string.h>
 #include <linux/quotaops.h>
 #include <linux/buffer_head.h>
 #include <linux/bio.h>
+#include "ext4.h"
+#include "ext4_jbd2.h"

 #include "namei.h"
 #include "xattr.h"
@@ -57,10 +57,15 @@ static struct buffer_head *ext4_append(handle_t *handle,

	*block = inode->i_size >> inode->i_sb->s_blocksize_bits;

-	if ((bh = ext4_bread(handle, inode, *block, 1, err))) {
+	bh = ext4_bread(handle, inode, *block, 1, err);
+	if (bh) {
		inode->i_size += inode->i_sb->s_blocksize;
		EXT4_I(inode)->i_disksize = inode->i_size;
-		ext4_journal_get_write_access(handle,bh);
+		*err = ext4_journal_get_write_access(handle, bh);
+		if (*err) {
+			brelse(bh);
+			bh = NULL;
+		}
	}
	return bh;
 }
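The hunk above no longer ignores the return value of ext4_journal_get_write_access(): the error is surfaced through *err and the buffer reference is dropped so the caller never sees a block that was never journaled. A hypothetical helper expressing the same acquire/verify/release shape (this is a sketch, not code from the patch) would look like:

	/* Hypothetical: hand back the buffer only if write access was granted;
	 * otherwise release the reference and report the error. */
	static struct buffer_head *get_journaled_block(handle_t *handle,
						       struct buffer_head *bh,
						       int *err)
	{
		*err = ext4_journal_get_write_access(handle, bh);
		if (*err) {
			brelse(bh);	/* caller must not see a stale bh */
			return NULL;
		}
		return bh;
	}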
@@ -348,7 +353,7 @@ dx_probe(struct dentry *dentry, struct inode *dir,
	if (root->info.hash_version != DX_HASH_TEA &&
	    root->info.hash_version != DX_HASH_HALF_MD4 &&
	    root->info.hash_version != DX_HASH_LEGACY) {
-		ext4_warning(dir->i_sb, __FUNCTION__,
+		ext4_warning(dir->i_sb, __func__,
			     "Unrecognised inode hash code %d",
			     root->info.hash_version);
		brelse(bh);
@@ -362,7 +367,7 @@ dx_probe(struct dentry *dentry, struct inode *dir,
	hash = hinfo->hash;

	if (root->info.unused_flags & 1) {
-		ext4_warning(dir->i_sb, __FUNCTION__,
+		ext4_warning(dir->i_sb, __func__,
			     "Unimplemented inode hash flags: %#06x",
			     root->info.unused_flags);
		brelse(bh);
@@ -371,7 +376,7 @@ dx_probe(struct dentry *dentry, struct inode *dir,
	}

	if ((indirect = root->info.indirect_levels) > 1) {
-		ext4_warning(dir->i_sb, __FUNCTION__,
+		ext4_warning(dir->i_sb, __func__,
			     "Unimplemented inode hash depth: %#06x",
			     root->info.indirect_levels);
		brelse(bh);
@@ -384,7 +389,7 @@ dx_probe(struct dentry *dentry, struct inode *dir,

	if (dx_get_limit(entries) != dx_root_limit(dir,
						    root->info.info_length)) {
-		ext4_warning(dir->i_sb, __FUNCTION__,
+		ext4_warning(dir->i_sb, __func__,
			     "dx entry: limit != root limit");
		brelse(bh);
		*err = ERR_BAD_DX_DIR;
@@ -396,7 +401,7 @@ dx_probe(struct dentry *dentry, struct inode *dir,
	{
		count = dx_get_count(entries);
		if (!count || count > dx_get_limit(entries)) {
-			ext4_warning(dir->i_sb, __FUNCTION__,
+			ext4_warning(dir->i_sb, __func__,
				     "dx entry: no count or count > limit");
			brelse(bh);
			*err = ERR_BAD_DX_DIR;
@@ -441,7 +446,7 @@ dx_probe(struct dentry *dentry, struct inode *dir,
			goto fail2;
		at = entries = ((struct dx_node *) bh->b_data)->entries;
		if (dx_get_limit(entries) != dx_node_limit (dir)) {
-			ext4_warning(dir->i_sb, __FUNCTION__,
+			ext4_warning(dir->i_sb, __func__,
				     "dx entry: limit != node limit");
			brelse(bh);
			*err = ERR_BAD_DX_DIR;
@@ -457,7 +462,7 @@ dx_probe(struct dentry *dentry, struct inode *dir,
	}
fail:
	if (*err == ERR_BAD_DX_DIR)
-		ext4_warning(dir->i_sb, __FUNCTION__,
+		ext4_warning(dir->i_sb, __func__,
			     "Corrupt dir inode %ld, running e2fsck is "
			     "recommended.", dir->i_ino);
	return NULL;
@@ -914,7 +919,7 @@ static struct buffer_head * ext4_find_entry (struct dentry *dentry,
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh)) {
			/* read error, skip block & hope for the best */
-			ext4_error(sb, __FUNCTION__, "reading directory #%lu "
+			ext4_error(sb, __func__, "reading directory #%lu "
				   "offset %lu", dir->i_ino,
				   (unsigned long)block);
			brelse(bh);
@@ -1007,7 +1012,7 @@ static struct buffer_head * ext4_dx_find_entry(struct dentry *dentry,
		retval = ext4_htree_next_block(dir, hash, frame,
					       frames, NULL);
		if (retval < 0) {
-			ext4_warning(sb, __FUNCTION__,
+			ext4_warning(sb, __func__,
			     "error reading index page in directory #%lu",
			     dir->i_ino);
			*err = retval;
@@ -1532,7 +1537,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,

		if (levels && (dx_get_count(frames->entries) ==
			       dx_get_limit(frames->entries))) {
-			ext4_warning(sb, __FUNCTION__,
+			ext4_warning(sb, __func__,
				     "Directory index full!");
			err = -ENOSPC;
			goto cleanup;
@@ -1860,11 +1865,11 @@ static int empty_dir (struct inode * inode)
	if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2) ||
	    !(bh = ext4_bread (NULL, inode, 0, 0, &err))) {
		if (err)
-			ext4_error(inode->i_sb, __FUNCTION__,
+			ext4_error(inode->i_sb, __func__,
				   "error %d reading directory #%lu offset 0",
				   err, inode->i_ino);
		else
-			ext4_warning(inode->i_sb, __FUNCTION__,
+			ext4_warning(inode->i_sb, __func__,
				     "bad directory (dir #%lu) - no data block",
				     inode->i_ino);
		return 1;
@@ -1893,7 +1898,7 @@ static int empty_dir (struct inode * inode)
				    offset >> EXT4_BLOCK_SIZE_BITS(sb), 0, &err);
			if (!bh) {
				if (err)
-					ext4_error(sb, __FUNCTION__,
+					ext4_error(sb, __func__,
						   "error %d reading directory"
						   " #%lu offset %lu",
						   err, inode->i_ino, offset);
@@ -2217,6 +2222,8 @@ static int ext4_symlink (struct inode * dir,
			goto out_stop;
		}
	} else {
+		/* clear the extent format for fast symlink */
+		EXT4_I(inode)->i_flags &= ~EXT4_EXTENTS_FL;
		inode->i_op = &ext4_fast_symlink_inode_operations;
		memcpy((char*)&EXT4_I(inode)->i_data,symname,l);
		inode->i_size = l-1;
@@ -2347,6 +2354,9 @@ static int ext4_rename (struct inode * old_dir, struct dentry *old_dentry,
					      EXT4_FEATURE_INCOMPAT_FILETYPE))
			new_de->file_type = old_de->file_type;
		new_dir->i_version++;
+		new_dir->i_ctime = new_dir->i_mtime =
+					ext4_current_time(new_dir);
+		ext4_mark_inode_dirty(handle, new_dir);
		BUFFER_TRACE(new_bh, "call ext4_journal_dirty_metadata");
		ext4_journal_dirty_metadata(handle, new_bh);
		brelse(new_bh);
@@ -11,11 +11,10 @@

 #define EXT4FS_DEBUG

-#include <linux/ext4_jbd2.h>
-
 #include <linux/errno.h>
 #include <linux/slab.h>

+#include "ext4_jbd2.h"
 #include "group.h"

 #define outside(b, first, last)	((b) < (first) || (b) >= (last))
@@ -50,63 +49,63 @@ static int verify_group_input(struct super_block *sb,

	ext4_get_group_no_and_offset(sb, start, NULL, &offset);
	if (group != sbi->s_groups_count)
-		ext4_warning(sb, __FUNCTION__,
+		ext4_warning(sb, __func__,
			     "Cannot add at group %u (only %lu groups)",
			     input->group, sbi->s_groups_count);
	else if (offset != 0)
-		ext4_warning(sb, __FUNCTION__, "Last group not full");
+		ext4_warning(sb, __func__, "Last group not full");
	else if (input->reserved_blocks > input->blocks_count / 5)
-		ext4_warning(sb, __FUNCTION__, "Reserved blocks too high (%u)",
+		ext4_warning(sb, __func__, "Reserved blocks too high (%u)",
			     input->reserved_blocks);
	else if (free_blocks_count < 0)
-		ext4_warning(sb, __FUNCTION__, "Bad blocks count %u",
+		ext4_warning(sb, __func__, "Bad blocks count %u",
			     input->blocks_count);
	else if (!(bh = sb_bread(sb, end - 1)))
-		ext4_warning(sb, __FUNCTION__,
+		ext4_warning(sb, __func__,
			     "Cannot read last block (%llu)",
			     end - 1);
	else if (outside(input->block_bitmap, start, end))
-		ext4_warning(sb, __FUNCTION__,
+		ext4_warning(sb, __func__,
			     "Block bitmap not in group (block %llu)",
			     (unsigned long long)input->block_bitmap);
	else if (outside(input->inode_bitmap, start, end))
-		ext4_warning(sb, __FUNCTION__,
+		ext4_warning(sb, __func__,
			     "Inode bitmap not in group (block %llu)",
			     (unsigned long long)input->inode_bitmap);
	else if (outside(input->inode_table, start, end) ||
		 outside(itend - 1, start, end))
-		ext4_warning(sb, __FUNCTION__,
+		ext4_warning(sb, __func__,
			     "Inode table not in group (blocks %llu-%llu)",
			     (unsigned long long)input->inode_table, itend - 1);
	else if (input->inode_bitmap == input->block_bitmap)
-		ext4_warning(sb, __FUNCTION__,
+		ext4_warning(sb, __func__,
			     "Block bitmap same as inode bitmap (%llu)",
			     (unsigned long long)input->block_bitmap);
	else if (inside(input->block_bitmap, input->inode_table, itend))
-		ext4_warning(sb, __FUNCTION__,
+		ext4_warning(sb, __func__,
			     "Block bitmap (%llu) in inode table (%llu-%llu)",
			     (unsigned long long)input->block_bitmap,
			     (unsigned long long)input->inode_table, itend - 1);
	else if (inside(input->inode_bitmap, input->inode_table, itend))
-		ext4_warning(sb, __FUNCTION__,
+		ext4_warning(sb, __func__,
			     "Inode bitmap (%llu) in inode table (%llu-%llu)",
			     (unsigned long long)input->inode_bitmap,
			     (unsigned long long)input->inode_table, itend - 1);
	else if (inside(input->block_bitmap, start, metaend))
-		ext4_warning(sb, __FUNCTION__,
+		ext4_warning(sb, __func__,
			     "Block bitmap (%llu) in GDT table"
			     " (%llu-%llu)",
			     (unsigned long long)input->block_bitmap,
			     start, metaend - 1);
	else if (inside(input->inode_bitmap, start, metaend))
-		ext4_warning(sb, __FUNCTION__,
+		ext4_warning(sb, __func__,
			     "Inode bitmap (%llu) in GDT table"
			     " (%llu-%llu)",
			     (unsigned long long)input->inode_bitmap,
			     start, metaend - 1);
	else if (inside(input->inode_table, start, metaend) ||
		 inside(itend - 1, start, metaend))
-		ext4_warning(sb, __FUNCTION__,
+		ext4_warning(sb, __func__,
			     "Inode table (%llu-%llu) overlaps"
			     "GDT table (%llu-%llu)",
			     (unsigned long long)input->inode_table,
@@ -368,7 +367,7 @@ static int verify_reserved_gdb(struct super_block *sb,
	while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) {
		if (le32_to_cpu(*p++) !=
		    grp * EXT4_BLOCKS_PER_GROUP(sb) + blk){
-			ext4_warning(sb, __FUNCTION__,
+			ext4_warning(sb, __func__,
				     "reserved GDT %llu"
				     " missing grp %d (%llu)",
				     blk, grp,
@@ -424,7 +423,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
	 */
	if (EXT4_SB(sb)->s_sbh->b_blocknr !=
	    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
-		ext4_warning(sb, __FUNCTION__,
+		ext4_warning(sb, __func__,
			"won't resize using backup superblock at %llu",
			(unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
		return -EPERM;
@@ -448,7 +447,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,

	data = (__le32 *)dind->b_data;
	if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {
-		ext4_warning(sb, __FUNCTION__,
+		ext4_warning(sb, __func__,
			     "new group %u GDT block %llu not reserved",
			     input->group, gdblock);
		err = -EINVAL;
@@ -469,10 +468,10 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
		goto exit_dindj;

	n_group_desc = kmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
-			GFP_KERNEL);
+			GFP_NOFS);
	if (!n_group_desc) {
		err = -ENOMEM;
-		ext4_warning (sb, __FUNCTION__,
+		ext4_warning(sb, __func__,
			      "not enough memory for %lu groups", gdb_num + 1);
		goto exit_inode;
	}
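This hunk and the later kmalloc/kzalloc hunks switch allocations made while a journal transaction is held from GFP_KERNEL to GFP_NOFS, so that memory reclaim cannot re-enter the filesystem and deadlock against the transaction the caller already owns (the "fix wrong gfp type under transaction" entry in the changelog). A hedged sketch of the rule, with a hypothetical helper name:

	/* Illustrative only: allocate scratch space while a handle is live.
	 * GFP_NOFS tells the allocator it may not recurse into filesystem
	 * writeback to satisfy this request. */
	static void *alloc_under_transaction(handle_t *handle, size_t size)
	{
		/* handle is still open here, so GFP_KERNEL would risk deadlock */
		return kmalloc(size, GFP_NOFS);
	}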
@@ -502,8 +501,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
	EXT4_SB(sb)->s_gdb_count++;
	kfree(o_group_desc);

-	es->s_reserved_gdt_blocks =
-		cpu_to_le16(le16_to_cpu(es->s_reserved_gdt_blocks) - 1);
+	le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
	ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh);

	return 0;
@@ -553,7 +551,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
	int res, i;
	int err;

-	primary = kmalloc(reserved_gdb * sizeof(*primary), GFP_KERNEL);
+	primary = kmalloc(reserved_gdb * sizeof(*primary), GFP_NOFS);
	if (!primary)
		return -ENOMEM;

@@ -571,7 +569,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
	/* Get each reserved primary GDT block and verify it holds backups */
	for (res = 0; res < reserved_gdb; res++, blk++) {
		if (le32_to_cpu(*data) != blk) {
-			ext4_warning(sb, __FUNCTION__,
+			ext4_warning(sb, __func__,
				     "reserved block %llu"
				     " not at offset %ld",
				     blk,
@@ -715,7 +713,7 @@ static void update_backups(struct super_block *sb,
	 */
exit_err:
	if (err) {
-		ext4_warning(sb, __FUNCTION__,
+		ext4_warning(sb, __func__,
			     "can't update backup for group %lu (err %d), "
			     "forcing fsck on next reboot", group, err);
		sbi->s_mount_state &= ~EXT4_VALID_FS;
@@ -755,33 +753,33 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)

	if (gdb_off == 0 && !EXT4_HAS_RO_COMPAT_FEATURE(sb,
					EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
-		ext4_warning(sb, __FUNCTION__,
+		ext4_warning(sb, __func__,
			     "Can't resize non-sparse filesystem further");
		return -EPERM;
	}

	if (ext4_blocks_count(es) + input->blocks_count <
	    ext4_blocks_count(es)) {
-		ext4_warning(sb, __FUNCTION__, "blocks_count overflow\n");
+		ext4_warning(sb, __func__, "blocks_count overflow\n");
		return -EINVAL;
	}

	if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
	    le32_to_cpu(es->s_inodes_count)) {
-		ext4_warning(sb, __FUNCTION__, "inodes_count overflow\n");
+		ext4_warning(sb, __func__, "inodes_count overflow\n");
		return -EINVAL;
	}

	if (reserved_gdb || gdb_off == 0) {
		if (!EXT4_HAS_COMPAT_FEATURE(sb,
					     EXT4_FEATURE_COMPAT_RESIZE_INODE)){
-			ext4_warning(sb, __FUNCTION__,
+			ext4_warning(sb, __func__,
				     "No reserved GDT blocks, can't resize");
			return -EPERM;
		}
		inode = ext4_iget(sb, EXT4_RESIZE_INO);
		if (IS_ERR(inode)) {
-			ext4_warning(sb, __FUNCTION__,
+			ext4_warning(sb, __func__,
				     "Error opening resize inode");
			return PTR_ERR(inode);
		}
@@ -810,7 +808,7 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)

	lock_super(sb);
	if (input->group != sbi->s_groups_count) {
-		ext4_warning(sb, __FUNCTION__,
+		ext4_warning(sb, __func__,
			     "multiple resizers run on filesystem!");
		err = -EBUSY;
		goto exit_journal;
@@ -877,8 +875,7 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
	 */
	ext4_blocks_count_set(es, ext4_blocks_count(es) +
		input->blocks_count);
-	es->s_inodes_count = cpu_to_le32(le32_to_cpu(es->s_inodes_count) +
-		EXT4_INODES_PER_GROUP(sb));
+	le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb));

	/*
	 * We need to protect s_groups_count against other CPUs seeing
@@ -977,13 +974,13 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
			" too large to resize to %llu blocks safely\n",
			sb->s_id, n_blocks_count);
		if (sizeof(sector_t) < 8)
-			ext4_warning(sb, __FUNCTION__,
+			ext4_warning(sb, __func__,
				     "CONFIG_LBD not enabled\n");
		return -EINVAL;
	}

	if (n_blocks_count < o_blocks_count) {
-		ext4_warning(sb, __FUNCTION__,
+		ext4_warning(sb, __func__,
			     "can't shrink FS - resize aborted");
		return -EBUSY;
	}
@@ -992,7 +989,7 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
	ext4_get_group_no_and_offset(sb, o_blocks_count, NULL, &last);

	if (last == 0) {
-		ext4_warning(sb, __FUNCTION__,
+		ext4_warning(sb, __func__,
			     "need to use ext2online to resize further");
		return -EPERM;
	}
@@ -1000,7 +997,7 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
	add = EXT4_BLOCKS_PER_GROUP(sb) - last;

	if (o_blocks_count + add < o_blocks_count) {
-		ext4_warning(sb, __FUNCTION__, "blocks_count overflow");
+		ext4_warning(sb, __func__, "blocks_count overflow");
		return -EINVAL;
	}

@@ -1008,7 +1005,7 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
		add = n_blocks_count - o_blocks_count;

	if (o_blocks_count + add < n_blocks_count)
-		ext4_warning(sb, __FUNCTION__,
+		ext4_warning(sb, __func__,
			     "will only finish group (%llu"
			     " blocks, %u new)",
			     o_blocks_count + add, add);
@@ -1016,7 +1013,7 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
	/* See if the device is actually as big as what was requested */
	bh = sb_bread(sb, o_blocks_count + add -1);
	if (!bh) {
-		ext4_warning(sb, __FUNCTION__,
+		ext4_warning(sb, __func__,
			     "can't read last block, resize aborted");
		return -ENOSPC;
	}
@@ -1028,13 +1025,13 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
	handle = ext4_journal_start_sb(sb, 3);
	if (IS_ERR(handle)) {
		err = PTR_ERR(handle);
-		ext4_warning(sb, __FUNCTION__, "error %d on journal start",err);
+		ext4_warning(sb, __func__, "error %d on journal start", err);
		goto exit_put;
	}

	lock_super(sb);
	if (o_blocks_count != ext4_blocks_count(es)) {
-		ext4_warning(sb, __FUNCTION__,
+		ext4_warning(sb, __func__,
			     "multiple resizers run on filesystem!");
		unlock_super(sb);
		ext4_journal_stop(handle);
@@ -1044,7 +1041,7 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,

	if ((err = ext4_journal_get_write_access(handle,
					 EXT4_SB(sb)->s_sbh))) {
-		ext4_warning(sb, __FUNCTION__,
+		ext4_warning(sb, __func__,
			     "error %d on journal write access", err);
		unlock_super(sb);
		ext4_journal_stop(handle);
@@ -21,8 +21,6 @@
 #include <linux/fs.h>
 #include <linux/time.h>
 #include <linux/jbd2.h>
-#include <linux/ext4_fs.h>
-#include <linux/ext4_jbd2.h>
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/blkdev.h>
@@ -38,9 +36,10 @@
 #include <linux/seq_file.h>
 #include <linux/log2.h>
 #include <linux/crc16.h>
-
 #include <asm/uaccess.h>

+#include "ext4.h"
+#include "ext4_jbd2.h"
 #include "xattr.h"
 #include "acl.h"
 #include "namei.h"
@@ -135,7 +134,7 @@ handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
	 * take the FS itself readonly cleanly. */
	journal = EXT4_SB(sb)->s_journal;
	if (is_journal_aborted(journal)) {
-		ext4_abort(sb, __FUNCTION__,
+		ext4_abort(sb, __func__,
			   "Detected aborted journal");
		return ERR_PTR(-EROFS);
	}
@@ -355,7 +354,7 @@ void ext4_update_dynamic_rev(struct super_block *sb)
	if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
		return;

-	ext4_warning(sb, __FUNCTION__,
+	ext4_warning(sb, __func__,
		     "updating to rev %d because of new feature flag, "
		     "running e2fsck is recommended",
		     EXT4_DYNAMIC_REV);
@@ -945,8 +944,8 @@ static match_table_t tokens = {
	{Opt_mballoc, "mballoc"},
	{Opt_nomballoc, "nomballoc"},
	{Opt_stripe, "stripe=%u"},
-	{Opt_err, NULL},
	{Opt_resize, "resize"},
+	{Opt_err, NULL},
 };

 static ext4_fsblk_t get_sb_block(void **data)
@@ -1388,11 +1387,11 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
	 * a plain journaled filesystem we can keep it set as
	 * valid forever! :)
	 */
-	es->s_state = cpu_to_le16(le16_to_cpu(es->s_state) & ~EXT4_VALID_FS);
+	es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
 #endif
	if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
		es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
-	es->s_mnt_count=cpu_to_le16(le16_to_cpu(es->s_mnt_count) + 1);
+	le16_add_cpu(&es->s_mnt_count, 1);
	es->s_mtime = cpu_to_le32(get_seconds());
	ext4_update_dynamic_rev(sb);
	EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
@@ -1485,36 +1484,33 @@ static int ext4_check_descriptors(struct super_block *sb)
		block_bitmap = ext4_block_bitmap(sb, gdp);
		if (block_bitmap < first_block || block_bitmap > last_block)
		{
-			ext4_error (sb, "ext4_check_descriptors",
-				    "Block bitmap for group %lu"
-				    " not in group (block %llu)!",
-				    i, block_bitmap);
+			printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
+			       "Block bitmap for group %lu not in group "
+			       "(block %llu)!", i, block_bitmap);
			return 0;
		}
		inode_bitmap = ext4_inode_bitmap(sb, gdp);
		if (inode_bitmap < first_block || inode_bitmap > last_block)
		{
-			ext4_error (sb, "ext4_check_descriptors",
-				    "Inode bitmap for group %lu"
-				    " not in group (block %llu)!",
-				    i, inode_bitmap);
+			printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
+			       "Inode bitmap for group %lu not in group "
+			       "(block %llu)!", i, inode_bitmap);
			return 0;
		}
		inode_table = ext4_inode_table(sb, gdp);
		if (inode_table < first_block ||
		    inode_table + sbi->s_itb_per_group - 1 > last_block)
		{
-			ext4_error (sb, "ext4_check_descriptors",
-				    "Inode table for group %lu"
-				    " not in group (block %llu)!",
-				    i, inode_table);
+			printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
+			       "Inode table for group %lu not in group "
+			       "(block %llu)!", i, inode_table);
			return 0;
		}
		if (!ext4_group_desc_csum_verify(sbi, i, gdp)) {
-			ext4_error(sb, __FUNCTION__,
+			printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
			       "Checksum for group %lu failed (%u!=%u)\n",
			       i, le16_to_cpu(ext4_group_desc_csum(sbi, i,
			       gdp)), le16_to_cpu(gdp->bg_checksum));
			return 0;
		}
		if (!flexbg_flag)
@@ -1594,8 +1590,8 @@ static void ext4_orphan_cleanup (struct super_block * sb,
	while (es->s_last_orphan) {
		struct inode *inode;

-		if (!(inode =
-		      ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan)))) {
+		inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
+		if (IS_ERR(inode)) {
			es->s_last_orphan = 0;
			break;
		}
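The orphan-cleanup hunk reflects ext4_orphan_get() now reporting failure through the kernel's ERR_PTR convention rather than returning NULL, so the caller has to test with IS_ERR(). The user-space imitation below uses toy stand-ins for the macros (the real ones live in linux/err.h) purely to illustrate the convention.

	#include <stdio.h>

	/* Toy stand-ins for ERR_PTR()/IS_ERR()/PTR_ERR() from linux/err.h. */
	#define ERR_PTR(err)	((void *)(long)(err))
	#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-4095)
	#define PTR_ERR(ptr)	((long)(ptr))

	static void *lookup_object(int fail)
	{
		static int object = 42;

		return fail ? ERR_PTR(-2) : &object;	/* -2 stands in for an errno */
	}

	int main(void)
	{
		void *p = lookup_object(1);

		if (IS_ERR(p))
			printf("error %ld\n", PTR_ERR(p));	/* prints "error -2" */
		return 0;
	}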
@@ -1605,7 +1601,7 @@ static void ext4_orphan_cleanup (struct super_block * sb,
		if (inode->i_nlink) {
			printk(KERN_DEBUG
				"%s: truncating inode %lu to %Ld bytes\n",
-				__FUNCTION__, inode->i_ino, inode->i_size);
+				__func__, inode->i_ino, inode->i_size);
			jbd_debug(2, "truncating inode %lu to %Ld bytes\n",
				  inode->i_ino, inode->i_size);
			ext4_truncate(inode);
@@ -1613,7 +1609,7 @@ static void ext4_orphan_cleanup (struct super_block * sb,
		} else {
			printk(KERN_DEBUG
				"%s: deleting unreferenced inode %lu\n",
-				__FUNCTION__, inode->i_ino);
+				__func__, inode->i_ino);
			jbd_debug(2, "deleting unreferenced inode %lu\n",
				  inode->i_ino);
			nr_orphans++;
@@ -2699,9 +2695,9 @@ static void ext4_clear_journal_err(struct super_block * sb,
		char nbuf[16];

		errstr = ext4_decode_error(sb, j_errno, nbuf);
-		ext4_warning(sb, __FUNCTION__, "Filesystem error recorded "
+		ext4_warning(sb, __func__, "Filesystem error recorded "
			     "from previous mount: %s", errstr);
-		ext4_warning(sb, __FUNCTION__, "Marking fs in need of "
+		ext4_warning(sb, __func__, "Marking fs in need of "
			     "filesystem check.");

		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
@@ -2828,7 +2824,7 @@ static int ext4_remount (struct super_block * sb, int * flags, char * data)
	}

	if (sbi->s_mount_opt & EXT4_MOUNT_ABORT)
-		ext4_abort(sb, __FUNCTION__, "Abort forced by user");
+		ext4_abort(sb, __func__, "Abort forced by user");

	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
		((sbi->s_mount_opt & EXT4_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
@@ -3040,8 +3036,14 @@ static int ext4_dquot_drop(struct inode *inode)

	/* We may delete quota structure so we need to reserve enough blocks */
	handle = ext4_journal_start(inode, 2*EXT4_QUOTA_DEL_BLOCKS(inode->i_sb));
-	if (IS_ERR(handle))
+	if (IS_ERR(handle)) {
+		/*
+		 * We call dquot_drop() anyway to at least release references
+		 * to quota structures so that umount does not hang.
+		 */
+		dquot_drop(inode);
		return PTR_ERR(handle);
+	}
	ret = dquot_drop(inode);
	err = ext4_journal_stop(handle);
	if (!ret)
@@ -19,8 +19,8 @@

 #include <linux/fs.h>
 #include <linux/jbd2.h>
-#include <linux/ext4_fs.h>
 #include <linux/namei.h>
+#include "ext4.h"
 #include "xattr.h"

 static void * ext4_follow_link(struct dentry *dentry, struct nameidata *nd)
@ -53,11 +53,11 @@
|
|||||||
#include <linux/init.h>
|
#include <linux/init.h>
|
||||||
#include <linux/fs.h>
|
#include <linux/fs.h>
|
||||||
#include <linux/slab.h>
|
#include <linux/slab.h>
|
||||||
#include <linux/ext4_jbd2.h>
|
|
||||||
#include <linux/ext4_fs.h>
|
|
||||||
#include <linux/mbcache.h>
|
#include <linux/mbcache.h>
|
||||||
#include <linux/quotaops.h>
|
#include <linux/quotaops.h>
|
||||||
#include <linux/rwsem.h>
|
#include <linux/rwsem.h>
|
||||||
|
#include "ext4_jbd2.h"
|
||||||
|
#include "ext4.h"
|
||||||
#include "xattr.h"
|
#include "xattr.h"
|
||||||
#include "acl.h"
|
#include "acl.h"
|
||||||
|
|
||||||
@ -92,6 +92,8 @@ static struct buffer_head *ext4_xattr_cache_find(struct inode *,
|
|||||||
struct mb_cache_entry **);
|
struct mb_cache_entry **);
|
||||||
static void ext4_xattr_rehash(struct ext4_xattr_header *,
|
static void ext4_xattr_rehash(struct ext4_xattr_header *,
|
||||||
struct ext4_xattr_entry *);
|
struct ext4_xattr_entry *);
|
||||||
|
static int ext4_xattr_list(struct inode *inode, char *buffer,
|
||||||
|
size_t buffer_size);
|
||||||
|
|
||||||
static struct mb_cache *ext4_xattr_cache;
|
static struct mb_cache *ext4_xattr_cache;
|
||||||
|
|
||||||
@ -225,7 +227,7 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
|
|||||||
ea_bdebug(bh, "b_count=%d, refcount=%d",
|
ea_bdebug(bh, "b_count=%d, refcount=%d",
|
||||||
atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
|
atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
|
||||||
if (ext4_xattr_check_block(bh)) {
|
if (ext4_xattr_check_block(bh)) {
|
||||||
bad_block: ext4_error(inode->i_sb, __FUNCTION__,
|
bad_block: ext4_error(inode->i_sb, __func__,
|
||||||
"inode %lu: bad block %llu", inode->i_ino,
|
"inode %lu: bad block %llu", inode->i_ino,
|
||||||
EXT4_I(inode)->i_file_acl);
|
EXT4_I(inode)->i_file_acl);
|
||||||
error = -EIO;
|
error = -EIO;
|
||||||
@ -367,7 +369,7 @@ ext4_xattr_block_list(struct inode *inode, char *buffer, size_t buffer_size)
|
|||||||
ea_bdebug(bh, "b_count=%d, refcount=%d",
|
ea_bdebug(bh, "b_count=%d, refcount=%d",
|
||||||
atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
|
atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
|
||||||
if (ext4_xattr_check_block(bh)) {
|
if (ext4_xattr_check_block(bh)) {
|
||||||
ext4_error(inode->i_sb, __FUNCTION__,
|
ext4_error(inode->i_sb, __func__,
|
||||||
"inode %lu: bad block %llu", inode->i_ino,
|
"inode %lu: bad block %llu", inode->i_ino,
|
||||||
EXT4_I(inode)->i_file_acl);
|
EXT4_I(inode)->i_file_acl);
|
||||||
error = -EIO;
|
error = -EIO;
|
||||||
@ -420,7 +422,7 @@ ext4_xattr_ibody_list(struct inode *inode, char *buffer, size_t buffer_size)
|
|||||||
* Returns a negative error number on failure, or the number of bytes
|
* Returns a negative error number on failure, or the number of bytes
|
||||||
* used / required on success.
|
* used / required on success.
|
||||||
*/
|
*/
|
||||||
int
|
static int
|
||||||
ext4_xattr_list(struct inode *inode, char *buffer, size_t buffer_size)
|
ext4_xattr_list(struct inode *inode, char *buffer, size_t buffer_size)
|
||||||
{
|
{
|
||||||
int i_error, b_error;
|
int i_error, b_error;
|
||||||
@ -484,8 +486,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
|
|||||||
get_bh(bh);
|
get_bh(bh);
|
||||||
ext4_forget(handle, 1, inode, bh, bh->b_blocknr);
|
ext4_forget(handle, 1, inode, bh, bh->b_blocknr);
|
||||||
} else {
|
} else {
|
||||||
BHDR(bh)->h_refcount = cpu_to_le32(
|
le32_add_cpu(&BHDR(bh)->h_refcount, -1);
|
||||||
le32_to_cpu(BHDR(bh)->h_refcount) - 1);
|
|
||||||
error = ext4_journal_dirty_metadata(handle, bh);
|
error = ext4_journal_dirty_metadata(handle, bh);
|
||||||
if (IS_SYNC(inode))
|
if (IS_SYNC(inode))
|
||||||
handle->h_sync = 1;
|
handle->h_sync = 1;
|
||||||
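
As a rough illustration of the le32_add_cpu() pattern adopted in the hunk above (and again in the h_refcount increment further down): a standalone userspace sketch, not the kernel helpers; names are stand-ins and a little-endian host is assumed, so the byte swaps here are no-ops while the real helpers handle endianness properly.

    /* Sketch only: add a CPU-order delta to a little-endian on-disk field. */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t le32;

    static uint32_t le32_to_cpu(le32 v) { return v; }   /* little-endian host assumed */
    static le32 cpu_to_le32(uint32_t v) { return v; }

    /* what le32_add_cpu() boils down to: read, adjust, write back in LE order */
    static void le32_add_cpu(le32 *var, int32_t val)
    {
            *var = cpu_to_le32(le32_to_cpu(*var) + val);
    }

    int main(void)
    {
            le32 refcount = cpu_to_le32(2);

            le32_add_cpu(&refcount, -1);        /* the h_refcount decrement above */
            printf("refcount = %u\n", (unsigned)le32_to_cpu(refcount));   /* 1 */
            return 0;
    }

This replaces an open-coded read-modify-write with one helper call; the stored value stays little-endian either way.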
@@ -660,7 +661,7 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
                 atomic_read(&(bs->bh->b_count)),
                         le32_to_cpu(BHDR(bs->bh)->h_refcount));
                 if (ext4_xattr_check_block(bs->bh)) {
-                        ext4_error(sb, __FUNCTION__,
+                        ext4_error(sb, __func__,
                                    "inode %lu: bad block %llu", inode->i_ino,
                                    EXT4_I(inode)->i_file_acl);
                         error = -EIO;
@@ -738,7 +739,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
                                 ce = NULL;
                         }
                         ea_bdebug(bs->bh, "cloning");
-                        s->base = kmalloc(bs->bh->b_size, GFP_KERNEL);
+                        s->base = kmalloc(bs->bh->b_size, GFP_NOFS);
                         error = -ENOMEM;
                         if (s->base == NULL)
                                 goto cleanup;
@@ -750,7 +751,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
                 }
         } else {
                 /* Allocate a buffer where we construct the new block. */
-                s->base = kzalloc(sb->s_blocksize, GFP_KERNEL);
+                s->base = kzalloc(sb->s_blocksize, GFP_NOFS);
                 /* assert(header == s->base) */
                 error = -ENOMEM;
                 if (s->base == NULL)
@@ -789,8 +790,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
                         if (error)
                                 goto cleanup_dquot;
                         lock_buffer(new_bh);
-                        BHDR(new_bh)->h_refcount = cpu_to_le32(1 +
-                                le32_to_cpu(BHDR(new_bh)->h_refcount));
+                        le32_add_cpu(&BHDR(new_bh)->h_refcount, 1);
                         ea_bdebug(new_bh, "reusing; refcount now=%d",
                                 le32_to_cpu(BHDR(new_bh)->h_refcount));
                         unlock_buffer(new_bh);
@@ -808,10 +808,8 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
                         get_bh(new_bh);
                 } else {
                         /* We need to allocate a new block */
-                        ext4_fsblk_t goal = le32_to_cpu(
-                                        EXT4_SB(sb)->s_es->s_first_data_block) +
-                                (ext4_fsblk_t)EXT4_I(inode)->i_block_group *
-                                EXT4_BLOCKS_PER_GROUP(sb);
+                        ext4_fsblk_t goal = ext4_group_first_block_no(sb,
+                                                EXT4_I(inode)->i_block_group);
                         ext4_fsblk_t block = ext4_new_block(handle, inode,
                                                             goal, &error);
                         if (error)
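
The hunk above folds an open-coded allocation goal into ext4_group_first_block_no(). The arithmetic being factored out is simply "first data block plus group number times blocks per group"; a hedged, standalone sketch with illustrative values (not ext4's real types or API):

    /* Sketch: first block of a block group, per the removed lines above. */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t fsblk_t;

    static fsblk_t group_first_block_no(fsblk_t first_data_block,
                                        unsigned long blocks_per_group,
                                        unsigned long group)
    {
            /* keep the multiply in 64 bits so large groups don't overflow */
            return first_data_block + (fsblk_t)group * blocks_per_group;
    }

    int main(void)
    {
            /* e.g. 4 KiB blocks: 32768 blocks per group, first data block 0 */
            printf("group 5 starts at block %llu\n",
                   (unsigned long long)group_first_block_no(0, 32768, 5));
            return 0;
    }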
@@ -863,7 +861,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
         goto cleanup;
 
 bad_block:
-        ext4_error(inode->i_sb, __FUNCTION__,
+        ext4_error(inode->i_sb, __func__,
                    "inode %lu: bad block %llu", inode->i_ino,
                    EXT4_I(inode)->i_file_acl);
         goto cleanup;
@@ -1166,7 +1164,7 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
                 if (!bh)
                         goto cleanup;
                 if (ext4_xattr_check_block(bh)) {
-                        ext4_error(inode->i_sb, __FUNCTION__,
+                        ext4_error(inode->i_sb, __func__,
                                    "inode %lu: bad block %llu", inode->i_ino,
                                    EXT4_I(inode)->i_file_acl);
                         error = -EIO;
@@ -1341,14 +1339,14 @@ ext4_xattr_delete_inode(handle_t *handle, struct inode *inode)
                 goto cleanup;
         bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
         if (!bh) {
-                ext4_error(inode->i_sb, __FUNCTION__,
+                ext4_error(inode->i_sb, __func__,
                            "inode %lu: block %llu read error", inode->i_ino,
                            EXT4_I(inode)->i_file_acl);
                 goto cleanup;
         }
         if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
             BHDR(bh)->h_blocks != cpu_to_le32(1)) {
-                ext4_error(inode->i_sb, __FUNCTION__,
+                ext4_error(inode->i_sb, __func__,
                            "inode %lu: bad block %llu", inode->i_ino,
                            EXT4_I(inode)->i_file_acl);
                 goto cleanup;
@@ -1475,7 +1473,7 @@ ext4_xattr_cache_find(struct inode *inode, struct ext4_xattr_header *header,
                 }
                 bh = sb_bread(inode->i_sb, ce->e_block);
                 if (!bh) {
-                        ext4_error(inode->i_sb, __FUNCTION__,
+                        ext4_error(inode->i_sb, __func__,
                                    "inode %lu: block %lu read error",
                                    inode->i_ino, (unsigned long) ce->e_block);
                 } else if (le32_to_cpu(BHDR(bh)->h_refcount) >=

@@ -74,7 +74,6 @@ extern struct xattr_handler ext4_xattr_security_handler;
 extern ssize_t ext4_listxattr(struct dentry *, char *, size_t);
 
 extern int ext4_xattr_get(struct inode *, int, const char *, void *, size_t);
-extern int ext4_xattr_list(struct inode *, char *, size_t);
 extern int ext4_xattr_set(struct inode *, int, const char *, const void *, size_t, int);
 extern int ext4_xattr_set_handle(handle_t *, struct inode *, int, const char *, const void *, size_t, int);
 
@@ -98,12 +97,6 @@ ext4_xattr_get(struct inode *inode, int name_index, const char *name,
         return -EOPNOTSUPP;
 }
 
-static inline int
-ext4_xattr_list(struct inode *inode, void *buffer, size_t size)
-{
-        return -EOPNOTSUPP;
-}
-
 static inline int
 ext4_xattr_set(struct inode *inode, int name_index, const char *name,
                const void *value, size_t size, int flags)

@@ -6,9 +6,9 @@
 #include <linux/module.h>
 #include <linux/string.h>
 #include <linux/fs.h>
-#include <linux/ext4_jbd2.h>
-#include <linux/ext4_fs.h>
 #include <linux/security.h>
+#include "ext4_jbd2.h"
+#include "ext4.h"
 #include "xattr.h"
 
 static size_t

@@ -9,8 +9,8 @@
 #include <linux/string.h>
 #include <linux/capability.h>
 #include <linux/fs.h>
-#include <linux/ext4_jbd2.h>
-#include <linux/ext4_fs.h>
+#include "ext4_jbd2.h"
+#include "ext4.h"
 #include "xattr.h"
 
 #define XATTR_TRUSTED_PREFIX "trusted."

@@ -8,8 +8,8 @@
 #include <linux/module.h>
 #include <linux/string.h>
 #include <linux/fs.h>
-#include <linux/ext4_jbd2.h>
-#include <linux/ext4_fs.h>
+#include "ext4_jbd2.h"
+#include "ext4.h"
 #include "xattr.h"
 
 #define XATTR_USER_PREFIX "user."

@@ -519,22 +519,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 
         jbd_debug (3, "JBD: commit phase 2\n");
 
-        /*
-         * First, drop modified flag: all accesses to the buffers
-         * will be tracked for a new trasaction only -bzzz
-         */
-        spin_lock(&journal->j_list_lock);
-        if (commit_transaction->t_buffers) {
-                new_jh = jh = commit_transaction->t_buffers->b_tnext;
-                do {
-                        J_ASSERT_JH(new_jh, new_jh->b_modified == 1 ||
-                                        new_jh->b_modified == 0);
-                        new_jh->b_modified = 0;
-                        new_jh = new_jh->b_tnext;
-                } while (new_jh != jh);
-        }
-        spin_unlock(&journal->j_list_lock);
-
         /*
          * Now start flushing things to disk, in the order they appear
          * on the transaction lists. Data blocks go first.
@@ -584,6 +568,9 @@ void jbd2_journal_commit_transaction(journal_t *journal)
         stats.u.run.rs_blocks = commit_transaction->t_outstanding_credits;
         stats.u.run.rs_blocks_logged = 0;
 
+        J_ASSERT(commit_transaction->t_nr_buffers <=
+                 commit_transaction->t_outstanding_credits);
+
         descriptor = NULL;
         bufs = 0;
         while (commit_transaction->t_buffers) {

@@ -534,7 +534,7 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
         if (!tid_geq(journal->j_commit_request, tid)) {
                 printk(KERN_EMERG
                        "%s: error: j_commit_request=%d, tid=%d\n",
-                       __FUNCTION__, journal->j_commit_request, tid);
+                       __func__, journal->j_commit_request, tid);
         }
         spin_unlock(&journal->j_state_lock);
 #endif
@@ -599,7 +599,7 @@ int jbd2_journal_bmap(journal_t *journal, unsigned long blocknr,
 
                 printk(KERN_ALERT "%s: journal block not found "
                                 "at offset %lu on %s\n",
-                        __FUNCTION__,
+                        __func__,
                         blocknr,
                         bdevname(journal->j_dev, b));
                 err = -EIO;
@@ -997,13 +997,14 @@ static journal_t * journal_init_common (void)
  */
 
 /**
- *  journal_t * jbd2_journal_init_dev() - creates an initialises a journal structure
+ *  journal_t * jbd2_journal_init_dev() - creates and initialises a journal structure
  *  @bdev: Block device on which to create the journal
  *  @fs_dev: Device which hold journalled filesystem for this journal.
 *  @start: Block nr Start of journal.
 *  @len:  Length of the journal in blocks.
 *  @blocksize: blocksize of journalling device
- *  @returns: a newly created journal_t *
+ *
+ *  Returns: a newly created journal_t *
  *
  *  jbd2_journal_init_dev creates a journal which maps a fixed contiguous
  *  range of blocks on an arbitrary block device.
@@ -1027,7 +1028,7 @@ journal_t * jbd2_journal_init_dev(struct block_device *bdev,
         journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
         if (!journal->j_wbuf) {
                 printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n",
-                        __FUNCTION__);
+                        __func__);
                 kfree(journal);
                 journal = NULL;
                 goto out;
@@ -1083,7 +1084,7 @@ journal_t * jbd2_journal_init_inode (struct inode *inode)
         journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
         if (!journal->j_wbuf) {
                 printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n",
-                        __FUNCTION__);
+                        __func__);
                 kfree(journal);
                 return NULL;
         }
@@ -1092,7 +1093,7 @@ journal_t * jbd2_journal_init_inode (struct inode *inode)
         /* If that failed, give up */
         if (err) {
                 printk(KERN_ERR "%s: Cannnot locate journal superblock\n",
-                        __FUNCTION__);
+                        __func__);
                 kfree(journal);
                 return NULL;
         }
@@ -1178,7 +1179,7 @@ int jbd2_journal_create(journal_t *journal)
                  */
                 printk(KERN_EMERG
                        "%s: creation of journal on external device!\n",
-                       __FUNCTION__);
+                       __func__);
                 BUG();
         }
 
@@ -1976,9 +1977,10 @@ static int journal_init_jbd2_journal_head_cache(void)
 
 static void jbd2_journal_destroy_jbd2_journal_head_cache(void)
 {
-        J_ASSERT(jbd2_journal_head_cache != NULL);
+        if (jbd2_journal_head_cache) {
                 kmem_cache_destroy(jbd2_journal_head_cache);
                 jbd2_journal_head_cache = NULL;
+        }
 }
 
 /*
@@ -1997,7 +1999,7 @@ static struct journal_head *journal_alloc_journal_head(void)
                 jbd_debug(1, "out of memory for journal_head\n");
                 if (time_after(jiffies, last_warning + 5*HZ)) {
                         printk(KERN_NOTICE "ENOMEM in %s, retrying.\n",
-                               __FUNCTION__);
+                               __func__);
                         last_warning = jiffies;
                 }
                 while (!ret) {
@@ -2134,13 +2136,13 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
                         if (jh->b_frozen_data) {
                                 printk(KERN_WARNING "%s: freeing "
                                                 "b_frozen_data\n",
-                                                __FUNCTION__);
+                                                __func__);
                                 jbd2_free(jh->b_frozen_data, bh->b_size);
                         }
                         if (jh->b_committed_data) {
                                 printk(KERN_WARNING "%s: freeing "
                                                 "b_committed_data\n",
-                                                __FUNCTION__);
+                                                __func__);
                                 jbd2_free(jh->b_committed_data, bh->b_size);
                         }
                         bh->b_private = NULL;
@@ -2305,10 +2307,12 @@ static int __init journal_init(void)
         BUILD_BUG_ON(sizeof(struct journal_superblock_s) != 1024);
 
         ret = journal_init_caches();
-        if (ret != 0)
+        if (ret == 0) {
+                jbd2_create_debugfs_entry();
+                jbd2_create_jbd_stats_proc_entry();
+        } else {
                 jbd2_journal_destroy_caches();
-        jbd2_create_debugfs_entry();
-        jbd2_create_jbd_stats_proc_entry();
+        }
         return ret;
 }
 
fs/jbd2/revoke.c

@@ -139,7 +139,7 @@ static int insert_revoke_hash(journal_t *journal, unsigned long long blocknr,
 oom:
         if (!journal_oom_retry)
                 return -ENOMEM;
-        jbd_debug(1, "ENOMEM in %s, retrying\n", __FUNCTION__);
+        jbd_debug(1, "ENOMEM in %s, retrying\n", __func__);
         yield();
         goto repeat;
 }
@@ -167,138 +167,121 @@ static struct jbd2_revoke_record_s *find_revoke_record(journal_t *journal,
         return NULL;
 }
 
+void jbd2_journal_destroy_revoke_caches(void)
+{
+        if (jbd2_revoke_record_cache) {
+                kmem_cache_destroy(jbd2_revoke_record_cache);
+                jbd2_revoke_record_cache = NULL;
+        }
+        if (jbd2_revoke_table_cache) {
+                kmem_cache_destroy(jbd2_revoke_table_cache);
+                jbd2_revoke_table_cache = NULL;
+        }
+}
+
 int __init jbd2_journal_init_revoke_caches(void)
 {
+        J_ASSERT(!jbd2_revoke_record_cache);
+        J_ASSERT(!jbd2_revoke_table_cache);
+
         jbd2_revoke_record_cache = kmem_cache_create("jbd2_revoke_record",
                                            sizeof(struct jbd2_revoke_record_s),
                                            0,
                                            SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY,
                                            NULL);
         if (!jbd2_revoke_record_cache)
-                return -ENOMEM;
+                goto record_cache_failure;
 
         jbd2_revoke_table_cache = kmem_cache_create("jbd2_revoke_table",
                                            sizeof(struct jbd2_revoke_table_s),
                                            0, SLAB_TEMPORARY, NULL);
-        if (!jbd2_revoke_table_cache) {
-                kmem_cache_destroy(jbd2_revoke_record_cache);
-                jbd2_revoke_record_cache = NULL;
-                return -ENOMEM;
-        }
+        if (!jbd2_revoke_table_cache)
+                goto table_cache_failure;
+
         return 0;
+
+table_cache_failure:
+        jbd2_journal_destroy_revoke_caches();
+record_cache_failure:
+        return -ENOMEM;
 }
 
-void jbd2_journal_destroy_revoke_caches(void)
+static struct jbd2_revoke_table_s *jbd2_journal_init_revoke_table(int hash_size)
 {
-        kmem_cache_destroy(jbd2_revoke_record_cache);
-        jbd2_revoke_record_cache = NULL;
-        kmem_cache_destroy(jbd2_revoke_table_cache);
-        jbd2_revoke_table_cache = NULL;
-}
-
-/* Initialise the revoke table for a given journal to a given size. */
-
-int jbd2_journal_init_revoke(journal_t *journal, int hash_size)
-{
-        int shift, tmp;
-
-        J_ASSERT (journal->j_revoke_table[0] == NULL);
+        int shift = 0;
+        int tmp = hash_size;
+        struct jbd2_revoke_table_s *table;
 
-        shift = 0;
-        tmp = hash_size;
+        table = kmem_cache_alloc(jbd2_revoke_table_cache, GFP_KERNEL);
+        if (!table)
+                goto out;
+
         while((tmp >>= 1UL) != 0UL)
                 shift++;
 
-        journal->j_revoke_table[0] = kmem_cache_alloc(jbd2_revoke_table_cache, GFP_KERNEL);
-        if (!journal->j_revoke_table[0])
-                return -ENOMEM;
-        journal->j_revoke = journal->j_revoke_table[0];
-
-        /* Check that the hash_size is a power of two */
-        J_ASSERT(is_power_of_2(hash_size));
-
-        journal->j_revoke->hash_size = hash_size;
-
-        journal->j_revoke->hash_shift = shift;
-
-        journal->j_revoke->hash_table =
+        table->hash_size = hash_size;
+        table->hash_shift = shift;
+        table->hash_table =
                 kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
-        if (!journal->j_revoke->hash_table) {
-                kmem_cache_free(jbd2_revoke_table_cache, journal->j_revoke_table[0]);
-                journal->j_revoke = NULL;
-                return -ENOMEM;
+        if (!table->hash_table) {
+                kmem_cache_free(jbd2_revoke_table_cache, table);
+                table = NULL;
+                goto out;
         }
 
         for (tmp = 0; tmp < hash_size; tmp++)
-                INIT_LIST_HEAD(&journal->j_revoke->hash_table[tmp]);
+                INIT_LIST_HEAD(&table->hash_table[tmp]);
 
-        journal->j_revoke_table[1] = kmem_cache_alloc(jbd2_revoke_table_cache, GFP_KERNEL);
-        if (!journal->j_revoke_table[1]) {
-                kfree(journal->j_revoke_table[0]->hash_table);
-                kmem_cache_free(jbd2_revoke_table_cache, journal->j_revoke_table[0]);
-                return -ENOMEM;
+out:
+        return table;
+}
+
+static void jbd2_journal_destroy_revoke_table(struct jbd2_revoke_table_s *table)
+{
+        int i;
+        struct list_head *hash_list;
+
+        for (i = 0; i < table->hash_size; i++) {
+                hash_list = &table->hash_table[i];
+                J_ASSERT(list_empty(hash_list));
         }
 
+        kfree(table->hash_table);
+        kmem_cache_free(jbd2_revoke_table_cache, table);
+}
+
+/* Initialise the revoke table for a given journal to a given size. */
+int jbd2_journal_init_revoke(journal_t *journal, int hash_size)
+{
+        J_ASSERT(journal->j_revoke_table[0] == NULL);
+        J_ASSERT(is_power_of_2(hash_size));
+
+        journal->j_revoke_table[0] = jbd2_journal_init_revoke_table(hash_size);
+        if (!journal->j_revoke_table[0])
+                goto fail0;
+
+        journal->j_revoke_table[1] = jbd2_journal_init_revoke_table(hash_size);
+        if (!journal->j_revoke_table[1])
+                goto fail1;
+
         journal->j_revoke = journal->j_revoke_table[1];
 
-        /* Check that the hash_size is a power of two */
-        J_ASSERT(is_power_of_2(hash_size));
-
-        journal->j_revoke->hash_size = hash_size;
-
-        journal->j_revoke->hash_shift = shift;
-
-        journal->j_revoke->hash_table =
-                kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
-        if (!journal->j_revoke->hash_table) {
-                kfree(journal->j_revoke_table[0]->hash_table);
-                kmem_cache_free(jbd2_revoke_table_cache, journal->j_revoke_table[0]);
-                kmem_cache_free(jbd2_revoke_table_cache, journal->j_revoke_table[1]);
-                journal->j_revoke = NULL;
-                return -ENOMEM;
-        }
-
-        for (tmp = 0; tmp < hash_size; tmp++)
-                INIT_LIST_HEAD(&journal->j_revoke->hash_table[tmp]);
-
         spin_lock_init(&journal->j_revoke_lock);
 
         return 0;
+
+fail1:
+        jbd2_journal_destroy_revoke_table(journal->j_revoke_table[0]);
+fail0:
+        return -ENOMEM;
 }
 
-/* Destoy a journal's revoke table. The table must already be empty! */
+/* Destroy a journal's revoke table. The table must already be empty! */
 
 void jbd2_journal_destroy_revoke(journal_t *journal)
 {
-        struct jbd2_revoke_table_s *table;
-        struct list_head *hash_list;
-        int i;
-
-        table = journal->j_revoke_table[0];
-        if (!table)
-                return;
-
-        for (i=0; i<table->hash_size; i++) {
-                hash_list = &table->hash_table[i];
-                J_ASSERT (list_empty(hash_list));
-        }
-
-        kfree(table->hash_table);
-        kmem_cache_free(jbd2_revoke_table_cache, table);
-        journal->j_revoke = NULL;
-
-        table = journal->j_revoke_table[1];
-        if (!table)
-                return;
-
-        for (i=0; i<table->hash_size; i++) {
-                hash_list = &table->hash_table[i];
-                J_ASSERT (list_empty(hash_list));
-        }
-
-        kfree(table->hash_table);
-        kmem_cache_free(jbd2_revoke_table_cache, table);
         journal->j_revoke = NULL;
+        if (journal->j_revoke_table[0])
+                jbd2_journal_destroy_revoke_table(journal->j_revoke_table[0]);
+        if (journal->j_revoke_table[1])
+                jbd2_journal_destroy_revoke_table(journal->j_revoke_table[1]);
 }
 
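
The new jbd2_journal_init_revoke_table() above derives its hash shift from a power-of-two bucket count before sizing the table. A small standalone sketch of just that computation (illustrative only, not the jbd2 API):

    /* Sketch: shift = log2(hash_size); callers assert hash_size is a power of two. */
    #include <assert.h>
    #include <stdio.h>

    static int hash_shift(int hash_size)
    {
            int shift = 0, tmp = hash_size;

            /* same invariant the kernel code checks with is_power_of_2() */
            assert(hash_size > 0 && (hash_size & (hash_size - 1)) == 0);
            while ((tmp >>= 1) != 0)
                    shift++;
            return shift;
    }

    int main(void)
    {
            printf("shift for 256 buckets: %d\n", hash_shift(256));   /* 8 */
            return 0;
    }

Factoring allocation and teardown into one helper also lets jbd2_journal_init_revoke() allocate both tables with two calls and unwind cleanly through the fail1/fail0 labels instead of duplicating the error handling.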
@@ -617,6 +617,12 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
             jh->b_next_transaction == transaction)
                 goto done;
 
+        /*
+         * this is the first time this transaction is touching this buffer,
+         * reset the modified flag
+         */
+        jh->b_modified = 0;
+
         /*
          * If there is already a copy-out version of this buffer, then we don't
          * need to make another one
@@ -690,7 +696,7 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
                         if (!frozen_buffer) {
                                 printk(KERN_EMERG
                                        "%s: OOM for frozen_buffer\n",
-                                       __FUNCTION__);
+                                       __func__);
                                 JBUFFER_TRACE(jh, "oom!");
                                 error = -ENOMEM;
                                 jbd_lock_bh_state(bh);
@@ -829,9 +835,16 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
 
         if (jh->b_transaction == NULL) {
                 jh->b_transaction = transaction;
 
+                /* first access by this transaction */
+                jh->b_modified = 0;
+
                 JBUFFER_TRACE(jh, "file as BJ_Reserved");
                 __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
         } else if (jh->b_transaction == journal->j_committing_transaction) {
+                /* first access by this transaction */
+                jh->b_modified = 0;
+
                 JBUFFER_TRACE(jh, "set next transaction");
                 jh->b_next_transaction = transaction;
         }
@@ -901,7 +914,7 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
                 committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS);
                 if (!committed_data) {
                         printk(KERN_EMERG "%s: No memory for committed data\n",
-                                __FUNCTION__);
+                                __func__);
                         err = -ENOMEM;
                         goto out;
                 }
@@ -1230,6 +1243,7 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
         struct journal_head *jh;
         int drop_reserve = 0;
         int err = 0;
+        int was_modified = 0;
 
         BUFFER_TRACE(bh, "entry");
 
@@ -1248,6 +1262,9 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
                 goto not_jbd;
         }
 
+        /* keep track of wether or not this transaction modified us */
+        was_modified = jh->b_modified;
+
         /*
          * The buffer's going from the transaction, we must drop
          * all references -bzzz
@@ -1265,7 +1282,12 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
 
                 JBUFFER_TRACE(jh, "belongs to current transaction: unfile");
 
-                drop_reserve = 1;
+                /*
+                 * we only want to drop a reference if this transaction
+                 * modified the buffer
+                 */
+                if (was_modified)
+                        drop_reserve = 1;
 
                 /*
                  * We are no longer going to journal this buffer.
@@ -1305,7 +1327,13 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
                 if (jh->b_next_transaction) {
                         J_ASSERT(jh->b_next_transaction == transaction);
                         jh->b_next_transaction = NULL;
-                        drop_reserve = 1;
+
+                        /*
+                         * only drop a reference if this transaction modified
+                         * the buffer
+                         */
+                        if (was_modified)
+                                drop_reserve = 1;
                 }
         }
 
@@ -1434,7 +1462,8 @@ int jbd2_journal_stop(handle_t *handle)
         return err;
 }
 
-/**int jbd2_journal_force_commit() - force any uncommitted transactions
+/**
+ * int jbd2_journal_force_commit() - force any uncommitted transactions
  * @journal: journal to force
  *
  * For synchronous operations: force any uncommitted transactions
@@ -2077,7 +2106,7 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh)
         jh->b_transaction = jh->b_next_transaction;
         jh->b_next_transaction = NULL;
         __jbd2_journal_file_buffer(jh, jh->b_transaction,
-                                was_dirty ? BJ_Metadata : BJ_Reserved);
+                                jh->b_modified ? BJ_Metadata : BJ_Reserved);
         J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);
 
         if (was_dirty)
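
The transaction hunks above move the b_modified flag to per-transaction bookkeeping: the flag is cleared on a transaction's first access to a buffer, and jbd2_journal_forget() only refunds a reserved journal credit when this transaction actually modified the buffer. A hedged, standalone sketch of that accounting (the struct and function names below are stand-ins, not the jbd2 structures):

    #include <stdbool.h>
    #include <stdio.h>

    struct buf    { bool modified; };        /* stands in for journal_head.b_modified */
    struct handle { int reserved_credits; };

    /* first touch by a transaction: reset the per-transaction modified flag */
    static void get_write_access(struct buf *b) { b->modified = false; }

    static void mark_dirty(struct buf *b) { b->modified = true; }

    /* forget a buffer; refund a credit only if this transaction dirtied it */
    static void forget_buffer(struct handle *h, struct buf *b)
    {
            bool was_modified = b->modified;

            if (was_modified)
                    h->reserved_credits++;   /* the drop_reserve case above */
    }

    int main(void)
    {
            struct handle h = { .reserved_credits = 0 };
            struct buf clean, dirty;

            get_write_access(&clean);
            get_write_access(&dirty);
            mark_dirty(&dirty);

            forget_buffer(&h, &clean);       /* no refund */
            forget_buffer(&h, &dirty);       /* refund one credit */
            printf("credits refunded: %d\n", h.reserved_credits);   /* 1 */
            return 0;
    }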